Columns: index (int64, 0 to 731k); package (string, 2 to 98 chars); name (string, 1 to 76 chars); docstring (string, 0 to 281k chars); code (string, 4 to 1.07M chars); signature (string, 2 to 42.8k chars)
31,597
laspy.point.format
__getitem__
null
def __getitem__(self, item):
    if isinstance(item, str):
        return self.dimension_by_name(item)
    return self.dimensions[item]
(self, item)
31,598
laspy.point.format
__init__
Parameters
----------
point_format_id: int
    point format id
def __init__(
    self,
    point_format_id: int,
):
    """
    Parameters
    ----------
    point_format_id: int
        point format id
    """
    self.id: int = point_format_id
    self.dimensions: List[dims.DimensionInfo] = []
    composed_dims = dims.COMPOSED_FIELDS[self.id]
    for dim_name in dims.ALL_POINT_FORMATS_DIMENSIONS[self.id]:
        try:
            sub_fields = composed_dims[dim_name]
        except KeyError:
            dimension = dims.DimensionInfo.from_dtype(
                dim_name, dims.DIMENSIONS_TO_TYPE[dim_name], is_standard=True
            )
            self.dimensions.append(dimension)
        else:
            for sub_field in sub_fields:
                dimension = dims.DimensionInfo.from_bitmask(
                    sub_field.name, sub_field.mask, is_standard=True
                )
                self.dimensions.append(dimension)
(self, point_format_id: int)
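A quick sketch of what this constructor builds, assuming laspy is installed (the exact dimension names come from the LAS specification tables in laspy.point.dims):

import laspy

fmt = laspy.PointFormat(3)
# Composed fields (e.g. the packed flag byte) were expanded into their
# sub-fields, so names like 'return_number' appear as ordinary dimensions.
print([dim.name for dim in fmt.dimensions][:6])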
31,599
laspy.point.format
__repr__
null
def __repr__(self):
    return "<PointFormat({}, {} bytes of extra dims)>".format(
        self.id, self.num_extra_bytes
    )
(self)
31,600
laspy.point.format
add_extra_dimension
Add an extra, user-defined dimension
def add_extra_dimension(self, param: ExtraBytesParams) -> None:
    """Add an extra, user-defined dimension"""
    dim_info = dims.DimensionInfo.from_extra_bytes_param(param)
    # todo: this should be checked in extra bytes param ctor
    if (
        dim_info.num_elements > 3
        and dim_info.kind != dims.DimensionKind.UnsignedInteger
    ):
        raise LaspyException("Extra Dimensions do not support more than 3 elements")
    self.dimensions.append(dim_info)
(self, param: laspy.point.format.ExtraBytesParams) -> NoneType
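A hedged usage sketch; the dimension name and dtype below are illustrative, but ExtraBytesParams is laspy's public helper for this:

import numpy as np
import laspy

fmt = laspy.PointFormat(3)
fmt.add_extra_dimension(laspy.ExtraBytesParams(name="confidence", type=np.float64))
# The user-defined dimension now shows up alongside the standard ones
assert "confidence" in list(fmt.dimension_names)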
31,601
laspy.point.format
dimension_by_name
Returns the dimension info for the dimension by name

ValueError is raised if the dimension does not exist in the point format

>>> info = PointFormat(2).dimension_by_name('number_of_returns')
>>> info.name == 'number_of_returns'
True
>>> info.num_bits == 3
True

>>> info = PointFormat(2).dimension_by_name('gps_time')
Traceback (most recent call last):
...
ValueError: Dimension 'gps_time' does not exist
def dimension_by_name(self, name: str) -> dims.DimensionInfo:
    """Returns the dimension info for the dimension by name

    ValueError is raised if the dimension does not exist in the point format

    >>> info = PointFormat(2).dimension_by_name('number_of_returns')
    >>> info.name == 'number_of_returns'
    True
    >>> info.num_bits == 3
    True

    >>> info = PointFormat(2).dimension_by_name('gps_time')
    Traceback (most recent call last):
    ...
    ValueError: Dimension 'gps_time' does not exist
    """
    for dim in self.dimensions:
        if dim.name == name:
            return dim
    raise ValueError(f"Dimension '{name}' does not exist")
(self, name: str) -> laspy.point.dims.DimensionInfo
31,602
laspy.point.format
dtype
Returns the numpy.dtype used to store the point records in a numpy array

.. note::

    The dtype corresponds to the dtype with sub_fields *packed* into their
    composed fields
def dtype(self):
    """Returns the numpy.dtype used to store the point records in a numpy array

    .. note::

        The dtype corresponds to the dtype with sub_fields *packed* into their
        composed fields
    """
    dtype = dims.ALL_POINT_FORMATS_DTYPE[self.id]
    descr = dtype.descr
    for extra_dim in self.extra_dimensions:
        descr.append((extra_dim.name, extra_dim.type_str()))
    return np.dtype(descr)
(self)
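A minimal sketch of the packed layout, assuming laspy is installed:

import laspy

fmt = laspy.PointFormat(0)
# Sub-fields stay packed: the dtype carries one composed flag byte rather
# than separate return_number / number_of_returns entries.
print(fmt.dtype())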
31,603
laspy.point.format
remove_extra_dimension
null
def remove_extra_dimension(self, name: str) -> None:
    dimensions = [
        dim for dim in self.dimensions if dim.name == name and not dim.is_standard
    ]
    try:
        dimension = dimensions[0]
    except IndexError:
        if name in self.standard_dimension_names:
            raise LaspyException(
                f"The dimension named '{name}' is not an extra dimension, "
                "so it cannot be removed"
            )
        else:
            raise LaspyException(
                f"No extra dimension named '{name}' exists"
            ) from None
    self.dimensions = [dim for dim in self.dimensions if dim is not dimension]
(self, name: str) -> NoneType
31,604
laspy.point.record
ScaleAwarePointRecord
A ScaleAwarePointRecord is a point record that knows the scales and offsets to use, and is thus able to get and set the scaled x, y, z coordinates

To create one, use :meth:`.ScaleAwarePointRecord.zeros` or :meth:`.ScaleAwarePointRecord.empty`
class ScaleAwarePointRecord(PackedPointRecord):
    """A ScaleAwarePointRecord is a point record that knows the scales and
    offsets to use, and is thus able to get and set the scaled x, y, z
    coordinates

    To create one, use :meth:`.ScaleAwarePointRecord.zeros` or
    :meth:`.ScaleAwarePointRecord.empty`
    """

    def __init__(self, array, point_format, scales, offsets):
        super().__init__(array, point_format)
        self.scales = np.array(scales)
        self.offsets = np.array(offsets)
        if self.scales.shape != (3,):
            raise ValueError("scales must be an array of 3 elements")
        if self.offsets.shape != (3,):
            raise ValueError("offsets must be an array of 3 elements")

    @staticmethod
    def zeros(
        point_count, *, point_format=None, scales=None, offsets=None, header=None
    ):
        """Creates a new point record with all dimensions initialized to zero

        Examples
        --------

        >>> record = ScaleAwarePointRecord.zeros(
        ...     5, point_format=PointFormat(3), scales=[1.0, 1.0, 1.0], offsets=[0.1, 0.5, 17.5])
        >>> len(record)
        5

        >>> import laspy
        >>> hdr = laspy.LasHeader()
        >>> record = ScaleAwarePointRecord.zeros(5, header=hdr)
        >>> len(record)
        5

        >>> hdr = laspy.LasHeader()
        >>> record = ScaleAwarePointRecord.zeros(5, header=hdr, scales=[1.0, 1.0, 1.0])
        Traceback (most recent call last):
        ValueError: header argument is mutually exclusive with point_format, scales and offsets

        >>> record = ScaleAwarePointRecord.zeros(5, point_format=PointFormat(3))
        Traceback (most recent call last):
        ValueError: You have to provide all 3: point_format, scales and offsets
        """
        first_set = (point_format, scales, offsets)
        if header is not None:
            if any(arg is not None for arg in first_set):
                raise ValueError(
                    "header argument is mutually exclusive with point_format, scales and offsets"
                )
            point_format = header.point_format
            scales = header.scales
            offsets = header.offsets
        else:
            if any(arg is None for arg in first_set):
                raise ValueError(
                    "You have to provide all 3: point_format, scales and offsets"
                )

        data = np.zeros(point_count, point_format.dtype())
        return ScaleAwarePointRecord(data, point_format, scales, offsets)

    @staticmethod
    def empty(point_format=None, scales=None, offsets=None, header=None):
        """Creates an empty point record."""
        return ScaleAwarePointRecord.zeros(
            point_count=0,
            point_format=point_format,
            scales=scales,
            offsets=offsets,
            header=header,
        )

    def change_scaling(self, scales=None, offsets=None) -> None:
        """See :meth:`.LasData.change_scaling`"""
        if scales is None:
            scales = self.scales
        if offsets is None:
            offsets = self.offsets

        apply_new_scaling(self, scales, offsets)

        self.scales = scales
        self.offsets = offsets

    def __getitem__(self, item):
        if isinstance(item, (int, slice, np.ndarray, list, tuple)):
            if isinstance(item, (list, tuple)):
                # x, y, z do not really exist in the array,
                # they are computed from X, Y, Z
                item = [
                    name if name not in ("x", "y", "z") else name.upper()
                    for name in item
                ]
            return ScaleAwarePointRecord(
                self.array[item], self.point_format, self.scales, self.offsets
            )

        if item == "x":
            return ScaledArrayView(self.array["X"], self.scales[0], self.offsets[0])
        elif item == "y":
            return ScaledArrayView(self.array["Y"], self.scales[1], self.offsets[1])
        elif item == "z":
            return ScaledArrayView(self.array["Z"], self.scales[2], self.offsets[2])
        else:
            return super().__getitem__(item)

    def __setattr__(self, key, value):
        if key in ("x", "y", "z"):
            self[key][:] = value
        else:
            return super().__setattr__(key, value)
(array, point_format, scales, offsets)
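A small usage sketch tying the scaled accessors together (import path taken from the package column; the values are illustrative):

import laspy
from laspy.point.record import ScaleAwarePointRecord

record = ScaleAwarePointRecord.zeros(
    2,
    point_format=laspy.PointFormat(3),
    scales=[0.01, 0.01, 0.01],
    offsets=[0.0, 0.0, 0.0],
)
record.x = [1.5, 2.5]  # __setattr__ routes the write through a ScaledArrayView
print(record["X"][:])  # raw integers; 150 and 250 given a 0.01 scale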
31,607
laspy.point.record
__getitem__
null
def __getitem__(self, item):
    if isinstance(item, (int, slice, np.ndarray, list, tuple)):
        if isinstance(item, (list, tuple)):
            # x, y, z do not really exist in the array,
            # they are computed from X, Y, Z
            item = [
                name if name not in ("x", "y", "z") else name.upper()
                for name in item
            ]
        return ScaleAwarePointRecord(
            self.array[item], self.point_format, self.scales, self.offsets
        )

    if item == "x":
        return ScaledArrayView(self.array["X"], self.scales[0], self.offsets[0])
    elif item == "y":
        return ScaledArrayView(self.array["Y"], self.scales[1], self.offsets[1])
    elif item == "z":
        return ScaledArrayView(self.array["Z"], self.scales[2], self.offsets[2])
    else:
        return super().__getitem__(item)
(self, item)
31,608
laspy.point.record
__init__
null
def __init__(self, array, point_format, scales, offsets):
    super().__init__(array, point_format)
    self.scales = np.array(scales)
    self.offsets = np.array(offsets)
    if self.scales.shape != (3,):
        raise ValueError("scales must be an array of 3 elements")
    if self.offsets.shape != (3,):
        raise ValueError("offsets must be an array of 3 elements")
(self, array, point_format, scales, offsets)
31,611
laspy.point.record
__setattr__
null
def __setattr__(self, key, value):
    if key in ("x", "y", "z"):
        self[key][:] = value
    else:
        return super().__setattr__(key, value)
(self, key, value)
31,614
laspy.point.record
change_scaling
See :meth:`.LasData.change_scaling`
def change_scaling(self, scales=None, offsets=None) -> None:
    """See :meth:`.LasData.change_scaling`"""
    if scales is None:
        scales = self.scales
    if offsets is None:
        offsets = self.offsets

    apply_new_scaling(self, scales, offsets)

    self.scales = scales
    self.offsets = offsets
(self, scales=None, offsets=None) -> NoneType
31,617
laspy.point.record
empty
Creates an empty point record.
@staticmethod
def empty(point_format=None, scales=None, offsets=None, header=None):
    """Creates an empty point record."""
    return ScaleAwarePointRecord.zeros(
        point_count=0,
        point_format=point_format,
        scales=scales,
        offsets=offsets,
        header=header,
    )
(point_format=None, scales=None, offsets=None, header=None)
31,621
laspy.point.record
zeros
Creates a new point record with all dimensions initialized to zero

Examples
--------

>>> record = ScaleAwarePointRecord.zeros(
...     5, point_format=PointFormat(3), scales=[1.0, 1.0, 1.0], offsets=[0.1, 0.5, 17.5])
>>> len(record)
5

>>> import laspy
>>> hdr = laspy.LasHeader()
>>> record = ScaleAwarePointRecord.zeros(5, header=hdr)
>>> len(record)
5

>>> hdr = laspy.LasHeader()
>>> record = ScaleAwarePointRecord.zeros(5, header=hdr, scales=[1.0, 1.0, 1.0])
Traceback (most recent call last):
ValueError: header argument is mutually exclusive with point_format, scales and offsets

>>> record = ScaleAwarePointRecord.zeros(5, point_format=PointFormat(3))
Traceback (most recent call last):
ValueError: You have to provide all 3: point_format, scales and offsets
@staticmethod
def zeros(
    point_count, *, point_format=None, scales=None, offsets=None, header=None
):
    """Creates a new point record with all dimensions initialized to zero

    Examples
    --------

    >>> record = ScaleAwarePointRecord.zeros(
    ...     5, point_format=PointFormat(3), scales=[1.0, 1.0, 1.0], offsets=[0.1, 0.5, 17.5])
    >>> len(record)
    5

    >>> import laspy
    >>> hdr = laspy.LasHeader()
    >>> record = ScaleAwarePointRecord.zeros(5, header=hdr)
    >>> len(record)
    5

    >>> hdr = laspy.LasHeader()
    >>> record = ScaleAwarePointRecord.zeros(5, header=hdr, scales=[1.0, 1.0, 1.0])
    Traceback (most recent call last):
    ValueError: header argument is mutually exclusive with point_format, scales and offsets

    >>> record = ScaleAwarePointRecord.zeros(5, point_format=PointFormat(3))
    Traceback (most recent call last):
    ValueError: You have to provide all 3: point_format, scales and offsets
    """
    first_set = (point_format, scales, offsets)
    if header is not None:
        if any(arg is not None for arg in first_set):
            raise ValueError(
                "header argument is mutually exclusive with point_format, scales and offsets"
            )
        point_format = header.point_format
        scales = header.scales
        offsets = header.offsets
    else:
        if any(arg is None for arg in first_set):
            raise ValueError(
                "You have to provide all 3: point_format, scales and offsets"
            )

    data = np.zeros(point_count, point_format.dtype())
    return ScaleAwarePointRecord(data, point_format, scales, offsets)
(point_count, *, point_format=None, scales=None, offsets=None, header=None)
31,622
laspy.vlrs.vlr
VLR
>>> import laspy
>>> my_vlr = laspy.VLR(
...     user_id="MyUserId",
...     record_id=0,
...     description="An Example VLR",
...     record_data=int(42).to_bytes(8, byteorder='little'),
... )
>>> my_vlr.user_id
'MyUserId'
>>> int.from_bytes(my_vlr.record_data, byteorder='little')
42
class VLR(BaseVLR):
    """
    >>> import laspy
    >>> my_vlr = laspy.VLR(
    ...     user_id="MyUserId",
    ...     record_id=0,
    ...     description="An Example VLR",
    ...     record_data=int(42).to_bytes(8, byteorder='little'),
    ... )
    >>> my_vlr.user_id
    'MyUserId'
    >>> int.from_bytes(my_vlr.record_data, byteorder='little')
    42
    """

    def __init__(self, user_id, record_id, description="", record_data=b""):
        super().__init__(user_id, record_id, description=description)
        #: The record_data as bytes, length cannot exceed 65_535
        self.record_data: bytes = record_data

    def record_data_bytes(self) -> bytes:
        return self.record_data

    def __eq__(self, other):
        return (
            self.record_id == other.record_id
            and self.user_id == other.user_id
            and self.description == other.description
            and self.record_data == other.record_data
        )

    def __repr__(self):
        return "<{}(user_id: '{}', record_id: '{}', data len: {})>".format(
            self.__class__.__name__, self.user_id, self.record_id, len(self.record_data)
        )
(user_id, record_id, description='', record_data=b'')
31,623
laspy.vlrs.vlr
__eq__
null
def __eq__(self, other):
    return (
        self.record_id == other.record_id
        and self.user_id == other.user_id
        and self.description == other.description
        and self.record_data == other.record_data
    )
(self, other)
31,624
laspy.vlrs.vlr
__init__
null
def __init__(self, user_id, record_id, description="", record_data=b""):
    super().__init__(user_id, record_id, description=description)
    #: The record_data as bytes, length cannot exceed 65_535
    self.record_data: bytes = record_data
(self, user_id, record_id, description='', record_data=b'')
31,625
laspy.vlrs.vlr
__repr__
null
def __repr__(self):
    return "<{}(user_id: '{}', record_id: '{}', data len: {})>".format(
        self.__class__.__name__, self.user_id, self.record_id, len(self.record_data)
    )
(self)
31,626
laspy.vlrs.vlr
record_data_bytes
null
def record_data_bytes(self) -> bytes:
    return self.record_data
(self) -> bytes
31,632
laspy.lib
convert
Converts a Las from one point format to another

Automatically upgrades the file version if the source file version is not compatible with the new point_format_id

convert to point format 0

>>> las = read_las('tests/data/simple.las')
>>> las.header.version
Version(major=1, minor=2)
>>> las = convert(las, point_format_id=0)
>>> las.header.point_format.id
0
>>> str(las.header.version)
'1.2'

convert to point format 6, which needs version >= 1.4
then convert back to point format 0, version is not downgraded

>>> las = read_las('tests/data/simple.las')
>>> str(las.header.version)
'1.2'
>>> las = convert(las, point_format_id=6)
>>> las.header.point_format.id
6
>>> str(las.header.version)
'1.4'
>>> las = convert(las, point_format_id=0)
>>> str(las.header.version)
'1.4'

an exception is raised if the requested point format is not compatible with the file version

>>> las = read_las('tests/data/simple.las')
>>> convert(las, point_format_id=6, file_version='1.2')
Traceback (most recent call last):
 ...
laspy.errors.LaspyException: Point format 6 is not compatible with file version 1.2

Parameters
----------
source_las : laspy.lasdatas.base.LasBase
    The source data to be converted
point_format_id : int, optional
    The new point format id (the default is None, which won't change the source format id)
file_version : str, optional
    The new file version. None by default, which means that the file_version
    may be upgraded for compatibility with the new point_format. The file
    version will not be downgraded.

Returns
-------
laspy.lasdatas.base.LasBase
def convert(source_las, *, point_format_id=None, file_version=None):
    """Converts a Las from one point format to another

    Automatically upgrades the file version if the source file version
    is not compatible with the new point_format_id

    convert to point format 0

    >>> las = read_las('tests/data/simple.las')
    >>> las.header.version
    Version(major=1, minor=2)
    >>> las = convert(las, point_format_id=0)
    >>> las.header.point_format.id
    0
    >>> str(las.header.version)
    '1.2'

    convert to point format 6, which needs version >= 1.4
    then convert back to point format 0, version is not downgraded

    >>> las = read_las('tests/data/simple.las')
    >>> str(las.header.version)
    '1.2'
    >>> las = convert(las, point_format_id=6)
    >>> las.header.point_format.id
    6
    >>> str(las.header.version)
    '1.4'
    >>> las = convert(las, point_format_id=0)
    >>> str(las.header.version)
    '1.4'

    an exception is raised if the requested point format is not compatible
    with the file version

    >>> las = read_las('tests/data/simple.las')
    >>> convert(las, point_format_id=6, file_version='1.2')
    Traceback (most recent call last):
     ...
    laspy.errors.LaspyException: Point format 6 is not compatible with file version 1.2

    Parameters
    ----------
    source_las : laspy.lasdatas.base.LasBase
        The source data to be converted
    point_format_id : int, optional
        The new point format id (the default is None, which won't change the
        source format id)
    file_version : str, optional
        The new file version. None by default, which means that the
        file_version may be upgraded for compatibility with the new
        point_format. The file version will not be downgraded.

    Returns
    -------
    laspy.lasdatas.base.LasBase
    """
    if point_format_id is None:
        point_format_id = source_las.point_format.id

    if file_version is None:
        file_version = max(
            str(source_las.header.version),
            dims.preferred_file_version_for_point_format(point_format_id),
        )
    else:
        file_version = str(file_version)
        dims.raise_if_version_not_compatible_with_fmt(point_format_id, file_version)

    version = Version.from_str(file_version)
    point_format = PointFormat(point_format_id)
    point_format.dimensions.extend(source_las.point_format.extra_dimensions)

    header = copy.deepcopy(source_las.header)
    header.set_version_and_point_format(version, point_format)

    if source_las.evlrs is not None:
        evlrs = VLRList(source_las.evlrs.copy())
    else:
        evlrs = None

    points = record.PackedPointRecord.from_point_record(
        source_las.points, header.point_format
    )

    las = LasData(header=header, points=points)
    if file_version < "1.4" and evlrs is not None and evlrs:
        logger.warning(
            "The source contained {} EVLRs,"
            " they will be lost as version {} does not support them".format(
                len(evlrs), file_version
            )
        )
    else:
        las.evlrs = evlrs
    return las
(source_las, *, point_format_id=None, file_version=None)
31,634
laspy.lib
create_las
Function to create a new empty las data object

.. note::

    If you provide both point_format and file_version an exception
    will be raised if they are not compatible

>>> las = create_las(point_format=6, file_version="1.2")
Traceback (most recent call last):
 ...
laspy.errors.LaspyException: Point format 6 is not compatible with file version 1.2

If you provide only the point_format, the file_version will be automatically selected for you.

>>> las = create_las(point_format=0)
>>> las.header.version == '1.2'
True

>>> las = create_las(point_format=PointFormat(6))
>>> las.header.version == '1.4'
True

Parameters
----------
point_format:
    The point format you want the created file to have
file_version:
    The las version you want the created las to have

Returns
-------
laspy.lasdatas.base.LasBase
    A new las data object
def create_las(
    *,
    point_format: Optional[Union[int, PointFormat]] = None,
    file_version: Optional[Union[str, Version]] = None,
):
    """Function to create a new empty las data object

    .. note::

        If you provide both point_format and file_version an exception
        will be raised if they are not compatible

    >>> las = create_las(point_format=6, file_version="1.2")
    Traceback (most recent call last):
     ...
    laspy.errors.LaspyException: Point format 6 is not compatible with file version 1.2

    If you provide only the point_format, the file_version will be
    automatically selected for you.

    >>> las = create_las(point_format=0)
    >>> las.header.version == '1.2'
    True

    >>> las = create_las(point_format=PointFormat(6))
    >>> las.header.version == '1.4'
    True

    Parameters
    ----------
    point_format:
        The point format you want the created file to have
    file_version:
        The las version you want the created las to have

    Returns
    -------
    laspy.lasdatas.base.LasBase
        A new las data object
    """
    header = LasHeader(point_format=point_format, version=file_version)
    return LasData(header=header)
(*, point_format: Union[laspy.point.format.PointFormat, int, NoneType] = None, file_version: Union[laspy.header.Version, str, NoneType] = None)
31,646
laspy.point.format
lost_dimensions
Returns a list of the names of the dimensions that will be lost when converting from point_fmt_in to point_fmt_out
def lost_dimensions(point_fmt_in, point_fmt_out):
    """Returns a list of the names of the dimensions that will be lost
    when converting from point_fmt_in to point_fmt_out
    """
    dimensions_in = set(PointFormat(point_fmt_in).dimension_names)
    dimensions_out = set(PointFormat(point_fmt_out).dimension_names)

    completely_lost = []
    for dim_name in dimensions_in:
        if dim_name not in dimensions_out:
            completely_lost.append(dim_name)
    return completely_lost
(point_fmt_in, point_fmt_out)
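For example, point format 3 carries GPS time and RGB that point format 0 lacks, so those names come back when converting downward:

from laspy.point.format import lost_dimensions

# Expected to include 'gps_time', 'red', 'green' and 'blue'
print(sorted(lost_dimensions(3, 0)))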
31,647
laspy.lib
mmap_las
MMap a file, much like laspy did
def mmap_las(filename):
    """MMap a file, much like laspy did"""
    return LasMMAP(filename)
(filename)
31,648
laspy.lib
open_las
laspy.open opens a LAS/LAZ file in one of the 3 supported modes:

    - "r" => Reading => a :class:`laspy.LasReader` will be returned
    - "w" => Writing => a :class:`laspy.LasWriter` will be returned
    - "a" => Appending => a :class:`laspy.LasAppender` will be returned

When opening a file in 'w' mode, a header (:class:`laspy.LasHeader`) is required

>>> with open_las('tests/data/simple.las') as f:
...     print(f.header.point_format.id)
3

>>> f = open('tests/data/simple.las', mode='rb')
>>> with open_las(f, closefd=False) as flas:
...     print(flas.header)
<LasHeader(1.2, <PointFormat(3, 0 bytes of extra dims)>)>
>>> f.closed
False
>>> f.close()
>>> f.closed
True

>>> f = open('tests/data/simple.las', mode='rb')
>>> with open_las(f) as flas:
...     las = flas.read()
>>> f.closed
True

Parameters
----------
source: str or bytes or io.BytesIO
    if source is a str it must be a filename

mode: Optional, the mode to open the file:
    - "r" for reading (default)
    - "w" for writing
    - "a" for appending

laz_backend: Optional, laspy.LazBackend, the LAZ backend to use to handle
    decompression/compression. By default available backends are detected;
    see LazBackend for the preference order when multiple backends are available.

header:
    The header to use when opening in write mode.

do_compress: optional, bool, only meaningful in writing mode:
    - None (default) guesses if compression is needed using the file extension
      or if a laz_backend was explicitly provided
    - True compresses the file
    - False does not compress the file

closefd: optional, bool, True by default
    Whether the stream/file object shall be closed; this only works when using
    open_las in a with statement. An exception is raised if closefd is
    specified and the source is a filename.

encoding_errors: str, default 'strict'
    Only used in writing and appending mode. How encoding errors should be
    treated. Possible values and their explanation can be seen here:
    https://docs.python.org/3/library/codecs.html#error-handlers.

read_evlrs: bool, default True
    Only applies to 'r' mode. If True the evlrs will be read during the
    __init__ / file opening along with the LasHeader. This is fine for most
    cases, but can be problematic when opening a file from a data stream like
    AWS S3, as EVLRs are located at the end of the file and reading them
    requires pulling the whole file. Does nothing if the input file does not
    support EVLRs.

decompression_selection: DecompressionSelection, default All
    Only applies to 'r' mode and to files which support selective
    decompression (version >= 1.4 and point format id >= 6), ignored
    otherwise. Allows selecting which fields should be decompressed, saving
    time by not decompressing unused fields. By default all fields are
    decompressed.

.. versionadded:: 2.4
    The ``read_evlrs`` and ``decompression_selection`` parameters.
def open_las(
    source,
    mode="r",
    closefd=True,
    laz_backend=None,
    header=None,
    do_compress=None,
    encoding_errors: str = "strict",
    read_evlrs: bool = True,
    decompression_selection: DecompressionSelection = DecompressionSelection.all(),
) -> Union[LasReader, LasWriter, LasAppender]:
    """laspy.open opens a LAS/LAZ file in one of the 3 supported modes:

        - "r" => Reading => a :class:`laspy.LasReader` will be returned
        - "w" => Writing => a :class:`laspy.LasWriter` will be returned
        - "a" => Appending => a :class:`laspy.LasAppender` will be returned

    When opening a file in 'w' mode, a header (:class:`laspy.LasHeader`)
    is required

    >>> with open_las('tests/data/simple.las') as f:
    ...     print(f.header.point_format.id)
    3

    >>> f = open('tests/data/simple.las', mode='rb')
    >>> with open_las(f, closefd=False) as flas:
    ...     print(flas.header)
    <LasHeader(1.2, <PointFormat(3, 0 bytes of extra dims)>)>
    >>> f.closed
    False
    >>> f.close()
    >>> f.closed
    True

    >>> f = open('tests/data/simple.las', mode='rb')
    >>> with open_las(f) as flas:
    ...     las = flas.read()
    >>> f.closed
    True

    Parameters
    ----------
    source: str or bytes or io.BytesIO
        if source is a str it must be a filename

    mode: Optional, the mode to open the file:
        - "r" for reading (default)
        - "w" for writing
        - "a" for appending

    laz_backend: Optional, laspy.LazBackend, the LAZ backend to use to handle
        decompression/compression. By default available backends are detected;
        see LazBackend for the preference order when multiple backends are
        available.

    header:
        The header to use when opening in write mode.

    do_compress: optional, bool, only meaningful in writing mode:
        - None (default) guesses if compression is needed using the file
          extension or if a laz_backend was explicitly provided
        - True compresses the file
        - False does not compress the file

    closefd: optional, bool, True by default
        Whether the stream/file object shall be closed; this only works when
        using open_las in a with statement. An exception is raised if closefd
        is specified and the source is a filename.

    encoding_errors: str, default 'strict'
        Only used in writing and appending mode. How encoding errors should be
        treated. Possible values and their explanation can be seen here:
        https://docs.python.org/3/library/codecs.html#error-handlers.

    read_evlrs: bool, default True
        Only applies to 'r' mode. If True the evlrs will be read during the
        __init__ / file opening along with the LasHeader. This is fine for
        most cases, but can be problematic when opening a file from a data
        stream like AWS S3, as EVLRs are located at the end of the file and
        reading them requires pulling the whole file. Does nothing if the
        input file does not support EVLRs.

    decompression_selection: DecompressionSelection, default All
        Only applies to 'r' mode and to files which support selective
        decompression (version >= 1.4 and point format id >= 6), ignored
        otherwise. Allows selecting which fields should be decompressed,
        saving time by not decompressing unused fields. By default all fields
        are decompressed.

    .. versionadded:: 2.4
        The ``read_evlrs`` and ``decompression_selection`` parameters.
    """
    if mode == "r":
        if header is not None:
            raise LaspyException(
                "header argument is not used when opening in read mode, "
                "did you mean to open in write mode?"
            )
        if do_compress is not None:
            raise LaspyException(
                "do_compress argument is not used when opening in read mode, "
                "did you mean to open in write mode?"
            )
        if isinstance(source, (str, Path)):
            stream = open(source, mode="rb", closefd=closefd)
        elif isinstance(source, bytes):
            stream = io.BytesIO(source)
        else:
            stream = source
        try:
            return LasReader(
                stream,
                closefd=closefd,
                laz_backend=laz_backend,
                read_evlrs=read_evlrs,
                decompression_selection=decompression_selection,
            )
        except:
            if closefd:
                stream.close()
            raise
    elif mode == "w":
        if header is None:
            raise ValueError("A header is needed when opening a file for writing")
        if isinstance(source, (str, Path)):
            if do_compress is None:
                do_compress = os.path.splitext(source)[1].lower() == ".laz"
            stream = open(source, mode="wb+", closefd=closefd)
        elif isinstance(source, bytes):
            stream = io.BytesIO(source)
        else:
            assert source.seekable()
            stream = source
        try:
            return LasWriter(
                stream,
                header=header,
                do_compress=do_compress,
                laz_backend=laz_backend,
                closefd=closefd,
                encoding_errors=encoding_errors,
            )
        except:
            if closefd:
                stream.close()
            raise
    elif mode == "a":
        if isinstance(source, (str, Path)):
            stream = open(source, mode="rb+", closefd=closefd)
        elif isinstance(source, bytes):
            stream = io.BytesIO(source)
        else:
            stream = source
        try:
            return LasAppender(
                stream,
                closefd=closefd,
                laz_backend=laz_backend,
                encoding_errors=encoding_errors,
            )
        except:
            if closefd:
                stream.close()
            raise
    else:
        raise ValueError(f"Unknown mode '{mode}'")
(source, mode='r', closefd=True, laz_backend=None, header=None, do_compress=None, encoding_errors: str = 'strict', read_evlrs: bool = True, decompression_selection: laspy._compression.selection.DecompressionSelection = <DecompressionSelection.ALL_EXTRA_BYTES|WAVEPACKET|NIR|RGB|GPS_TIME|POINT_SOURCE_ID|USER_DATA|SCAN_ANGLE|INTENSITY|FLAGS|CLASSIFICATION|Z|XY_RETURNS_CHANNEL: 8191>) -> Union[laspy.lasreader.LasReader, laspy.laswriter.LasWriter, laspy.lasappender.LasAppender]
31,650
laspy.lib
read_las
Entry point for reading las data in laspy

Reads the whole file into memory.

>>> las = read_las("tests/data/simple.las")
>>> las.classification
<SubFieldView([1 1 1 ... 1 1 1])>

Parameters
----------
source : str or io.BytesIO
    The source to read data from
laz_backend: Optional, the backend to use when the file is a LAZ file.
    By default laspy will find the backend to use by itself.
    Use if you want a specific backend to be used
closefd: bool
    if True and the source is a stream, the function will close it
    after it is done reading
decompression_selection: DecompressionSelection,
    see :func:`laspy.open`

Returns
-------
laspy.LasData
    The object you can interact with to get access to the LAS points & VLRs

.. versionadded:: 2.4
    The ``decompression_selection`` parameter.
def read_las(
    source,
    closefd=True,
    laz_backend=LazBackend.detect_available(),
    decompression_selection: DecompressionSelection = DecompressionSelection.all(),
):
    """Entry point for reading las data in laspy

    Reads the whole file into memory.

    >>> las = read_las("tests/data/simple.las")
    >>> las.classification
    <SubFieldView([1 1 1 ... 1 1 1])>

    Parameters
    ----------
    source : str or io.BytesIO
        The source to read data from
    laz_backend: Optional, the backend to use when the file is a LAZ file.
        By default laspy will find the backend to use by itself.
        Use if you want a specific backend to be used
    closefd: bool
        if True and the source is a stream, the function will close it
        after it is done reading
    decompression_selection: DecompressionSelection,
        see :func:`laspy.open`

    Returns
    -------
    laspy.LasData
        The object you can interact with to get access to the LAS points & VLRs

    .. versionadded:: 2.4
        The ``decompression_selection`` parameter.
    """
    with open_las(
        source,
        closefd=closefd,
        laz_backend=laz_backend,
        decompression_selection=decompression_selection,
    ) as reader:
        return reader.read()
(source, closefd=True, laz_backend=(), decompression_selection: laspy._compression.selection.DecompressionSelection = <DecompressionSelection.ALL_EXTRA_BYTES|WAVEPACKET|NIR|RGB|GPS_TIME|POINT_SOURCE_ID|USER_DATA|SCAN_ANGLE|INTENSITY|FLAGS|CLASSIFICATION|Z|XY_RETURNS_CHANNEL: 8191>)
31,651
laspy.point.dims
supported_point_formats
Returns a set of all the point formats supported in laspy
def supported_point_formats() -> Set[int]:
    """Returns a set of all the point formats supported in laspy"""
    return set(POINT_FORMAT_DIMENSIONS.keys())
() -> Set[int]
31,652
laspy.point.dims
supported_versions
Returns the set of supported file versions
def supported_versions() -> Set[str]:
    """Returns the set of supported file versions"""
    return set(VERSION_TO_POINT_FMT.keys())
() -> Set[str]
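Both helpers return plain sets, so capability checks are simple membership tests:

from laspy.point.dims import supported_point_formats, supported_versions

assert 6 in supported_point_formats()  # a LAS 1.4 point format
assert "1.4" in supported_versions()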
31,656
jupyterlab_optuna
_jupyter_labextension_paths
null
def _jupyter_labextension_paths():
    return [{
        "src": "labextension",
        "dest": "jupyterlab-optuna"
    }]
()
31,657
jupyterlab_optuna
_jupyter_server_extension_points
null
def _jupyter_server_extension_points():
    return [{
        "module": "jupyterlab_optuna"
    }]
()
31,658
jupyterlab_optuna
_load_jupyter_server_extension
Registers the API handler to receive HTTP requests from the frontend extension.

Parameters
----------
server_app: jupyterlab.labapp.LabApp
    JupyterLab application instance
def _load_jupyter_server_extension(server_app):
    """Registers the API handler to receive HTTP requests from the frontend extension.

    Parameters
    ----------
    server_app: jupyterlab.labapp.LabApp
        JupyterLab application instance
    """
    setup_handlers(server_app.web_app)
    name = "jupyterlab_optuna"
    server_app.log.info(f"Registered {name} server extension")
(server_app)
31,661
jupyterlab_optuna.handlers
setup_handlers
null
def setup_handlers(web_app):
    host_pattern = ".*$"
    base_url = web_app.settings["base_url"]

    # Prepend the base_url so that it works in a JupyterHub setting
    initialize_route_pattern = url_path_join(
        base_url, API_NAMESPACE, "api/is_initialized"
    )
    handlers = [(initialize_route_pattern, InitializedStateHandler)]
    web_app.add_handlers(host_pattern, handlers)

    register_route_pattern = url_path_join(
        base_url, API_NAMESPACE, "api/register_dashboard_app"
    )
    handlers = [(register_route_pattern, RouteHandler)]
    web_app.add_handlers(host_pattern, handlers)

    route_pattern = url_path_join(base_url, API_NAMESPACE, r"(.*)")
    handlers = [
        (route_pattern, FallbackHandler, dict(fallback=WSGIContainer(dashboard_app))),
    ]
    web_app.add_handlers(host_pattern, handlers)
(web_app)
31,662
pybuilder
bootstrap
null
def bootstrap():
    import sys
    import inspect

    from pybuilder.errors import BuildFailedException

    try:
        current_frame = inspect.currentframe()
        previous_frame = current_frame.f_back
        name_of_previous_frame = previous_frame.f_globals['__name__']
        if name_of_previous_frame == '__main__':
            import pybuilder.cli
            sys.exit(pybuilder.cli.main(*sys.argv[1:]))
    except BuildFailedException:
        sys.exit(1)
()
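The frame inspection above is what lets a PyBuilder build.py be executed directly: bootstrap() launches the CLI only when its caller's __name__ is '__main__', and is a no-op when the pyb runner imports the file. A hedged sketch of that pattern (plugin name illustrative):

#!/usr/bin/env python
# build.py
from pybuilder import bootstrap

bootstrap()  # runs pybuilder.cli.main(...) only when executed directly

from pybuilder.core import use_plugin

use_plugin("python.core")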
31,663
adjust_precision_for_schema.schema_adjuster
adjust_decimal_precision_for_schema
Adjust the decimal context's precision based on the precision specified in the JSON schema For example, the default precision is 28 characters (significant digits + decimal places), but MongoDB handles 34 decimal places. This will increase the context's precision to 34 to handle the schema
def adjust_decimal_precision_for_schema(schema, context=None):
    """Adjust the decimal context's precision based on the precision specified in the JSON schema

    For example, the default precision is 28 characters (significant digits +
    decimal places), but MongoDB handles 34 decimal places. This will increase
    the context's precision to 34 to handle the schema"""
    if not context:
        context = decimal.getcontext()

    if isinstance(schema, list):
        for v in schema:
            adjust_decimal_precision_for_schema(v)
    elif isinstance(schema, dict):
        if numeric_schema_with_precision(schema):
            scale, digits = get_schema_precision(
                minimum=schema.get("minimum"),
                maximum=schema.get("maximum"),
                multiple_of=schema.get("multipleOf"),
            )
            precision = 2 + digits + scale
            if context.prec < precision:
                logger.debug("Setting decimal precision to {}".format(precision))
                context.prec = precision
        else:
            for v in schema.values():
                adjust_decimal_precision_for_schema(v)
(schema, context=None)
31,664
adjust_precision_for_schema.schema_adjuster
calc_digits
Calculates the total number of digits required to store a number
def calc_digits(num):
    """Calculates the total number of digits required to store a number"""
    v = abs(Decimal(num or 1)).log10()
    if v < 0:
        precision = round(math.floor(v))
    else:
        precision = round(math.ceil(v))
    return abs(precision)
(num)
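Tracing both branches with concrete values (the function relies on the module-level Decimal and math imports):

print(calc_digits(999))    # 3: log10(999) ~ 2.9996 >= 0, so ceil gives 3
print(calc_digits(0.001))  # 3: log10(0.001) ~ -3 < 0, so floor gives -3, abs 3
print(calc_digits(None))   # 0: 'num or 1' maps None to 1, and log10(1) == 0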
31,676
wradlib.util
show_versions
null
def show_versions(file=None):
    import sys

    import xarray as xr

    if file is None:
        file = sys.stdout

    xr.show_versions(file)
    print("", file=file)
    print(f"wradlib: {version.version}")
(file=None)
31,686
cookies
Cookie
Provide a simple interface for creating, modifying, and rendering individual HTTP cookies. Cookie attributes are represented as normal Python object attributes. Parsing, rendering and validation are reconfigurable per-attribute. The default behavior is intended to comply with RFC 6265, URL-encoding illegal characters where necessary. For example: the default behavior for the Expires attribute is to parse strings as datetimes using parse_date, validate that any set value is a datetime, and render the attribute per the preferred date format in RFC 1123.
class Cookie(object):
    """Provide a simple interface for creating, modifying, and rendering
    individual HTTP cookies.

    Cookie attributes are represented as normal Python object attributes.
    Parsing, rendering and validation are reconfigurable per-attribute. The
    default behavior is intended to comply with RFC 6265, URL-encoding illegal
    characters where necessary. For example: the default behavior for the
    Expires attribute is to parse strings as datetimes using parse_date,
    validate that any set value is a datetime, and render the attribute per
    the preferred date format in RFC 1123.
    """
    def __init__(self, name, value, **kwargs):
        # If we don't have or can't set a name value, we don't want to return
        # junk, so we must break control flow. And we don't want to use
        # InvalidCookieAttributeError, because users may want to catch that to
        # suppress all complaining about funky attributes.
        try:
            self.name = name
        except InvalidCookieAttributeError:
            raise InvalidCookieError(message="invalid name for new Cookie",
                                     data=name)
        value = value or ''
        try:
            self.value = value
        except InvalidCookieAttributeError:
            raise InvalidCookieError(message="invalid value for new Cookie",
                                     data=value)
        if kwargs:
            self._set_attributes(kwargs, ignore_bad_attributes=False)

    def _set_attributes(self, attrs, ignore_bad_attributes=False):
        for attr_name, attr_value in attrs.items():
            if attr_name not in self.attribute_names:
                if not ignore_bad_attributes:
                    raise InvalidCookieAttributeError(
                        attr_name, attr_value,
                        "unknown cookie attribute '%s'" % attr_name)
                _report_unknown_attribute(attr_name)

            try:
                setattr(self, attr_name, attr_value)
            except InvalidCookieAttributeError as error:
                if not ignore_bad_attributes:
                    raise
                _report_invalid_attribute(attr_name, attr_value, error.reason)
                continue

    @classmethod
    def from_dict(cls, cookie_dict, ignore_bad_attributes=True):
        """Construct an instance from a dict of strings to parse.

        The main difference between this and Cookie(name, value, **kwargs) is
        that the values in the argument to this method are parsed.

        If ignore_bad_attributes=True (default), values which did not parse
        are set to '' in order to avoid passing bad data.
        """
        name = cookie_dict.get('name', None)
        if not name:
            raise InvalidCookieError("Cookie must have name")
        raw_value = cookie_dict.get('value', '')
        # Absence or failure of parser here is fatal; errors in present name
        # and value should be found by Cookie.__init__.
        value = cls.attribute_parsers['value'](raw_value)
        cookie = cls(name, value)

        # Parse values from serialized formats into objects
        parsed = {}
        for key, value in cookie_dict.items():
            # Don't want to pass name/value to _set_attributes
            if key in ('name', 'value'):
                continue
            parser = cls.attribute_parsers.get(key)
            if not parser:
                # Don't let totally unknown attributes pass silently
                if not ignore_bad_attributes:
                    raise InvalidCookieAttributeError(
                        key, value, "unknown cookie attribute '%s'" % key)
                _report_unknown_attribute(key)
                continue
            try:
                parsed_value = parser(value)
            except Exception as e:
                reason = "did not parse with %r: %r" % (parser, e)
                if not ignore_bad_attributes:
                    raise InvalidCookieAttributeError(key, value, reason)
                _report_invalid_attribute(key, value, reason)
                parsed_value = ''
            parsed[key] = parsed_value
        # Set the parsed objects (does object validation automatically)
        cookie._set_attributes(parsed, ignore_bad_attributes)
        return cookie

    @classmethod
    def from_string(cls, line, ignore_bad_cookies=False,
                    ignore_bad_attributes=True):
        "Construct a Cookie object from a line of Set-Cookie header data."
        cookie_dict = parse_one_response(
            line, ignore_bad_cookies=ignore_bad_cookies,
            ignore_bad_attributes=ignore_bad_attributes)
        if not cookie_dict:
            return None
        return cls.from_dict(
            cookie_dict, ignore_bad_attributes=ignore_bad_attributes)

    def to_dict(self):
        this_dict = {'name': self.name, 'value': self.value}
        this_dict.update(self.attributes())
        return this_dict

    def validate(self, name, value):
        """Validate a cookie attribute with an appropriate validator.

        The value comes in already parsed (for example, an expires value
        should be a datetime). Called automatically when an attribute value
        is set.
        """
        validator = self.attribute_validators.get(name, None)
        if validator:
            return True if validator(value) else False
        return True

    def __setattr__(self, name, value):
        """Attributes mentioned in attribute_names get validated using
        functions in attribute_validators, raising an exception on failure.
        Others get left alone.
        """
        if name in self.attribute_names or name in ("name", "value"):
            if name == 'name' and not value:
                raise InvalidCookieError(message="Cookies must have names")
            # Ignore None values indicating unset attr. Other invalids should
            # raise error so users of __setattr__ can learn.
            if value is not None:
                if not self.validate(name, value):
                    raise InvalidCookieAttributeError(
                        name, value, "did not validate with " +
                        repr(self.attribute_validators.get(name)))
        object.__setattr__(self, name, value)

    def __getattr__(self, name):
        """Provide for acting like everything in attribute_names is
        automatically set to None, rather than having to do so explicitly and
        only at import time.
        """
        if name in self.attribute_names:
            return None
        raise AttributeError(name)

    def attributes(self):
        """Export this cookie's attributes as a dict of encoded values.

        This is an important part of the code for rendering attributes, e.g.
        render_response().
        """
        dictionary = {}
        # Only look for attributes registered in attribute_names.
        for python_attr_name, cookie_attr_name in self.attribute_names.items():
            value = getattr(self, python_attr_name)
            renderer = self.attribute_renderers.get(python_attr_name, None)
            if renderer:
                value = renderer(value)
            # If renderer returns None, or it's just natively none, then the
            # value is suppressed entirely - does not appear in any rendering.
            if not value:
                continue
            dictionary[cookie_attr_name] = value
        return dictionary

    def render_request(self):
        """Render as a string formatted for HTTP request headers
        (simple 'Cookie: ' style).
        """
        # Use whatever renderers are defined for name and value.
        name, value = self.name, self.value
        renderer = self.attribute_renderers.get('name', None)
        if renderer:
            name = renderer(name)
        renderer = self.attribute_renderers.get('value', None)
        if renderer:
            value = renderer(value)
        return ''.join((name, "=", value))

    def render_response(self):
        """Render as a string formatted for HTTP response headers
        (detailed 'Set-Cookie: ' style).
        """
        # Use whatever renderers are defined for name and value.
        # (.attributes() is responsible for all other rendering.)
        name, value = self.name, self.value
        renderer = self.attribute_renderers.get('name', None)
        if renderer:
            name = renderer(name)
        renderer = self.attribute_renderers.get('value', None)
        if renderer:
            value = renderer(value)
        return '; '.join(
            ['{0}={1}'.format(name, value)] +
            [key if isinstance(val, bool) else '='.join((key, val))
             for key, val in self.attributes().items()]
        )

    def __eq__(self, other):
        attrs = ['name', 'value'] + list(self.attribute_names.keys())
        for attr in attrs:
            mine = getattr(self, attr, None)
            his = getattr(other, attr, None)
            if isinstance(mine, bytes):
                mine = mine.decode('utf-8')
            if isinstance(his, bytes):
                his = his.decode('utf-8')
            if attr == 'domain':
                if mine and mine[0] == '.':
                    mine = mine[1:]
                if his and his[0] == '.':
                    his = his[1:]
            if mine != his:
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    # Add a name and its proper rendering to this dict to register an
    # attribute as exportable. The key is the name of the Cookie object
    # attribute in Python, and it is mapped to the name you want in the
    # output. 'name' and 'value' should not be here.
    attribute_names = {
        'expires': 'Expires',
        'max_age': 'Max-Age',
        'domain': 'Domain',
        'path': 'Path',
        'comment': 'Comment',
        'version': 'Version',
        'secure': 'Secure',
        'httponly': 'HttpOnly',
    }

    # Register single-parameter functions in this dictionary to have them
    # used for encoding outgoing values (e.g. as RFC compliant strings,
    # as base64, encrypted stuff, etc.)
    # These are called by the property generated by cookie_attribute().
    # Usually it would be wise not to define a renderer for name, but it is
    # supported in case there is ever a real need.
    attribute_renderers = {
        'value': encode_cookie_value,
        'domain': render_domain,
        'expires': render_date,
        'max_age': lambda item: str(item) if item is not None else None,
        'secure': lambda item: True if item else False,
        'httponly': lambda item: True if item else False,
        'comment': encode_extension_av,
        'version': lambda item: (str(item) if isinstance(item, int)
                                 else encode_extension_av(item)),
    }

    # Register single-parameter functions in this dictionary to have them
    # used for decoding incoming values for use in the Python API (e.g. into
    # nice objects, numbers, unicode strings, etc.)
    # These are called by the property generated by cookie_attribute().
    attribute_parsers = {
        'value': parse_value,
        'expires': parse_date,
        'domain': parse_domain,
        'path': parse_path,
        'max_age': lambda item: long(strip_spaces_and_quotes(item)),
        'comment': parse_string,
        'version': lambda item: int(strip_spaces_and_quotes(item)),
        'secure': lambda item: True if item else False,
        'httponly': lambda item: True if item else False,
    }

    # Register single-parameter functions which return a true value for
    # acceptable values, and a false value for unacceptable ones. An
    # attribute's validator is run after it is parsed or when it is directly
    # set, and InvalidCookieAttributeError is raised if validation fails (and
    # the validator doesn't raise a different exception prior)
    attribute_validators = {
        'name': valid_name,
        'value': valid_value,
        'expires': valid_date,
        'domain': valid_domain,
        'path': valid_path,
        'max_age': valid_max_age,
        'comment': valid_value,
        'version': lambda number: re.match(r"^\d+\Z", str(number)),
        'secure': lambda item: item is True or item is False,
        'httponly': lambda item: item is True or item is False,
    }
(name, value, **kwargs)
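A short end-to-end sketch using only behavior visible in the class above; the attribute values are illustrative:

cookie = Cookie("session", "abc123", max_age=3600, httponly=True)

print(cookie.render_request())
# session=abc123

print(cookie.render_response())
# session=abc123; Max-Age=3600; HttpOnly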
31,687
cookies
__eq__
null
def __eq__(self, other):
    attrs = ['name', 'value'] + list(self.attribute_names.keys())
    for attr in attrs:
        mine = getattr(self, attr, None)
        his = getattr(other, attr, None)
        if isinstance(mine, bytes):
            mine = mine.decode('utf-8')
        if isinstance(his, bytes):
            his = his.decode('utf-8')
        if attr == 'domain':
            if mine and mine[0] == '.':
                mine = mine[1:]
            if his and his[0] == '.':
                his = his[1:]
        if mine != his:
            return False
    return True
(self, other)
31,688
cookies
__getattr__
Provide for acting like everything in attribute_names is automatically set to None, rather than having to do so explicitly and only at import time.
def __getattr__(self, name):
    """Provide for acting like everything in attribute_names is
    automatically set to None, rather than having to do so explicitly and
    only at import time.
    """
    if name in self.attribute_names:
        return None
    raise AttributeError(name)
(self, name)
31,689
cookies
__init__
null
def __init__(self, name, value, **kwargs):
    # If we don't have or can't set a name value, we don't want to return
    # junk, so we must break control flow. And we don't want to use
    # InvalidCookieAttributeError, because users may want to catch that to
    # suppress all complaining about funky attributes.
    try:
        self.name = name
    except InvalidCookieAttributeError:
        raise InvalidCookieError(message="invalid name for new Cookie",
                                 data=name)
    value = value or ''
    try:
        self.value = value
    except InvalidCookieAttributeError:
        raise InvalidCookieError(message="invalid value for new Cookie",
                                 data=value)
    if kwargs:
        self._set_attributes(kwargs, ignore_bad_attributes=False)
(self, name, value, **kwargs)
31,691
cookies
__setattr__
Attributes mentioned in attribute_names get validated using functions in attribute_validators, raising an exception on failure. Others get left alone.
def __setattr__(self, name, value):
    """Attributes mentioned in attribute_names get validated using
    functions in attribute_validators, raising an exception on failure.
    Others get left alone.
    """
    if name in self.attribute_names or name in ("name", "value"):
        if name == 'name' and not value:
            raise InvalidCookieError(message="Cookies must have names")
        # Ignore None values indicating unset attr. Other invalids should
        # raise error so users of __setattr__ can learn.
        if value is not None:
            if not self.validate(name, value):
                raise InvalidCookieAttributeError(
                    name, value, "did not validate with " +
                    repr(self.attribute_validators.get(name)))
    object.__setattr__(self, name, value)
(self, name, value)
31,692
cookies
_set_attributes
null
def _set_attributes(self, attrs, ignore_bad_attributes=False):
    for attr_name, attr_value in attrs.items():
        if attr_name not in self.attribute_names:
            if not ignore_bad_attributes:
                raise InvalidCookieAttributeError(
                    attr_name, attr_value,
                    "unknown cookie attribute '%s'" % attr_name)
            _report_unknown_attribute(attr_name)

        try:
            setattr(self, attr_name, attr_value)
        except InvalidCookieAttributeError as error:
            if not ignore_bad_attributes:
                raise
            _report_invalid_attribute(attr_name, attr_value, error.reason)
            continue
(self, attrs, ignore_bad_attributes=False)
31,693
cookies
attributes
Export this cookie's attributes as a dict of encoded values. This is an important part of the code for rendering attributes, e.g. render_response().
def attributes(self):
    """Export this cookie's attributes as a dict of encoded values.

    This is an important part of the code for rendering attributes, e.g.
    render_response().
    """
    dictionary = {}
    # Only look for attributes registered in attribute_names.
    for python_attr_name, cookie_attr_name in self.attribute_names.items():
        value = getattr(self, python_attr_name)
        renderer = self.attribute_renderers.get(python_attr_name, None)
        if renderer:
            value = renderer(value)
        # If renderer returns None, or it's just natively none, then the
        # value is suppressed entirely - does not appear in any rendering.
        if not value:
            continue
        dictionary[cookie_attr_name] = value
    return dictionary
(self)
31,694
cookies
render_request
Render as a string formatted for HTTP request headers (simple 'Cookie: ' style).
def render_request(self):
    """Render as a string formatted for HTTP request headers
    (simple 'Cookie: ' style).
    """
    # Use whatever renderers are defined for name and value.
    name, value = self.name, self.value
    renderer = self.attribute_renderers.get('name', None)
    if renderer:
        name = renderer(name)
    renderer = self.attribute_renderers.get('value', None)
    if renderer:
        value = renderer(value)
    return ''.join((name, "=", value))
(self)
31,695
cookies
render_response
Render as a string formatted for HTTP response headers (detailed 'Set-Cookie: ' style).
def render_response(self):
    """Render as a string formatted for HTTP response headers
    (detailed 'Set-Cookie: ' style).
    """
    # Use whatever renderers are defined for name and value.
    # (.attributes() is responsible for all other rendering.)
    name, value = self.name, self.value
    renderer = self.attribute_renderers.get('name', None)
    if renderer:
        name = renderer(name)
    renderer = self.attribute_renderers.get('value', None)
    if renderer:
        value = renderer(value)
    return '; '.join(
        ['{0}={1}'.format(name, value)] +
        [key if isinstance(val, bool) else '='.join((key, val))
         for key, val in self.attributes().items()]
    )
(self)
31,696
cookies
to_dict
null
def to_dict(self):
    this_dict = {'name': self.name, 'value': self.value}
    this_dict.update(self.attributes())
    return this_dict
(self)
31,697
cookies
validate
Validate a cookie attribute with an appropriate validator. The value comes in already parsed (for example, an expires value should be a datetime). Called automatically when an attribute value is set.
def validate(self, name, value):
    """Validate a cookie attribute with an appropriate validator.

    The value comes in already parsed (for example, an expires value should
    be a datetime). Called automatically when an attribute value is set.
    """
    validator = self.attribute_validators.get(name, None)
    if validator:
        return True if validator(value) else False
    return True
(self, name, value)
31,698
cookies
CookieError
Base class for this module's exceptions, so you can catch them all if you want to.
class CookieError(Exception):
    """Base class for this module's exceptions, so you can catch them all if
    you want to.
    """
    def __init__(self):
        Exception.__init__(self)
()
31,699
cookies
__init__
null
def __init__(self):
    Exception.__init__(self)
(self)
31,700
cookies
Cookies
Represent a set of cookies indexed by name. This class bundles together a set of Cookie objects and provides a convenient interface to them for parsing and producing cookie headers. In basic operation it acts just like a dict of Cookie objects, but it adds additional convenience methods for the usual cookie tasks: add cookie objects by their names, create new cookie objects under specified names, parse HTTP request or response data into new cookie objects automatically stored in the dict, and render the set in formats suitable for HTTP request or response headers.
class Cookies(dict):
    """Represent a set of cookies indexed by name.

    This class bundles together a set of Cookie objects and provides a
    convenient interface to them for parsing and producing cookie headers. In
    basic operation it acts just like a dict of Cookie objects, but it adds
    additional convenience methods for the usual cookie tasks: add cookie
    objects by their names, create new cookie objects under specified names,
    parse HTTP request or response data into new cookie objects automatically
    stored in the dict, and render the set in formats suitable for HTTP
    request or response headers.
    """
    DEFAULT_COOKIE_CLASS = Cookie

    def __init__(self, *args, **kwargs):
        dict.__init__(self)
        self.all_cookies = []
        self.cookie_class = kwargs.get(
            "_cookie_class", self.DEFAULT_COOKIE_CLASS)
        self.add(*args, **kwargs)

    def add(self, *args, **kwargs):
        """Add Cookie objects by their names, or create new ones under
        specified names.

        Any unnamed arguments are interpreted as existing cookies, and are
        added under the value in their .name attribute. With keyword
        arguments, the key is interpreted as the cookie name and the value as
        the UNENCODED value stored in the cookie.
        """
        # Only the first one is accessible through the main interface,
        # others accessible through get_all (all_cookies).
        for cookie in args:
            self.all_cookies.append(cookie)
            if cookie.name in self:
                continue
            self[cookie.name] = cookie
        for key, value in kwargs.items():
            cookie = self.cookie_class(key, value)
            self.all_cookies.append(cookie)
            if key in self:
                continue
            self[key] = cookie

    def get_all(self, key):
        return [cookie for cookie in self.all_cookies
                if cookie.name == key]

    def parse_request(self, header_data, ignore_bad_cookies=False):
        """Parse 'Cookie' header data into Cookie objects, and add them to
        this Cookies object.

        :arg header_data: string containing only 'Cookie:' request headers or
        header values (as in CGI/WSGI HTTP_COOKIE); if more than one, they
        must be separated by CRLF (\\r\\n).

        :arg ignore_bad_cookies: if set, will log each syntactically invalid
        cookie (at the granularity of semicolon-delimited blocks) rather than
        raising an exception at the first bad cookie.

        :returns: a Cookies instance containing Cookie objects parsed from
        header_data.

        .. note::
            If you want to parse 'Set-Cookie:' response headers, please use
            parse_response instead. parse_request will happily turn
            'expires=frob' into a separate cookie without complaining,
            according to the grammar.
        """
        cookies_dict = _parse_request(
            header_data, ignore_bad_cookies=ignore_bad_cookies)
        cookie_objects = []
        for name, values in cookies_dict.items():
            for value in values:
                # Use from_dict to check name and parse value
                cookie_dict = {'name': name, 'value': value}
                try:
                    cookie = self.cookie_class.from_dict(cookie_dict)
                except InvalidCookieError:
                    if not ignore_bad_cookies:
                        raise
                else:
                    cookie_objects.append(cookie)
        try:
            self.add(*cookie_objects)
        except InvalidCookieError:
            if not ignore_bad_cookies:
                raise
            _report_invalid_cookie(header_data)
        return self

    def parse_response(self, header_data, ignore_bad_cookies=False,
                       ignore_bad_attributes=True):
        """Parse 'Set-Cookie' header data into Cookie objects, and add them
        to this Cookies object.

        :arg header_data: string containing only 'Set-Cookie:' request
        headers or their corresponding header values; if more than one, they
        must be separated by CRLF (\\r\\n).

        :arg ignore_bad_cookies: if set, will log each syntactically invalid
        cookie rather than raising an exception at the first bad cookie.
        (This includes cookies which have noncompliant characters in the
        attribute section).

        :arg ignore_bad_attributes: defaults to True, which means to log but
        not raise an error when a particular attribute is unrecognized. (This
        does not necessarily mean that the attribute is invalid, although
        that would often be the case.) if unset, then an error will be raised
        at the first semicolon-delimited block which has an unknown
        attribute.

        :returns: a Cookies instance containing Cookie objects parsed from
        header_data, each with recognized attributes populated.

        .. note::
            If you want to parse 'Cookie:' headers (i.e., data like what's
            sent with an HTTP request, which has only name=value pairs and no
            attributes), then please use parse_request instead. Such lines
            often contain multiple name=value pairs, and parse_response will
            throw away the pairs after the first one, which will probably
            generate errors or confusing behavior. (Since there's no perfect
            way to automatically determine which kind of parsing to do, you
            have to tell it manually by choosing correctly between
            parse_request and parse_response.)
        """
        cookie_dicts = _parse_response(
            header_data,
            ignore_bad_cookies=ignore_bad_cookies,
            ignore_bad_attributes=ignore_bad_attributes)
        cookie_objects = []
        for cookie_dict in cookie_dicts:
            cookie = self.cookie_class.from_dict(cookie_dict)
            cookie_objects.append(cookie)
        self.add(*cookie_objects)
        return self

    @classmethod
    def from_request(cls, header_data, ignore_bad_cookies=False):
        "Construct a Cookies object from request header data."
        cookies = cls()
        cookies.parse_request(
            header_data, ignore_bad_cookies=ignore_bad_cookies)
        return cookies

    @classmethod
    def from_response(cls, header_data, ignore_bad_cookies=False,
                      ignore_bad_attributes=True):
        "Construct a Cookies object from response header data."
        cookies = cls()
        cookies.parse_response(
            header_data,
            ignore_bad_cookies=ignore_bad_cookies,
            ignore_bad_attributes=ignore_bad_attributes)
        return cookies

    def render_request(self, sort=True):
        """Render the dict's Cookie objects into a string formatted for HTTP
        request headers (simple 'Cookie: ' style).
        """
        if not sort:
            return ("; ".join(
                cookie.render_request() for cookie in self.values()))
        return ("; ".join(sorted(
            cookie.render_request() for cookie in self.values())))

    def render_response(self, sort=True):
        """Render the dict's Cookie objects into a list of strings formatted
        for HTTP response headers (detailed 'Set-Cookie: ' style).
        """
        rendered = [cookie.render_response() for cookie in self.values()]
        return rendered if not sort else sorted(rendered)

    def __repr__(self):
        return "Cookies(%s)" % ', '.join(
            "%s=%r" % (name, cookie.value)
            for (name, cookie) in self.items())

    def __eq__(self, other):
        """Test if a Cookies object is globally 'equal' to another one by
        seeing if it looks like a dict such that d[k] == self[k]. This
        depends on each Cookie object reporting its equality correctly.
        """
        if not hasattr(other, "keys"):
            return False
        try:
            keys = sorted(set(self.keys()) | set(other.keys()))
            for key in keys:
                if key not in self:
                    return False
                if key not in other:
                    return False
                if self[key] != other[key]:
                    return False
        except (TypeError, KeyError):
            raise
        return True

    def __ne__(self, other):
        return not self.__eq__(other)
(*args, **kwargs)
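A minimal usage sketch of the class above (the header string is invented for illustration; it assumes this module is importable as cookies, and that dicts preserve insertion order as in Python 3.7+):

from cookies import Cookies

# Parse an incoming request header; for duplicate names, only the first
# cookie is reachable through the dict interface, the rest via get_all().
c = Cookies.from_request("Cookie: sid=abc; lang=en; sid=xyz")
assert c["sid"].value == "abc"
assert c["lang"].value == "en"
assert [k.value for k in c.get_all("sid")] == ["abc", "xyz"]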
31,701
cookies
__eq__
Test if a Cookies object is globally 'equal' to another one by seeing if it looks like a dict such that d[k] == self[k]. This depends on each Cookie object reporting its equality correctly.
def __eq__(self, other):
    """Test if a Cookies object is globally 'equal' to another one by
    seeing if it looks like a dict such that d[k] == self[k]. This depends
    on each Cookie object reporting its equality correctly.
    """
    if not hasattr(other, "keys"):
        return False
    try:
        keys = sorted(set(self.keys()) | set(other.keys()))
        for key in keys:
            if key not in self:
                return False
            if key not in other:
                return False
            if self[key] != other[key]:
                return False
    except (TypeError, KeyError):
        raise
    return True
(self, other)
31,702
cookies
__init__
null
def __init__(self, *args, **kwargs):
    dict.__init__(self)
    self.all_cookies = []
    self.cookie_class = kwargs.get(
        "_cookie_class", self.DEFAULT_COOKIE_CLASS)
    self.add(*args, **kwargs)
(self, *args, **kwargs)
31,704
cookies
__repr__
null
def __repr__(self):
    return "Cookies(%s)" % ', '.join(
        "%s=%r" % (name, cookie.value)
        for (name, cookie) in self.items())
(self)
31,705
cookies
add
Add Cookie objects by their names, or create new ones under specified names. Any unnamed arguments are interpreted as existing cookies, and are added under the value in their .name attribute. With keyword arguments, the key is interpreted as the cookie name and the value as the UNENCODED value stored in the cookie.
def add(self, *args, **kwargs):
    """Add Cookie objects by their names, or create new ones under
    specified names.

    Any unnamed arguments are interpreted as existing cookies, and are
    added under the value in their .name attribute. With keyword
    arguments, the key is interpreted as the cookie name and the value as
    the UNENCODED value stored in the cookie.
    """
    # Only the first one is accessible through the main interface,
    # others accessible through get_all (all_cookies).
    for cookie in args:
        self.all_cookies.append(cookie)
        if cookie.name in self:
            continue
        self[cookie.name] = cookie
    for key, value in kwargs.items():
        cookie = self.cookie_class(key, value)
        self.all_cookies.append(cookie)
        if key in self:
            continue
        self[key] = cookie
(self, *args, **kwargs)
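A short sketch of add() in action, mixing both calling styles (assumes Cookie and Cookies come from this module):

from cookies import Cookie, Cookies

c = Cookies()
c.add(Cookie("session", "abc123"), theme="dark")  # positional + keyword
assert c["session"].value == "abc123"
assert c["theme"].value == "dark"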
31,706
cookies
get_all
null
def get_all(self, key):
    return [cookie for cookie in self.all_cookies if cookie.name == key]
(self, key)
31,707
cookies
parse_request
Parse 'Cookie' header data into Cookie objects, and add them to this Cookies object.

:arg header_data: string containing only 'Cookie:' request headers or header values (as in CGI/WSGI HTTP_COOKIE); if more than one, they must be separated by CRLF (\r\n).

:arg ignore_bad_cookies: if set, will log each syntactically invalid cookie (at the granularity of semicolon-delimited blocks) rather than raising an exception at the first bad cookie.

:returns: a Cookies instance containing Cookie objects parsed from header_data.

.. note:: If you want to parse 'Set-Cookie:' response headers, please use parse_response instead. parse_request will happily turn 'expires=frob' into a separate cookie without complaining, according to the grammar.
def parse_request(self, header_data, ignore_bad_cookies=False):
    """Parse 'Cookie' header data into Cookie objects, and add them to
    this Cookies object.

    :arg header_data: string containing only 'Cookie:' request headers or
    header values (as in CGI/WSGI HTTP_COOKIE); if more than one, they
    must be separated by CRLF (\\r\\n).

    :arg ignore_bad_cookies: if set, will log each syntactically invalid
    cookie (at the granularity of semicolon-delimited blocks) rather than
    raising an exception at the first bad cookie.

    :returns: a Cookies instance containing Cookie objects parsed from
    header_data.

    .. note::
        If you want to parse 'Set-Cookie:' response headers, please use
        parse_response instead. parse_request will happily turn
        'expires=frob' into a separate cookie without complaining,
        according to the grammar.
    """
    cookies_dict = _parse_request(
        header_data, ignore_bad_cookies=ignore_bad_cookies)
    cookie_objects = []
    for name, values in cookies_dict.items():
        for value in values:
            # Use from_dict to check name and parse value
            cookie_dict = {'name': name, 'value': value}
            try:
                cookie = self.cookie_class.from_dict(cookie_dict)
            except InvalidCookieError:
                if not ignore_bad_cookies:
                    raise
            else:
                cookie_objects.append(cookie)
    try:
        self.add(*cookie_objects)
    except InvalidCookieError:
        if not ignore_bad_cookies:
            raise
        _report_invalid_cookie(header_data)
    return self
(self, header_data, ignore_bad_cookies=False)
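For instance (the cookie data is made up):

from cookies import Cookies

c = Cookies()
c.parse_request("Cookie: a=b; c=d")
assert c["a"].value == "b" and c["c"].value == "d"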
31,708
cookies
parse_response
Parse 'Set-Cookie' header data into Cookie objects, and add them to this Cookies object.

:arg header_data: string containing only 'Set-Cookie:' response headers or their corresponding header values; if more than one, they must be separated by CRLF (\r\n).

:arg ignore_bad_cookies: if set, will log each syntactically invalid cookie rather than raising an exception at the first bad cookie. (This includes cookies which have noncompliant characters in the attribute section).

:arg ignore_bad_attributes: defaults to True, which means to log but not raise an error when a particular attribute is unrecognized. (This does not necessarily mean that the attribute is invalid, although that would often be the case.) If unset, then an error will be raised at the first semicolon-delimited block which has an unknown attribute.

:returns: a Cookies instance containing Cookie objects parsed from header_data, each with recognized attributes populated.

.. note:: If you want to parse 'Cookie:' headers (i.e., data like what's sent with an HTTP request, which has only name=value pairs and no attributes), then please use parse_request instead. Such lines often contain multiple name=value pairs, and parse_response will throw away the pairs after the first one, which will probably generate errors or confusing behavior. (Since there's no perfect way to automatically determine which kind of parsing to do, you have to tell it manually by choosing correctly between parse_request and parse_response.)
def parse_response(self, header_data, ignore_bad_cookies=False,
                   ignore_bad_attributes=True):
    """Parse 'Set-Cookie' header data into Cookie objects, and add them
    to this Cookies object.

    :arg header_data: string containing only 'Set-Cookie:' response
    headers or their corresponding header values; if more than one, they
    must be separated by CRLF (\\r\\n).

    :arg ignore_bad_cookies: if set, will log each syntactically invalid
    cookie rather than raising an exception at the first bad cookie.
    (This includes cookies which have noncompliant characters in the
    attribute section).

    :arg ignore_bad_attributes: defaults to True, which means to log but
    not raise an error when a particular attribute is unrecognized. (This
    does not necessarily mean that the attribute is invalid, although
    that would often be the case.) If unset, then an error will be raised
    at the first semicolon-delimited block which has an unknown attribute.

    :returns: a Cookies instance containing Cookie objects parsed from
    header_data, each with recognized attributes populated.

    .. note::
        If you want to parse 'Cookie:' headers (i.e., data like what's
        sent with an HTTP request, which has only name=value pairs and no
        attributes), then please use parse_request instead. Such lines
        often contain multiple name=value pairs, and parse_response will
        throw away the pairs after the first one, which will probably
        generate errors or confusing behavior. (Since there's no perfect
        way to automatically determine which kind of parsing to do, you
        have to tell it manually by choosing correctly between
        parse_request and parse_response.)
    """
    cookie_dicts = _parse_response(
        header_data,
        ignore_bad_cookies=ignore_bad_cookies,
        ignore_bad_attributes=ignore_bad_attributes)
    cookie_objects = []
    for cookie_dict in cookie_dicts:
        cookie = self.cookie_class.from_dict(cookie_dict)
        cookie_objects.append(cookie)
    self.add(*cookie_objects)
    return self
(self, header_data, ignore_bad_cookies=False, ignore_bad_attributes=True)
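A small sketch with an invented Set-Cookie line; per the docstring above, the recognized attributes (Path, Max-Age) end up populated on the resulting Cookie object:

from cookies import Cookies

c = Cookies()
c.parse_response("Set-Cookie: sid=abc123; Path=/; Max-Age=3600")
assert c["sid"].value == "abc123"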
31,709
cookies
render_request
Render the dict's Cookie objects into a string formatted for HTTP request headers (simple 'Cookie: ' style).
def render_request(self, sort=True):
    """Render the dict's Cookie objects into a string formatted for HTTP
    request headers (simple 'Cookie: ' style).
    """
    if not sort:
        return ("; ".join(
            cookie.render_request() for cookie in self.values()))
    return ("; ".join(sorted(
        cookie.render_request() for cookie in self.values())))
(self, sort=True)
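A quick sketch, assuming each Cookie renders in the simple name=value request style described above:

from cookies import Cookies

c = Cookies(z="3", a="1")
assert c.render_request() == "a=1; z=3"  # entries are sorted by default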
31,710
cookies
render_response
Render the dict's Cookie objects into a list of strings formatted for HTTP response headers (detailed 'Set-Cookie: ' style).
def render_response(self, sort=True):
    """Render the dict's Cookie objects into a list of strings formatted
    for HTTP response headers (detailed 'Set-Cookie: ' style).
    """
    rendered = [cookie.render_response() for cookie in self.values()]
    return rendered if not sort else sorted(rendered)
(self, sort=True)
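A round-trip sketch; the exact attribute ordering in the rendered string is up to the Cookie class, so the output shown is only indicative:

from cookies import Cookies

c = Cookies.from_response("Set-Cookie: sid=abc; Path=/")
print(c.render_response())  # e.g. ['sid=abc; Path=/']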
31,711
cookies
Definitions
Namespace to hold definitions used in cookie parsing (mostly pieces of regex). These are separated out for individual testing against examples and RFC grammar, and kept here to avoid cluttering other namespaces.
class Definitions(object):
    """Namespace to hold definitions used in cookie parsing (mostly pieces of
    regex). These are separated out for individual testing against examples
    and RFC grammar, and kept here to avoid cluttering other namespaces.
    """
    # Most of the following are set down or cited in RFC 6265 4.1.1

    # This is the grammar's 'cookie-name' defined as 'token' per RFC 2616 2.2.
    COOKIE_NAME = r"!#$%&'*+\-.0-9A-Z^_`a-z|~"

    # 'cookie-octet' - as used twice in definition of 'cookie-value'
    COOKIE_OCTET = r"\x21\x23-\x2B\--\x3A\x3C-\x5B\]-\x7E"

    # extension-av - also happens to be a superset of cookie-av and path-value
    EXTENSION_AV = """ !"#$%&\\\\'()*+,\-./0-9:<=>?@A-Z[\\]^_`a-z{|}~"""

    # This is for the first pass parse on a Set-Cookie: response header. It
    # includes cookie-value, cookie-pair, set-cookie-string, cookie-av.
    # extension-av is used to extract the chunk containing variable-length,
    # unordered attributes. The second pass then uses ATTR to break out each
    # attribute and extract it appropriately.
    # As compared with the RFC production grammar, it is much more liberal
    # with space characters, in order not to break on data made by
    # barbarians.
    SET_COOKIE_HEADER = """(?x) # Verbose mode
        ^(?:Set-Cookie:[ ]*)?
        (?P<name>[{name}:]+)
        [ ]*=[ ]*
        # Accept anything in quotes - this is not RFC 6265, but might ease
        # working with older code that half-heartedly works with 2965. Accept
        # spaces inside tokens up front, so we can deal with that error one
        # cookie at a time, after this first pass.
        (?P<value>(?:"{value}*")|(?:[{cookie_octet} ]*))
        [ ]*
        # Extract everything up to the end in one chunk, which will be broken
        # down in the second pass. Don't match if there's any unexpected
        # garbage at the end (hence the \Z; $ matches before newline).
        (?P<attrs>(?:;[ ]*[{cookie_av}]+)*)
        """.format(name=COOKIE_NAME, cookie_av=EXTENSION_AV + ";",
                   cookie_octet=COOKIE_OCTET, value="[^;]")

    # Now we specify the individual patterns for the attribute extraction
    # pass of Set-Cookie parsing (mapping to *-av in the RFC grammar). Things
    # which don't match any of these but are in extension-av are simply
    # ignored; anything else should be rejected in the first pass
    # (SET_COOKIE_HEADER).

    # Max-Age attribute. These are digits, they are expressed this way
    # because that is how they are expressed in the RFC.
    MAX_AGE_AV = "Max-Age=(?P<max_age>[\x30-\x39]+)"

    # Domain attribute; a label is one part of the domain
    LABEL = '{let_dig}(?:(?:{let_dig_hyp}+)?{let_dig})?'.format(
        let_dig="[A-Za-z0-9]", let_dig_hyp="[0-9A-Za-z\-]")
    DOMAIN = "\.?(?:{label}\.)*(?:{label})".format(label=LABEL)
    # Parse initial period though it's wrong, per RFC 6265 4.1.2.3
    DOMAIN_AV = "Domain=(?P<domain>{domain})".format(domain=DOMAIN)

    # Path attribute. We don't take special care with quotes because
    # they are hardly used, they don't allow invalid characters per RFC 6265,
    # and " is a valid character to occur in a path value anyway.
    PATH_AV = 'Path=(?P<path>[%s]+)' % EXTENSION_AV

    # Expires attribute. This gets big because of date parsing, which needs
    # to support a large range of formats, so it's broken down into pieces.

    # Generate a mapping of months to use in render/parse, to avoid
    # localizations which might be produced by strftime (e.g. %b -> Mayo)
    month_list = ["January", "February", "March", "April", "May", "June",
                  "July", "August", "September", "October", "November",
                  "December"]
    month_abbr_list = [item[:3] for item in month_list]
    month_numbers = {}
    for index, name in enumerate(month_list):
        name = name.lower()
        month_numbers[name[:3]] = index + 1
        month_numbers[name] = index + 1
    # Use the same list to create regexps for months.
    MONTH_SHORT = "(?:" + "|".join(item[:3] for item in month_list) + ")"
    MONTH_LONG = "(?:" + "|".join(item for item in month_list) + ")"

    # Same drill with weekdays, for the same reason.
    weekday_list = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday",
                    "Saturday", "Sunday"]
    weekday_abbr_list = [item[:3] for item in weekday_list]
    WEEKDAY_SHORT = "(?:" + "|".join(item[:3] for item in weekday_list) + ")"
    WEEKDAY_LONG = "(?:" + "|".join(item for item in weekday_list) + ")"

    # This regexp tries to exclude obvious nonsense in the first pass.
    DAY_OF_MONTH = "(?:[0 ]?[1-9]|[12][0-9]|[3][01])(?!\d)"

    # Here is the overall date format; ~99% of cases fold into one
    # generalized syntax like RFC 1123, and many of the rest use
    # asctime-like formats. (see test_date_formats for a full exegesis)
    DATE = """(?ix) # Case-insensitive mode, verbose mode
        (?:
            (?P<weekday>(?:{wdy}|{weekday}),[ ])?
            (?P<day>{day})
            [ \-]
            (?P<month>{mon}|{month})
            [ \-]
            # This does not support 3-digit years, which are rare and don't
            # seem to have one canonical interpretation.
            (?P<year>(?:\d{{2}}|\d{{4}}))
            [ ]
            # HH:MM[:SS] GMT
            (?P<hour>(?:[ 0][0-9]|[01][0-9]|2[0-3]))
            :(?P<minute>(?:0[0-9]|[1-5][0-9]))
            (?::(?P<second>\d{{2}}))?
            [ ]GMT
        |
            # Support asctime format, e.g. 'Sun Nov  6 08:49:37 1994'
            (?P<weekday2>{wdy})[ ]
            (?P<month2>{mon})[ ]
            (?P<day2>[ ]\d|\d\d)[ ]
            (?P<hour2>\d\d):
            (?P<minute2>\d\d)
            (?::(?P<second2>\d\d)?)[ ]
            (?P<year2>\d\d\d\d)
            (?:[ ]GMT)?  # GMT (Amazon)
        )
    """
    DATE = DATE.format(wdy=WEEKDAY_SHORT, weekday=WEEKDAY_LONG,
                       day=DAY_OF_MONTH, mon=MONTH_SHORT, month=MONTH_LONG)

    EXPIRES_AV = "Expires=(?P<expires>%s)" % DATE

    # Now we're ready to define a regexp which can match any number of attrs
    # in the variable portion of the Set-Cookie header (like the unnamed
    # latter part of set-cookie-string in the grammar). Each regexp of any
    # complexity is split out for testing by itself.
    ATTR = """(?ix) # Case-insensitive mode, verbose mode
        # Always start with start or semicolon and any number of spaces
        (?:^|;)[ ]*(?:
            # Big disjunction of attribute patterns (*_AV), with named
            # capture groups to extract everything in one pass. Anything
            # unrecognized goes in the 'unrecognized' capture group for
            # reporting.
            {expires}
            |{max_age}
            |{domain}
            |{path}
            |(?P<secure>Secure=?)
            |(?P<httponly>HttpOnly=?)
            |Version=(?P<version>[{stuff}]+)
            |Comment=(?P<comment>[{stuff}]+)
            |(?P<unrecognized>[{stuff}]+)
        )
        # End with any number of spaces not matched by the preceding (up to
        # the next semicolon) - but do not capture these.
        [ ]*
    """.format(expires=EXPIRES_AV, max_age=MAX_AGE_AV, domain=DOMAIN_AV,
               path=PATH_AV, stuff=EXTENSION_AV)

    # For request data ("Cookie: ") parsing, with finditer cf. RFC 6265 4.2.1
    COOKIE = """(?x) # Verbose mode
        (?: # Either something close to valid...

            # Match starts at start of string, or at separator.
            # Split on comma for the sake of legacy code (RFC 2109/2965),
            # and since it only breaks when invalid commas are put in values.
            # see http://bugs.python.org/issue1210326
            (?:^Cookie:|^|;|,)

            # 1 or more valid token characters making up the name (captured)
            # with colon added to accommodate users of some old Java apps,
            # etc.
            [ ]*
            (?P<name>[{name}:]+)
            [ ]*
            =
            [ ]*

            # While 6265 provides only for cookie-octet, this allows just
            # about anything in quotes (like in RFC 2616); people stuck on
            # RFC 2109/2965 will expect it to work this way. The non-quoted
            # token allows interior spaces ('\x20'), which is not valid. In
            # both cases, the decision of whether to allow these is
            # downstream.
            (?P<value>
                ["][^\00-\31"]*["]
                |
                [{value}]
                |
                [{value}][{value} ]*[{value}]+
                |
                )

            # ... Or something way off-spec - extract to report and move on
            |
            (?P<invalid>[^;]+)
        )
        # Trailing spaces after value
        [ ]*
        # Must end with ; or be at end of string (don't consume this though,
        # so use the lookahead assertion ?=
        (?=;|\Z)
    """.format(name=COOKIE_NAME, value=COOKIE_OCTET)

    # Precompile externally useful definitions into re objects.
    COOKIE_NAME_RE = re.compile("^([%s:]+)\Z" % COOKIE_NAME)
    COOKIE_RE = re.compile(COOKIE)
    SET_COOKIE_HEADER_RE = re.compile(SET_COOKIE_HEADER)
    ATTR_RE = re.compile(ATTR)
    DATE_RE = re.compile(DATE)
    DOMAIN_RE = re.compile(DOMAIN)
    PATH_RE = re.compile('^([%s]+)\Z' % EXTENSION_AV)
    EOL = re.compile("(?:\r\n|\n)")
()
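The precompiled patterns can be exercised directly. For example, DATE_RE recognizes both RFC 1123 and asctime-style dates (note the distinct capture groups for the two branches):

from cookies import Definitions

m = Definitions.DATE_RE.match("Sun, 06 Nov 1994 08:49:37 GMT")
assert m and m.group("year") == "1994"

m = Definitions.DATE_RE.match("Sun Nov  6 08:49:37 1994")  # asctime form
assert m and m.group("year2") == "1994"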
31,712
cookies
InvalidCookieAttributeError
Raised when setting an invalid attribute on a Cookie.
class InvalidCookieAttributeError(CookieError):
    """Raised when setting an invalid attribute on a Cookie.
    """
    def __init__(self, name, value, reason=None):
        CookieError.__init__(self)
        self.name = name
        self.value = value
        self.reason = reason

    def __str__(self):
        prefix = ("%s: " % self.reason) if self.reason else ""
        if self.name is None:
            return '%s%r' % (prefix, self.value)
        return '%s%r = %r' % (prefix, self.name, self.value)
(name, value, reason=None)
31,713
cookies
__init__
null
def __init__(self, name, value, reason=None):
    CookieError.__init__(self)
    self.name = name
    self.value = value
    self.reason = reason
(self, name, value, reason=None)
31,714
cookies
__str__
null
def __str__(self):
    prefix = ("%s: " % self.reason) if self.reason else ""
    if self.name is None:
        return '%s%r' % (prefix, self.value)
    return '%s%r = %r' % (prefix, self.name, self.value)
(self)
31,715
cookies
InvalidCookieError
Raised when attempting to parse or construct a cookie which is syntactically invalid (in any way that has possibly serious implications).
class InvalidCookieError(CookieError):
    """Raised when attempting to parse or construct a cookie which is
    syntactically invalid (in any way that has possibly serious
    implications).
    """
    def __init__(self, data=None, message=""):
        CookieError.__init__(self)
        self.data = data
        self.message = message

    def __str__(self):
        return '%r %r' % (self.message, self.data)
(data=None, message='')
31,716
cookies
__init__
null
def __init__(self, data=None, message=""):
    CookieError.__init__(self)
    self.data = data
    self.message = message
(self, data=None, message='')
31,717
cookies
__str__
null
def __str__(self):
    return '%r %r' % (self.message, self.data)
(self)
31,720
cookies
_parse_request
Turn one or more lines of 'Cookie:' header data into a dict mapping cookie names to cookie values (raw strings).
def _parse_request(header_data, ignore_bad_cookies=False):
    """Turn one or more lines of 'Cookie:' header data into a dict mapping
    cookie names to cookie values (raw strings).
    """
    cookies_dict = {}
    for line in Definitions.EOL.split(header_data.strip()):
        # Materialize the iterator so emptiness can be tested afterward.
        matches = list(Definitions.COOKIE_RE.finditer(line))
        for match in matches:
            invalid = match.group('invalid')
            if invalid:
                if not ignore_bad_cookies:
                    raise InvalidCookieError(data=invalid)
                _report_invalid_cookie(invalid)
                continue
            name = match.group('name')
            values = cookies_dict.get(name)
            value = match.group('value').strip('"')
            if values:
                values.append(value)
            else:
                cookies_dict[name] = [value]
        if not matches:
            if not ignore_bad_cookies:
                raise InvalidCookieError(data=line)
            _report_invalid_cookie(line)
    return cookies_dict
(header_data, ignore_bad_cookies=False)
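The raw first-pass output, before any Cookie objects are built (note the leading underscore: this is an internal helper, and duplicate names accumulate into a list):

from cookies import _parse_request

assert _parse_request("a=b; a=c; d=e") == {"a": ["b", "c"], "d": ["e"]}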
31,721
cookies
_parse_response
Turn one or more lines of 'Set-Cookie:' header data into a list of dicts mapping attribute names to attribute values (as plain strings).
def _parse_response(header_data, ignore_bad_cookies=False,
                    ignore_bad_attributes=True):
    """Turn one or more lines of 'Set-Cookie:' header data into a list of
    dicts mapping attribute names to attribute values (as plain strings).
    """
    cookie_dicts = []
    for line in Definitions.EOL.split(header_data.strip()):
        if not line:
            break
        cookie_dict = parse_one_response(
            line, ignore_bad_cookies=ignore_bad_cookies,
            ignore_bad_attributes=ignore_bad_attributes)
        if not cookie_dict:
            continue
        cookie_dicts.append(cookie_dict)
    if not cookie_dicts:
        if not ignore_bad_cookies:
            raise InvalidCookieError(data=header_data)
        _report_invalid_cookie(header_data)
    return cookie_dicts
(header_data, ignore_bad_cookies=False, ignore_bad_attributes=True)
31,722
cookies
_report_invalid_attribute
How this module logs a bad attribute when exception suppressed
def _report_invalid_attribute(name, value, reason):
    "How this module logs a bad attribute when exception suppressed"
    logging.error("invalid Cookie attribute (%s): %r=%r", reason, name, value)
(name, value, reason)
31,723
cookies
_report_invalid_cookie
How this module logs a bad cookie when exception suppressed
def _report_invalid_cookie(data):
    "How this module logs a bad cookie when exception suppressed"
    logging.error("invalid Cookie: %r", data)
(data)
31,724
cookies
_report_unknown_attribute
How this module logs an unknown attribute when exception suppressed
def _report_unknown_attribute(name):
    "How this module logs an unknown attribute when exception suppressed"
    logging.error("unknown Cookie attribute: %r", name)
(name)
31,725
cookies
_total_seconds
Wrapper to work around lack of .total_seconds() method in Python 3.1.
def _total_seconds(td):
    """Wrapper to work around lack of .total_seconds() method in Python 3.1.
    """
    if hasattr(td, "total_seconds"):
        return td.total_seconds()
    # 86400 seconds per day; 10**6 microseconds per second.
    return td.days * 3600 * 24 + td.seconds + td.microseconds / 1000000.0
(td)
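For example:

import datetime
from cookies import _total_seconds

assert _total_seconds(datetime.timedelta(days=1, seconds=30)) == 86430.0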
31,728
cookies
<lambda>
null
default_cookie_quote = lambda item: _default_quote( item, safe='!#$%&\'()*+/:<=>?@[]^`{|}~')
(item)
31,729
cookies
<lambda>
null
default_extension_quote = lambda item: _default_quote( item, safe=' !"#$%&\'()*+,/:<=>?@[\\]^`{|}~')
(item)
31,731
cookies
encode_cookie_value
URL-encode strings to make them safe for a cookie value. By default this uses urllib quoting, as used in many other cookie implementations and in other Python code, instead of an ad hoc escaping mechanism which includes backslashes (these also being illegal chars in RFC 6265).
def encode_cookie_value(data, quote=default_cookie_quote):
    """URL-encode strings to make them safe for a cookie value.

    By default this uses urllib quoting, as used in many other cookie
    implementations and in other Python code, instead of an ad hoc escaping
    mechanism which includes backslashes (these also being illegal chars in
    RFC 6265).
    """
    if data is None:
        return None
    # encode() to UTF-8 bytes so quote won't crash on non-ASCII,
    # but doing that to bytes objects is nonsense.
    # On Python 2 encode crashes if s is bytes containing non-ASCII.
    # On Python 3 encode crashes on all bytes objects.
    if not isinstance(data, bytes):
        data = data.encode("utf-8")
    # URL encode data so it is safe for cookie value
    quoted = quote(data)
    # Don't force to bytes, so that downstream can use proper string API
    # rather than crippled bytes, and to encourage encoding to be done just
    # once.
    return quoted
(data, quote=<function <lambda> at 0x7f9a26497c70>)
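For example, non-ASCII characters are UTF-8 encoded and then percent-escaped, and the semicolon (illegal in a cookie value) is escaped as well:

from cookies import encode_cookie_value

assert encode_cookie_value("café;") == "caf%C3%A9%3B"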
31,732
cookies
encode_extension_av
URL-encode strings to make them safe for an extension-av (extension attribute value): <any CHAR except CTLs or ";">
def encode_extension_av(data, quote=default_extension_quote):
    """URL-encode strings to make them safe for an extension-av
    (extension attribute value): <any CHAR except CTLs or ";">
    """
    if not data:
        return ''
    return quote(data)
(data, quote=<function <lambda> at 0x7f9a26497d00>)
31,735
cookies
parse_date
Parse an RFC 1123 or asctime-like format date string to produce a Python datetime object (without a timezone).
def parse_date(value):
    """Parse an RFC 1123 or asctime-like format date string to produce a
    Python datetime object (without a timezone).
    """
    # Do the regex magic; also enforces 2 or 4 digit years
    match = Definitions.DATE_RE.match(value) if value else None
    if not match:
        return None
    # We're going to extract and prepare captured data in 'data'.
    data = {}
    captured = match.groupdict()
    fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
    # If we matched on the RFC 1123 family format
    if captured['year']:
        for field in fields:
            data[field] = captured[field]
    # If we matched on the asctime format, use year2 etc.
    else:
        for field in fields:
            data[field] = captured[field + "2"]
    year = data['year']
    # Interpret lame 2-digit years - base the cutoff on UNIX epoch, in case
    # someone sets a '70' cookie meaning 'distant past'. This won't break for
    # 58 years and people who use 2-digit years are asking for it anyway.
    if len(year) == 2:
        if int(year) < 70:
            year = "20" + year
        else:
            year = "19" + year
    year = int(year)
    # Clamp to [1900, 9999]: strftime has min 1900, datetime has max 9999
    data['year'] = max(1900, min(year, 9999))
    # Other things which are numbers should convert to integer
    for field in ['day', 'hour', 'minute', 'second']:
        if data[field] is None:
            data[field] = 0
        data[field] = int(data[field])
    # Look up the number datetime needs for the named month
    data['month'] = Definitions.month_numbers[data['month'].lower()]
    return datetime.datetime(**data)
(value)
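Both supported date families parse to the same naive datetime, and unparseable input yields None:

import datetime
from cookies import parse_date

assert parse_date("Sun, 06 Nov 1994 08:49:37 GMT") == \
    datetime.datetime(1994, 11, 6, 8, 49, 37)
assert parse_date("Sun Nov  6 08:49:37 1994") == \
    datetime.datetime(1994, 11, 6, 8, 49, 37)
assert parse_date("not a date") is None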
31,736
cookies
parse_domain
Parse and validate an incoming Domain attribute value.
def parse_domain(value):
    """Parse and validate an incoming Domain attribute value.
    """
    value = strip_spaces_and_quotes(value)
    if value:
        assert valid_domain(value)
    return value
(value)
31,737
cookies
parse_one_response
Turn one 'Set-Cookie:' line into a dict mapping attribute names to attribute values (raw strings).
def parse_one_response(line, ignore_bad_cookies=False,
                       ignore_bad_attributes=True):
    """Turn one 'Set-Cookie:' line into a dict mapping attribute names to
    attribute values (raw strings).
    """
    cookie_dict = {}
    # Basic validation, extract name/value/attrs-chunk
    match = Definitions.SET_COOKIE_HEADER_RE.match(line)
    if not match:
        if not ignore_bad_cookies:
            raise InvalidCookieError(data=line)
        _report_invalid_cookie(line)
        return None
    cookie_dict.update({
        'name': match.group('name'),
        'value': match.group('value')})
    # Extract individual attrs from the attrs chunk
    for match in Definitions.ATTR_RE.finditer(match.group('attrs')):
        captured = dict((k, v) for (k, v) in match.groupdict().items() if v)
        unrecognized = captured.get('unrecognized', None)
        if unrecognized:
            if not ignore_bad_attributes:
                raise InvalidCookieAttributeError(None, unrecognized,
                                                  "unrecognized")
            _report_unknown_attribute(unrecognized)
            continue
        # for unary flags
        for key in ('secure', 'httponly'):
            if captured.get(key):
                captured[key] = True
        # ignore subcomponents of expires - they're still there to avoid
        # doing two passes
        timekeys = ('weekday', 'month', 'day', 'hour', 'minute', 'second',
                    'year')
        if 'year' in captured:
            for key in timekeys:
                del captured[key]
        elif 'year2' in captured:
            for key in timekeys:
                del captured[key + "2"]
        cookie_dict.update(captured)
    return cookie_dict
(line, ignore_bad_cookies=False, ignore_bad_attributes=True)
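A sketch of the raw dict this produces for an invented line; note that unary flags come back as True while other attributes stay raw strings:

from cookies import parse_one_response

d = parse_one_response("Set-Cookie: sid=abc; Max-Age=60; Secure")
assert d == {"name": "sid", "value": "abc", "max_age": "60", "secure": True}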
31,738
cookies
parse_path
Parse and validate an incoming Path attribute value.
def parse_path(value):
    """Parse and validate an incoming Path attribute value.
    """
    value = strip_spaces_and_quotes(value)
    assert valid_path(value)
    return value
(value)
31,739
cookies
parse_string
Decode URL-encoded strings, recovering the (UTF-8) characters that were escaped.
def parse_string(data, unquote=default_unquote):
    """Decode URL-encoded strings, recovering the (UTF-8) characters that
    were escaped.
    """
    if data is None:
        return None
    # We'll soon need to unquote to recover our UTF-8 data.
    # In Python 2, unquote crashes on chars beyond ASCII. So encode functions
    # had better not include anything beyond ASCII in data.
    # In Python 3, unquote crashes on bytes objects, requiring conversion to
    # str objects (unicode) using decode().
    # But in Python 2, the same decode causes unquote to butcher the data.
    # So in that case, just leave the bytes.
    if isinstance(data, bytes):
        if sys.version_info > (3, 0, 0):  # pragma: no cover
            data = data.decode('ascii')
    # Recover URL encoded data
    unquoted = unquote(data)
    # Without this step, Python 2 may have good URL decoded *bytes*,
    # which will therefore not normalize as unicode and not compare to
    # the original.
    if isinstance(unquoted, bytes):
        unquoted = unquoted.decode('utf-8')
    return unquoted
(data, unquote=<function unquote at 0x7f9abff93d00>)
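This is the inverse of encode_cookie_value shown earlier:

from cookies import parse_string

assert parse_string("caf%C3%A9") == "café"
assert parse_string(None) is None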
31,740
cookies
parse_value
Process a cookie value
def parse_value(value, allow_spaces=True, unquote=default_unquote):
    "Process a cookie value"
    if value is None:
        return None
    value = strip_spaces_and_quotes(value)
    value = parse_string(value, unquote=unquote)
    if not allow_spaces:
        assert ' ' not in value
    return value
(value, allow_spaces=True, unquote=<function unquote at 0x7f9abff93d00>)
31,742
cookies
render_date
Render a date (e.g. an Expires value) per RFCs 6265/2616/1123. Don't give this localized (timezone-aware) datetimes. If you use them, convert them to GMT before passing them to this. There are too many conversion corner cases to handle this universally.
def render_date(date):
    """Render a date (e.g. an Expires value) per RFCs 6265/2616/1123.

    Don't give this localized (timezone-aware) datetimes. If you use them,
    convert them to GMT before passing them to this. There are too many
    conversion corner cases to handle this universally.
    """
    if not date:
        return None
    assert valid_date(date)
    # Avoid %a and %b, which can change with locale, breaking compliance
    weekday = Definitions.weekday_abbr_list[date.weekday()]
    month = Definitions.month_abbr_list[date.month - 1]
    return date.strftime("{day}, %d {month} %Y %H:%M:%S GMT"
                         ).format(day=weekday, month=month)
(date)
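This round-trips with parse_date shown earlier:

import datetime
from cookies import render_date

assert render_date(datetime.datetime(1994, 11, 6, 8, 49, 37)) == \
    "Sun, 06 Nov 1994 08:49:37 GMT"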
31,743
cookies
render_domain
null
def render_domain(domain):
    if not domain:
        return None
    if domain[0] == '.':
        return domain[1:]
    return domain
(domain)
31,744
cookies
strip_spaces_and_quotes
Remove invalid whitespace and/or a single pair of dquotes, returning an empty string for empty or whitespace-only values. Used to prepare cookie values, path, and domain attributes in a way which tolerates simple formatting mistakes and standards variations.
def strip_spaces_and_quotes(value):
    """Remove invalid whitespace and/or a single pair of dquotes, returning
    an empty string for empty or whitespace-only values. Used to prepare
    cookie values, path, and domain attributes in a way which tolerates
    simple formatting mistakes and standards variations.
    """
    value = value.strip() if value else ""
    if value and len(value) > 1 and (value[0] == value[-1] == '"'):
        value = value[1:-1]
    if not value:
        value = ""
    return value
(value)
31,746
cookies
valid_date
Validate an expires datetime object
def valid_date(date):
    "Validate an expires datetime object"
    # We want something that acts like a datetime. In particular,
    # strings indicate a failure to parse down to an object and ints are
    # nonstandard and ambiguous at best.
    if not hasattr(date, 'tzinfo'):
        return False
    # Relevant RFCs define UTC as 'close enough' to GMT, and the maximum
    # difference between UTC and GMT is often stated to be less than a
    # second.
    if date.tzinfo is None or _total_seconds(date.utcoffset()) < 1.1:
        return True
    return False
(date)
31,747
cookies
valid_domain
Validate a cookie domain ASCII string
def valid_domain(domain):
    "Validate a cookie domain ASCII string"
    # Using encoding on domain would confuse browsers into not sending
    # cookies. Generate UnicodeDecodeError up front if it can't store as
    # ASCII.
    domain.encode('ascii')
    # Domains starting with periods are not RFC-valid, but this is very
    # common in existing cookies, so they should still parse with DOMAIN_AV.
    if Definitions.DOMAIN_RE.match(domain):
        return True
    return False
(domain)
31,748
cookies
valid_max_age
Validate a cookie Max-Age
def valid_max_age(number):
    "Validate a cookie Max-Age"
    # basestring and long rely on the module's Python 2/3 compatibility
    # aliases (str and int, respectively, on Python 3).
    if isinstance(number, basestring):
        try:
            number = long(number)
        except (ValueError, TypeError):
            return False
    if number >= 0 and number % 1 == 0:
        return True
    return False
(number)
31,749
cookies
valid_name
Validate a cookie name string
def valid_name(name):
    "Validate a cookie name string"
    if isinstance(name, bytes):
        name = name.decode('ascii')
    if not Definitions.COOKIE_NAME_RE.match(name):
        return False
    # This module doesn't support $identifiers, which are part of an obsolete
    # and highly complex standard which is never used.
    if name[0] == "$":
        return False
    return True
(name)
31,750
cookies
valid_path
Validate a cookie path ASCII string
def valid_path(value):
    "Validate a cookie path ASCII string"
    # Generate UnicodeDecodeError if path can't store as ASCII.
    value.encode("ascii")
    # Cookies without leading slash will likely be ignored, raise ASAP.
    if not (value and value[0] == "/"):
        return False
    if not Definitions.PATH_RE.match(value):
        return False
    return True
(value)
31,751
cookies
valid_value
Validate a cookie value string.

This is generic across quote/unquote functions because it directly verifies the encoding round-trip using the specified quote/unquote functions. So if you use different quote/unquote functions, use something like this as a replacement for valid_value::

    my_valid_value = lambda value: valid_value(value, quote=my_quote, unquote=my_unquote)
def valid_value(value, quote=default_cookie_quote, unquote=default_unquote):
    """Validate a cookie value string.

    This is generic across quote/unquote functions because it directly
    verifies the encoding round-trip using the specified quote/unquote
    functions. So if you use different quote/unquote functions, use
    something like this as a replacement for valid_value::

        my_valid_value = lambda value: valid_value(value, quote=my_quote,
                                                   unquote=my_unquote)
    """
    if value is None:
        return False
    # Put the value through a round trip with the given quote and unquote
    # functions, so we will know whether data will get lost or not in the
    # event that we don't complain.
    encoded = encode_cookie_value(value, quote=quote)
    decoded = parse_string(encoded, unquote=unquote)
    # If the original string made the round trip, this is a valid value for
    # the given quote and unquote functions. Since the round trip can
    # generate different unicode forms, normalize before comparing, so we
    # can ignore trivial inequalities.
    decoded_normalized = (normalize("NFKD", decoded)
                          if not isinstance(decoded, bytes) else decoded)
    value_normalized = (normalize("NFKD", value)
                        if not isinstance(value, bytes) else value)
    if decoded_normalized == value_normalized:
        return True
    return False
(value, quote=<function <lambda> at 0x7f9a26497c70>, unquote=<function unquote at 0x7f9abff93d00>)
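For instance, with the default quote/unquote pair:

from cookies import valid_value

assert valid_value("café and spaces") is True   # survives the round trip
assert valid_value(None) is False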
31,752
oathlink.services.agent.ip.add
addIP
null
def addIP(certificateFilename: str, keyFilename: str, ip: str) -> str:
    extension = 'agent/ip/add'
    payload = {"ip": ip}
    response = _post(certificateFilename=certificateFilename,
                     keyFilename=keyFilename, extension=extension,
                     payload=payload)
    return response
(certificateFilename: str, keyFilename: str, ip: str) -> str
31,753
oathlink.main.oathlink
agent_create
null
def agent_create(certificate_filename_pem: str, certificate_filename_key: str,
                 serial: str, description: str) -> str:
    return createAgent(certificateFilename=certificate_filename_pem,
                       keyFilename=certificate_filename_key,
                       serial=serial, description=description)
(certificate_filename_pem: str, certificate_filename_key: str, serial: str, description: str) -> str
31,754
oathlink.main.oathlink
agent_ip_add
null
def agent_ip_add(certificate_filename_pem: str, certificate_filename_key: str,
                 ip: str) -> str:
    return addIP(certificateFilename=certificate_filename_pem,
                 keyFilename=certificate_filename_key, ip=ip)
(certificate_filename_pem: str, certificate_filename_key: str, ip: str) -> str
31,755
oathlink.main.oathlink
agent_ip_remove
null
def agent_ip_remove(certificate_filename_pem: str,
                    certificate_filename_key: str, ip: str) -> str:
    return removeIP(certificateFilename=certificate_filename_pem,
                    keyFilename=certificate_filename_key, ip=ip)
(certificate_filename_pem: str, certificate_filename_key: str, ip: str) -> str
31,756
oathlink.main.oathlink
agent_link
null
def agent_link(certificate_filename_pem: str, certificate_filename_key: str,
               user_id: str) -> str:
    return linkAgents(certificateFilename=certificate_filename_pem,
                      keyFilename=certificate_filename_key, userId=user_id)
(certificate_filename_pem: str, certificate_filename_key: str, user_id: str) -> str
31,757
oathlink.main.oathlink
agent_unlink
null
def agent_unlink(certificate_filename_pem: str, certificate_filename_key: str,
                 user_id: str) -> str:
    return unlinkAgents(certificateFilename=certificate_filename_pem,
                        keyFilename=certificate_filename_key, userId=user_id)
(certificate_filename_pem: str, certificate_filename_key: str, user_id: str) -> str
31,758
oathlink.services.record.archive
archiveOathlink
null
def archiveOathlink(certificateFilename: str, keyFilename: str,
                    recordId: [str, list]) -> list:
    extension = 'delete'
    recordId = _recordIdToList(recordId=recordId)
    payload = {'uuid': recordId}
    response = _post(certificateFilename=certificateFilename,
                     keyFilename=keyFilename, extension=extension,
                     payload=payload)
    return response
(certificateFilename: str, keyFilename: str, recordId: [<class 'str'>, <class 'list'>]) -> list
31,759
oathlink.main.oathlink
cancel
null
def cancel(certificate_filename_pem: str, certificate_filename_key: str,
           record_id: str = None) -> list:
    return cancelOathlink(certificateFilename=certificate_filename_pem,
                          keyFilename=certificate_filename_key,
                          recordId=record_id)
(certificate_filename_pem: str, certificate_filename_key: str, record_id: Optional[str] = None) -> list
31,760
oathlink.services.record.cancel
cancelOathlink
null
def cancelOathlink(certificateFilename: str, keyFilename: str,
                   recordId: [str, list]) -> list:
    extension = 'cancel'
    recordId = archive._recordIdToList(recordId=recordId)
    payload = {'uuid': recordId}
    response = _post(certificateFilename=certificateFilename,
                     keyFilename=keyFilename, extension=extension,
                     payload=payload)
    return response
(certificateFilename: str, keyFilename: str, recordId: [<class 'str'>, <class 'list'>]) -> list
31,761
oathlink.services.agent.account.create
createAgent
null
def createAgent(certificateFilename: str, keyFilename: str, serial: str,
                description: str) -> str:
    extension = 'agent/create'
    payload = {"serial": serial, "description": description}
    response = _post(certificateFilename=certificateFilename,
                     keyFilename=keyFilename, extension=extension,
                     payload=payload)
    return response
(certificateFilename: str, keyFilename: str, serial: str, description: str) -> str
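A hypothetical call sketch for the wrappers above. The certificate paths, serial, and IP are invented for illustration, and a reachable oathlink endpoint with valid client credentials is assumed:

from oathlink.main.oathlink import agent_create, agent_ip_add

response = agent_create(
    certificate_filename_pem="client.pem",   # hypothetical client certificate
    certificate_filename_key="client.key",   # hypothetical private key
    serial="A-1234",
    description="test agent")
print(response)

agent_ip_add(certificate_filename_pem="client.pem",
             certificate_filename_key="client.key",
             ip="203.0.113.7")               # documentation-range IP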