diff --git a/ckpts/universal/global_step80/zero/24.attention.dense.weight/fp32.pt b/ckpts/universal/global_step80/zero/24.attention.dense.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..550c0b79d6962d38954cd11cf50624eac368832b --- /dev/null +++ b/ckpts/universal/global_step80/zero/24.attention.dense.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7650f092df2ee10108669c65ab8399bc04bef7415e2ba2e0ed08347f92acc2d +size 16778317 diff --git a/ckpts/universal/global_step80/zero/24.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/24.post_attention_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..c7604c9c3f5d9db3842b71dd776d70391e3e06f9 --- /dev/null +++ b/ckpts/universal/global_step80/zero/24.post_attention_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61299b23a1e4dfadfb19e0d5685db32abd6a57045df2bc6ffd12e5ffe85c0a56 +size 9387 diff --git a/ckpts/universal/global_step80/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..5223f8674013be4aedfb7a323f8e4402481ad1d7 --- /dev/null +++ b/ckpts/universal/global_step80/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:edbb344876a6a1b7717b9987cd3e45a2702ac3bdc89143b186cca9b5e287d7be +size 33555627 diff --git a/ckpts/universal/global_step80/zero/7.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/7.attention.dense.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..9f025873120f04bd62639f7e8543739082d8f486 --- /dev/null +++ b/ckpts/universal/global_step80/zero/7.attention.dense.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19076cfd454afeebec12bff407e2a1ac218f80f30af4f067597544549e009326 +size 16778411 diff --git a/venv/lib/python3.10/site-packages/annotated_types/__init__.py b/venv/lib/python3.10/site-packages/annotated_types/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2f989504caa131c634ab51bcea435f3196a546bf --- /dev/null +++ b/venv/lib/python3.10/site-packages/annotated_types/__init__.py @@ -0,0 +1,396 @@ +import math +import sys +from dataclasses import dataclass +from datetime import timezone +from typing import TYPE_CHECKING, Any, Callable, Iterator, Optional, SupportsFloat, SupportsIndex, TypeVar, Union + +if sys.version_info < (3, 8): + from typing_extensions import Protocol, runtime_checkable +else: + from typing import Protocol, runtime_checkable + +if sys.version_info < (3, 9): + from typing_extensions import Annotated, Literal +else: + from typing import Annotated, Literal + +if sys.version_info < (3, 10): + EllipsisType = type(Ellipsis) + KW_ONLY = {} + SLOTS = {} +else: + from types import EllipsisType + + KW_ONLY = {"kw_only": True} + SLOTS = {"slots": True} + + +__all__ = ( + 'BaseMetadata', + 'GroupedMetadata', + 'Gt', + 'Ge', + 'Lt', + 'Le', + 'Interval', + 'MultipleOf', + 'MinLen', + 'MaxLen', + 'Len', + 'Timezone', + 'Predicate', + 'LowerCase', + 'UpperCase', + 'IsDigits', + 'IsFinite', + 'IsNotFinite', + 'IsNan', + 'IsNotNan', + 'IsInfinite', + 'IsNotInfinite', + 'doc', + 'DocInfo', + '__version__', +) + +__version__ = '0.6.0' + + +T = TypeVar('T') + + +# 
arguments that start with __ are considered
+# positional only
+# see https://peps.python.org/pep-0484/#positional-only-arguments
+
+
+class SupportsGt(Protocol):
+    def __gt__(self: T, __other: T) -> bool:
+        ...
+
+
+class SupportsGe(Protocol):
+    def __ge__(self: T, __other: T) -> bool:
+        ...
+
+
+class SupportsLt(Protocol):
+    def __lt__(self: T, __other: T) -> bool:
+        ...
+
+
+class SupportsLe(Protocol):
+    def __le__(self: T, __other: T) -> bool:
+        ...
+
+
+class SupportsMod(Protocol):
+    def __mod__(self: T, __other: T) -> T:
+        ...
+
+
+class SupportsDiv(Protocol):
+    def __div__(self: T, __other: T) -> T:
+        ...
+
+
+class BaseMetadata:
+    """Base class for all metadata.
+
+    This exists mainly so that implementers
+    can do `isinstance(..., BaseMetadata)` while traversing field annotations.
+    """
+
+    __slots__ = ()
+
+
+@dataclass(frozen=True, **SLOTS)
+class Gt(BaseMetadata):
+    """Gt(gt=x) implies that the value must be greater than x.
+
+    It can be used with any type that supports the ``>`` operator,
+    including numbers, dates and times, strings, sets, and so on.
+    """
+
+    gt: SupportsGt
+
+
+@dataclass(frozen=True, **SLOTS)
+class Ge(BaseMetadata):
+    """Ge(ge=x) implies that the value must be greater than or equal to x.
+
+    It can be used with any type that supports the ``>=`` operator,
+    including numbers, dates and times, strings, sets, and so on.
+    """
+
+    ge: SupportsGe
+
+
+@dataclass(frozen=True, **SLOTS)
+class Lt(BaseMetadata):
+    """Lt(lt=x) implies that the value must be less than x.
+
+    It can be used with any type that supports the ``<`` operator,
+    including numbers, dates and times, strings, sets, and so on.
+    """
+
+    lt: SupportsLt
+
+
+@dataclass(frozen=True, **SLOTS)
+class Le(BaseMetadata):
+    """Le(le=x) implies that the value must be less than or equal to x.
+
+    It can be used with any type that supports the ``<=`` operator,
+    including numbers, dates and times, strings, sets, and so on.
+    """
+
+    le: SupportsLe
+
+
+@runtime_checkable
+class GroupedMetadata(Protocol):
+    """A grouping of multiple BaseMetadata objects.
+
+    `GroupedMetadata` on its own is not metadata and has no meaning.
+    All of the constraints and metadata should be fully expressible
+    in terms of the `BaseMetadata`'s returned by `GroupedMetadata.__iter__()`.
+
+    Concrete implementations should override `GroupedMetadata.__iter__()`
+    to add their own metadata.
+    For example:
+
+    >>> @dataclass
+    >>> class Field(GroupedMetadata):
+    >>>     gt: float | None = None
+    >>>     description: str | None = None
+    ...
+    >>>     def __iter__(self) -> Iterable[BaseMetadata]:
+    >>>         if self.gt is not None:
+    >>>             yield Gt(self.gt)
+    >>>         if self.description is not None:
+    >>>             yield Description(self.description)
+
+    Also see the implementation of `Interval` below for an example.
+
+    Parsers should recognize this and unpack it so that it can be used
+    both with and without unpacking:
+
+    - `Annotated[int, Field(...)]` (parser must unpack Field)
+    - `Annotated[int, *Field(...)]` (PEP-646)
+    """  # noqa: trailing-whitespace
+
+    @property
+    def __is_annotated_types_grouped_metadata__(self) -> Literal[True]:
+        return True
+
+    def __iter__(self) -> Iterator[BaseMetadata]:
+        ...
+
+    if not TYPE_CHECKING:
+        __slots__ = ()  # allow subclasses to use slots
+
+        def __init_subclass__(cls, *args: Any, **kwargs: Any) -> None:
+            # Basic ABC like functionality without the complexity of an ABC
+            super().__init_subclass__(*args, **kwargs)
+            if cls.__iter__ is GroupedMetadata.__iter__:
+                raise TypeError("Can't subclass GroupedMetadata without implementing __iter__")
+
+        def __iter__(self) -> Iterator[BaseMetadata]:  # noqa: F811
+            raise NotImplementedError  # more helpful than "None has no attribute..." type errors
+
+
+@dataclass(frozen=True, **KW_ONLY, **SLOTS)
+class Interval(GroupedMetadata):
+    """Interval can express inclusive or exclusive bounds with a single object.
+
+    It accepts keyword arguments ``gt``, ``ge``, ``lt``, and/or ``le``, which
+    are interpreted the same way as the single-bound constraints.
+    """
+
+    gt: Union[SupportsGt, None] = None
+    ge: Union[SupportsGe, None] = None
+    lt: Union[SupportsLt, None] = None
+    le: Union[SupportsLe, None] = None
+
+    def __iter__(self) -> Iterator[BaseMetadata]:
+        """Unpack an Interval into zero or more single-bounds."""
+        if self.gt is not None:
+            yield Gt(self.gt)
+        if self.ge is not None:
+            yield Ge(self.ge)
+        if self.lt is not None:
+            yield Lt(self.lt)
+        if self.le is not None:
+            yield Le(self.le)
+
+
+@dataclass(frozen=True, **SLOTS)
+class MultipleOf(BaseMetadata):
+    """MultipleOf(multiple_of=x) might be interpreted in two ways:
+
+    1. Python semantics, implying ``value % multiple_of == 0``, or
+    2. JSONschema semantics, where ``int(value / multiple_of) == value / multiple_of``
+
+    We encourage users to be aware of these two common interpretations,
+    and libraries to carefully document which they implement.
+    """
+
+    multiple_of: Union[SupportsDiv, SupportsMod]
+
+
+@dataclass(frozen=True, **SLOTS)
+class MinLen(BaseMetadata):
+    """
+    MinLen() implies minimum inclusive length,
+    e.g. ``len(value) >= min_length``.
+    """
+
+    min_length: Annotated[int, Ge(0)]
+
+
+@dataclass(frozen=True, **SLOTS)
+class MaxLen(BaseMetadata):
+    """
+    MaxLen() implies maximum inclusive length,
+    e.g. ``len(value) <= max_length``.
+    """
+
+    max_length: Annotated[int, Ge(0)]
+
+
+@dataclass(frozen=True, **SLOTS)
+class Len(GroupedMetadata):
+    """
+    Len() implies that ``min_length <= len(value) <= max_length``.
+
+    Upper bound may be omitted or ``None`` to indicate no upper length bound.
+    """
+
+    min_length: Annotated[int, Ge(0)] = 0
+    max_length: Optional[Annotated[int, Ge(0)]] = None
+
+    def __iter__(self) -> Iterator[BaseMetadata]:
+        """Unpack a Len into zero or more single-bounds."""
+        if self.min_length > 0:
+            yield MinLen(self.min_length)
+        if self.max_length is not None:
+            yield MaxLen(self.max_length)
+
+
+@dataclass(frozen=True, **SLOTS)
+class Timezone(BaseMetadata):
+    """Timezone(tz=...) requires a datetime to be aware (or ``tz=None``, naive).
+
+    ``Annotated[datetime, Timezone(None)]`` must be a naive datetime.
+    ``Timezone(...)`` (the ellipsis literal) expresses that the datetime must be
+    tz-aware but any timezone is allowed.
+
+    You may also pass a specific timezone string or timezone object such as
+    ``Timezone(timezone.utc)`` or ``Timezone("Africa/Abidjan")`` to express that
+    you only allow a specific timezone, though we note that this is often
+    a symptom of poor design.
+    """
+
+    tz: Union[str, timezone, EllipsisType, None]
+
+
+@dataclass(frozen=True, **SLOTS)
+class Predicate(BaseMetadata):
+    """``Predicate(func: Callable)`` implies `func(value)` is truthy for valid values.
+
+    Users should prefer statically inspectable metadata, but if you need the full
+    power and flexibility of arbitrary runtime predicates... here it is.
+
+    We provide a few predefined predicates for common string constraints:
+    ``LowerCase = Annotated[str, Predicate(str.islower)]``,
+    ``UpperCase = Annotated[str, Predicate(str.isupper)]``, and
+    ``IsDigits = Annotated[str, Predicate(str.isdigit)]``. Users are encouraged
+    to use methods which can be given special handling, and to avoid indirection
+    like ``lambda s: s.lower()``.
+
+    Some libraries might have special logic to handle certain predicates, e.g. by
+    checking for `str.isdigit` and using its presence to both call custom logic to
+    enforce digit-only strings, and customise some generated external schema.
+
+    We do not specify what behaviour should be expected for predicates that raise
+    an exception. For example `Annotated[int, Predicate(str.isdigit)]` might silently
+    skip invalid constraints, or statically raise an error; or it might try calling it
+    and then propagate or discard the resulting exception.
+    """
+
+    func: Callable[[Any], bool]
+
+
+@dataclass
+class Not:
+    func: Callable[[Any], bool]
+
+    def __call__(self, __v: Any) -> bool:
+        return not self.func(__v)
+
+
+_StrType = TypeVar("_StrType", bound=str)
+
+LowerCase = Annotated[_StrType, Predicate(str.islower)]
+"""
+Return True if the string is a lowercase string, False otherwise.
+
+A string is lowercase if all cased characters in the string are lowercase and there is at least one cased character in the string.
+"""  # noqa: E501
+UpperCase = Annotated[_StrType, Predicate(str.isupper)]
+"""
+Return True if the string is an uppercase string, False otherwise.
+
+A string is uppercase if all cased characters in the string are uppercase and there is at least one cased character in the string.
+"""  # noqa: E501
+IsDigits = Annotated[_StrType, Predicate(str.isdigit)]
+"""
+Return True if the string is a digit string, False otherwise.
+
+A string is a digit string if all characters in the string are digits and there is at least one character in the string.
+"""  # noqa: E501
+IsAscii = Annotated[_StrType, Predicate(str.isascii)]
+"""
+Return True if all characters in the string are ASCII, False otherwise.
+
+ASCII characters have code points in the range U+0000-U+007F. Empty string is ASCII too.
+"""
+
+_NumericType = TypeVar('_NumericType', bound=Union[SupportsFloat, SupportsIndex])
+IsFinite = Annotated[_NumericType, Predicate(math.isfinite)]
+"""Return True if x is neither an infinity nor a NaN, and False otherwise."""
+IsNotFinite = Annotated[_NumericType, Predicate(Not(math.isfinite))]
+"""Return True if x is one of infinity or NaN, and False otherwise."""
+IsNan = Annotated[_NumericType, Predicate(math.isnan)]
+"""Return True if x is a NaN (not a number), and False otherwise."""
+IsNotNan = Annotated[_NumericType, Predicate(Not(math.isnan))]
+"""Return True if x is anything but NaN (not a number), and False otherwise."""
+IsInfinite = Annotated[_NumericType, Predicate(math.isinf)]
+"""Return True if x is a positive or negative infinity, and False otherwise."""
+IsNotInfinite = Annotated[_NumericType, Predicate(Not(math.isinf))]
+"""Return True if x is neither a positive nor a negative infinity, and False otherwise."""
+
+try:
+    from typing_extensions import DocInfo, doc  # type: ignore [attr-defined]
+except ImportError:
+
+    @dataclass(frozen=True, **SLOTS)
+    class DocInfo:  # type: ignore [no-redef]
+        """
+        The return value of doc(), mainly to be used by tools that want to extract the
+        Annotated documentation at runtime.
+ """ + + documentation: str + """The documentation string passed to doc().""" + + def doc( + documentation: str, + ) -> DocInfo: + """ + Add documentation to a type annotation inside of Annotated. + + For example: + + >>> def hi(name: Annotated[int, doc("The name of the user")]) -> None: ... + """ + return DocInfo(documentation) diff --git a/venv/lib/python3.10/site-packages/annotated_types/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/annotated_types/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb249fb383feef1dfbd525a9cd8d2cc3beadbd79 Binary files /dev/null and b/venv/lib/python3.10/site-packages/annotated_types/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/annotated_types/__pycache__/test_cases.cpython-310.pyc b/venv/lib/python3.10/site-packages/annotated_types/__pycache__/test_cases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7a130169e37e85732f4ec4386f5dada71d939e1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/annotated_types/__pycache__/test_cases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/annotated_types/py.typed b/venv/lib/python3.10/site-packages/annotated_types/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/annotated_types/test_cases.py b/venv/lib/python3.10/site-packages/annotated_types/test_cases.py new file mode 100644 index 0000000000000000000000000000000000000000..f54df700283bb31f60106443af0e54c8cfbdb834 --- /dev/null +++ b/venv/lib/python3.10/site-packages/annotated_types/test_cases.py @@ -0,0 +1,147 @@ +import math +import sys +from datetime import date, datetime, timedelta, timezone +from decimal import Decimal +from typing import Any, Dict, Iterable, Iterator, List, NamedTuple, Set, Tuple + +if sys.version_info < (3, 9): + from typing_extensions import Annotated +else: + from typing import Annotated + +import annotated_types as at + + +class Case(NamedTuple): + """ + A test case for `annotated_types`. 
+ """ + + annotation: Any + valid_cases: Iterable[Any] + invalid_cases: Iterable[Any] + + +def cases() -> Iterable[Case]: + # Gt, Ge, Lt, Le + yield Case(Annotated[int, at.Gt(4)], (5, 6, 1000), (4, 0, -1)) + yield Case(Annotated[float, at.Gt(0.5)], (0.6, 0.7, 0.8, 0.9), (0.5, 0.0, -0.1)) + yield Case( + Annotated[datetime, at.Gt(datetime(2000, 1, 1))], + [datetime(2000, 1, 2), datetime(2000, 1, 3)], + [datetime(2000, 1, 1), datetime(1999, 12, 31)], + ) + yield Case( + Annotated[datetime, at.Gt(date(2000, 1, 1))], + [date(2000, 1, 2), date(2000, 1, 3)], + [date(2000, 1, 1), date(1999, 12, 31)], + ) + yield Case( + Annotated[datetime, at.Gt(Decimal('1.123'))], + [Decimal('1.1231'), Decimal('123')], + [Decimal('1.123'), Decimal('0')], + ) + + yield Case(Annotated[int, at.Ge(4)], (4, 5, 6, 1000, 4), (0, -1)) + yield Case(Annotated[float, at.Ge(0.5)], (0.5, 0.6, 0.7, 0.8, 0.9), (0.4, 0.0, -0.1)) + yield Case( + Annotated[datetime, at.Ge(datetime(2000, 1, 1))], + [datetime(2000, 1, 2), datetime(2000, 1, 3)], + [datetime(1998, 1, 1), datetime(1999, 12, 31)], + ) + + yield Case(Annotated[int, at.Lt(4)], (0, -1), (4, 5, 6, 1000, 4)) + yield Case(Annotated[float, at.Lt(0.5)], (0.4, 0.0, -0.1), (0.5, 0.6, 0.7, 0.8, 0.9)) + yield Case( + Annotated[datetime, at.Lt(datetime(2000, 1, 1))], + [datetime(1999, 12, 31), datetime(1999, 12, 31)], + [datetime(2000, 1, 2), datetime(2000, 1, 3)], + ) + + yield Case(Annotated[int, at.Le(4)], (4, 0, -1), (5, 6, 1000)) + yield Case(Annotated[float, at.Le(0.5)], (0.5, 0.0, -0.1), (0.6, 0.7, 0.8, 0.9)) + yield Case( + Annotated[datetime, at.Le(datetime(2000, 1, 1))], + [datetime(2000, 1, 1), datetime(1999, 12, 31)], + [datetime(2000, 1, 2), datetime(2000, 1, 3)], + ) + + # Interval + yield Case(Annotated[int, at.Interval(gt=4)], (5, 6, 1000), (4, 0, -1)) + yield Case(Annotated[int, at.Interval(gt=4, lt=10)], (5, 6), (4, 10, 1000, 0, -1)) + yield Case(Annotated[float, at.Interval(ge=0.5, le=1)], (0.5, 0.9, 1), (0.49, 1.1)) + yield Case( + Annotated[datetime, at.Interval(gt=datetime(2000, 1, 1), le=datetime(2000, 1, 3))], + [datetime(2000, 1, 2), datetime(2000, 1, 3)], + [datetime(2000, 1, 1), datetime(2000, 1, 4)], + ) + + yield Case(Annotated[int, at.MultipleOf(multiple_of=3)], (0, 3, 9), (1, 2, 4)) + yield Case(Annotated[float, at.MultipleOf(multiple_of=0.5)], (0, 0.5, 1, 1.5), (0.4, 1.1)) + + # lengths + + yield Case(Annotated[str, at.MinLen(3)], ('123', '1234', 'x' * 10), ('', '1', '12')) + yield Case(Annotated[str, at.Len(3)], ('123', '1234', 'x' * 10), ('', '1', '12')) + yield Case(Annotated[List[int], at.MinLen(3)], ([1, 2, 3], [1, 2, 3, 4], [1] * 10), ([], [1], [1, 2])) + yield Case(Annotated[List[int], at.Len(3)], ([1, 2, 3], [1, 2, 3, 4], [1] * 10), ([], [1], [1, 2])) + + yield Case(Annotated[str, at.MaxLen(4)], ('', '1234'), ('12345', 'x' * 10)) + yield Case(Annotated[str, at.Len(0, 4)], ('', '1234'), ('12345', 'x' * 10)) + yield Case(Annotated[List[str], at.MaxLen(4)], ([], ['a', 'bcdef'], ['a', 'b', 'c']), (['a'] * 5, ['b'] * 10)) + yield Case(Annotated[List[str], at.Len(0, 4)], ([], ['a', 'bcdef'], ['a', 'b', 'c']), (['a'] * 5, ['b'] * 10)) + + yield Case(Annotated[str, at.Len(3, 5)], ('123', '12345'), ('', '1', '12', '123456', 'x' * 10)) + yield Case(Annotated[str, at.Len(3, 3)], ('123',), ('12', '1234')) + + yield Case(Annotated[Dict[int, int], at.Len(2, 3)], [{1: 1, 2: 2}], [{}, {1: 1}, {1: 1, 2: 2, 3: 3, 4: 4}]) + yield Case(Annotated[Set[int], at.Len(2, 3)], ({1, 2}, {1, 2, 3}), (set(), {1}, {1, 2, 3, 4})) + yield Case(Annotated[Tuple[int, ...], 
at.Len(2, 3)], ((1, 2), (1, 2, 3)), ((), (1,), (1, 2, 3, 4))) + + # Timezone + + yield Case( + Annotated[datetime, at.Timezone(None)], [datetime(2000, 1, 1)], [datetime(2000, 1, 1, tzinfo=timezone.utc)] + ) + yield Case( + Annotated[datetime, at.Timezone(...)], [datetime(2000, 1, 1, tzinfo=timezone.utc)], [datetime(2000, 1, 1)] + ) + yield Case( + Annotated[datetime, at.Timezone(timezone.utc)], + [datetime(2000, 1, 1, tzinfo=timezone.utc)], + [datetime(2000, 1, 1), datetime(2000, 1, 1, tzinfo=timezone(timedelta(hours=6)))], + ) + yield Case( + Annotated[datetime, at.Timezone('Europe/London')], + [datetime(2000, 1, 1, tzinfo=timezone(timedelta(0), name='Europe/London'))], + [datetime(2000, 1, 1), datetime(2000, 1, 1, tzinfo=timezone(timedelta(hours=6)))], + ) + + # predicate types + + yield Case(at.LowerCase[str], ['abc', 'foobar'], ['', 'A', 'Boom']) + yield Case(at.UpperCase[str], ['ABC', 'DEFO'], ['', 'a', 'abc', 'AbC']) + yield Case(at.IsDigits[str], ['123'], ['', 'ab', 'a1b2']) + yield Case(at.IsAscii[str], ['123', 'foo bar'], ['£100', '😊', 'whatever 👀']) + + yield Case(Annotated[int, at.Predicate(lambda x: x % 2 == 0)], [0, 2, 4], [1, 3, 5]) + + yield Case(at.IsFinite[float], [1.23], [math.nan, math.inf, -math.inf]) + yield Case(at.IsNotFinite[float], [math.nan, math.inf], [1.23]) + yield Case(at.IsNan[float], [math.nan], [1.23, math.inf]) + yield Case(at.IsNotNan[float], [1.23, math.inf], [math.nan]) + yield Case(at.IsInfinite[float], [math.inf], [math.nan, 1.23]) + yield Case(at.IsNotInfinite[float], [math.nan, 1.23], [math.inf]) + + # check stacked predicates + yield Case(at.IsInfinite[Annotated[float, at.Predicate(lambda x: x > 0)]], [math.inf], [-math.inf, 1.23, math.nan]) + + # doc + yield Case(Annotated[int, at.doc("A number")], [1, 2], []) + + # custom GroupedMetadata + class MyCustomGroupedMetadata(at.GroupedMetadata): + def __iter__(self) -> Iterator[at.Predicate]: + yield at.Predicate(lambda x: float(x).is_integer()) + + yield Case(Annotated[float, MyCustomGroupedMetadata()], [0, 2.0], [0.01, 1.5]) diff --git a/venv/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/LICENSE b/venv/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..033c86b7a40a331f281bd406e991ef1db597c208 --- /dev/null +++ b/venv/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/LICENSE @@ -0,0 +1,13 @@ +Copyright 2016-2020 aio-libs collaboration. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/venv/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/METADATA b/venv/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..d8dd6d12d6d35bcff6b00d4b3ed960f5eef4b9d0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/METADATA
@@ -0,0 +1,131 @@
+Metadata-Version: 2.1
+Name: async-timeout
+Version: 4.0.3
+Summary: Timeout context manager for asyncio programs
+Home-page: https://github.com/aio-libs/async-timeout
+Author: Andrew Svetlov
+Author-email: andrew.svetlov@gmail.com
+License: Apache 2
+Project-URL: Chat: Gitter, https://gitter.im/aio-libs/Lobby
+Project-URL: CI: GitHub Actions, https://github.com/aio-libs/async-timeout/actions
+Project-URL: Coverage: codecov, https://codecov.io/github/aio-libs/async-timeout
+Project-URL: GitHub: issues, https://github.com/aio-libs/async-timeout/issues
+Project-URL: GitHub: repo, https://github.com/aio-libs/async-timeout
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Framework :: AsyncIO
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Requires-Python: >=3.7
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+Requires-Dist: typing-extensions >=3.6.5 ; python_version < "3.8"
+
+async-timeout
+=============
+.. image:: https://travis-ci.com/aio-libs/async-timeout.svg?branch=master
+   :target: https://travis-ci.com/aio-libs/async-timeout
+.. image:: https://codecov.io/gh/aio-libs/async-timeout/branch/master/graph/badge.svg
+   :target: https://codecov.io/gh/aio-libs/async-timeout
+.. image:: https://img.shields.io/pypi/v/async-timeout.svg
+   :target: https://pypi.python.org/pypi/async-timeout
+.. image:: https://badges.gitter.im/Join%20Chat.svg
+   :target: https://gitter.im/aio-libs/Lobby
+   :alt: Chat on Gitter
+
+asyncio-compatible timeout context manager.
+
+
+Usage example
+-------------
+
+
+The context manager is useful when you want to apply timeout
+logic around a block of code, or when ``asyncio.wait_for()`` is
+not suitable. It is also much faster than ``asyncio.wait_for()``,
+because ``timeout`` doesn't create a new task.
+
+The ``timeout(delay, *, loop=None)`` call returns a context manager
+that cancels a block on *timeout* expiring::
+
+    from async_timeout import timeout
+    async with timeout(1.5):
+        await inner()
+
+1. If ``inner()`` finishes faster than in ``1.5`` seconds, nothing
+   happens.
+2. Otherwise ``inner()`` is cancelled internally by sending
+   ``asyncio.CancelledError`` into it, but ``asyncio.TimeoutError`` is
+   raised outside of the context manager scope.
+
+The *timeout* parameter may be ``None`` to skip the timeout functionality.
+
+
+Alternatively, ``timeout_at(when)`` can be used for scheduling
+at an absolute time::
+
+    loop = asyncio.get_event_loop()
+    now = loop.time()
+
+    async with timeout_at(now + 1.5):
+        await inner()
+
+
+Please note: this is not POSIX time but a clock with an
+undefined starting base, e.g. the time of the system power on.
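+
+As a minimal, self-contained sketch of the pattern above (illustrative only,
+not part of the packaged documentation; ``asyncio.sleep()`` stands in for
+real work that overruns the deadline)::
+
+    import asyncio
+    from async_timeout import timeout_at
+
+    async def main():
+        loop = asyncio.get_running_loop()
+        try:
+            async with timeout_at(loop.time() + 0.5):
+                await asyncio.sleep(1)  # exceeds the 0.5 s deadline
+        except asyncio.TimeoutError:
+            print("timed out")
+
+    asyncio.run(main())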
+
+
+The context manager has an ``.expired`` property to check whether the timeout
+happened inside the context manager::
+
+    async with timeout(1.5) as cm:
+        await inner()
+    print(cm.expired)
+
+The property is ``True`` if ``inner()`` execution is cancelled by
+the timeout context manager.
+
+If the ``inner()`` call explicitly raises ``TimeoutError``, ``cm.expired``
+is ``False``.
+
+The scheduled deadline time is available as the ``.deadline`` property::
+
+    async with timeout(1.5) as cm:
+        cm.deadline
+
+A timeout that has not finished yet can be rescheduled with the ``shift()``
+or ``update()`` methods::
+
+    async with timeout(1.5) as cm:
+        cm.shift(1)  # add another second on waiting
+        cm.update(loop.time() + 5)  # reschedule to now+5 seconds
+
+Rescheduling is forbidden if the timeout has expired or after exiting the
+``async with`` code block.
+
+
+Installation
+------------
+
+::
+
+   $ pip install async-timeout
+
+The library is Python 3 only!
+
+
+
+Authors and License
+-------------------
+
+The module is written by Andrew Svetlov.
+
+It's *Apache 2* licensed and freely available.
diff --git a/venv/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/RECORD b/venv/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..d343f6fbfb7feddb711b102ef4b683a1c1fa539e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/RECORD
@@ -0,0 +1,10 @@
+async_timeout-4.0.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+async_timeout-4.0.3.dist-info/LICENSE,sha256=4Y17uPUT4sRrtYXJS1hb0wcg3TzLId2weG9y0WZY-Sw,568
+async_timeout-4.0.3.dist-info/METADATA,sha256=WQVcnDIXQ2ntebcm-vYjhNLg_VMeTWw13_ReT-U36J4,4209
+async_timeout-4.0.3.dist-info/RECORD,,
+async_timeout-4.0.3.dist-info/WHEEL,sha256=5sUXSg9e4bi7lTLOHcm6QEYwO5TIF1TNbTSVFVjcJcc,92
+async_timeout-4.0.3.dist-info/top_level.txt,sha256=9oM4e7Twq8iD_7_Q3Mz0E6GPIB6vJvRFo-UBwUQtBDU,14
+async_timeout-4.0.3.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+async_timeout/__init__.py,sha256=A0VOqDGQ3cCPFp0NZJKIbx_VRP1Y2xPtQOZebVIUB88,7242
+async_timeout/__pycache__/__init__.cpython-310.pyc,,
+async_timeout/py.typed,sha256=tyozzRT1fziXETDxokmuyt6jhOmtjUbnVNJdZcG7ik0,12
diff --git a/venv/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/WHEEL b/venv/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..2c08da084599354e5b2dbccb3ab716165e63d1a0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.41.1)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/venv/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ad29955ef909f5f38e96b6d8a6c9ba54d9bccd53
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/top_level.txt
@@ -0,0 +1 @@
+async_timeout
diff --git a/venv/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/zip-safe b/venv/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/zip-safe
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/zip-safe
@@ -0,0 +1 @@
+
diff --git
a/venv/lib/python3.10/site-packages/pyarrow/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be48be2e19d6b83b9062d2d94d5790206a6048b0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/__pycache__/_compute_docstrings.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/_compute_docstrings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7a3742033153b60dbe5b23e563d19e01a82bd5a Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/_compute_docstrings.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/__pycache__/_generated_version.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/_generated_version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe12cae36890e8ed853e8c3016b8399620e841d0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/_generated_version.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/__pycache__/acero.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/acero.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9e6fa8557877e35b3da49ba7bcd45e5aceb6c73 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/acero.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/__pycache__/benchmark.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/benchmark.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3781c9dc9528b4846bdbab6f95b6a3177f925031 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/benchmark.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/__pycache__/cffi.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/cffi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3fd964d1935b6861fffae19d18d4b3c8e015b944 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/cffi.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/__pycache__/compute.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/compute.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32537d30eeaa7a8a70083b52ed11f1fd3763ce9c Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/compute.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/__pycache__/conftest.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a298fb1f877e1a3bc4019299f131a0e879a8d748 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/conftest.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/__pycache__/csv.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/csv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d03fd51ac8e484d520be9fd443b5196f2cf12a1e Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/csv.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/__pycache__/cuda.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/cuda.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1778a5ca045745bb2b552adf97c5e39e5309779f Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/cuda.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/__pycache__/dataset.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/dataset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a35190111e8d2efb338ae2a33bdab18188a73ddf Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/dataset.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/__pycache__/feather.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/feather.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a01cf42c5a3549ec52c61e69bd5e4583b82a5685 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/feather.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/__pycache__/flight.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/flight.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..431a8b85a25577cea250bdce58b9e480e2a081b6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/flight.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/__pycache__/fs.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/fs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3a160a39516dd77b66dd295d1d9ae6e454bc133 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/fs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/__pycache__/ipc.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/ipc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cfb4f147e3845395d1c09a9db4f458b39bd27be8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/ipc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/__pycache__/json.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/json.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa84b949b48e4c0e27601e190fc8b1d4ab7ad06f Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/json.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/__pycache__/jvm.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/jvm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5c00cc870fffdebbbb311aa1a8292763a379f2c Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/jvm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/__pycache__/orc.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/orc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8050c47b4a562d2a16f98b3cafcb4bf277052bd7 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/orc.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pyarrow/__pycache__/pandas_compat.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/pandas_compat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5260e25bb02fc77970e9e83bbcb28af7c2266e87
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/pandas_compat.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pyarrow/__pycache__/substrait.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/substrait.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2adc2be059830591909f304b2e25bb0ff2d788a3
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/substrait.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pyarrow/__pycache__/types.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/types.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c70ca8866a4d27c8862079d70f5f3ce912cc1e2a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/types.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pyarrow/__pycache__/util.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb70bfe33513e3c807677ec102a146e6251cf95e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/__pycache__/util.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_base.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_base.h
new file mode 100644
index 0000000000000000000000000000000000000000..6411aebf804426cce5916410124f5f64a3865f94
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_base.h
@@ -0,0 +1,301 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <iosfwd>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "arrow/array/data.h"
+#include "arrow/buffer.h"
+#include "arrow/compare.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/util/bit_util.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+#include "arrow/visitor.h"
+
+namespace arrow {
+
+// ----------------------------------------------------------------------
+// User array accessor types
+
+/// \brief Array base type
+/// Immutable data array with some logical type and some length.
+///
+/// Any memory is owned by the respective Buffer instance (or its parents).
+/// +/// The base class is only required to have a null bitmap buffer if the null +/// count is greater than 0 +/// +/// If known, the null count can be provided in the base Array constructor. If +/// the null count is not known, pass -1 to indicate that the null count is to +/// be computed on the first call to null_count() +class ARROW_EXPORT Array { + public: + virtual ~Array() = default; + + /// \brief Return true if value at index is null. Does not boundscheck + bool IsNull(int64_t i) const { return !IsValid(i); } + + /// \brief Return true if value at index is valid (not null). Does not + /// boundscheck + bool IsValid(int64_t i) const { + if (null_bitmap_data_ != NULLPTR) { + return bit_util::GetBit(null_bitmap_data_, i + data_->offset); + } + // Dispatching with a few conditionals like this makes IsNull more + // efficient for how it is used in practice. Making IsNull virtual + // would add a vtable lookup to every call and prevent inlining + + // a potential inner-branch removal. + if (type_id() == Type::SPARSE_UNION) { + return !internal::IsNullSparseUnion(*data_, i); + } + if (type_id() == Type::DENSE_UNION) { + return !internal::IsNullDenseUnion(*data_, i); + } + if (type_id() == Type::RUN_END_ENCODED) { + return !internal::IsNullRunEndEncoded(*data_, i); + } + return data_->null_count != data_->length; + } + + /// \brief Return a Scalar containing the value of this array at i + Result> GetScalar(int64_t i) const; + + /// Size in the number of elements this array contains. + int64_t length() const { return data_->length; } + + /// A relative position into another array's data, to enable zero-copy + /// slicing. This value defaults to zero + int64_t offset() const { return data_->offset; } + + /// The number of null entries in the array. If the null count was not known + /// at time of construction (and set to a negative value), then the null + /// count will be computed and cached on the first invocation of this + /// function + int64_t null_count() const; + + /// \brief Computes the logical null count for arrays of all types including + /// those that do not have a validity bitmap like union and run-end encoded + /// arrays + /// + /// If the array has a validity bitmap, this function behaves the same as + /// null_count(). For types that have no validity bitmap, this function will + /// recompute the null count every time it is called. + /// + /// \see GetNullCount + int64_t ComputeLogicalNullCount() const; + + const std::shared_ptr& type() const { return data_->type; } + Type::type type_id() const { return data_->type->id(); } + + /// Buffer for the validity (null) bitmap, if any. Note that Union types + /// never have a null bitmap. + /// + /// Note that for `null_count == 0` or for null type, this will be null. + /// This buffer does not account for any slice offset + const std::shared_ptr& null_bitmap() const { return data_->buffers[0]; } + + /// Raw pointer to the null bitmap. + /// + /// Note that for `null_count == 0` or for null type, this will be null. 
+  /// This buffer does not account for any slice offset
+  const uint8_t* null_bitmap_data() const { return null_bitmap_data_; }
+
+  /// Equality comparison with another array
+  bool Equals(const Array& arr, const EqualOptions& = EqualOptions::Defaults()) const;
+  bool Equals(const std::shared_ptr<Array>& arr,
+              const EqualOptions& = EqualOptions::Defaults()) const;
+
+  /// \brief Return the formatted unified diff of arrow::Diff between this
+  /// Array and another Array
+  std::string Diff(const Array& other) const;
+
+  /// Approximate equality comparison with another array
+  ///
+  /// epsilon is only used if this is FloatArray or DoubleArray
+  bool ApproxEquals(const std::shared_ptr<Array>& arr,
+                    const EqualOptions& = EqualOptions::Defaults()) const;
+  bool ApproxEquals(const Array& arr,
+                    const EqualOptions& = EqualOptions::Defaults()) const;
+
+  /// Compare if the range of slots specified are equal for the given array and
+  /// this array. end_idx exclusive. This method does not bounds check.
+  bool RangeEquals(int64_t start_idx, int64_t end_idx, int64_t other_start_idx,
+                   const Array& other,
+                   const EqualOptions& = EqualOptions::Defaults()) const;
+  bool RangeEquals(int64_t start_idx, int64_t end_idx, int64_t other_start_idx,
+                   const std::shared_ptr<Array>& other,
+                   const EqualOptions& = EqualOptions::Defaults()) const;
+  bool RangeEquals(const Array& other, int64_t start_idx, int64_t end_idx,
+                   int64_t other_start_idx,
+                   const EqualOptions& = EqualOptions::Defaults()) const;
+  bool RangeEquals(const std::shared_ptr<Array>& other, int64_t start_idx,
+                   int64_t end_idx, int64_t other_start_idx,
+                   const EqualOptions& = EqualOptions::Defaults()) const;
+
+  /// \brief Apply the ArrayVisitor::Visit() method specialized to the array type
+  Status Accept(ArrayVisitor* visitor) const;
+
+  /// Construct a zero-copy view of this array with the given type.
+  ///
+  /// This method checks if the types are layout-compatible.
+  /// Nested types are traversed in depth-first order. Data buffers must have
+  /// the same item sizes, even though the logical types may be different.
+  /// An error is returned if the types are not layout-compatible.
+  Result<std::shared_ptr<Array>> View(const std::shared_ptr<DataType>& type) const;
+
+  /// \brief Construct a copy of the array with all buffers on destination
+  /// Memory Manager
+  ///
+  /// This method recursively copies the array's buffers and those of its children
+  /// onto the destination MemoryManager device and returns the new Array.
+  Result<std::shared_ptr<Array>> CopyTo(const std::shared_ptr<MemoryManager>& to) const;
+
+  /// \brief Construct a new array attempting to zero-copy view if possible.
+  ///
+  /// Like CopyTo this method recursively goes through all of the array's buffers
+  /// and those of its children and first attempts to create zero-copy
+  /// views on the destination MemoryManager device. If it can't, it falls back
+  /// to performing a copy. See Buffer::ViewOrCopy.
+  Result<std::shared_ptr<Array>> ViewOrCopyTo(
+      const std::shared_ptr<MemoryManager>& to) const;
+
+  /// Construct a zero-copy slice of the array with the indicated offset and
+  /// length
+  ///
+  /// \param[in] offset the position of the first element in the constructed
+  /// slice
+  /// \param[in] length the length of the slice. If there are not enough
+  /// elements in the array, the length will be adjusted accordingly
+  ///
+  /// \return a new object wrapped in std::shared_ptr<Array>
+  std::shared_ptr<Array> Slice(int64_t offset, int64_t length) const;
+
+  /// Slice from offset until end of the array
+  std::shared_ptr<Array> Slice(int64_t offset) const;
+
+  /// Input-checking variant of Array::Slice
+  Result<std::shared_ptr<Array>> SliceSafe(int64_t offset, int64_t length) const;
+  /// Input-checking variant of Array::Slice
+  Result<std::shared_ptr<Array>> SliceSafe(int64_t offset) const;
+
+  const std::shared_ptr<ArrayData>& data() const { return data_; }
+
+  int num_fields() const { return static_cast<int>(data_->child_data.size()); }
+
+  /// \return PrettyPrint representation of array suitable for debugging
+  std::string ToString() const;
+
+  /// \brief Perform cheap validation checks to determine obvious inconsistencies
+  /// within the array's internal data.
+  ///
+  /// This is O(k) where k is the number of descendents.
+  ///
+  /// \return Status
+  Status Validate() const;
+
+  /// \brief Perform extensive validation checks to determine inconsistencies
+  /// within the array's internal data.
+  ///
+  /// This is potentially O(k*n) where k is the number of descendents and n
+  /// is the array length.
+  ///
+  /// \return Status
+  Status ValidateFull() const;
+
+ protected:
+  Array() = default;
+  ARROW_DEFAULT_MOVE_AND_ASSIGN(Array);
+
+  std::shared_ptr<ArrayData> data_;
+  const uint8_t* null_bitmap_data_ = NULLPTR;
+
+  /// Protected method for constructors
+  void SetData(const std::shared_ptr<ArrayData>& data) {
+    if (data->buffers.size() > 0) {
+      null_bitmap_data_ = data->GetValuesSafe<uint8_t>(0, /*offset=*/0);
+    } else {
+      null_bitmap_data_ = NULLPTR;
+    }
+    data_ = data;
+  }
+
+ private:
+  ARROW_DISALLOW_COPY_AND_ASSIGN(Array);
+
+  ARROW_FRIEND_EXPORT friend void PrintTo(const Array& x, std::ostream* os);
+};
+
+static inline std::ostream& operator<<(std::ostream& os, const Array& x) {
+  os << x.ToString();
+  return os;
+}
+
+/// Base class for non-nested arrays
+class ARROW_EXPORT FlatArray : public Array {
+ protected:
+  using Array::Array;
+};
+
+/// Base class for arrays of fixed-size logical types
+class ARROW_EXPORT PrimitiveArray : public FlatArray {
+ public:
+  PrimitiveArray(const std::shared_ptr<DataType>& type, int64_t length,
+                 const std::shared_ptr<Buffer>& data,
+                 const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
+                 int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  /// Does not account for any slice offset
+  const std::shared_ptr<Buffer>& values() const { return data_->buffers[1]; }
+
+ protected:
+  PrimitiveArray() : raw_values_(NULLPTR) {}
+
+  void SetData(const std::shared_ptr<ArrayData>& data) {
+    this->Array::SetData(data);
+    raw_values_ = data->GetValuesSafe<uint8_t>(1, /*offset=*/0);
+  }
+
+  explicit PrimitiveArray(const std::shared_ptr<ArrayData>& data) { SetData(data); }
+
+  const uint8_t* raw_values_;
+};
+
+/// Degenerate null type Array
+class ARROW_EXPORT NullArray : public FlatArray {
+ public:
+  using TypeClass = NullType;
+
+  explicit NullArray(const std::shared_ptr<ArrayData>& data) { SetData(data); }
+  explicit NullArray(int64_t length);
+
+ private:
+  void SetData(const std::shared_ptr<ArrayData>& data) {
+    null_bitmap_data_ = NULLPTR;
+    data->null_count = data->length;
+    data_ = data;
+  }
+};
+
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_nested.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_nested.h
new file mode 100644
index 0000000000000000000000000000000000000000..768a630e0af54da969c1dd9a00de75e7fada8b3c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_nested.h
@@ -0,0 +1,863 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Array accessor classes for List, LargeList, ListView, LargeListView, FixedSizeList,
+// Map, Struct, and Union
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "arrow/array/array_base.h"
+#include "arrow/array/data.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// \addtogroup nested-arrays
+///
+/// @{
+
+// ----------------------------------------------------------------------
+// VarLengthListLikeArray
+
+template <typename TYPE>
+class VarLengthListLikeArray;
+
+namespace internal {
+
+// Private helper for [Large]List[View]Array::SetData.
+// Unfortunately, trying to define VarLengthListLikeArray::SetData outside of this header
+// doesn't play well with MSVC.
+template <typename TYPE>
+void SetListData(VarLengthListLikeArray<TYPE>* self,
+                 const std::shared_ptr<ArrayData>& data,
+                 Type::type expected_type_id = TYPE::type_id);
+
+}  // namespace internal
+
+/// Base class for variable-sized list and list-view arrays, regardless of offset size.
+template <typename TYPE>
+class VarLengthListLikeArray : public Array {
+ public:
+  using TypeClass = TYPE;
+  using offset_type = typename TypeClass::offset_type;
+
+  const TypeClass* var_length_list_like_type() const { return this->list_type_; }
+
+  /// \brief Return array object containing the list's values
+  ///
+  /// Note that this buffer does not account for any slice offset or length.
+  const std::shared_ptr<Array>& values() const { return values_; }
+
+  /// Note that this buffer does not account for any slice offset or length.
+  const std::shared_ptr<Buffer>& value_offsets() const { return data_->buffers[1]; }
+
+  const std::shared_ptr<DataType>& value_type() const { return list_type_->value_type(); }
+
+  /// Return pointer to raw value offsets accounting for any slice offset
+  const offset_type* raw_value_offsets() const {
+    return raw_value_offsets_ + data_->offset;
+  }
+
+  // The following functions will not perform boundschecking
+
+  offset_type value_offset(int64_t i) const {
+    return raw_value_offsets_[i + data_->offset];
+  }
+
+  /// \brief Return the size of the value at a particular index
+  ///
+  /// Since non-empty null lists and list-views are possible, avoid calling this
+  /// function when the list at slot i is null.
+  ///
+  /// \pre IsValid(i)
+  virtual offset_type value_length(int64_t i) const = 0;
+
+  /// \pre IsValid(i)
+  std::shared_ptr<Array> value_slice(int64_t i) const {
+    return values_->Slice(value_offset(i), value_length(i));
+  }
+
+ protected:
+  friend void internal::SetListData<TYPE>(VarLengthListLikeArray<TYPE>* self,
+                                          const std::shared_ptr<ArrayData>& data,
+                                          Type::type expected_type_id);
+
+  const TypeClass* list_type_ = NULLPTR;
+  std::shared_ptr<Array> values_;
+  const offset_type* raw_value_offsets_ = NULLPTR;
+};
+
+// ----------------------------------------------------------------------
+// ListArray / LargeListArray
+
+template <typename TYPE>
+class BaseListArray : public VarLengthListLikeArray<TYPE> {
+ public:
+  using TypeClass = TYPE;
+  using offset_type = typename TYPE::offset_type;
+
+  const TypeClass* list_type() const { return this->var_length_list_like_type(); }
+
+  /// \brief Return the size of the value at a particular index
+  ///
+  /// Since non-empty null lists are possible, avoid calling this
+  /// function when the list at slot i is null.
+  ///
+  /// \pre IsValid(i)
+  offset_type value_length(int64_t i) const final {
+    i += this->data_->offset;
+    return this->raw_value_offsets_[i + 1] - this->raw_value_offsets_[i];
+  }
+};
+
+/// Concrete Array class for list data
+class ARROW_EXPORT ListArray : public BaseListArray<ListType> {
+ public:
+  explicit ListArray(std::shared_ptr<ArrayData> data);
+
+  ListArray(std::shared_ptr<DataType> type, int64_t length,
+            std::shared_ptr<Buffer> value_offsets, std::shared_ptr<Array> values,
+            std::shared_ptr<Buffer> null_bitmap = NULLPTR,
+            int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  /// \brief Construct ListArray from array of offsets and child value array
+  ///
+  /// This function does the bare minimum of validation of the offsets and
+  /// input types, and will allocate a new offsets array if necessary (i.e. if
+  /// the offsets contain any nulls). If the offsets do not have nulls, they
+  /// are assumed to be well-formed.
+  ///
+  /// If a null_bitmap is not provided, the nulls will be inferred from the offsets'
+  /// null bitmap. But if a null_bitmap is provided, the offsets array can't have nulls.
+  ///
+  /// And when a null_bitmap is provided, the offsets array cannot be a slice (i.e. an
+  /// array with offset() > 0).
+  ///
+  /// \param[in] offsets Array containing n + 1 offsets encoding length and
+  /// size. Must be of int32 type
+  /// \param[in] values Array containing list values
+  /// \param[in] pool MemoryPool in case new offsets array needs to be
+  /// allocated because of null values
+  /// \param[in] null_bitmap Optional validity bitmap
+  /// \param[in] null_count Optional null count in null_bitmap
+  static Result<std::shared_ptr<ListArray>> FromArrays(
+      const Array& offsets, const Array& values, MemoryPool* pool = default_memory_pool(),
+      std::shared_ptr<Buffer> null_bitmap = NULLPTR,
+      int64_t null_count = kUnknownNullCount);
+
+  static Result<std::shared_ptr<ListArray>> FromArrays(
+      std::shared_ptr<DataType> type, const Array& offsets, const Array& values,
+      MemoryPool* pool = default_memory_pool(),
+      std::shared_ptr<Buffer> null_bitmap = NULLPTR,
+      int64_t null_count = kUnknownNullCount);
+
+  /// \brief Build a ListArray from a ListViewArray
+  static Result<std::shared_ptr<ListArray>> FromListView(const ListViewArray& source,
+                                                         MemoryPool* pool);
+
+  /// \brief Return an Array that is a concatenation of the lists in this array.
+  ///
+  /// Note that it's different from `values()` in that it takes into
+  /// consideration this array's offsets as well as null elements backed
+  /// by non-empty lists (they are skipped, thus copying may be needed).
+ Result> Flatten( + MemoryPool* memory_pool = default_memory_pool()) const; + + /// \brief Return list offsets as an Int32Array + /// + /// The returned array will not have a validity bitmap, so you cannot expect + /// to pass it to ListArray::FromArrays() and get back the same list array + /// if the original one has nulls. + std::shared_ptr offsets() const; + + protected: + // This constructor defers SetData to a derived array class + ListArray() = default; + + void SetData(const std::shared_ptr& data); +}; + +/// Concrete Array class for large list data (with 64-bit offsets) +class ARROW_EXPORT LargeListArray : public BaseListArray { + public: + explicit LargeListArray(const std::shared_ptr& data); + + LargeListArray(const std::shared_ptr& type, int64_t length, + const std::shared_ptr& value_offsets, + const std::shared_ptr& values, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// \brief Construct LargeListArray from array of offsets and child value array + /// + /// This function does the bare minimum of validation of the offsets and + /// input types, and will allocate a new offsets array if necessary (i.e. if + /// the offsets contain any nulls). If the offsets do not have nulls, they + /// are assumed to be well-formed. + /// + /// If a null_bitmap is not provided, the nulls will be inferred from the offsets' + /// null bitmap. But if a null_bitmap is provided, the offsets array can't have nulls. + /// + /// And when a null_bitmap is provided, the offsets array cannot be a slice (i.e. an + /// array with offset() > 0). + /// + /// \param[in] offsets Array containing n + 1 offsets encoding length and + /// size. Must be of int64 type + /// \param[in] values Array containing list values + /// \param[in] pool MemoryPool in case new offsets array needs to be + /// allocated because of null values + /// \param[in] null_bitmap Optional validity bitmap + /// \param[in] null_count Optional null count in null_bitmap + static Result> FromArrays( + const Array& offsets, const Array& values, MemoryPool* pool = default_memory_pool(), + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + static Result> FromArrays( + std::shared_ptr type, const Array& offsets, const Array& values, + MemoryPool* pool = default_memory_pool(), + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + /// \brief Build a LargeListArray from a LargeListViewArray + static Result> FromListView( + const LargeListViewArray& source, MemoryPool* pool); + + /// \brief Return an Array that is a concatenation of the lists in this array. + /// + /// Note that it's different from `values()` in that it takes into + /// consideration of this array's offsets as well as null elements backed + /// by non-empty lists (they are skipped, thus copying may be needed). 
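+ /// + /// (Same semantics as ListArray::Flatten; the difference is the int64 offsets, + /// which allow addressing child values beyond 2^31 - 1 elements.)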
+ Result> Flatten( + MemoryPool* memory_pool = default_memory_pool()) const; + + /// \brief Return list offsets as an Int64Array + std::shared_ptr offsets() const; + + protected: + void SetData(const std::shared_ptr& data); +}; + +// ---------------------------------------------------------------------- +// ListViewArray / LargeListViewArray + +template +class BaseListViewArray : public VarLengthListLikeArray { + public: + using TypeClass = TYPE; + using offset_type = typename TYPE::offset_type; + + const TypeClass* list_view_type() const { return this->var_length_list_like_type(); } + + /// \brief Note that this buffer does not account for any slice offset or length. + const std::shared_ptr& value_sizes() const { return this->data_->buffers[2]; } + + /// \brief Return pointer to raw value offsets accounting for any slice offset + const offset_type* raw_value_sizes() const { + return raw_value_sizes_ + this->data_->offset; + } + + /// \brief Return the size of the value at a particular index + /// + /// This should not be called if the list-view at slot i is null. + /// The returned size in those cases could be any value from 0 to the + /// length of the child values array. + /// + /// \pre IsValid(i) + offset_type value_length(int64_t i) const final { + return this->raw_value_sizes_[i + this->data_->offset]; + } + + protected: + const offset_type* raw_value_sizes_ = NULLPTR; +}; + +/// \brief Concrete Array class for list-view data +class ARROW_EXPORT ListViewArray : public BaseListViewArray { + public: + explicit ListViewArray(std::shared_ptr data); + + ListViewArray(std::shared_ptr type, int64_t length, + std::shared_ptr value_offsets, + std::shared_ptr value_sizes, std::shared_ptr values, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// \brief Construct ListViewArray from array of offsets, sizes, and child + /// value array + /// + /// Construct a ListViewArray using buffers from offsets and sizes arrays + /// that project views into the child values array. + /// + /// This function does the bare minimum of validation of the offsets/sizes and + /// input types. The offset and length of the offsets and sizes arrays must + /// match and that will be checked, but their contents will be assumed to be + /// well-formed. + /// + /// If a null_bitmap is not provided, the nulls will be inferred from the + /// offsets's null bitmap. But if a null_bitmap is provided, the offsets array + /// can't have nulls. + /// + /// And when a null_bitmap is provided, neither the offsets or sizes array can be a + /// slice (i.e. an array with offset() > 0). + /// + /// \param[in] offsets An array of int32 offsets into the values array. NULL values are + /// supported if the corresponding values in sizes is NULL or 0. + /// \param[in] sizes An array containing the int32 sizes of every view. NULL values are + /// taken to represent a NULL list-view in the array being created. 
+ /// \param[in] values Array containing list values + /// \param[in] pool MemoryPool + /// \param[in] null_bitmap Optional validity bitmap + /// \param[in] null_count Optional null count in null_bitmap + static Result> FromArrays( + const Array& offsets, const Array& sizes, const Array& values, + MemoryPool* pool = default_memory_pool(), + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + static Result> FromArrays( + std::shared_ptr type, const Array& offsets, const Array& sizes, + const Array& values, MemoryPool* pool = default_memory_pool(), + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + /// \brief Build a ListViewArray from a ListArray + static Result> FromList(const ListArray& list_array, + MemoryPool* pool); + + /// \brief Return an Array that is a concatenation of the list-views in this array. + /// + /// Note that it's different from `values()` in that it takes into + /// consideration this array's offsets (which can be in any order) + /// and sizes. Nulls are skipped. + /// + /// This function invokes Concatenate() if list-views are non-contiguous. It + /// will try to minimize the number of array slices passed to Concatenate() by + /// maximizing the size of each slice (containing as many contiguous + /// list-views as possible). + Result> Flatten( + MemoryPool* memory_pool = default_memory_pool()) const; + + /// \brief Return list-view offsets as an Int32Array + /// + /// The returned array will not have a validity bitmap, so you cannot expect + /// to pass it to ListArray::FromArrays() and get back the same list array + /// if the original one has nulls. + std::shared_ptr offsets() const; + + /// \brief Return list-view sizes as an Int32Array + /// + /// The returned array will not have a validity bitmap, so you cannot expect + /// to pass it to ListViewArray::FromArrays() and get back the same list + /// array if the original one has nulls. + std::shared_ptr sizes() const; + + protected: + // This constructor defers SetData to a derived array class + ListViewArray() = default; + + void SetData(const std::shared_ptr& data); +}; + +/// \brief Concrete Array class for large list-view data (with 64-bit offsets +/// and sizes) +class ARROW_EXPORT LargeListViewArray : public BaseListViewArray { + public: + explicit LargeListViewArray(std::shared_ptr data); + + LargeListViewArray(std::shared_ptr type, int64_t length, + std::shared_ptr value_offsets, + std::shared_ptr value_sizes, std::shared_ptr values, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// \brief Construct LargeListViewArray from array of offsets, sizes, and child + /// value array + /// + /// Construct an LargeListViewArray using buffers from offsets and sizes arrays + /// that project views into the values array. + /// + /// This function does the bare minimum of validation of the offsets/sizes and + /// input types. The offset and length of the offsets and sizes arrays must + /// match and that will be checked, but their contents will be assumed to be + /// well-formed. + /// + /// If a null_bitmap is not provided, the nulls will be inferred from the offsets' or + /// sizes' null bitmap. Only one of these two is allowed to have a null bitmap. But if a + /// null_bitmap is provided, the offsets array and the sizes array can't have nulls. + /// + /// And when a null_bitmap is provided, neither the offsets or sizes array can be a + /// slice (i.e. an array with offset() > 0). 
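+ /// + /// For example (illustrative): offsets [2, 0] with sizes [2, 3] over values + /// [a, b, c, d] describe the list-views [c, d] and [a, b, c]; unlike list + /// offsets, view offsets need not be sorted and views may overlap.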
+ /// + /// \param[in] offsets An array of int64 offsets into the values array. NULL values are + /// supported if the corresponding values in sizes is NULL or 0. + /// \param[in] sizes An array containing the int64 sizes of every view. NULL values are + /// taken to represent a NULL list-view in the array being created. + /// \param[in] values Array containing list values + /// \param[in] pool MemoryPool + /// \param[in] null_bitmap Optional validity bitmap + /// \param[in] null_count Optional null count in null_bitmap + static Result> FromArrays( + const Array& offsets, const Array& sizes, const Array& values, + MemoryPool* pool = default_memory_pool(), + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + static Result> FromArrays( + std::shared_ptr type, const Array& offsets, const Array& sizes, + const Array& values, MemoryPool* pool = default_memory_pool(), + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + /// \brief Build a LargeListViewArray from a LargeListArray + static Result> FromList( + const LargeListArray& list_array, MemoryPool* pool); + + /// \brief Return an Array that is a concatenation of the large list-views in this + /// array. + /// + /// Note that it's different from `values()` in that it takes into + /// consideration this array's offsets (which can be in any order) + /// and sizes. Nulls are skipped. + Result> Flatten( + MemoryPool* memory_pool = default_memory_pool()) const; + + /// \brief Return list-view offsets as an Int64Array + /// + /// The returned array will not have a validity bitmap, so you cannot expect + /// to pass it to LargeListArray::FromArrays() and get back the same list array + /// if the original one has nulls. + std::shared_ptr offsets() const; + + /// \brief Return list-view sizes as an Int64Array + /// + /// The returned array will not have a validity bitmap, so you cannot expect + /// to pass it to LargeListViewArray::FromArrays() and get back the same list + /// array if the original one has nulls. 
+ std::shared_ptr sizes() const; + + protected: + // This constructor defers SetData to a derived array class + LargeListViewArray() = default; + + void SetData(const std::shared_ptr& data); +}; + +// ---------------------------------------------------------------------- +// MapArray + +/// Concrete Array class for map data +/// +/// NB: "value" in this context refers to a pair of a key and the corresponding item +class ARROW_EXPORT MapArray : public ListArray { + public: + using TypeClass = MapType; + + explicit MapArray(const std::shared_ptr& data); + + MapArray(const std::shared_ptr& type, int64_t length, + const std::shared_ptr& value_offsets, + const std::shared_ptr& keys, const std::shared_ptr& items, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + MapArray(const std::shared_ptr& type, int64_t length, BufferVector buffers, + const std::shared_ptr& keys, const std::shared_ptr& items, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + MapArray(const std::shared_ptr& type, int64_t length, + const std::shared_ptr& value_offsets, + const std::shared_ptr& values, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// \brief Construct MapArray from array of offsets and child key, item arrays + /// + /// This function does the bare minimum of validation of the offsets and + /// input types, and will allocate a new offsets array if necessary (i.e. if + /// the offsets contain any nulls). If the offsets do not have nulls, they + /// are assumed to be well-formed + /// + /// \param[in] offsets Array containing n + 1 offsets encoding length and + /// size. Must be of int32 type + /// \param[in] keys Array containing key values + /// \param[in] items Array containing item values + /// \param[in] pool MemoryPool in case new offsets array needs to be + /// allocated because of null values + static Result> FromArrays( + const std::shared_ptr& offsets, const std::shared_ptr& keys, + const std::shared_ptr& items, MemoryPool* pool = default_memory_pool()); + + static Result> FromArrays( + std::shared_ptr type, const std::shared_ptr& offsets, + const std::shared_ptr& keys, const std::shared_ptr& items, + MemoryPool* pool = default_memory_pool()); + + const MapType* map_type() const { return map_type_; } + + /// \brief Return array object containing all map keys + const std::shared_ptr& keys() const { return keys_; } + + /// \brief Return array object containing all mapped items + const std::shared_ptr& items() const { return items_; } + + /// Validate child data before constructing the actual MapArray. 
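+ /// + /// (Illustrative note: a map is laid out as list<entries: struct<key, item>>, + /// so the child data is expected to be a single struct child holding the key + /// and item arrays.)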
+ static Status ValidateChildData( + const std::vector>& child_data); + + protected: + void SetData(const std::shared_ptr& data); + + static Result> FromArraysInternal( + std::shared_ptr type, const std::shared_ptr& offsets, + const std::shared_ptr& keys, const std::shared_ptr& items, + MemoryPool* pool); + + private: + const MapType* map_type_; + std::shared_ptr keys_, items_; +}; + +// ---------------------------------------------------------------------- +// FixedSizeListArray + +/// Concrete Array class for fixed size list data +class ARROW_EXPORT FixedSizeListArray : public Array { + public: + using TypeClass = FixedSizeListType; + using offset_type = TypeClass::offset_type; + + explicit FixedSizeListArray(const std::shared_ptr& data); + + FixedSizeListArray(const std::shared_ptr& type, int64_t length, + const std::shared_ptr& values, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + const FixedSizeListType* list_type() const; + + /// \brief Return array object containing the list's values + const std::shared_ptr& values() const; + + const std::shared_ptr& value_type() const; + + // The following functions will not perform boundschecking + int64_t value_offset(int64_t i) const { + i += data_->offset; + return list_size_ * i; + } + /// \brief Return the fixed-size of the values + /// + /// No matter the value of the index parameter, the result is the same. + /// So even when the value at slot i is null, this function will return a + /// non-zero size. + /// + /// \pre IsValid(i) + int32_t value_length(int64_t i = 0) const { + ARROW_UNUSED(i); + return list_size_; + } + /// \pre IsValid(i) + std::shared_ptr value_slice(int64_t i) const { + return values_->Slice(value_offset(i), value_length(i)); + } + + /// \brief Return an Array that is a concatenation of the lists in this array. + /// + /// Note that it's different from `values()` in that it takes into + /// consideration null elements (they are skipped, thus copying may be needed). 
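+ /// + /// For example (illustrative): a fixed_size_list(int32, 2) array + /// [[1, 2], null, [5, 6]] flattens to [1, 2, 5, 6], skipping the two child + /// slots that back the null entry.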
+ Result> Flatten( + MemoryPool* memory_pool = default_memory_pool()) const; + + /// \brief Construct FixedSizeListArray from child value array and value_length + /// + /// \param[in] values Array containing list values + /// \param[in] list_size The fixed length of each list + /// \param[in] null_bitmap Optional validity bitmap + /// \param[in] null_count Optional null count in null_bitmap + /// \return Will have length equal to values.length() / list_size + static Result> FromArrays( + const std::shared_ptr& values, int32_t list_size, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + /// \brief Construct FixedSizeListArray from child value array and type + /// + /// \param[in] values Array containing list values + /// \param[in] type The fixed sized list type + /// \param[in] null_bitmap Optional validity bitmap + /// \param[in] null_count Optional null count in null_bitmap + /// \return Will have length equal to values.length() / type.list_size() + static Result> FromArrays( + const std::shared_ptr& values, std::shared_ptr type, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + protected: + void SetData(const std::shared_ptr& data); + int32_t list_size_; + + private: + std::shared_ptr values_; +}; + +// ---------------------------------------------------------------------- +// Struct + +/// Concrete Array class for struct data +class ARROW_EXPORT StructArray : public Array { + public: + using TypeClass = StructType; + + explicit StructArray(const std::shared_ptr& data); + + StructArray(const std::shared_ptr& type, int64_t length, + const std::vector>& children, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// \brief Return a StructArray from child arrays and field names. + /// + /// The length and data type are automatically inferred from the arguments. + /// There should be at least one child array. + static Result> Make( + const ArrayVector& children, const std::vector& field_names, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// \brief Return a StructArray from child arrays and fields. + /// + /// The length is automatically inferred from the arguments. + /// There should be at least one child array. This method does not + /// check that field types and child array types are consistent. + static Result> Make( + const ArrayVector& children, const FieldVector& fields, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + const StructType* struct_type() const; + + // Return a shared pointer in case the requestor desires to share ownership + // with this array. The returned array has its offset, length and null + // count adjusted. + const std::shared_ptr& field(int pos) const; + + const ArrayVector& fields() const; + + /// Returns null if name not found + std::shared_ptr GetFieldByName(const std::string& name) const; + + /// Indicate if field named `name` can be found unambiguously in the struct. + Status CanReferenceFieldByName(const std::string& name) const; + + /// Indicate if fields named `names` can be found unambiguously in the struct. 
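+ /// + /// For example (illustrative): referencing field `a` fails if the struct type + /// contains more than one field named `a`.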
+ Status CanReferenceFieldsByNames(const std::vector& names) const; + + /// \brief Flatten this array as a vector of arrays, one for each field + /// + /// \param[in] pool The pool to allocate null bitmaps from, if necessary + Result Flatten(MemoryPool* pool = default_memory_pool()) const; + + /// \brief Get one of the child arrays, combining its null bitmap + /// with the parent struct array's bitmap. + /// + /// \param[in] index Which child array to get + /// \param[in] pool The pool to allocate null bitmaps from, if necessary + Result> GetFlattenedField( + int index, MemoryPool* pool = default_memory_pool()) const; + + private: + // For caching boxed child data + // XXX This is not handled in a thread-safe manner. + mutable ArrayVector boxed_fields_; +}; + +// ---------------------------------------------------------------------- +// Union + +/// Base class for SparseUnionArray and DenseUnionArray +class ARROW_EXPORT UnionArray : public Array { + public: + using type_code_t = int8_t; + + /// Note that this buffer does not account for any slice offset + const std::shared_ptr& type_codes() const { return data_->buffers[1]; } + + const type_code_t* raw_type_codes() const { return raw_type_codes_ + data_->offset; } + + /// The logical type code of the value at index. + type_code_t type_code(int64_t i) const { return raw_type_codes_[i + data_->offset]; } + + /// The physical child id containing value at index. + int child_id(int64_t i) const { + return union_type_->child_ids()[raw_type_codes_[i + data_->offset]]; + } + + const UnionType* union_type() const { return union_type_; } + + UnionMode::type mode() const { return union_type_->mode(); } + + /// \brief Return the given field as an individual array. + /// + /// For sparse unions, the returned array has its offset, length and null + /// count adjusted. + std::shared_ptr field(int pos) const; + + protected: + void SetData(std::shared_ptr data); + + const type_code_t* raw_type_codes_; + const UnionType* union_type_; + + // For caching boxed child data + mutable std::vector> boxed_fields_; +}; + +/// Concrete Array class for sparse union data +class ARROW_EXPORT SparseUnionArray : public UnionArray { + public: + using TypeClass = SparseUnionType; + + explicit SparseUnionArray(std::shared_ptr data); + + SparseUnionArray(std::shared_ptr type, int64_t length, ArrayVector children, + std::shared_ptr type_ids, int64_t offset = 0); + + /// \brief Construct SparseUnionArray from type_ids and children + /// + /// This function does the bare minimum of validation of the input types. + /// + /// \param[in] type_ids An array of logical type ids for the union type + /// \param[in] children Vector of children Arrays containing the data for each type. + /// \param[in] type_codes Vector of type codes. + static Result> Make(const Array& type_ids, ArrayVector children, + std::vector type_codes) { + return Make(std::move(type_ids), std::move(children), std::vector{}, + std::move(type_codes)); + } + + /// \brief Construct SparseUnionArray with custom field names from type_ids and children + /// + /// This function does the bare minimum of validation of the input types. + /// + /// \param[in] type_ids An array of logical type ids for the union type + /// \param[in] children Vector of children Arrays containing the data for each type. + /// \param[in] field_names Vector of strings containing the name of each field. + /// \param[in] type_codes Vector of type codes. 
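+ /// + /// Illustrative sketch: with children {int32 child, utf8 child}, type_codes + /// {0, 1} and type_ids [0, 1, 0], slot i takes its value from position i of + /// the child selected by type_ids[i]; in a sparse union every child has the + /// same length as the union itself.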
+ static Result> Make(const Array& type_ids, ArrayVector children, + std::vector field_names = {}, + std::vector type_codes = {}); + + const SparseUnionType* union_type() const { + return internal::checked_cast(union_type_); + } + + /// \brief Get one of the child arrays, adjusting its null bitmap + /// where the union array type code does not match. + /// + /// \param[in] index Which child array to get (i.e. the physical index, not the type + /// code) \param[in] pool The pool to allocate null bitmaps from, if necessary + Result> GetFlattenedField( + int index, MemoryPool* pool = default_memory_pool()) const; + + protected: + void SetData(std::shared_ptr data); +}; + +/// \brief Concrete Array class for dense union data +/// +/// Note that union types do not have a validity bitmap +class ARROW_EXPORT DenseUnionArray : public UnionArray { + public: + using TypeClass = DenseUnionType; + + explicit DenseUnionArray(const std::shared_ptr& data); + + DenseUnionArray(std::shared_ptr type, int64_t length, ArrayVector children, + std::shared_ptr type_ids, + std::shared_ptr value_offsets = NULLPTR, int64_t offset = 0); + + /// \brief Construct DenseUnionArray from type_ids, value_offsets, and children + /// + /// This function does the bare minimum of validation of the offsets and + /// input types. + /// + /// \param[in] type_ids An array of logical type ids for the union type + /// \param[in] value_offsets An array of signed int32 values indicating the + /// relative offset into the respective child array for the type in a given slot. + /// The respective offsets for each child value array must be in order / increasing. + /// \param[in] children Vector of children Arrays containing the data for each type. + /// \param[in] type_codes Vector of type codes. + static Result> Make(const Array& type_ids, + const Array& value_offsets, + ArrayVector children, + std::vector type_codes) { + return Make(type_ids, value_offsets, std::move(children), std::vector{}, + std::move(type_codes)); + } + + /// \brief Construct DenseUnionArray with custom field names from type_ids, + /// value_offsets, and children + /// + /// This function does the bare minimum of validation of the offsets and + /// input types. + /// + /// \param[in] type_ids An array of logical type ids for the union type + /// \param[in] value_offsets An array of signed int32 values indicating the + /// relative offset into the respective child array for the type in a given slot. + /// The respective offsets for each child value array must be in order / increasing. + /// \param[in] children Vector of children Arrays containing the data for each type. + /// \param[in] field_names Vector of strings containing the name of each field. + /// \param[in] type_codes Vector of type codes. 
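+ /// + /// Illustrative sketch: type_ids [0, 1, 0] with value_offsets [0, 0, 1] select + /// child0[0], child1[0] and child0[1]; unlike a sparse union, each dense child + /// stores values only for its own slots.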
+ static Result> Make(const Array& type_ids, + const Array& value_offsets, + ArrayVector children, + std::vector field_names = {}, + std::vector type_codes = {}); + + const DenseUnionType* union_type() const { + return internal::checked_cast(union_type_); + } + + /// Note that this buffer does not account for any slice offset + const std::shared_ptr& value_offsets() const { return data_->buffers[2]; } + + int32_t value_offset(int64_t i) const { return raw_value_offsets_[i + data_->offset]; } + + const int32_t* raw_value_offsets() const { return raw_value_offsets_ + data_->offset; } + + protected: + const int32_t* raw_value_offsets_; + + void SetData(const std::shared_ptr& data); +}; + +/// @} + +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_run_end.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_run_end.h new file mode 100644 index 0000000000000000000000000000000000000000..b46b0855ab36776eec4e22cef1a35112e2d18fa8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_run_end.h @@ -0,0 +1,133 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Array accessor classes run-end encoded arrays + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/array/array_base.h" +#include "arrow/array/data.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/type_fwd.h" +#include "arrow/util/checked_cast.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \addtogroup run-end-encoded-arrays +/// +/// @{ + +// ---------------------------------------------------------------------- +// RunEndEncoded + +/// \brief Array type for run-end encoded data +class ARROW_EXPORT RunEndEncodedArray : public Array { + private: + std::shared_ptr run_ends_array_; + std::shared_ptr values_array_; + + public: + using TypeClass = RunEndEncodedType; + + explicit RunEndEncodedArray(const std::shared_ptr& data); + + /// \brief Construct a RunEndEncodedArray from all parameters + /// + /// The length and offset parameters refer to the dimensions of the logical + /// array which is the array we would get after expanding all the runs into + /// repeated values. As such, length can be much greater than the length of + /// the child run_ends and values arrays. 
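+ /// + /// For example (illustrative): run_ends [2, 5] with values [7, null] encode + /// the logical array [7, 7, null, null, null] of length 5 using only two + /// physical runs.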
+ RunEndEncodedArray(const std::shared_ptr& type, int64_t length, + const std::shared_ptr& run_ends, + const std::shared_ptr& values, int64_t offset = 0); + + /// \brief Construct a RunEndEncodedArray from all parameters + /// + /// The length and offset parameters refer to the dimensions of the logical + /// array which is the array we would get after expanding all the runs into + /// repeated values. As such, length can be much greater than the length of + /// the child run_ends and values arrays. + static Result> Make( + const std::shared_ptr& type, int64_t logical_length, + const std::shared_ptr& run_ends, const std::shared_ptr& values, + int64_t logical_offset = 0); + + /// \brief Construct a RunEndEncodedArray from values and run ends arrays + /// + /// The data type is automatically inferred from the arguments. + /// The run_ends and values arrays must have the same length. + static Result> Make( + int64_t logical_length, const std::shared_ptr& run_ends, + const std::shared_ptr& values, int64_t logical_offset = 0); + + protected: + void SetData(const std::shared_ptr& data); + + public: + /// \brief Returns an array holding the logical indexes of each run-end + /// + /// The physical offset to the array is applied. + const std::shared_ptr& run_ends() const { return run_ends_array_; } + + /// \brief Returns an array holding the values of each run + /// + /// The physical offset to the array is applied. + const std::shared_ptr& values() const { return values_array_; } + + /// \brief Returns an array holding the logical indexes of each run end + /// + /// If a non-zero logical offset is set, this function allocates a new + /// array and rewrites all the run end values to be relative to the logical + /// offset and cuts the end of the array to the logical length. + Result> LogicalRunEnds(MemoryPool* pool) const; + + /// \brief Returns an array holding the values of each run + /// + /// If a non-zero logical offset is set, this function allocates a new + /// array containing only the values within the logical range. + std::shared_ptr LogicalValues() const; + + /// \brief Find the physical offset of this REE array + /// + /// This function uses binary-search, so it has a O(log N) cost. + int64_t FindPhysicalOffset() const; + + /// \brief Find the physical length of this REE array + /// + /// The physical length of an REE is the number of physical values (and + /// run-ends) necessary to represent the logical range of values from offset + /// to length. + /// + /// Avoid calling this function if the physical length can be established in + /// some other way (e.g. when iterating over the runs sequentially until the + /// end). This function uses binary-search, so it has a O(log N) cost. + int64_t FindPhysicalLength() const; +}; + +/// @} + +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_base.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_base.h new file mode 100644 index 0000000000000000000000000000000000000000..11036797e014f499db6f34604fb3344bf8bb660d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_base.h @@ -0,0 +1,370 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include // IWYU pragma: keep +#include +#include +#include +#include +#include + +#include "arrow/array/array_base.h" +#include "arrow/array/array_primitive.h" +#include "arrow/buffer.h" +#include "arrow/buffer_builder.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +namespace internal { + +template +class ArrayBuilderExtraOps { + public: + /// \brief Append a value from an optional or null if it has no value. + Status AppendOrNull(const std::optional& value) { + auto* self = static_cast(this); + return value.has_value() ? self->Append(*value) : self->AppendNull(); + } + + /// \brief Append a value from an optional or null if it has no value. + /// + /// Unsafe methods don't check existing size. + void UnsafeAppendOrNull(const std::optional& value) { + auto* self = static_cast(this); + return value.has_value() ? self->UnsafeAppend(*value) : self->UnsafeAppendNull(); + } +}; + +} // namespace internal + +/// \defgroup numeric-builders Concrete builder subclasses for numeric types +/// @{ +/// @} + +/// \defgroup temporal-builders Concrete builder subclasses for temporal types +/// @{ +/// @} + +/// \defgroup binary-builders Concrete builder subclasses for binary types +/// @{ +/// @} + +/// \defgroup nested-builders Concrete builder subclasses for nested types +/// @{ +/// @} + +/// \defgroup dictionary-builders Concrete builder subclasses for dictionary types +/// @{ +/// @} + +/// \defgroup run-end-encoded-builders Concrete builder subclasses for run-end encoded +/// arrays +/// @{ +/// @} + +constexpr int64_t kMinBuilderCapacity = 1 << 5; +constexpr int64_t kListMaximumElements = std::numeric_limits::max() - 1; + +/// Base class for all data array builders. +/// +/// This class provides a facilities for incrementally building the null bitmap +/// (see Append methods) and as a side effect the current number of slots and +/// the null count. +/// +/// \note Users are expected to use builders as one of the concrete types below. +/// For example, ArrayBuilder* pointing to BinaryBuilder should be downcast before use. +class ARROW_EXPORT ArrayBuilder { + public: + explicit ArrayBuilder(MemoryPool* pool, int64_t alignment = kDefaultBufferAlignment) + : pool_(pool), alignment_(alignment), null_bitmap_builder_(pool, alignment) {} + + ARROW_DEFAULT_MOVE_AND_ASSIGN(ArrayBuilder); + + virtual ~ArrayBuilder() = default; + + /// For nested types. 
Since the objects are owned by this class instance, we + /// skip shared pointers and just return a raw pointer + ArrayBuilder* child(int i) { return children_[i].get(); } + + const std::shared_ptr& child_builder(int i) const { return children_[i]; } + + int num_children() const { return static_cast(children_.size()); } + + virtual int64_t length() const { return length_; } + int64_t null_count() const { return null_count_; } + int64_t capacity() const { return capacity_; } + + /// \brief Ensure that enough memory has been allocated to fit the indicated + /// number of total elements in the builder, including any that have already + /// been appended. Does not account for reallocations that may be due to + /// variable size data, like binary values. To make space for incremental + /// appends, use Reserve instead. + /// + /// \param[in] capacity the minimum number of total array values to + /// accommodate. Must be greater than the current capacity. + /// \return Status + virtual Status Resize(int64_t capacity); + + /// \brief Ensure that there is enough space allocated to append the indicated + /// number of elements without any further reallocation. Overallocation is + /// used in order to minimize the impact of incremental Reserve() calls. + /// Note that additional_capacity is relative to the current number of elements + /// rather than to the current capacity, so calls to Reserve() which are not + /// interspersed with addition of new elements may not increase the capacity. + /// + /// \param[in] additional_capacity the number of additional array values + /// \return Status + Status Reserve(int64_t additional_capacity) { + auto current_capacity = capacity(); + auto min_capacity = length() + additional_capacity; + if (min_capacity <= current_capacity) return Status::OK(); + + // leave growth factor up to BufferBuilder + auto new_capacity = BufferBuilder::GrowByFactor(current_capacity, min_capacity); + return Resize(new_capacity); + } + + /// Reset the builder. + virtual void Reset(); + + /// \brief Append a null value to builder + virtual Status AppendNull() = 0; + /// \brief Append a number of null values to builder + virtual Status AppendNulls(int64_t length) = 0; + + /// \brief Append a non-null value to builder + /// + /// The appended value is an implementation detail, but the corresponding + /// memory slot is guaranteed to be initialized. + /// This method is useful when appending a null value to a parent nested type. + virtual Status AppendEmptyValue() = 0; + + /// \brief Append a number of non-null values to builder + /// + /// The appended values are an implementation detail, but the corresponding + /// memory slot is guaranteed to be initialized. + /// This method is useful when appending null values to a parent nested type. + virtual Status AppendEmptyValues(int64_t length) = 0; + + /// \brief Append a value from a scalar + Status AppendScalar(const Scalar& scalar) { return AppendScalar(scalar, 1); } + virtual Status AppendScalar(const Scalar& scalar, int64_t n_repeats); + virtual Status AppendScalars(const ScalarVector& scalars); + + /// \brief Append a range of values from an array. + /// + /// The given array must be the same type as the builder. + virtual Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) { + return Status::NotImplemented("AppendArraySlice for builder for ", *type()); + } + + /// \brief Return result of builder as an internal generic ArrayData + /// object. 
Resets builder except for dictionary builder + /// + /// \param[out] out the finalized ArrayData object + /// \return Status + virtual Status FinishInternal(std::shared_ptr* out) = 0; + + /// \brief Return result of builder as an Array object. + /// + /// The builder is reset except for DictionaryBuilder. + /// + /// \param[out] out the finalized Array object + /// \return Status + Status Finish(std::shared_ptr* out); + + /// \brief Return result of builder as an Array object. + /// + /// The builder is reset except for DictionaryBuilder. + /// + /// \return The finalized Array object + Result> Finish(); + + /// \brief Return the type of the built Array + virtual std::shared_ptr type() const = 0; + + protected: + /// Append to null bitmap + Status AppendToBitmap(bool is_valid); + + /// Vector append. Treat each zero byte as a null. If valid_bytes is null + /// assume all of length bits are valid. + Status AppendToBitmap(const uint8_t* valid_bytes, int64_t length); + + /// Uniform append. Append N times the same validity bit. + Status AppendToBitmap(int64_t num_bits, bool value); + + /// Set the next length bits to not null (i.e. valid). + Status SetNotNull(int64_t length); + + // Unsafe operations (don't check capacity/don't resize) + + void UnsafeAppendNull() { UnsafeAppendToBitmap(false); } + + // Append to null bitmap, update the length + void UnsafeAppendToBitmap(bool is_valid) { + null_bitmap_builder_.UnsafeAppend(is_valid); + ++length_; + if (!is_valid) ++null_count_; + } + + // Vector append. Treat each zero byte as a null. If valid_bytes is null + // assume all of length bits are valid. + void UnsafeAppendToBitmap(const uint8_t* valid_bytes, int64_t length) { + if (valid_bytes == NULLPTR) { + return UnsafeSetNotNull(length); + } + null_bitmap_builder_.UnsafeAppend(valid_bytes, length); + length_ += length; + null_count_ = null_bitmap_builder_.false_count(); + } + + // Vector append. Copy from a given bitmap. If bitmap is null assume + // all of length bits are valid. + void UnsafeAppendToBitmap(const uint8_t* bitmap, int64_t offset, int64_t length) { + if (bitmap == NULLPTR) { + return UnsafeSetNotNull(length); + } + null_bitmap_builder_.UnsafeAppend(bitmap, offset, length); + length_ += length; + null_count_ = null_bitmap_builder_.false_count(); + } + + // Append the same validity value a given number of times. + void UnsafeAppendToBitmap(const int64_t num_bits, bool value) { + if (value) { + UnsafeSetNotNull(num_bits); + } else { + UnsafeSetNull(num_bits); + } + } + + void UnsafeAppendToBitmap(const std::vector& is_valid); + + // Set the next validity bits to not null (i.e. valid). + void UnsafeSetNotNull(int64_t length); + + // Set the next validity bits to null (i.e. invalid).
+ void UnsafeSetNull(int64_t length); + + static Status TrimBuffer(const int64_t bytes_filled, ResizableBuffer* buffer); + + /// \brief Finish to an array of the specified ArrayType + template + Status FinishTyped(std::shared_ptr* out) { + std::shared_ptr out_untyped; + ARROW_RETURN_NOT_OK(Finish(&out_untyped)); + *out = std::static_pointer_cast(std::move(out_untyped)); + return Status::OK(); + } + + // Check the requested capacity for validity + Status CheckCapacity(int64_t new_capacity) { + if (ARROW_PREDICT_FALSE(new_capacity < 0)) { + return Status::Invalid( + "Resize capacity must be positive (requested: ", new_capacity, ")"); + } + + if (ARROW_PREDICT_FALSE(new_capacity < length_)) { + return Status::Invalid("Resize cannot downsize (requested: ", new_capacity, + ", current length: ", length_, ")"); + } + + return Status::OK(); + } + + // Check for array type + Status CheckArrayType(const std::shared_ptr& expected_type, + const Array& array, const char* message); + Status CheckArrayType(Type::type expected_type, const Array& array, + const char* message); + + MemoryPool* pool_; + int64_t alignment_; + + TypedBufferBuilder null_bitmap_builder_; + int64_t null_count_ = 0; + + // Array length, so far. Also, the index of the next element to be added + int64_t length_ = 0; + int64_t capacity_ = 0; + + // Child value array builders. These are owned by this class + std::vector> children_; + + private: + ARROW_DISALLOW_COPY_AND_ASSIGN(ArrayBuilder); +}; + +/// \brief Construct an empty ArrayBuilder corresponding to the data +/// type +/// \param[in] pool the MemoryPool to use for allocations +/// \param[in] type the data type to create the builder for +/// \param[out] out the created ArrayBuilder +ARROW_EXPORT +Status MakeBuilder(MemoryPool* pool, const std::shared_ptr& type, + std::unique_ptr* out); + +inline Result> MakeBuilder( + const std::shared_ptr& type, MemoryPool* pool = default_memory_pool()) { + std::unique_ptr out; + ARROW_RETURN_NOT_OK(MakeBuilder(pool, type, &out)); + return std::move(out); +} + +/// \brief Construct an empty ArrayBuilder corresponding to the data +/// type, where any top-level or nested dictionary builders return the +/// exact index type specified by the type. +ARROW_EXPORT +Status MakeBuilderExactIndex(MemoryPool* pool, const std::shared_ptr& type, + std::unique_ptr* out); + +inline Result> MakeBuilderExactIndex( + const std::shared_ptr& type, MemoryPool* pool = default_memory_pool()) { + std::unique_ptr out; + ARROW_RETURN_NOT_OK(MakeBuilderExactIndex(pool, type, &out)); + return std::move(out); +} + +/// \brief Construct an empty DictionaryBuilder initialized optionally +/// with a preexisting dictionary +/// \param[in] pool the MemoryPool to use for allocations +/// \param[in] type the dictionary type to create the builder for +/// \param[in] dictionary the initial dictionary, if any. 
May be nullptr +/// \param[out] out the created ArrayBuilder +ARROW_EXPORT +Status MakeDictionaryBuilder(MemoryPool* pool, const std::shared_ptr& type, + const std::shared_ptr& dictionary, + std::unique_ptr* out); + +inline Result> MakeDictionaryBuilder( + const std::shared_ptr& type, const std::shared_ptr& dictionary, + MemoryPool* pool = default_memory_pool()) { + std::unique_ptr out; + ARROW_RETURN_NOT_OK(MakeDictionaryBuilder(pool, type, dictionary, &out)); + return std::move(out); +} + +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_dict.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_dict.h new file mode 100644 index 0000000000000000000000000000000000000000..3f0d711dc5bb588c3abcbb301902338cf60d32bf --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_dict.h @@ -0,0 +1,737 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/array/array_base.h" +#include "arrow/array/array_binary.h" +#include "arrow/array/builder_adaptive.h" // IWYU pragma: export +#include "arrow/array/builder_base.h" // IWYU pragma: export +#include "arrow/array/builder_primitive.h" // IWYU pragma: export +#include "arrow/array/data.h" +#include "arrow/array/util.h" +#include "arrow/scalar.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/type_traits.h" +#include "arrow/util/bit_block_counter.h" +#include "arrow/util/checked_cast.h" +#include "arrow/util/decimal.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +// ---------------------------------------------------------------------- +// Dictionary builder + +namespace internal { + +template +struct DictionaryValue { + using type = typename T::c_type; + using PhysicalType = T; +}; + +template +struct DictionaryValue> { + using type = std::string_view; + using PhysicalType = + typename std::conditional::value, + BinaryType, LargeBinaryType>::type; +}; + +template +struct DictionaryValue> { + using type = std::string_view; + using PhysicalType = BinaryViewType; +}; + +template +struct DictionaryValue> { + using type = std::string_view; + using PhysicalType = BinaryType; +}; + +class ARROW_EXPORT DictionaryMemoTable { + public: + DictionaryMemoTable(MemoryPool* pool, const std::shared_ptr& type); + DictionaryMemoTable(MemoryPool* pool, const std::shared_ptr& dictionary); + ~DictionaryMemoTable(); + + Status GetArrayData(int64_t start_offset, std::shared_ptr* out); + + /// \brief Insert new memo values + Status InsertValues(const Array& values); + + int32_t size() const; + + template + Status GetOrInsert(typename DictionaryValue::type value, int32_t* out) { + // We 
want to keep the DictionaryMemoTable implementation private, also we can't + // use extern template classes because of compiler issues (MinGW?). Instead, + // we expose explicit function overrides for each supported physical type. + const typename DictionaryValue::PhysicalType* physical_type = NULLPTR; + return GetOrInsert(physical_type, value, out); + } + + private: + Status GetOrInsert(const BooleanType*, bool value, int32_t* out); + Status GetOrInsert(const Int8Type*, int8_t value, int32_t* out); + Status GetOrInsert(const Int16Type*, int16_t value, int32_t* out); + Status GetOrInsert(const Int32Type*, int32_t value, int32_t* out); + Status GetOrInsert(const Int64Type*, int64_t value, int32_t* out); + Status GetOrInsert(const UInt8Type*, uint8_t value, int32_t* out); + Status GetOrInsert(const UInt16Type*, uint16_t value, int32_t* out); + Status GetOrInsert(const UInt32Type*, uint32_t value, int32_t* out); + Status GetOrInsert(const UInt64Type*, uint64_t value, int32_t* out); + Status GetOrInsert(const DurationType*, int64_t value, int32_t* out); + Status GetOrInsert(const TimestampType*, int64_t value, int32_t* out); + Status GetOrInsert(const Date32Type*, int32_t value, int32_t* out); + Status GetOrInsert(const Date64Type*, int64_t value, int32_t* out); + Status GetOrInsert(const Time32Type*, int32_t value, int32_t* out); + Status GetOrInsert(const Time64Type*, int64_t value, int32_t* out); + Status GetOrInsert(const MonthDayNanoIntervalType*, + MonthDayNanoIntervalType::MonthDayNanos value, int32_t* out); + Status GetOrInsert(const DayTimeIntervalType*, + DayTimeIntervalType::DayMilliseconds value, int32_t* out); + Status GetOrInsert(const MonthIntervalType*, int32_t value, int32_t* out); + Status GetOrInsert(const FloatType*, float value, int32_t* out); + Status GetOrInsert(const DoubleType*, double value, int32_t* out); + + Status GetOrInsert(const BinaryType*, std::string_view value, int32_t* out); + Status GetOrInsert(const LargeBinaryType*, std::string_view value, int32_t* out); + Status GetOrInsert(const BinaryViewType*, std::string_view value, int32_t* out); + + class DictionaryMemoTableImpl; + std::unique_ptr impl_; +}; + +} // namespace internal + +/// \addtogroup dictionary-builders +/// +/// @{ + +namespace internal { + +/// \brief Array builder for created encoded DictionaryArray from +/// dense array +/// +/// Unlike other builders, dictionary builder does not completely +/// reset the state on Finish calls. +template +class DictionaryBuilderBase : public ArrayBuilder { + public: + using TypeClass = DictionaryType; + using Value = typename DictionaryValue::type; + + // WARNING: the type given below is the value type, not the DictionaryType. + // The DictionaryType is instantiated on the Finish() call. 
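+ // + // Illustrative usage sketch (added for clarity; assumes the + // StringDictionaryBuilder alias for utf8 values is available): + // + // StringDictionaryBuilder builder; + // ARROW_RETURN_NOT_OK(builder.Append("a")); + // ARROW_RETURN_NOT_OK(builder.Append("b")); + // ARROW_RETURN_NOT_OK(builder.Append("a")); + // std::shared_ptr<Array> out; + // ARROW_RETURN_NOT_OK(builder.Finish(&out)); // indices [0, 1, 0], + // // dictionary ["a", "b"]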
+ template + DictionaryBuilderBase(uint8_t start_int_size, + enable_if_t::value && + !is_fixed_size_binary_type::value, + const std::shared_ptr&> + value_type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, value_type)), + delta_offset_(0), + byte_width_(-1), + indices_builder_(start_int_size, pool, alignment), + value_type_(value_type) {} + + template + explicit DictionaryBuilderBase( + enable_if_t::value, const std::shared_ptr&> + value_type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, value_type)), + delta_offset_(0), + byte_width_(-1), + indices_builder_(pool, alignment), + value_type_(value_type) {} + + template + explicit DictionaryBuilderBase( + const std::shared_ptr& index_type, + enable_if_t::value, const std::shared_ptr&> + value_type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, value_type)), + delta_offset_(0), + byte_width_(-1), + indices_builder_(index_type, pool, alignment), + value_type_(value_type) {} + + template + DictionaryBuilderBase(uint8_t start_int_size, + enable_if_t::value && + is_fixed_size_binary_type::value, + const std::shared_ptr&> + value_type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, value_type)), + delta_offset_(0), + byte_width_(static_cast(*value_type).byte_width()), + indices_builder_(start_int_size, pool, alignment), + value_type_(value_type) {} + + template + explicit DictionaryBuilderBase( + enable_if_fixed_size_binary&> value_type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, value_type)), + delta_offset_(0), + byte_width_(static_cast(*value_type).byte_width()), + indices_builder_(pool, alignment), + value_type_(value_type) {} + + template + explicit DictionaryBuilderBase( + const std::shared_ptr& index_type, + enable_if_fixed_size_binary&> value_type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, value_type)), + delta_offset_(0), + byte_width_(static_cast(*value_type).byte_width()), + indices_builder_(index_type, pool, alignment), + value_type_(value_type) {} + + template + explicit DictionaryBuilderBase( + enable_if_parameter_free pool = default_memory_pool()) + : DictionaryBuilderBase(TypeTraits::type_singleton(), pool) {} + + // This constructor doesn't check for errors. Use InsertMemoValues instead. 
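+ // (Illustrative note: to surface insertion failures, construct the builder + // with the value type instead and call InsertMemoValues(*dictionary) + // explicitly, checking the returned Status.)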
+ explicit DictionaryBuilderBase(const std::shared_ptr& dictionary, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, dictionary)), + delta_offset_(0), + byte_width_(-1), + indices_builder_(pool, alignment), + value_type_(dictionary->type()) {} + + ~DictionaryBuilderBase() override = default; + + /// \brief The current number of entries in the dictionary + int64_t dictionary_length() const { return memo_table_->size(); } + + /// \brief The value byte width (for FixedSizeBinaryType) + template + enable_if_fixed_size_binary byte_width() const { + return byte_width_; + } + + /// \brief Append a scalar value + Status Append(Value value) { + ARROW_RETURN_NOT_OK(Reserve(1)); + + int32_t memo_index; + ARROW_RETURN_NOT_OK(memo_table_->GetOrInsert(value, &memo_index)); + ARROW_RETURN_NOT_OK(indices_builder_.Append(memo_index)); + length_ += 1; + + return Status::OK(); + } + + /// \brief Append a fixed-width string (only for FixedSizeBinaryType) + template + enable_if_fixed_size_binary Append(const uint8_t* value) { + return Append(std::string_view(reinterpret_cast(value), byte_width_)); + } + + /// \brief Append a fixed-width string (only for FixedSizeBinaryType) + template + enable_if_fixed_size_binary Append(const char* value) { + return Append(std::string_view(value, byte_width_)); + } + + /// \brief Append a string (only for binary types) + template + enable_if_binary_like Append(const uint8_t* value, int32_t length) { + return Append(reinterpret_cast(value), length); + } + + /// \brief Append a string (only for binary types) + template + enable_if_binary_like Append(const char* value, int32_t length) { + return Append(std::string_view(value, length)); + } + + /// \brief Append a string (only for string types) + template + enable_if_string_like Append(const char* value, int32_t length) { + return Append(std::string_view(value, length)); + } + + /// \brief Append a decimal (only for Decimal128Type) + template + enable_if_decimal128 Append(const Decimal128& value) { + uint8_t data[16]; + value.ToBytes(data); + return Append(data, 16); + } + + /// \brief Append a decimal (only for Decimal128Type) + template + enable_if_decimal256 Append(const Decimal256& value) { + uint8_t data[32]; + value.ToBytes(data); + return Append(data, 32); + } + + /// \brief Append a scalar null value + Status AppendNull() final { + length_ += 1; + null_count_ += 1; + + return indices_builder_.AppendNull(); + } + + Status AppendNulls(int64_t length) final { + length_ += length; + null_count_ += length; + + return indices_builder_.AppendNulls(length); + } + + Status AppendEmptyValue() final { + length_ += 1; + + return indices_builder_.AppendEmptyValue(); + } + + Status AppendEmptyValues(int64_t length) final { + length_ += length; + + return indices_builder_.AppendEmptyValues(length); + } + + Status AppendScalar(const Scalar& scalar, int64_t n_repeats) override { + if (!scalar.is_valid) return AppendNulls(n_repeats); + + const auto& dict_ty = internal::checked_cast(*scalar.type); + const DictionaryScalar& dict_scalar = + internal::checked_cast(scalar); + const auto& dict = internal::checked_cast::ArrayType&>( + *dict_scalar.value.dictionary); + ARROW_RETURN_NOT_OK(Reserve(n_repeats)); + switch (dict_ty.index_type()->id()) { + case Type::UINT8: + return AppendScalarImpl(dict, *dict_scalar.value.index, n_repeats); + case Type::INT8: + return AppendScalarImpl(dict, *dict_scalar.value.index, 
+
+  /// \brief Append a fixed-width string (only for FixedSizeBinaryType)
+  template <typename T1 = T>
+  enable_if_fixed_size_binary<T1, Status> Append(const uint8_t* value) {
+    return Append(std::string_view(reinterpret_cast<const char*>(value), byte_width_));
+  }
+
+  /// \brief Append a fixed-width string (only for FixedSizeBinaryType)
+  template <typename T1 = T>
+  enable_if_fixed_size_binary<T1, Status> Append(const char* value) {
+    return Append(std::string_view(value, byte_width_));
+  }
+
+  /// \brief Append a string (only for binary types)
+  template <typename T1 = T>
+  enable_if_binary_like<T1, Status> Append(const uint8_t* value, int32_t length) {
+    return Append(reinterpret_cast<const char*>(value), length);
+  }
+
+  /// \brief Append a string (only for binary types)
+  template <typename T1 = T>
+  enable_if_binary_like<T1, Status> Append(const char* value, int32_t length) {
+    return Append(std::string_view(value, length));
+  }
+
+  /// \brief Append a string (only for string types)
+  template <typename T1 = T>
+  enable_if_string_like<T1, Status> Append(const char* value, int32_t length) {
+    return Append(std::string_view(value, length));
+  }
+
+  /// \brief Append a decimal (only for Decimal128Type)
+  template <typename T1 = T>
+  enable_if_decimal128<T1, Status> Append(const Decimal128& value) {
+    uint8_t data[16];
+    value.ToBytes(data);
+    return Append(data, 16);
+  }
+
+  /// \brief Append a decimal (only for Decimal256Type)
+  template <typename T1 = T>
+  enable_if_decimal256<T1, Status> Append(const Decimal256& value) {
+    uint8_t data[32];
+    value.ToBytes(data);
+    return Append(data, 32);
+  }
+
+  /// \brief Append a scalar null value
+  Status AppendNull() final {
+    length_ += 1;
+    null_count_ += 1;
+
+    return indices_builder_.AppendNull();
+  }
+
+  Status AppendNulls(int64_t length) final {
+    length_ += length;
+    null_count_ += length;
+
+    return indices_builder_.AppendNulls(length);
+  }
+
+  Status AppendEmptyValue() final {
+    length_ += 1;
+
+    return indices_builder_.AppendEmptyValue();
+  }
+
+  Status AppendEmptyValues(int64_t length) final {
+    length_ += length;
+
+    return indices_builder_.AppendEmptyValues(length);
+  }
+
+  Status AppendScalar(const Scalar& scalar, int64_t n_repeats) override {
+    if (!scalar.is_valid) return AppendNulls(n_repeats);
+
+    const auto& dict_ty = internal::checked_cast<const DictionaryType&>(*scalar.type);
+    const DictionaryScalar& dict_scalar =
+        internal::checked_cast<const DictionaryScalar&>(scalar);
+    const auto& dict = internal::checked_cast<const typename TypeTraits<T>::ArrayType&>(
+        *dict_scalar.value.dictionary);
+    ARROW_RETURN_NOT_OK(Reserve(n_repeats));
+    switch (dict_ty.index_type()->id()) {
+      case Type::UINT8:
+        return AppendScalarImpl<UInt8Type>(dict, *dict_scalar.value.index, n_repeats);
+      case Type::INT8:
+        return AppendScalarImpl<Int8Type>(dict, *dict_scalar.value.index, n_repeats);
+      case Type::UINT16:
+        return AppendScalarImpl<UInt16Type>(dict, *dict_scalar.value.index, n_repeats);
+      case Type::INT16:
+        return AppendScalarImpl<Int16Type>(dict, *dict_scalar.value.index, n_repeats);
+      case Type::UINT32:
+        return AppendScalarImpl<UInt32Type>(dict, *dict_scalar.value.index, n_repeats);
+      case Type::INT32:
+        return AppendScalarImpl<Int32Type>(dict, *dict_scalar.value.index, n_repeats);
+      case Type::UINT64:
+        return AppendScalarImpl<UInt64Type>(dict, *dict_scalar.value.index, n_repeats);
+      case Type::INT64:
+        return AppendScalarImpl<Int64Type>(dict, *dict_scalar.value.index, n_repeats);
+      default:
+        return Status::TypeError("Invalid index type: ", dict_ty);
+    }
+    return Status::OK();
+  }
+
+  Status AppendScalars(const ScalarVector& scalars) override {
+    for (const auto& scalar : scalars) {
+      ARROW_RETURN_NOT_OK(AppendScalar(*scalar, /*n_repeats=*/1));
+    }
+    return Status::OK();
+  }
+
+  Status AppendArraySlice(const ArraySpan& array, int64_t offset, int64_t length) final {
+    // Visit the indices and insert the unpacked values.
+    const auto& dict_ty = internal::checked_cast<const DictionaryType&>(*array.type);
+    // See if possible to avoid using ToArrayData here
+    const typename TypeTraits<T>::ArrayType dict(array.dictionary().ToArrayData());
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    switch (dict_ty.index_type()->id()) {
+      case Type::UINT8:
+        return AppendArraySliceImpl<uint8_t>(dict, array, offset, length);
+      case Type::INT8:
+        return AppendArraySliceImpl<int8_t>(dict, array, offset, length);
+      case Type::UINT16:
+        return AppendArraySliceImpl<uint16_t>(dict, array, offset, length);
+      case Type::INT16:
+        return AppendArraySliceImpl<int16_t>(dict, array, offset, length);
+      case Type::UINT32:
+        return AppendArraySliceImpl<uint32_t>(dict, array, offset, length);
+      case Type::INT32:
+        return AppendArraySliceImpl<int32_t>(dict, array, offset, length);
+      case Type::UINT64:
+        return AppendArraySliceImpl<uint64_t>(dict, array, offset, length);
+      case Type::INT64:
+        return AppendArraySliceImpl<int64_t>(dict, array, offset, length);
+      default:
+        return Status::TypeError("Invalid index type: ", dict_ty);
+    }
+    return Status::OK();
+  }
+
+  /// \brief Insert values into the dictionary's memo, but do not append any
+  /// indices. Can be used to initialize a new builder with known dictionary
+  /// values
+  /// \param[in] values dictionary values to add to memo. Type must match
+  /// builder type
+  Status InsertMemoValues(const Array& values) {
+    return memo_table_->InsertValues(values);
+  }
+
+  /// \brief Append a whole dense array to the builder
+  template <typename T1 = T>
+  enable_if_t<!is_fixed_size_binary_type<T1>::value, Status> AppendArray(
+      const Array& array) {
+    using ArrayType = typename TypeTraits<T>::ArrayType;
+
+#ifndef NDEBUG
+    ARROW_RETURN_NOT_OK(ArrayBuilder::CheckArrayType(
+        value_type_, array, "Wrong value type of array to be appended"));
+#endif
+
+    const auto& concrete_array = static_cast<const ArrayType&>(array);
+    for (int64_t i = 0; i < array.length(); i++) {
+      if (array.IsNull(i)) {
+        ARROW_RETURN_NOT_OK(AppendNull());
+      } else {
+        ARROW_RETURN_NOT_OK(Append(concrete_array.GetView(i)));
+      }
+    }
+    return Status::OK();
+  }
+
+  template <typename T1 = T>
+  enable_if_fixed_size_binary<T1, Status> AppendArray(const Array& array) {
+#ifndef NDEBUG
+    ARROW_RETURN_NOT_OK(ArrayBuilder::CheckArrayType(
+        value_type_, array, "Wrong value type of array to be appended"));
+#endif
+
+    const auto& concrete_array = static_cast<const FixedSizeBinaryArray&>(array);
+    for (int64_t i = 0; i < array.length(); i++) {
+      if (array.IsNull(i)) {
+        ARROW_RETURN_NOT_OK(AppendNull());
+      } else {
+        ARROW_RETURN_NOT_OK(Append(concrete_array.GetValue(i)));
+      }
+    }
+    return Status::OK();
+  }
+
+  void Reset() override {
+    // Perform a partial reset. Call ResetFull to also reset the accumulated
+    // dictionary values
+    ArrayBuilder::Reset();
+    indices_builder_.Reset();
+  }
+
+  /// \brief Reset and also clear accumulated dictionary values in memo table
+  void ResetFull() {
+    Reset();
+    memo_table_.reset(new internal::DictionaryMemoTable(pool_, value_type_));
+  }
+
+  Status Resize(int64_t capacity) override {
+    ARROW_RETURN_NOT_OK(CheckCapacity(capacity));
+    capacity = std::max(capacity, kMinBuilderCapacity);
+    ARROW_RETURN_NOT_OK(indices_builder_.Resize(capacity));
+    capacity_ = indices_builder_.capacity();
+    return Status::OK();
+  }
+
+  /// \brief Return dictionary indices and a delta dictionary since the last
+  /// time that Finish or FinishDelta were called, and reset state of builder
+  /// (except the memo table)
+  Status FinishDelta(std::shared_ptr<Array>* out_indices,
+                     std::shared_ptr<Array>* out_delta) {
+    std::shared_ptr<ArrayData> indices_data;
+    std::shared_ptr<ArrayData> delta_data;
+    ARROW_RETURN_NOT_OK(FinishWithDictOffset(delta_offset_, &indices_data, &delta_data));
+    *out_indices = MakeArray(indices_data);
+    *out_delta = MakeArray(delta_data);
+    return Status::OK();
+  }
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<DictionaryArray>* out) { return FinishTyped(out); }
+
+  std::shared_ptr<DataType> type() const override {
+    return ::arrow::dictionary(indices_builder_.type(), value_type_);
+  }
+
+ protected:
+  template <typename c_type>
+  Status AppendArraySliceImpl(const typename TypeTraits<T>::ArrayType& dict,
+                              const ArraySpan& array, int64_t offset, int64_t length) {
+    const c_type* values = array.GetValues<c_type>(1) + offset;
+    return VisitBitBlocks(
+        array.buffers[0].data, array.offset + offset, length,
+        [&](const int64_t position) {
+          const int64_t index = static_cast<int64_t>(values[position]);
+          if (dict.IsValid(index)) {
+            return Append(dict.GetView(index));
+          }
+          return AppendNull();
+        },
+        [&]() { return AppendNull(); });
+  }
+
+  template <typename IndexType>
+  Status AppendScalarImpl(const typename TypeTraits<T>::ArrayType& dict,
+                          const Scalar& index_scalar, int64_t n_repeats) {
+    using ScalarType = typename TypeTraits<IndexType>::ScalarType;
+    const auto index = internal::checked_cast<const ScalarType&>(index_scalar).value;
+    if (index_scalar.is_valid && dict.IsValid(index)) {
+      const auto& value = dict.GetView(index);
+      for (int64_t i = 0; i < n_repeats; i++) {
+        ARROW_RETURN_NOT_OK(Append(value));
+      }
+      return Status::OK();
+    }
+    return AppendNulls(n_repeats);
+  }
+
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override {
+    std::shared_ptr<ArrayData> dictionary;
+    ARROW_RETURN_NOT_OK(FinishWithDictOffset(/*offset=*/0, out, &dictionary));
+
+    // Set type of array data to the right dictionary type
+    (*out)->type = type();
+    (*out)->dictionary = dictionary;
+    return Status::OK();
+  }
+
+  Status FinishWithDictOffset(int64_t dict_offset,
+                              std::shared_ptr<ArrayData>* out_indices,
+                              std::shared_ptr<ArrayData>* out_dictionary) {
+    // Finalize indices array
+    ARROW_RETURN_NOT_OK(indices_builder_.FinishInternal(out_indices));
+
+    // Generate dictionary array from hash table contents
+    ARROW_RETURN_NOT_OK(memo_table_->GetArrayData(dict_offset, out_dictionary));
+    delta_offset_ = memo_table_->size();
+
+    // Update internals for further uses of this DictionaryBuilder
+    ArrayBuilder::Reset();
+    return Status::OK();
+  }
+
+  std::unique_ptr<internal::DictionaryMemoTable> memo_table_;
+
+  // The size of the dictionary memo at last invocation of Finish, to use in
+  // FinishDelta for computing dictionary deltas
+  int32_t delta_offset_;
+
+  // Only used for FixedSizeBinaryType
+  int32_t byte_width_;
+
+  BuilderType indices_builder_;
+  std::shared_ptr<DataType> value_type_;
+};
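+
+// A hedged sketch of the delta-dictionary workflow above (illustrative only):
+// FinishDelta() hands back the indices accumulated so far together with only
+// the dictionary values added since the previous Finish/FinishDelta call,
+// which is the shape expected for IPC dictionary deltas:
+//
+//   StringDictionaryBuilder builder;
+//   ARROW_RETURN_NOT_OK(builder.Append("a"));
+//   std::shared_ptr<Array> indices, delta;
+//   ARROW_RETURN_NOT_OK(builder.FinishDelta(&indices, &delta));  // delta: ["a"]
+//   ARROW_RETURN_NOT_OK(builder.Append("a"));
+//   ARROW_RETURN_NOT_OK(builder.Append("b"));
+//   ARROW_RETURN_NOT_OK(builder.FinishDelta(&indices, &delta));  // delta: ["b"]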
+
+template <typename BuilderType>
+class DictionaryBuilderBase<BuilderType, NullType> : public ArrayBuilder {
+ public:
+  template <typename B = BuilderType>
+  DictionaryBuilderBase(
+      enable_if_t<std::is_base_of<AdaptiveIntBuilder, B>::value, uint8_t>
+          start_int_size,
+      const std::shared_ptr<DataType>& value_type,
+      MemoryPool* pool = default_memory_pool())
+      : ArrayBuilder(pool), indices_builder_(start_int_size, pool) {}
+
+  explicit DictionaryBuilderBase(const std::shared_ptr<DataType>& value_type,
+                                 MemoryPool* pool = default_memory_pool())
+      : ArrayBuilder(pool), indices_builder_(pool) {}
+
+  explicit DictionaryBuilderBase(const std::shared_ptr<DataType>& index_type,
+                                 const std::shared_ptr<DataType>& value_type,
+                                 MemoryPool* pool = default_memory_pool())
+      : ArrayBuilder(pool), indices_builder_(index_type, pool) {}
+
+  template <typename B = BuilderType>
+  explicit DictionaryBuilderBase(
+      enable_if_t<std::is_base_of<AdaptiveIntBuilder, B>::value, uint8_t>
+          start_int_size,
+      MemoryPool* pool = default_memory_pool())
+      : ArrayBuilder(pool), indices_builder_(start_int_size, pool) {}
+
+  explicit DictionaryBuilderBase(MemoryPool* pool = default_memory_pool())
+      : ArrayBuilder(pool), indices_builder_(pool) {}
+
+  explicit DictionaryBuilderBase(const std::shared_ptr<Array>& dictionary,
+                                 MemoryPool* pool = default_memory_pool())
+      : ArrayBuilder(pool), indices_builder_(pool) {}
+
+  /// \brief Append a scalar null value
+  Status AppendNull() final {
+    length_ += 1;
+    null_count_ += 1;
+
+    return indices_builder_.AppendNull();
+  }
+
+  Status AppendNulls(int64_t length) final {
+    length_ += length;
+    null_count_ += length;
+
+    return indices_builder_.AppendNulls(length);
+  }
+
+  Status AppendEmptyValue() final {
+    length_ += 1;
+
+    return indices_builder_.AppendEmptyValue();
+  }
+
+  Status AppendEmptyValues(int64_t length) final {
+    length_ += length;
+
+    return indices_builder_.AppendEmptyValues(length);
+  }
+
+  /// \brief Append a whole dense array to the builder
+  Status AppendArray(const Array& array) {
+#ifndef NDEBUG
+    ARROW_RETURN_NOT_OK(ArrayBuilder::CheckArrayType(
+        Type::NA, array, "Wrong value type of array to be appended"));
+#endif
+    for (int64_t i = 0; i < array.length(); i++) {
+      ARROW_RETURN_NOT_OK(AppendNull());
+    }
+    return Status::OK();
+  }
+
+  Status Resize(int64_t capacity) override {
+    ARROW_RETURN_NOT_OK(CheckCapacity(capacity));
+    capacity = std::max(capacity, kMinBuilderCapacity);
+
+    ARROW_RETURN_NOT_OK(indices_builder_.Resize(capacity));
+    capacity_ = indices_builder_.capacity();
+    return Status::OK();
+  }
+
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override {
+    ARROW_RETURN_NOT_OK(indices_builder_.FinishInternal(out));
+    (*out)->type = dictionary((*out)->type, null());
+    (*out)->dictionary = NullArray(0).data();
+    return Status::OK();
+  }
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<DictionaryArray>* out) { return FinishTyped(out); }
+
+  std::shared_ptr<DataType> type() const override {
+    return ::arrow::dictionary(indices_builder_.type(), null());
+  }
+
+ protected:
+  BuilderType indices_builder_;
+};
+
+}  // namespace internal
+
+/// \brief A DictionaryArray builder that uses AdaptiveIntBuilder to return the
+/// smallest index size that can accommodate the dictionary indices
+template <typename T>
+class DictionaryBuilder : public internal::DictionaryBuilderBase<AdaptiveIntBuilder, T> {
+ public:
+  using BASE = internal::DictionaryBuilderBase<AdaptiveIntBuilder, T>;
+  using BASE::BASE;
+
+  /// \brief Append dictionary indices directly without modifying memo
+  ///
+  /// NOTE: Experimental API
+  Status AppendIndices(const int64_t* values, int64_t length,
+                       const uint8_t* valid_bytes = NULLPTR) {
+    int64_t null_count_before = this->indices_builder_.null_count();
+    ARROW_RETURN_NOT_OK(this->indices_builder_.AppendValues(values, length, valid_bytes));
+    this->capacity_ = this->indices_builder_.capacity();
+    this->length_ += length;
+    this->null_count_ += this->indices_builder_.null_count() - null_count_before;
+    return Status::OK();
+  }
+};
+
+/// \brief A DictionaryArray builder that always returns int32 dictionary
+/// indices so that data cast to dictionary form will have a consistent index
+/// type, e.g. for creating a ChunkedArray
+template <typename T>
+class Dictionary32Builder : public internal::DictionaryBuilderBase<Int32Builder, T> {
+ public:
+  using BASE = internal::DictionaryBuilderBase<Int32Builder, T>;
+  using BASE::BASE;
+
+  /// \brief Append dictionary indices directly without modifying memo
+  ///
+  /// NOTE: Experimental API
+  Status AppendIndices(const int32_t* values, int64_t length,
+                       const uint8_t* valid_bytes = NULLPTR) {
+    int64_t null_count_before = this->indices_builder_.null_count();
+    ARROW_RETURN_NOT_OK(this->indices_builder_.AppendValues(values, length, valid_bytes));
+    this->capacity_ = this->indices_builder_.capacity();
+    this->length_ += length;
+    this->null_count_ += this->indices_builder_.null_count() - null_count_before;
+    return Status::OK();
+  }
+};
+
+// ----------------------------------------------------------------------
+// Binary / Unicode builders
+// (compatibility aliases; those used to be derived classes with additional
+// Append() overloads, but they have been folded into DictionaryBuilderBase)
+
+using BinaryDictionaryBuilder = DictionaryBuilder<BinaryType>;
+using StringDictionaryBuilder = DictionaryBuilder<StringType>;
+using BinaryDictionary32Builder = Dictionary32Builder<BinaryType>;
+using StringDictionary32Builder = Dictionary32Builder<StringType>;
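+
+// A hedged comparison sketch (illustrative only): DictionaryBuilder adapts the
+// index width to the dictionary size, while Dictionary32Builder pins indices
+// to int32 so every chunk built this way has the same dictionary index type,
+// e.g. when assembling a ChunkedArray:
+//
+//   StringDictionary32Builder builder;
+//   ARROW_RETURN_NOT_OK(builder.Append("x"));
+//   std::shared_ptr<DictionaryArray> chunk;
+//   ARROW_RETURN_NOT_OK(builder.Finish(&chunk));
+//   // chunk->type() == dictionary(int32(), utf8())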
+
+/// @}
+
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_nested.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_nested.h
new file mode 100644
index 0000000000000000000000000000000000000000..429aa5c0488cd9b3c6b78252224b68f403febd14
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_nested.h
@@ -0,0 +1,838 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <limits>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "arrow/array/array_nested.h"
+#include "arrow/array/builder_base.h"
+#include "arrow/array/data.h"
+#include "arrow/buffer.h"
+#include "arrow/buffer_builder.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// \addtogroup nested-builders
+///
+/// @{
+
+// ----------------------------------------------------------------------
+// VarLengthListLikeBuilder
+
+template <typename TYPE>
+class ARROW_EXPORT VarLengthListLikeBuilder : public ArrayBuilder {
+ public:
+  using TypeClass = TYPE;
+  using offset_type = typename TypeClass::offset_type;
+
+  /// Use this constructor to incrementally build the value array along with offsets and
+  /// null bitmap.
+  VarLengthListLikeBuilder(MemoryPool* pool,
+                           std::shared_ptr<ArrayBuilder> const& value_builder,
+                           const std::shared_ptr<DataType>& type,
+                           int64_t alignment = kDefaultBufferAlignment)
+      : ArrayBuilder(pool, alignment),
+        offsets_builder_(pool, alignment),
+        value_builder_(value_builder),
+        value_field_(type->field(0)->WithType(NULLPTR)) {}
+
+  VarLengthListLikeBuilder(MemoryPool* pool,
+                           std::shared_ptr<ArrayBuilder> const& value_builder,
+                           int64_t alignment = kDefaultBufferAlignment)
+      : VarLengthListLikeBuilder(pool, value_builder,
+                                 std::make_shared<TYPE>(value_builder->type()),
+                                 alignment) {}
+
+  ~VarLengthListLikeBuilder() override = default;
+
+  Status Resize(int64_t capacity) override {
+    if (ARROW_PREDICT_FALSE(capacity > maximum_elements())) {
+      return Status::CapacityError(type_name(),
+                                   " array cannot reserve space for more than ",
+                                   maximum_elements(), " got ", capacity);
+    }
+    ARROW_RETURN_NOT_OK(CheckCapacity(capacity));
+
+    // One more than requested for list offsets
+    const int64_t offsets_capacity =
+        is_list_view(TYPE::type_id) ? capacity : capacity + 1;
+    ARROW_RETURN_NOT_OK(offsets_builder_.Resize(offsets_capacity));
+    return ArrayBuilder::Resize(capacity);
+  }
+
+  void Reset() override {
+    ArrayBuilder::Reset();
+    offsets_builder_.Reset();
+    value_builder_->Reset();
+  }
+
+  /// \brief Start a new variable-length list slot
+  ///
+  /// This function should be called before appending elements to the
+  /// value builder. Elements appended to the value builder before this function
+  /// is called for the first time, will not be members of any list value.
+  ///
+  /// After this function is called, list_length elements SHOULD be appended to
+  /// the values builder. If this contract is violated, the behavior is defined by
+  /// the concrete builder implementation and SHOULD NOT be relied upon unless
+  /// the caller is specifically building a [Large]List or [Large]ListView array.
+  ///
+  /// For [Large]List arrays, the list slot length will be the number of elements
+  /// appended to the values builder before the next call to Append* or Finish. For
+  /// [Large]ListView arrays, the list slot length will be exactly list_length, but if
+  /// Append* is called before at least list_length elements are appended to the values
+  /// builder, the current list slot will share elements with the next list
+  /// slots or an invalid [Large]ListView array will be generated because there
+  /// aren't enough elements in the values builder to fill the list slots.
+  ///
+  /// If you're building a [Large]List and don't need to be compatible
+  /// with [Large]ListView, then `BaseListBuilder::Append(bool is_valid)`
+  /// is a simpler API.
+  ///
+  /// \pre if is_valid is false, list_length MUST be 0
+  /// \param is_valid Whether the new list slot is valid
+  /// \param list_length The number of elements in the list
+  Status Append(bool is_valid, int64_t list_length) {
+    ARROW_RETURN_NOT_OK(Reserve(1));
+    assert(is_valid || list_length == 0);
+    UnsafeAppendToBitmap(is_valid);
+    UnsafeAppendDimensions(/*offset=*/value_builder_->length(), /*size=*/list_length);
+    return Status::OK();
+  }
+
+  Status AppendNull() final {
+    // Append() a null list slot with list_length=0.
+    //
+    // When building [Large]List arrays, elements being appended to the values builder
+    // before the next call to Append* or Finish will extend the list slot length, but
+    // that is totally fine because list arrays admit non-empty null list slots.
+    //
+    // In the case of [Large]ListViews that's not a problem either because the
+    // list slot length remains zero.
+    return Append(false, 0);
+  }
+
+  Status AppendNulls(int64_t length) final {
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    UnsafeAppendToBitmap(length, false);
+    UnsafeAppendEmptyDimensions(/*num_values=*/length);
+    return Status::OK();
+  }
+
+  /// \brief Append an empty list slot
+  ///
+  /// \post Another call to Append* or Finish should be made before appending to
+  /// the values builder to ensure list slot remains empty
+  Status AppendEmptyValue() final { return Append(true, 0); }
+
+  /// \brief Append an empty list slot
+  ///
+  /// \post Another call to Append* or Finish should be made before appending to
+  /// the values builder to ensure the last list slot remains empty
+  Status AppendEmptyValues(int64_t length) final {
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    UnsafeAppendToBitmap(length, true);
+    UnsafeAppendEmptyDimensions(/*num_values=*/length);
+    return Status::OK();
+  }
+
+  /// \brief Vector append
+  ///
+  /// For list-array builders, the sizes are inferred from the offsets.
+  /// BaseListBuilder provides an implementation that doesn't take sizes, but
+  /// this virtual function allows dispatching calls to both list-array and
+  /// list-view-array builders (which need the sizes)
+  ///
+  /// \param offsets The offsets of the variable-length lists
+  /// \param sizes The sizes of the variable-length lists
+  /// \param length The number of offsets, sizes, and validity bits to append
+  /// \param valid_bytes If passed, valid_bytes is of equal length to values,
+  /// and any zero byte will be considered as a null for that slot
+  virtual Status AppendValues(const offset_type* offsets, const offset_type* sizes,
+                              int64_t length, const uint8_t* valid_bytes) = 0;
+
+  Status AppendArraySlice(const ArraySpan& array, int64_t offset,
+                          int64_t length) override {
+    const offset_type* offsets = array.GetValues<offset_type>(1);
+    [[maybe_unused]] const offset_type* sizes = NULLPTR;
+    if constexpr (is_list_view(TYPE::type_id)) {
+      sizes = array.GetValues<offset_type>(2);
+    }
+    const bool all_valid = !array.MayHaveLogicalNulls();
+    const uint8_t* validity =
+        array.HasValidityBitmap() ? array.buffers[0].data : NULLPTR;
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    for (int64_t row = offset; row < offset + length; row++) {
+      const bool is_valid =
+          all_valid || (validity && bit_util::GetBit(validity, array.offset + row)) ||
+          array.IsValid(row);
+      int64_t size = 0;
+      if (is_valid) {
+        if constexpr (is_list_view(TYPE::type_id)) {
+          size = sizes[row];
+        } else {
+          size = offsets[row + 1] - offsets[row];
+        }
+      }
+      UnsafeAppendToBitmap(is_valid);
+      UnsafeAppendDimensions(/*offset=*/value_builder_->length(), size);
+      if (is_valid) {
+        ARROW_RETURN_NOT_OK(
+            value_builder_->AppendArraySlice(array.child_data[0], offsets[row], size));
+      }
+    }
+    return Status::OK();
+  }
+
+  Status ValidateOverflow(int64_t new_elements) const {
+    auto new_length = value_builder_->length() + new_elements;
+    if (ARROW_PREDICT_FALSE(new_length > maximum_elements())) {
+      return Status::CapacityError(type_name(), " array cannot contain more than ",
+                                   maximum_elements(), " elements, have ", new_elements);
+    } else {
+      return Status::OK();
+    }
+  }
+
+  ArrayBuilder* value_builder() const { return value_builder_.get(); }
+
+  // Cannot make this a static attribute because of linking issues
+  static constexpr int64_t maximum_elements() {
+    return std::numeric_limits<offset_type>::max() - 1;
+  }
+
+  std::shared_ptr<DataType> type() const override {
+    return std::make_shared<TYPE>(value_field_->WithType(value_builder_->type()));
+  }
+
+ private:
+  static constexpr const char* type_name() {
+    if constexpr (is_list_view(TYPE::type_id)) {
+      return "ListView";
+    } else {
+      return "List";
+    }
+  }
+
+ protected:
+  /// \brief Append dimensions for num_values empty list slots.
+  ///
+  /// ListViewBuilder overrides this to also append the sizes.
+  virtual void UnsafeAppendEmptyDimensions(int64_t num_values) {
+    const int64_t offset = value_builder_->length();
+    for (int64_t i = 0; i < num_values; ++i) {
+      offsets_builder_.UnsafeAppend(static_cast<offset_type>(offset));
+    }
+  }
+
+  /// \brief Append dimensions for a single list slot.
+  ///
+  /// ListViewBuilder overrides this to also append the size.
+  virtual void UnsafeAppendDimensions(int64_t offset, int64_t size) {
+    offsets_builder_.UnsafeAppend(static_cast<offset_type>(offset));
+  }
+
+  TypedBufferBuilder<offset_type> offsets_builder_;
+  std::shared_ptr<ArrayBuilder> value_builder_;
+  std::shared_ptr<Field> value_field_;
+};
+
+// ----------------------------------------------------------------------
+// ListBuilder / LargeListBuilder
+
+template <typename TYPE>
+class ARROW_EXPORT BaseListBuilder : public VarLengthListLikeBuilder<TYPE> {
+ private:
+  using BASE = VarLengthListLikeBuilder<TYPE>;
+
+ public:
+  using TypeClass = TYPE;
+  using offset_type = typename BASE::offset_type;
+
+  using BASE::BASE;
+
+  using BASE::Append;
+
+  ~BaseListBuilder() override = default;
+
+  /// \brief Start a new variable-length list slot
+  ///
+  /// This function should be called before beginning to append elements to the
+  /// value builder
+  Status Append(bool is_valid = true) {
+    // The value_length parameter to BASE::Append(bool, int64_t) is ignored when
+    // building a list array, so we can pass 0 here.
+    return BASE::Append(is_valid, 0);
+  }
+
+  /// \brief Vector append
+  ///
+  /// If passed, valid_bytes is of equal length to values, and any zero byte
+  /// will be considered as a null for that slot
+  Status AppendValues(const offset_type* offsets, int64_t length,
+                      const uint8_t* valid_bytes = NULLPTR) {
+    ARROW_RETURN_NOT_OK(this->Reserve(length));
+    this->UnsafeAppendToBitmap(valid_bytes, length);
+    this->offsets_builder_.UnsafeAppend(offsets, length);
+    return Status::OK();
+  }
+
+  Status AppendValues(const offset_type* offsets, const offset_type* sizes,
+                      int64_t length, const uint8_t* valid_bytes) final {
+    // Offsets are assumed to be valid, but the first length-1 sizes have to be
+    // consistent with the offsets to partially rule out the possibility that the
+    // caller is passing sizes that could work if building a list-view, but don't
+    // work on building a list that requires offsets to be non-decreasing.
+    //
+    // CAUTION: the last size element (`sizes[length - 1]`) is not
+    // validated and could be inconsistent with the offsets given in a
+    // subsequent call to AppendValues.
+#ifndef NDEBUG
+    if (sizes) {
+      for (int64_t i = 0; i < length - 1; ++i) {
+        if (ARROW_PREDICT_FALSE(offsets[i] != offsets[i + 1] - sizes[i])) {
+          if (!valid_bytes || valid_bytes[i]) {
+            return Status::Invalid(
+                "BaseListBuilder: sizes are inconsistent with offsets provided");
+          }
+        }
+      }
+    }
+#endif
+    return AppendValues(offsets, length, valid_bytes);
+  }
+
+  Status AppendValues(const offset_type* offsets, const offset_type* sizes,
+                      int64_t length) {
+    return AppendValues(offsets, sizes, length, /*valid_bytes=*/NULLPTR);
+  }
+
+  Status AppendNextOffset() {
+    ARROW_RETURN_NOT_OK(this->ValidateOverflow(0));
+    const int64_t num_values = this->value_builder_->length();
+    return this->offsets_builder_.Append(static_cast<offset_type>(num_values));
+  }
+
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override {
+    ARROW_RETURN_NOT_OK(AppendNextOffset());
+
+    // Offset padding zeroed by BufferBuilder
+    std::shared_ptr<Buffer> offsets;
+    std::shared_ptr<Buffer> null_bitmap;
+    ARROW_RETURN_NOT_OK(this->offsets_builder_.Finish(&offsets));
+    ARROW_RETURN_NOT_OK(this->null_bitmap_builder_.Finish(&null_bitmap));
+
+    if (this->value_builder_->length() == 0) {
+      // Try to make sure we get a non-null values buffer (ARROW-2744)
+      ARROW_RETURN_NOT_OK(this->value_builder_->Resize(0));
+    }
+
+    std::shared_ptr<ArrayData> items;
+    ARROW_RETURN_NOT_OK(this->value_builder_->FinishInternal(&items));
+
+    *out = ArrayData::Make(this->type(), this->length_,
+                           {std::move(null_bitmap), std::move(offsets)},
+                           {std::move(items)}, this->null_count_);
+    this->Reset();
+    return Status::OK();
+  }
+};
+
+/// \class ListBuilder
+/// \brief Builder class for variable-length list array value types
+///
+/// To use this class, you must append values to the child array builder and use
+/// the Append function to delimit each distinct list value (once the values
+/// have been appended to the child array) or use the bulk API to append
+/// a sequence of offsets and null values.
+///
+/// A note on types. Per arrow/type.h all types in the c++ implementation are
+/// logical so even though this class always builds list array, this can
+/// represent multiple different logical types. If no logical type is provided
+/// at construction time, the class defaults to List<T> where T is taken from the
+/// value_builder/values that the object is constructed with.
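+///
+/// A minimal usage sketch (illustrative only; assumes an Int32Builder child):
+///
+///   auto values = std::make_shared<Int32Builder>();
+///   ListBuilder builder(default_memory_pool(), values);
+///   ARROW_RETURN_NOT_OK(builder.Append());      // open the slot for [1, 2]
+///   ARROW_RETURN_NOT_OK(values->Append(1));
+///   ARROW_RETURN_NOT_OK(values->Append(2));
+///   ARROW_RETURN_NOT_OK(builder.AppendNull());  // a null list slot
+///   std::shared_ptr<ListArray> out;
+///   ARROW_RETURN_NOT_OK(builder.Finish(&out));  // [[1, 2], null]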
+class ARROW_EXPORT ListBuilder : public BaseListBuilder<ListType> {
+ public:
+  using BaseListBuilder::BaseListBuilder;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<ListArray>* out) { return FinishTyped(out); }
+};
+
+/// \class LargeListBuilder
+/// \brief Builder class for large variable-length list array value types
+///
+/// Like ListBuilder, but to create large list arrays (with 64-bit offsets).
+class ARROW_EXPORT LargeListBuilder : public BaseListBuilder<LargeListType> {
+ public:
+  using BaseListBuilder::BaseListBuilder;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<LargeListArray>* out) { return FinishTyped(out); }
+};
+
+// ----------------------------------------------------------------------
+// ListViewBuilder / LargeListViewBuilder
+
+template <typename TYPE>
+class ARROW_EXPORT BaseListViewBuilder : public VarLengthListLikeBuilder<TYPE> {
+ private:
+  using BASE = VarLengthListLikeBuilder<TYPE>;
+
+ public:
+  using TypeClass = TYPE;
+  using offset_type = typename BASE::offset_type;
+
+  using BASE::BASE;
+
+  ~BaseListViewBuilder() override = default;
+
+  Status Resize(int64_t capacity) override {
+    ARROW_RETURN_NOT_OK(BASE::Resize(capacity));
+    return sizes_builder_.Resize(capacity);
+  }
+
+  void Reset() override {
+    BASE::Reset();
+    sizes_builder_.Reset();
+  }
+
+  /// \brief Vector append
+  ///
+  /// If passed, valid_bytes is of equal length to values, and any zero byte
+  /// will be considered as a null for that slot
+  Status AppendValues(const offset_type* offsets, const offset_type* sizes,
+                      int64_t length, const uint8_t* valid_bytes) final {
+    ARROW_RETURN_NOT_OK(this->Reserve(length));
+    this->UnsafeAppendToBitmap(valid_bytes, length);
+    this->offsets_builder_.UnsafeAppend(offsets, length);
+    this->sizes_builder_.UnsafeAppend(sizes, length);
+    return Status::OK();
+  }
+
+  Status AppendValues(const offset_type* offsets, const offset_type* sizes,
+                      int64_t length) {
+    return AppendValues(offsets, sizes, length, /*valid_bytes=*/NULLPTR);
+  }
+
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override {
+    // Offset and sizes padding zeroed by BufferBuilder
+    std::shared_ptr<Buffer> null_bitmap;
+    std::shared_ptr<Buffer> offsets;
+    std::shared_ptr<Buffer> sizes;
+    ARROW_RETURN_NOT_OK(this->null_bitmap_builder_.Finish(&null_bitmap));
+    ARROW_RETURN_NOT_OK(this->offsets_builder_.Finish(&offsets));
+    ARROW_RETURN_NOT_OK(this->sizes_builder_.Finish(&sizes));
+
+    if (this->value_builder_->length() == 0) {
+      // Try to make sure we get a non-null values buffer (ARROW-2744)
+      ARROW_RETURN_NOT_OK(this->value_builder_->Resize(0));
+    }
+
+    std::shared_ptr<ArrayData> items;
+    ARROW_RETURN_NOT_OK(this->value_builder_->FinishInternal(&items));
+
+    *out = ArrayData::Make(this->type(), this->length_,
+                           {std::move(null_bitmap), std::move(offsets), std::move(sizes)},
+                           {std::move(items)}, this->null_count_);
+    this->Reset();
+    return Status::OK();
+  }
+
+ protected:
+  void UnsafeAppendEmptyDimensions(int64_t num_values) override {
+    for (int64_t i = 0; i < num_values; ++i) {
+      this->offsets_builder_.UnsafeAppend(0);
+    }
+    for (int64_t i = 0; i < num_values; ++i) {
+      this->sizes_builder_.UnsafeAppend(0);
+    }
+  }
+
+  void UnsafeAppendDimensions(int64_t offset, int64_t size) override {
+    this->offsets_builder_.UnsafeAppend(static_cast<offset_type>(offset));
+    this->sizes_builder_.UnsafeAppend(static_cast<offset_type>(size));
+  }
+
+ private:
+  TypedBufferBuilder<offset_type> sizes_builder_;
+};
+
+class ARROW_EXPORT ListViewBuilder final : public BaseListViewBuilder<ListViewType> {
+ public:
+  using BaseListViewBuilder::BaseListViewBuilder;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<ListViewArray>* out) { return FinishTyped(out); }
+};
+
+class ARROW_EXPORT LargeListViewBuilder final
+    : public BaseListViewBuilder<LargeListViewType> {
+ public:
+  using BaseListViewBuilder::BaseListViewBuilder;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<LargeListViewArray>* out) { return FinishTyped(out); }
+};
+
+// ----------------------------------------------------------------------
+// Map builder
+
+/// \class MapBuilder
+/// \brief Builder class for arrays of variable-size maps
+///
+/// To use this class, you must use the Append function to delimit each distinct
+/// map before appending values to the key and item array builders, or use the
+/// bulk API to append a sequence of offsets and null maps.
+///
+/// Key uniqueness and ordering are not validated.
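+///
+/// A hedged usage sketch (illustrative only; the key and item child builders
+/// are assumptions):
+///
+///   auto keys = std::make_shared<StringBuilder>();
+///   auto items = std::make_shared<Int32Builder>();
+///   MapBuilder builder(default_memory_pool(), keys, items);
+///   ARROW_RETURN_NOT_OK(builder.Append());      // open the slot for {"a": 1}
+///   ARROW_RETURN_NOT_OK(keys->Append("a"));
+///   ARROW_RETURN_NOT_OK(items->Append(1));
+///   ARROW_RETURN_NOT_OK(builder.AppendNull());  // a null map slot
+///   std::shared_ptr<MapArray> out;
+///   ARROW_RETURN_NOT_OK(builder.Finish(&out));  // [{"a": 1}, null]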
+class ARROW_EXPORT MapBuilder : public ArrayBuilder {
+ public:
+  /// Use this constructor to define the built array's type explicitly. If key_builder
+  /// or item_builder has indeterminate type, this builder will also.
+  MapBuilder(MemoryPool* pool, const std::shared_ptr<ArrayBuilder>& key_builder,
+             const std::shared_ptr<ArrayBuilder>& item_builder,
+             const std::shared_ptr<DataType>& type);
+
+  /// Use this constructor to infer the built array's type. If key_builder or
+  /// item_builder has indeterminate type, this builder will also.
+  MapBuilder(MemoryPool* pool, const std::shared_ptr<ArrayBuilder>& key_builder,
+             const std::shared_ptr<ArrayBuilder>& item_builder, bool keys_sorted = false);
+
+  MapBuilder(MemoryPool* pool, const std::shared_ptr<ArrayBuilder>& item_builder,
+             const std::shared_ptr<DataType>& type);
+
+  Status Resize(int64_t capacity) override;
+  void Reset() override;
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<MapArray>* out) { return FinishTyped(out); }
+
+  /// \brief Vector append
+  ///
+  /// If passed, valid_bytes is of equal length to values, and any zero byte
+  /// will be considered as a null for that slot
+  Status AppendValues(const int32_t* offsets, int64_t length,
+                      const uint8_t* valid_bytes = NULLPTR);
+
+  /// \brief Start a new variable-length map slot
+  ///
+  /// This function should be called before beginning to append elements to the
+  /// key and item builders
+  Status Append();
+
+  Status AppendNull() final;
+
+  Status AppendNulls(int64_t length) final;
+
+  Status AppendEmptyValue() final;
+
+  Status AppendEmptyValues(int64_t length) final;
+
+  Status AppendArraySlice(const ArraySpan& array, int64_t offset,
+                          int64_t length) override {
+    const int32_t* offsets = array.GetValues<int32_t>(1);
+    const bool all_valid = !array.MayHaveLogicalNulls();
+    const uint8_t* validity =
+        array.HasValidityBitmap() ? array.buffers[0].data : NULLPTR;
+    for (int64_t row = offset; row < offset + length; row++) {
+      const bool is_valid =
+          all_valid || (validity && bit_util::GetBit(validity, array.offset + row)) ||
+          array.IsValid(row);
+      if (is_valid) {
+        ARROW_RETURN_NOT_OK(Append());
+        const int64_t slot_length = offsets[row + 1] - offsets[row];
+        // Add together the inner StructArray offset to the Map/List offset
+        int64_t key_value_offset = array.child_data[0].offset + offsets[row];
+        ARROW_RETURN_NOT_OK(key_builder_->AppendArraySlice(
+            array.child_data[0].child_data[0], key_value_offset, slot_length));
+        ARROW_RETURN_NOT_OK(item_builder_->AppendArraySlice(
+            array.child_data[0].child_data[1], key_value_offset, slot_length));
+      } else {
+        ARROW_RETURN_NOT_OK(AppendNull());
+      }
+    }
+    return Status::OK();
+  }
+
+  /// \brief Get builder to append keys.
+  ///
+  /// Appending a key with this builder should be followed by appending
+  /// an item or null value with item_builder().
+  ArrayBuilder* key_builder() const { return key_builder_.get(); }
+
+  /// \brief Get builder to append items
+  ///
+  /// Appending an item with this builder should have been preceded
+  /// by appending a key with key_builder().
+  ArrayBuilder* item_builder() const { return item_builder_.get(); }
+
+  /// \brief Get builder to add Map entries as struct values.
+  ///
+  /// This is used instead of key_builder()/item_builder() and allows
+  /// the Map to be built as a list of struct values.
+  ArrayBuilder* value_builder() const { return list_builder_->value_builder(); }
+
+  std::shared_ptr<DataType> type() const override {
+    // Key and Item builder may update types, but they don't contain the field names,
+    // so we need to reconstruct the type. (See ARROW-13735.)
+    return std::make_shared<MapType>(
+        field(entries_name_,
+              struct_({field(key_name_, key_builder_->type(), false),
+                       field(item_name_, item_builder_->type(), item_nullable_)}),
+              false),
+        keys_sorted_);
+  }
+
+  Status ValidateOverflow(int64_t new_elements) {
+    return list_builder_->ValidateOverflow(new_elements);
+  }
+
+ protected:
+  inline Status AdjustStructBuilderLength();
+
+ protected:
+  bool keys_sorted_ = false;
+  bool item_nullable_ = false;
+  std::string entries_name_;
+  std::string key_name_;
+  std::string item_name_;
+  std::shared_ptr<ListBuilder> list_builder_;
+  std::shared_ptr<ArrayBuilder> key_builder_;
+  std::shared_ptr<ArrayBuilder> item_builder_;
+};
+
+// ----------------------------------------------------------------------
+// FixedSizeList builder
+
+/// \class FixedSizeListBuilder
+/// \brief Builder class for fixed-length list array value types
+class ARROW_EXPORT FixedSizeListBuilder : public ArrayBuilder {
+ public:
+  /// Use this constructor to define the built array's type explicitly. If value_builder
+  /// has indeterminate type, this builder will also.
+  FixedSizeListBuilder(MemoryPool* pool,
+                       std::shared_ptr<ArrayBuilder> const& value_builder,
+                       int32_t list_size);
+
+  /// Use this constructor to infer the built array's type. If value_builder has
+  /// indeterminate type, this builder will also.
+  FixedSizeListBuilder(MemoryPool* pool,
+                       std::shared_ptr<ArrayBuilder> const& value_builder,
+                       const std::shared_ptr<DataType>& type);
+
+  Status Resize(int64_t capacity) override;
+  void Reset() override;
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<FixedSizeListArray>* out) { return FinishTyped(out); }
+
+  /// \brief Append a valid fixed length list.
+  ///
+  /// This function affects only the validity bitmap; the child values must be appended
+  /// using the child array builder.
+  Status Append();
+
+  /// \brief Vector append
+  ///
+  /// If passed, valid_bytes will be read and any zero byte
+  /// will cause the corresponding slot to be null
+  ///
+  /// This function affects only the validity bitmap; the child values must be appended
+  /// using the child array builder. This includes appending nulls for null lists.
+  /// XXX this restriction is confusing, should this method be omitted?
+  Status AppendValues(int64_t length, const uint8_t* valid_bytes = NULLPTR);
+
+  /// \brief Append a null fixed length list.
+  ///
+  /// The child array builder will have the appropriate number of nulls appended
+  /// automatically.
+  Status AppendNull() final;
+
+  /// \brief Append length null fixed length lists.
+  ///
+  /// The child array builder will have the appropriate number of nulls appended
+  /// automatically.
+  Status AppendNulls(int64_t length) final;
+
+  Status ValidateOverflow(int64_t new_elements);
+
+  Status AppendEmptyValue() final;
+
+  Status AppendEmptyValues(int64_t length) final;
+
+  Status AppendArraySlice(const ArraySpan& array, int64_t offset, int64_t length) final {
+    const uint8_t* validity = array.MayHaveNulls() ? array.buffers[0].data : NULLPTR;
+    for (int64_t row = offset; row < offset + length; row++) {
+      if (!validity || bit_util::GetBit(validity, array.offset + row)) {
+        ARROW_RETURN_NOT_OK(value_builder_->AppendArraySlice(
+            array.child_data[0], list_size_ * (array.offset + row), list_size_));
+        ARROW_RETURN_NOT_OK(Append());
+      } else {
+        ARROW_RETURN_NOT_OK(AppendNull());
+      }
+    }
+    return Status::OK();
+  }
+
+  ArrayBuilder* value_builder() const { return value_builder_.get(); }
+
+  std::shared_ptr<DataType> type() const override {
+    return fixed_size_list(value_field_->WithType(value_builder_->type()), list_size_);
+  }
+
+  // Cannot make this a static attribute because of linking issues
+  static constexpr int64_t maximum_elements() {
+    return std::numeric_limits<int32_t>::max() - 1;
+  }
+
+ protected:
+  std::shared_ptr<Field> value_field_;
+  const int32_t list_size_;
+  std::shared_ptr<ArrayBuilder> value_builder_;
+};
+
+// ----------------------------------------------------------------------
+// Struct
+
+// ---------------------------------------------------------------------------------
+// StructArray builder
+/// Append, Resize and Reserve methods act on the StructBuilder itself.
+/// Please make sure the corresponding methods of all child builders are
+/// called consistently to maintain data-structure consistency.
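+///
+/// A hedged usage sketch (illustrative only; the child builders and field
+/// names are assumptions):
+///
+///   auto x = std::make_shared<Int32Builder>();
+///   auto y = std::make_shared<StringBuilder>();
+///   StructBuilder builder(struct_({field("x", int32()), field("y", utf8())}),
+///                         default_memory_pool(), {x, y});
+///   ARROW_RETURN_NOT_OK(x->Append(1));
+///   ARROW_RETURN_NOT_OK(y->Append("one"));
+///   ARROW_RETURN_NOT_OK(builder.Append());  // commit the row {x: 1, y: "one"}
+///   std::shared_ptr<StructArray> out;
+///   ARROW_RETURN_NOT_OK(builder.Finish(&out));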
+class ARROW_EXPORT StructBuilder : public ArrayBuilder {
+ public:
+  /// If any of field_builders has indeterminate type, this builder will also
+  StructBuilder(const std::shared_ptr<DataType>& type, MemoryPool* pool,
+                std::vector<std::shared_ptr<ArrayBuilder>> field_builders);
+
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<StructArray>* out) { return FinishTyped(out); }
+
+  /// The null bitmap is of equal length to every child field, and any zero byte
+  /// will be considered as a null for that field. Users must call the append or
+  /// advance methods of the child builders independently to insert data.
+  Status AppendValues(int64_t length, const uint8_t* valid_bytes) {
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    UnsafeAppendToBitmap(valid_bytes, length);
+    return Status::OK();
+  }
+
+  /// Append an element to the Struct. All child builders' Append methods must
+  /// be called independently to maintain data-structure consistency.
+  Status Append(bool is_valid = true) {
+    ARROW_RETURN_NOT_OK(Reserve(1));
+    UnsafeAppendToBitmap(is_valid);
+    return Status::OK();
+  }
+
+  /// \brief Append a null value. Automatically appends an empty value to each child
+  /// builder.
+  Status AppendNull() final {
+    for (const auto& field : children_) {
+      ARROW_RETURN_NOT_OK(field->AppendEmptyValue());
+    }
+    return Append(false);
+  }
+
+  /// \brief Append multiple null values. Automatically appends empty values to each
+  /// child builder.
+  Status AppendNulls(int64_t length) final {
+    for (const auto& field : children_) {
+      ARROW_RETURN_NOT_OK(field->AppendEmptyValues(length));
+    }
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    UnsafeAppendToBitmap(length, false);
+    return Status::OK();
+  }
+
+  Status AppendEmptyValue() final {
+    for (const auto& field : children_) {
+      ARROW_RETURN_NOT_OK(field->AppendEmptyValue());
+    }
+    return Append(true);
+  }
+
+  Status AppendEmptyValues(int64_t length) final {
+    for (const auto& field : children_) {
+      ARROW_RETURN_NOT_OK(field->AppendEmptyValues(length));
+    }
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    UnsafeAppendToBitmap(length, true);
+    return Status::OK();
+  }
+
+  Status AppendArraySlice(const ArraySpan& array, int64_t offset,
+                          int64_t length) override {
+    for (int i = 0; static_cast<size_t>(i) < children_.size(); i++) {
+      ARROW_RETURN_NOT_OK(children_[i]->AppendArraySlice(array.child_data[i],
+                                                         array.offset + offset, length));
+    }
+    const uint8_t* validity = array.MayHaveNulls() ? array.buffers[0].data : NULLPTR;
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    UnsafeAppendToBitmap(validity, array.offset + offset, length);
+    return Status::OK();
+  }
+
+  void Reset() override;
+
+  ArrayBuilder* field_builder(int i) const { return children_[i].get(); }
+
+  int num_fields() const { return static_cast<int>(children_.size()); }
+
+  std::shared_ptr<DataType> type() const override;
+
+ private:
+  std::shared_ptr<DataType> type_;
+};
+
+/// @}
+
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_run_end.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_run_end.h
new file mode 100644
index 0000000000000000000000000000000000000000..ac92efbd0dbe6b470b8275219e75b41aa3f7ab3a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_run_end.h
@@ -0,0 +1,303 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "arrow/array.h"
+#include "arrow/array/builder_base.h"
+
+namespace arrow {
+
+/// \addtogroup run-end-encoded-builders
+///
+/// @{
+
+namespace internal {
+
+/// \brief An ArrayBuilder that deduplicates repeated values as they are
+/// appended to the inner-ArrayBuilder and reports the length of the current run
+/// of identical values.
+///
+/// The following sequence of calls
+///
+///   Append(2)
+///   Append(2)
+///   Append(2)
+///   Append(7)
+///   Append(7)
+///   Append(2)
+///   FinishInternal()
+///
+/// will cause the inner-builder to receive only 3 Append calls
+///
+///   Append(2)
+///   Append(7)
+///   Append(2)
+///   FinishInternal()
+///
+/// Note that values returned by length(), null_count() and capacity() are
+/// related to the compressed array built by the inner-ArrayBuilder.
+class RunCompressorBuilder : public ArrayBuilder {
+ public:
+  RunCompressorBuilder(MemoryPool* pool, std::shared_ptr<ArrayBuilder> inner_builder,
+                       std::shared_ptr<DataType> type);
+
+  ~RunCompressorBuilder() override;
+
+  ARROW_DISALLOW_COPY_AND_ASSIGN(RunCompressorBuilder);
+
+  /// \brief Called right before a run is being closed
+  ///
+  /// Subclasses can override this function to perform an additional action when
+  /// a run is closed (i.e. run-length is known and value is appended to the
+  /// inner builder).
+  ///
+  /// \param value can be NULLPTR if closing a run of NULLs
+  /// \param length the greater than 0 length of the value run being closed
+  virtual Status WillCloseRun(const std::shared_ptr<const Scalar>& value,
+                              int64_t length) {
+    return Status::OK();
+  }
+
+  /// \brief Called right before a run of empty values is being closed
+  ///
+  /// Subclasses can override this function to perform an additional action when
+  /// a run of empty values is appended (i.e. run-length is known and a single
+  /// empty value is appended to the inner builder).
+  ///
+  /// \param length the greater than 0 length of the value run being closed
+  virtual Status WillCloseRunOfEmptyValues(int64_t length) { return Status::OK(); }
+
+  /// \brief Allocate enough memory for a given number of array elements.
+  ///
+  /// NOTE: Conservatively resizing a run-length compressed array for a given
+  /// number of logical elements is not possible, since the physical length will
+  /// vary depending on the values to be appended in the future. But we can
+  /// pessimistically assume that each run will contain a single value and
+  /// allocate that number of runs.
+  Status Resize(int64_t capacity) override { return ResizePhysical(capacity); }
+
+  /// \brief Allocate enough memory for a given number of runs.
+  ///
+  /// Like Resize on non-encoded builders, it does not account for variable size
+  /// data.
+  Status ResizePhysical(int64_t capacity);
+
+  Status ReservePhysical(int64_t additional_capacity) {
+    return Reserve(additional_capacity);
+  }
+
+  void Reset() override;
+
+  Status AppendNull() final { return AppendNulls(1); }
+  Status AppendNulls(int64_t length) override;
+
+  Status AppendEmptyValue() final { return AppendEmptyValues(1); }
+  Status AppendEmptyValues(int64_t length) override;
+
+  Status AppendScalar(const Scalar& scalar, int64_t n_repeats) override;
+  Status AppendScalars(const ScalarVector& scalars) override;
+
+  // AppendArraySlice() is not implemented.
+
+  /// \brief Append a slice of an array containing values from already
+  /// compressed runs.
+  ///
+  /// NOTE: WillCloseRun() is not called as the length of each run cannot be
+  /// determined at this point. Caller should ensure that !has_open_run() by
+  /// calling FinishCurrentRun() before calling this.
+  ///
+  /// Pre-condition: !has_open_run()
+  Status AppendRunCompressedArraySlice(const ArraySpan& array, int64_t offset,
+                                       int64_t length);
+
+  /// \brief Forces the closing of the current run if one is currently open.
+  ///
+  /// This can be called when one wants to ensure the current run will not be
+  /// extended. This may cause identical values to appear close to each other in
+  /// the underlying array (i.e. two runs that could be a single run) if more
+  /// values are appended after this is called.
+  ///
+  /// Finish() and FinishInternal() call this automatically.
+  virtual Status FinishCurrentRun();
+
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
+
+  ArrayBuilder& inner_builder() const { return *inner_builder_; }
+
+  std::shared_ptr<DataType> type() const override { return inner_builder_->type(); }
+
+  bool has_open_run() const { return current_run_length_ > 0; }
+  int64_t open_run_length() const { return current_run_length_; }
+
+ private:
+  inline void UpdateDimensions() {
+    capacity_ = inner_builder_->capacity();
+    length_ = inner_builder_->length();
+    null_count_ = inner_builder_->null_count();
+  }
+
+ private:
+  std::shared_ptr<ArrayBuilder> inner_builder_;
+  std::shared_ptr<const Scalar> current_value_ = NULLPTR;
+  int64_t current_run_length_ = 0;
+};
+
+}  // namespace internal
+
+// ----------------------------------------------------------------------
+// RunEndEncoded builder
+
+/// \brief Run-end encoded array builder.
+///
+/// NOTE: the value returned by capacity() is related to the compressed array
+/// (physical) and not the decoded array (logical) that is run-end encoded.
+/// null_count() always returns 0. length(), on the other hand, returns the
+/// logical length of the run-end encoded array.
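+///
+/// A hedged usage sketch (illustrative only; the child builders and the
+/// run_end_encoded() type factory call are assumptions based on the
+/// constructor below):
+///
+///   auto run_ends = std::make_shared<Int32Builder>();
+///   auto values = std::make_shared<StringBuilder>();
+///   RunEndEncodedBuilder builder(default_memory_pool(), run_ends, values,
+///                                run_end_encoded(int32(), utf8()));
+///   ARROW_RETURN_NOT_OK(builder.AppendScalar(StringScalar("x"), /*n_repeats=*/3));
+///   ARROW_RETURN_NOT_OK(builder.AppendNulls(2));
+///   std::shared_ptr<RunEndEncodedArray> out;
+///   ARROW_RETURN_NOT_OK(builder.Finish(&out));  // logically ["x", "x", "x", null, null]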
+class ARROW_EXPORT RunEndEncodedBuilder : public ArrayBuilder {
+ private:
+  // An internal::RunCompressorBuilder that produces a run-end in the
+  // RunEndEncodedBuilder every time a value-run is closed.
+  class ValueRunBuilder : public internal::RunCompressorBuilder {
+   public:
+    ValueRunBuilder(MemoryPool* pool, const std::shared_ptr<ArrayBuilder>& value_builder,
+                    const std::shared_ptr<DataType>& value_type,
+                    RunEndEncodedBuilder& ree_builder);
+
+    ~ValueRunBuilder() override = default;
+
+    Status WillCloseRun(const std::shared_ptr<const Scalar>&, int64_t length) override {
+      return ree_builder_.CloseRun(length);
+    }
+
+    Status WillCloseRunOfEmptyValues(int64_t length) override {
+      return ree_builder_.CloseRun(length);
+    }
+
+   private:
+    RunEndEncodedBuilder& ree_builder_;
+  };
+
+ public:
+  RunEndEncodedBuilder(MemoryPool* pool,
+                       const std::shared_ptr<ArrayBuilder>& run_end_builder,
+                       const std::shared_ptr<ArrayBuilder>& value_builder,
+                       std::shared_ptr<DataType> type);
+
+  /// \brief Allocate enough memory for a given number of array elements.
+  ///
+  /// NOTE: Conservatively resizing an REE for a given number of logical
+  /// elements is not possible, since the physical length will vary depending on
+  /// the values to be appended in the future. But we can pessimistically assume
+  /// that each run will contain a single value and allocate that number of
+  /// runs.
+  Status Resize(int64_t capacity) override { return ResizePhysical(capacity); }
+
+  /// \brief Allocate enough memory for a given number of runs.
+  Status ResizePhysical(int64_t capacity);
+
+  /// \brief Ensure that there is enough space allocated to append the indicated
+  /// number of runs without any further reallocation. Overallocation is
+  /// used in order to minimize the impact of incremental ReservePhysical() calls.
+  /// Note that additional_capacity is relative to the current number of elements
+  /// rather than to the current capacity, so calls to Reserve() which are not
+  /// interspersed with addition of new elements may not increase the capacity.
+  ///
+  /// \param[in] additional_capacity the number of additional runs
+  /// \return Status
+  Status ReservePhysical(int64_t additional_capacity) {
+    return Reserve(additional_capacity);
+  }
+
+  void Reset() override;
+
+  Status AppendNull() final { return AppendNulls(1); }
+  Status AppendNulls(int64_t length) override;
+
+  Status AppendEmptyValue() final { return AppendEmptyValues(1); }
+  Status AppendEmptyValues(int64_t length) override;
+  Status AppendScalar(const Scalar& scalar, int64_t n_repeats) override;
+  Status AppendScalars(const ScalarVector& scalars) override;
+  Status AppendArraySlice(const ArraySpan& array, int64_t offset,
+                          int64_t length) override;
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<RunEndEncodedArray>* out) { return FinishTyped(out); }
+
+  /// \brief Forces the closing of the current run if one is currently open.
+  ///
+  /// This can be called when one wants to ensure the current run will not be
+  /// extended. This may cause identical values to appear close to each other in
+  /// the values array (i.e. two runs that could be a single run) if more
+  /// values are appended after this is called.
+  Status FinishCurrentRun();
+
+  std::shared_ptr<DataType> type() const override;
+
+ private:
+  /// \brief Update physical capacity and logical length
+  ///
+  /// \param committed_logical_length number of logical values that have been
+  /// committed to the values array
+  /// \param open_run_length number of logical values in the currently open run if any
+  inline void UpdateDimensions(int64_t committed_logical_length,
+                               int64_t open_run_length) {
+    capacity_ = run_end_builder().capacity();
+    length_ = committed_logical_length + open_run_length;
+    committed_logical_length_ = committed_logical_length;
+  }
+
+  // Pre-condition: !value_run_builder_.has_open_run()
+  template <typename RunEndCType>
+  Status DoAppendArraySlice(const ArraySpan& array, int64_t offset, int64_t length);
+
+  template <typename RunEndCType>
+  Status DoAppendRunEnd(int64_t run_end);
+
+  /// \brief Cast run_end to the appropriate type and appends it to the run_ends
+  /// array.
+  Status AppendRunEnd(int64_t run_end);
+
+  /// \brief Close a run by appending a value to the run_ends array and updating
+  /// length_ to reflect the new run.
+  ///
+  /// Pre-condition: run_length > 0.
+  [[nodiscard]] Status CloseRun(int64_t run_length);
+
+  ArrayBuilder& run_end_builder();
+  ArrayBuilder& value_builder();
+
+ private:
+  std::shared_ptr<RunEndEncodedType> type_;
+  ValueRunBuilder* value_run_builder_;
+  // The length not counting the current open run in the value_run_builder_
+  int64_t committed_logical_length_ = 0;
+};
+
+/// @}
+
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/concatenate.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/concatenate.h
new file mode 100644
index 0000000000000000000000000000000000000000..e7597aad812c4ca20a9335afa1bc44129b2ad727
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/concatenate.h
@@ -0,0 +1,37 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+
+#include "arrow/type_fwd.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// \brief Concatenate arrays
+///
+/// \param[in] arrays a vector of arrays to be concatenated
+/// \param[in] pool memory to store the result will be allocated from this memory pool
+/// \return the concatenated array
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> Concatenate(const ArrayVector& arrays,
+                                           MemoryPool* pool = default_memory_pool());
+
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/util.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/util.h
new file mode 100644
index 0000000000000000000000000000000000000000..fd8e75ddb86405c523a8083f559dab0e72364e24
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/util.h
@@ -0,0 +1,96 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "arrow/array/data.h"
+#include "arrow/compare.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// \defgroup array-factories Array factory functions
+///
+/// @{
+
+/// \brief Create a strongly-typed Array instance from generic ArrayData
+/// \param[in] data the array contents
+/// \return the resulting Array instance
+ARROW_EXPORT
+std::shared_ptr<Array> MakeArray(const std::shared_ptr<ArrayData>& data);
+
+/// \brief Create a strongly-typed Array instance with all elements null
+/// \param[in] type the array type
+/// \param[in] length the array length
+/// \param[in] pool the memory pool to allocate memory from
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> MakeArrayOfNull(const std::shared_ptr<DataType>& type,
+                                               int64_t length,
+                                               MemoryPool* pool = default_memory_pool());
+
+/// \brief Create an Array instance whose slots are the given scalar
+/// \param[in] scalar the value with which to fill the array
+/// \param[in] length the array length
+/// \param[in] pool the memory pool to allocate memory from
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> MakeArrayFromScalar(
+    const Scalar& scalar, int64_t length, MemoryPool* pool = default_memory_pool());
+
+/// \brief Create an empty Array of a given type
+///
+/// The output Array will be of the given type.
+///
+/// \param[in] type the data type of the empty Array
+/// \param[in] pool the memory pool to allocate memory from
+/// \return the resulting Array
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> MakeEmptyArray(std::shared_ptr<DataType> type,
+                                              MemoryPool* pool = default_memory_pool());
+
+/// @}
+
+namespace internal {
+
+/// \brief Swap endian of each element in a generic ArrayData
+///
+/// As dictionaries are often shared between different arrays, dictionaries
+/// are not swapped by this function and should be handled separately.
+///
+/// \param[in] data the array contents
+/// \param[in] pool the memory pool to allocate memory from
+/// \return the resulting ArrayData whose elements were swapped
+ARROW_EXPORT
+Result<std::shared_ptr<ArrayData>> SwapEndianArrayData(
+    const std::shared_ptr<ArrayData>& data, MemoryPool* pool = default_memory_pool());
+
+/// Given a number of ArrayVectors, treat each ArrayVector as the
+/// chunks of a chunked array. Then rechunk each ArrayVector such that
+/// all ArrayVectors are chunked identically. It is mandatory that
+/// all ArrayVectors contain the same total number of elements.
+ARROW_EXPORT
+std::vector<ArrayVector> RechunkArraysConsistently(const std::vector<ArrayVector>&);
+
+}  // namespace internal
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/row/grouper.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/row/grouper.h
new file mode 100644
index 0000000000000000000000000000000000000000..628a9c14f3e4402300a3daafdc2308379af5103a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/row/grouper.h
@@ -0,0 +1,184 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+#include <vector>
+
+#include "arrow/compute/kernel.h"
+#include "arrow/datum.h"
+#include "arrow/result.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace compute {
+
+/// \brief A segment
+/// A segment group is a chunk of continuous rows that have the same segment key. (For
+/// example, in ordered time series processing, the segment key can be "date", and a
+/// segment group can be all the rows that belong to the same date.) A segment group can
+/// span across multiple exec batches. A segment is a chunk of continuous rows that has
+/// the same segment key within a given batch. When a segment group spans across
+/// batches, it will have multiple segments. A segment never spans across batches. The
+/// segment data structure only makes sense when used along with an exec batch.
+struct ARROW_EXPORT Segment {
+  /// \brief the offset into the batch where the segment starts
+  int64_t offset;
+  /// \brief the length of the segment
+  int64_t length;
+  /// \brief whether the segment may be extended by a next one
+  bool is_open;
+  /// \brief whether the segment extends a preceding one
+  bool extends;
+};
+
+inline bool operator==(const Segment& segment1, const Segment& segment2) {
+  return segment1.offset == segment2.offset && segment1.length == segment2.length &&
+         segment1.is_open == segment2.is_open && segment1.extends == segment2.extends;
+}
+inline bool operator!=(const Segment& segment1, const Segment& segment2) {
+  return !(segment1 == segment2);
+}
+
+/// \brief A helper class to divide a batch into segments of equal values
+///
+/// For example, given a batch with two columns:
+///
+/// A A
+/// A A
+/// A B
+/// A B
+/// A A
+///
+/// Then the batch could be divided into 3 segments. The first would be rows 0 & 1,
+/// the second would be rows 2 & 3, and the third would be row 4.
+///
+/// Further, a segmenter keeps track of the last value seen. This allows it to calculate
+/// segments which span batches. In our above example the last segment we emit would set
+/// the "open" flag, which indicates whether the segment may extend into the next batch.
+///
+/// If the next call to the segmenter starts with `A A` then that segment would set the
+/// "extends" flag, which indicates whether the segment continues the last open segment.
+class ARROW_EXPORT RowSegmenter {
+ public:
+  virtual ~RowSegmenter() = default;
+
+  /// \brief Construct a Segmenter which segments on the specified key types
+  ///
+  /// \param[in] key_types the specified key types
+  /// \param[in] nullable_keys whether values of the specified keys may be null
+  /// \param[in] ctx the execution context to use
+  static Result<std::unique_ptr<RowSegmenter>> Make(
+      const std::vector<TypeHolder>& key_types, bool nullable_keys, ExecContext* ctx);
+
+  /// \brief Return the key types of this segmenter
+  virtual const std::vector<TypeHolder>& key_types() const = 0;
+
+  /// \brief Reset this segmenter
+  ///
+  /// A segmenter normally extends (see `Segment`) a segment from one batch to the next.
+ /// If segment-extension is undesirable, for example when each batch is processed + /// independently, then `Reset` should be invoked before processing the next batch. + virtual Status Reset() = 0; + + /// \brief Get the next segment for the given batch starting from the given offset + virtual Result GetNextSegment(const ExecSpan& batch, int64_t offset) = 0; +}; + +/// Consumes batches of keys and yields batches of the group ids. +class ARROW_EXPORT Grouper { + public: + virtual ~Grouper() = default; + + /// Construct a Grouper which receives the specified key types + static Result> Make(const std::vector& key_types, + ExecContext* ctx = default_exec_context()); + + /// Consume a batch of keys, producing the corresponding group ids as an integer array, + /// over a slice defined by an offset and length, which defaults to the batch length. + /// Currently only uint32 indices will be produced, eventually the bit width will only + /// be as wide as necessary. + virtual Result Consume(const ExecSpan& batch, int64_t offset = 0, + int64_t length = -1) = 0; + + /// Get current unique keys. May be called multiple times. + virtual Result GetUniques() = 0; + + /// Get the current number of groups. + virtual uint32_t num_groups() const = 0; + + /// \brief Assemble lists of indices of identical elements. + /// + /// \param[in] ids An unsigned, all-valid integral array which will be + /// used as grouping criteria. + /// \param[in] num_groups An upper bound for the elements of ids + /// \param[in] ctx Execution context to use during the operation + /// \return A num_groups-long ListArray where the slot at i contains a + /// list of indices where i appears in ids. + /// + /// MakeGroupings([ + /// 2, + /// 2, + /// 5, + /// 5, + /// 2, + /// 3 + /// ], 8) == [ + /// [], + /// [], + /// [0, 1, 4], + /// [5], + /// [], + /// [2, 3], + /// [], + /// [] + /// ] + static Result> MakeGroupings( + const UInt32Array& ids, uint32_t num_groups, + ExecContext* ctx = default_exec_context()); + + /// \brief Produce a ListArray whose slots are selections of `array` which correspond to + /// the provided groupings. + /// + /// For example, + /// ApplyGroupings([ + /// [], + /// [], + /// [0, 1, 4], + /// [5], + /// [], + /// [2, 3], + /// [], + /// [] + /// ], [2, 2, 5, 5, 2, 3]) == [ + /// [], + /// [], + /// [2, 2, 2], + /// [3], + /// [], + /// [5, 5], + /// [], + /// [] + /// ] + static Result> ApplyGroupings( + const ListArray& groupings, const Array& array, + ExecContext* ctx = default_exec_context()); +}; + +} // namespace compute +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/api.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/api.h new file mode 100644 index 0000000000000000000000000000000000000000..47b56684b5af7f383e6e2acee014dde6ba40d11d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/api.h @@ -0,0 +1,21 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/json/options.h" +#include "arrow/json/reader.h" diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/extension_type.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/extension_type.h new file mode 100644 index 0000000000000000000000000000000000000000..846e3c7a1657850fe9f8ca91dfca31360dbf067d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/extension_type.h @@ -0,0 +1,211 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/extension_type.h" +#include "arrow/testing/visibility.h" +#include "arrow/util/macros.h" + +namespace arrow { + +class ARROW_TESTING_EXPORT UuidArray : public ExtensionArray { + public: + using ExtensionArray::ExtensionArray; +}; + +class ARROW_TESTING_EXPORT UuidType : public ExtensionType { + public: + UuidType() : ExtensionType(fixed_size_binary(16)) {} + + std::string extension_name() const override { return "uuid"; } + + bool ExtensionEquals(const ExtensionType& other) const override; + + std::shared_ptr MakeArray(std::shared_ptr data) const override; + + Result> Deserialize( + std::shared_ptr storage_type, + const std::string& serialized) const override; + + std::string Serialize() const override { return "uuid-serialized"; } +}; + +class ARROW_TESTING_EXPORT SmallintArray : public ExtensionArray { + public: + using ExtensionArray::ExtensionArray; +}; + +class ARROW_TESTING_EXPORT TinyintArray : public ExtensionArray { + public: + using ExtensionArray::ExtensionArray; +}; + +class ARROW_TESTING_EXPORT ListExtensionArray : public ExtensionArray { + public: + using ExtensionArray::ExtensionArray; +}; + +class ARROW_TESTING_EXPORT SmallintType : public ExtensionType { + public: + SmallintType() : ExtensionType(int16()) {} + + std::string extension_name() const override { return "smallint"; } + + bool ExtensionEquals(const ExtensionType& other) const override; + + std::shared_ptr MakeArray(std::shared_ptr data) const override; + + Result> Deserialize( + std::shared_ptr storage_type, + const std::string& serialized) const override; + + std::string Serialize() const override { return "smallint"; } +}; + +class ARROW_TESTING_EXPORT TinyintType : public ExtensionType { + public: + TinyintType() : ExtensionType(int8()) {} + + 
std::string extension_name() const override { return "tinyint"; } + + bool ExtensionEquals(const ExtensionType& other) const override; + + std::shared_ptr MakeArray(std::shared_ptr data) const override; + + Result> Deserialize( + std::shared_ptr storage_type, + const std::string& serialized) const override; + + std::string Serialize() const override { return "tinyint"; } +}; + +class ARROW_TESTING_EXPORT ListExtensionType : public ExtensionType { + public: + ListExtensionType() : ExtensionType(list(int32())) {} + + std::string extension_name() const override { return "list-ext"; } + + bool ExtensionEquals(const ExtensionType& other) const override; + + std::shared_ptr MakeArray(std::shared_ptr data) const override; + + Result> Deserialize( + std::shared_ptr storage_type, + const std::string& serialized) const override; + + std::string Serialize() const override { return "list-ext"; } +}; + +class ARROW_TESTING_EXPORT DictExtensionType : public ExtensionType { + public: + DictExtensionType() : ExtensionType(dictionary(int8(), utf8())) {} + + std::string extension_name() const override { return "dict-extension"; } + + bool ExtensionEquals(const ExtensionType& other) const override; + + std::shared_ptr MakeArray(std::shared_ptr data) const override; + + Result> Deserialize( + std::shared_ptr storage_type, + const std::string& serialized) const override; + + std::string Serialize() const override { return "dict-extension-serialized"; } +}; + +class ARROW_TESTING_EXPORT Complex128Array : public ExtensionArray { + public: + using ExtensionArray::ExtensionArray; +}; + +class ARROW_TESTING_EXPORT Complex128Type : public ExtensionType { + public: + Complex128Type() + : ExtensionType(struct_({::arrow::field("real", float64(), /*nullable=*/false), + ::arrow::field("imag", float64(), /*nullable=*/false)})) {} + + std::string extension_name() const override { return "complex128"; } + + bool ExtensionEquals(const ExtensionType& other) const override; + + std::shared_ptr MakeArray(std::shared_ptr data) const override; + + Result> Deserialize( + std::shared_ptr storage_type, + const std::string& serialized) const override; + + std::string Serialize() const override { return "complex128-serialized"; } +}; + +ARROW_TESTING_EXPORT +std::shared_ptr uuid(); + +ARROW_TESTING_EXPORT +std::shared_ptr smallint(); + +ARROW_TESTING_EXPORT +std::shared_ptr tinyint(); + +ARROW_TESTING_EXPORT +std::shared_ptr list_extension_type(); + +ARROW_TESTING_EXPORT +std::shared_ptr dict_extension_type(); + +ARROW_TESTING_EXPORT +std::shared_ptr complex128(); + +ARROW_TESTING_EXPORT +std::shared_ptr ExampleUuid(); + +ARROW_TESTING_EXPORT +std::shared_ptr ExampleSmallint(); + +ARROW_TESTING_EXPORT +std::shared_ptr ExampleTinyint(); + +ARROW_TESTING_EXPORT +std::shared_ptr ExampleDictExtension(); + +ARROW_TESTING_EXPORT +std::shared_ptr ExampleComplex128(); + +ARROW_TESTING_EXPORT +std::shared_ptr MakeComplex128(const std::shared_ptr& real, + const std::shared_ptr& imag); + +// A RAII class that registers an extension type on construction +// and unregisters it on destruction. 
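+//
+// A minimal sketch of the intended use (illustrative only):
+//
+//   {
+//     ExtensionTypeGuard guard(uuid());   // registers the "uuid" extension type
+//     // ... test code that relies on the registered type ...
+//   }                                     // destruction unregisters it again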
+class ARROW_TESTING_EXPORT ExtensionTypeGuard { + public: + explicit ExtensionTypeGuard(const std::shared_ptr& type); + explicit ExtensionTypeGuard(const DataTypeVector& types); + ~ExtensionTypeGuard(); + ARROW_DEFAULT_MOVE_AND_ASSIGN(ExtensionTypeGuard); + + protected: + ARROW_DISALLOW_COPY_AND_ASSIGN(ExtensionTypeGuard); + + std::vector extension_names_; +}; + +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/future_util.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/future_util.h new file mode 100644 index 0000000000000000000000000000000000000000..2ca70d05402f92c71d8f86441eeccec1ebc6d156 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/future_util.h @@ -0,0 +1,142 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/testing/gtest_util.h" +#include "arrow/util/future.h" + +// This macro should be called by futures that are expected to +// complete pretty quickly. arrow::kDefaultAssertFinishesWaitSeconds is the +// default max wait here. Anything longer than that and it's a questionable unit test +// anyways. 
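+//
+// Sketch of typical use (illustrative; `SomeAsyncCall()` is a placeholder for
+// any function returning an arrow::Future):
+//
+//   ASSERT_FINISHES_OK(SomeAsyncCall());
+//   ASSERT_FINISHES_OK_AND_ASSIGN(auto value, SomeAsyncCall());
+//   ASSERT_FINISHES_AND_RAISES(Invalid, SomeAsyncCall());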
+#define ASSERT_FINISHES_IMPL(fut) \ + do { \ + ASSERT_TRUE(fut.Wait(::arrow::kDefaultAssertFinishesWaitSeconds)); \ + if (!fut.is_finished()) { \ + FAIL() << "Future did not finish in a timely fashion"; \ + } \ + } while (false) + +#define ASSERT_FINISHES_OK(expr) \ + do { \ + auto&& _fut = (expr); \ + ASSERT_TRUE(_fut.Wait(::arrow::kDefaultAssertFinishesWaitSeconds)); \ + if (!_fut.is_finished()) { \ + FAIL() << "Future did not finish in a timely fashion"; \ + } \ + auto& _st = _fut.status(); \ + if (!_st.ok()) { \ + FAIL() << "'" ARROW_STRINGIFY(expr) "' failed with " << _st.ToString(); \ + } \ + } while (false) + +#define ASSERT_FINISHES_AND_RAISES(ENUM, expr) \ + do { \ + auto&& _fut = (expr); \ + ASSERT_FINISHES_IMPL(_fut); \ + ASSERT_RAISES(ENUM, _fut.status()); \ + } while (false) + +#define EXPECT_FINISHES_AND_RAISES_WITH_MESSAGE_THAT(ENUM, matcher, expr) \ + do { \ + auto&& fut = (expr); \ + ASSERT_FINISHES_IMPL(fut); \ + EXPECT_RAISES_WITH_MESSAGE_THAT(ENUM, matcher, fut.status()); \ + } while (false) + +#define ASSERT_FINISHES_OK_AND_ASSIGN_IMPL(lhs, rexpr, _future_name) \ + auto _future_name = (rexpr); \ + ASSERT_FINISHES_IMPL(_future_name); \ + ASSERT_OK_AND_ASSIGN(lhs, _future_name.result()); + +#define ASSERT_FINISHES_OK_AND_ASSIGN(lhs, rexpr) \ + ASSERT_FINISHES_OK_AND_ASSIGN_IMPL(lhs, rexpr, \ + ARROW_ASSIGN_OR_RAISE_NAME(_fut, __COUNTER__)) + +#define ASSERT_FINISHES_OK_AND_EQ(expected, expr) \ + do { \ + ASSERT_FINISHES_OK_AND_ASSIGN(auto _actual, (expr)); \ + ASSERT_EQ(expected, _actual); \ + } while (0) + +#define EXPECT_FINISHES_IMPL(fut) \ + do { \ + EXPECT_TRUE(fut.Wait(::arrow::kDefaultAssertFinishesWaitSeconds)); \ + if (!fut.is_finished()) { \ + ADD_FAILURE() << "Future did not finish in a timely fashion"; \ + } \ + } while (false) + +#define ON_FINISH_ASSIGN_OR_HANDLE_ERROR_IMPL(handle_error, future_name, lhs, rexpr) \ + auto future_name = (rexpr); \ + EXPECT_FINISHES_IMPL(future_name); \ + handle_error(future_name.status()); \ + EXPECT_OK_AND_ASSIGN(lhs, future_name.result()); + +#define EXPECT_FINISHES(expr) \ + do { \ + EXPECT_FINISHES_IMPL(expr); \ + } while (0) + +#define EXPECT_FINISHES_OK_AND_ASSIGN(lhs, rexpr) \ + ON_FINISH_ASSIGN_OR_HANDLE_ERROR_IMPL( \ + ARROW_EXPECT_OK, ARROW_ASSIGN_OR_RAISE_NAME(_fut, __COUNTER__), lhs, rexpr); + +#define EXPECT_FINISHES_OK_AND_EQ(expected, expr) \ + do { \ + EXPECT_FINISHES_OK_AND_ASSIGN(auto _actual, (expr)); \ + EXPECT_EQ(expected, _actual); \ + } while (0) + +namespace arrow { + +constexpr double kDefaultAssertFinishesWaitSeconds = 64; + +template +void AssertNotFinished(const Future& fut) { + ASSERT_FALSE(IsFutureFinished(fut.state())); +} + +template +void AssertFinished(const Future& fut) { + ASSERT_TRUE(IsFutureFinished(fut.state())); +} + +// Assert the future is successful *now* +template +void AssertSuccessful(const Future& fut) { + if (IsFutureFinished(fut.state())) { + ASSERT_EQ(fut.state(), FutureState::SUCCESS); + ASSERT_OK(fut.status()); + } else { + FAIL() << "Expected future to be completed successfully but it was still pending"; + } +} + +// Assert the future is failed *now* +template +void AssertFailed(const Future& fut) { + if (IsFutureFinished(fut.state())) { + ASSERT_EQ(fut.state(), FutureState::FAILURE); + ASSERT_FALSE(fut.status().ok()); + } else { + FAIL() << "Expected future to have failed but it was still pending"; + } +} + +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/gtest_util.h 
b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/gtest_util.h new file mode 100644 index 0000000000000000000000000000000000000000..85b4c1f1f0138289d5717f7fcf6ade486ae044f5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/gtest_util.h @@ -0,0 +1,570 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "arrow/compare.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/testing/gtest_compat.h" +#include "arrow/testing/visibility.h" +#include "arrow/type_fwd.h" +#include "arrow/type_traits.h" +#include "arrow/util/macros.h" +#include "arrow/util/string_builder.h" +#include "arrow/util/type_fwd.h" + +// NOTE: failing must be inline in the macros below, to get correct file / line number +// reporting on test failures. + +// NOTE: using a for loop for this macro allows extra failure messages to be +// appended with operator<< +#define ASSERT_RAISES(ENUM, expr) \ + for (::arrow::Status _st = ::arrow::internal::GenericToStatus((expr)); \ + !_st.Is##ENUM();) \ + FAIL() << "Expected '" ARROW_STRINGIFY(expr) "' to fail with " ARROW_STRINGIFY( \ + ENUM) ", but got " \ + << _st.ToString() + +#define ASSERT_RAISES_WITH_MESSAGE(ENUM, message, expr) \ + do { \ + auto _res = (expr); \ + ::arrow::Status _st = ::arrow::internal::GenericToStatus(_res); \ + if (!_st.Is##ENUM()) { \ + FAIL() << "Expected '" ARROW_STRINGIFY(expr) "' to fail with " ARROW_STRINGIFY( \ + ENUM) ", but got " \ + << _st.ToString(); \ + } \ + ASSERT_EQ((message), _st.ToStringWithoutContextLines()); \ + } while (false) + +#define EXPECT_RAISES_WITH_MESSAGE_THAT(ENUM, matcher, expr) \ + do { \ + auto _res = (expr); \ + ::arrow::Status _st = ::arrow::internal::GenericToStatus(_res); \ + EXPECT_TRUE(_st.Is##ENUM()) << "Expected '" ARROW_STRINGIFY(expr) "' to fail with " \ + << ARROW_STRINGIFY(ENUM) ", but got " << _st.ToString(); \ + EXPECT_THAT(_st.ToStringWithoutContextLines(), (matcher)); \ + } while (false) + +#define EXPECT_RAISES_WITH_CODE_AND_MESSAGE_THAT(code, matcher, expr) \ + do { \ + auto _res = (expr); \ + ::arrow::Status _st = ::arrow::internal::GenericToStatus(_res); \ + EXPECT_EQ(_st.CodeAsString(), Status::CodeAsString(code)); \ + EXPECT_THAT(_st.ToStringWithoutContextLines(), (matcher)); \ + } while (false) + +#define ASSERT_OK(expr) \ + for (::arrow::Status _st = ::arrow::internal::GenericToStatus((expr)); !_st.ok();) \ + FAIL() << "'" ARROW_STRINGIFY(expr) "' failed with " << _st.ToString() + +#define ASSERT_OK_NO_THROW(expr) ASSERT_NO_THROW(ASSERT_OK(expr)) + +#define ARROW_EXPECT_OK(expr) \ + do { \ + auto _res = (expr); \ + ::arrow::Status 
_st = ::arrow::internal::GenericToStatus(_res); \ + EXPECT_TRUE(_st.ok()) << "'" ARROW_STRINGIFY(expr) "' failed with " \ + << _st.ToString(); \ + } while (false) + +#define ASSERT_NOT_OK(expr) \ + for (::arrow::Status _st = ::arrow::internal::GenericToStatus((expr)); _st.ok();) \ + FAIL() << "'" ARROW_STRINGIFY(expr) "' did not failed" << _st.ToString() + +#define ABORT_NOT_OK(expr) \ + do { \ + auto _res = (expr); \ + ::arrow::Status _st = ::arrow::internal::GenericToStatus(_res); \ + if (ARROW_PREDICT_FALSE(!_st.ok())) { \ + _st.Abort(); \ + } \ + } while (false); + +#define ASSIGN_OR_HANDLE_ERROR_IMPL(handle_error, status_name, lhs, rexpr) \ + auto&& status_name = (rexpr); \ + handle_error(status_name.status()); \ + lhs = std::move(status_name).ValueOrDie(); + +#define ASSERT_OK_AND_ASSIGN(lhs, rexpr) \ + ASSIGN_OR_HANDLE_ERROR_IMPL( \ + ASSERT_OK, ARROW_ASSIGN_OR_RAISE_NAME(_error_or_value, __COUNTER__), lhs, rexpr); + +#define ASSIGN_OR_ABORT(lhs, rexpr) \ + ASSIGN_OR_HANDLE_ERROR_IMPL(ABORT_NOT_OK, \ + ARROW_ASSIGN_OR_RAISE_NAME(_error_or_value, __COUNTER__), \ + lhs, rexpr); + +#define EXPECT_OK_AND_ASSIGN(lhs, rexpr) \ + ASSIGN_OR_HANDLE_ERROR_IMPL(ARROW_EXPECT_OK, \ + ARROW_ASSIGN_OR_RAISE_NAME(_error_or_value, __COUNTER__), \ + lhs, rexpr); + +#define ASSERT_OK_AND_EQ(expected, expr) \ + do { \ + ASSERT_OK_AND_ASSIGN(auto _actual, (expr)); \ + ASSERT_EQ(expected, _actual); \ + } while (0) + +// A generalized version of GTest's SCOPED_TRACE that takes arbitrary arguments. +// ARROW_SCOPED_TRACE("some variable = ", some_variable, ...) + +#define ARROW_SCOPED_TRACE(...) SCOPED_TRACE(::arrow::util::StringBuilder(__VA_ARGS__)) + +namespace arrow { + +// ---------------------------------------------------------------------- +// Useful testing::Types declarations + +inline void PrintTo(StatusCode code, std::ostream* os) { + *os << Status::CodeAsString(code); +} + +using NumericArrowTypes = + ::testing::Types; + +using RealArrowTypes = ::testing::Types; + +using IntegralArrowTypes = ::testing::Types; + +using PhysicalIntegralArrowTypes = + ::testing::Types; + +using PrimitiveArrowTypes = + ::testing::Types; + +using TemporalArrowTypes = + ::testing::Types; + +using DecimalArrowTypes = ::testing::Types; + +using BaseBinaryArrowTypes = + ::testing::Types; + +using BaseBinaryOrBinaryViewLikeArrowTypes = + ::testing::Types; + +using BinaryArrowTypes = ::testing::Types; + +using StringArrowTypes = ::testing::Types; + +using StringOrStringViewArrowTypes = + ::testing::Types; + +using ListArrowTypes = ::testing::Types; + +using UnionArrowTypes = ::testing::Types; + +class Array; +class ChunkedArray; +class RecordBatch; +class Table; +struct Datum; + +#define ASSERT_ARRAYS_EQUAL(lhs, rhs) AssertArraysEqual((lhs), (rhs)) +#define ASSERT_BATCHES_EQUAL(lhs, rhs) AssertBatchesEqual((lhs), (rhs)) +#define ASSERT_BATCHES_APPROX_EQUAL(lhs, rhs) AssertBatchesApproxEqual((lhs), (rhs)) +#define ASSERT_TABLES_EQUAL(lhs, rhs) AssertTablesEqual((lhs), (rhs)) + +// Default EqualOptions for testing +static inline EqualOptions TestingEqualOptions() { + return EqualOptions{}.nans_equal(true).signed_zeros_equal(false); +} + +// If verbose is true, then the arrays will be pretty printed +ARROW_TESTING_EXPORT void AssertArraysEqual( + const Array& expected, const Array& actual, bool verbose = false, + const EqualOptions& options = TestingEqualOptions()); +ARROW_TESTING_EXPORT void AssertArraysApproxEqual( + const Array& expected, const Array& actual, bool verbose = false, + const EqualOptions& options = 
TestingEqualOptions()); +// Returns true when values are both null +ARROW_TESTING_EXPORT void AssertScalarsEqual( + const Scalar& expected, const Scalar& actual, bool verbose = false, + const EqualOptions& options = TestingEqualOptions()); +ARROW_TESTING_EXPORT void AssertScalarsApproxEqual( + const Scalar& expected, const Scalar& actual, bool verbose = false, + const EqualOptions& options = TestingEqualOptions()); +ARROW_TESTING_EXPORT void AssertBatchesEqual( + const RecordBatch& expected, const RecordBatch& actual, bool check_metadata = false, + const EqualOptions& options = TestingEqualOptions()); +ARROW_TESTING_EXPORT void AssertBatchesApproxEqual( + const RecordBatch& expected, const RecordBatch& actual, + const EqualOptions& options = TestingEqualOptions()); +ARROW_TESTING_EXPORT void AssertChunkedEqual( + const ChunkedArray& expected, const ChunkedArray& actual, + const EqualOptions& options = TestingEqualOptions()); +ARROW_TESTING_EXPORT void AssertChunkedEqual( + const ChunkedArray& actual, const ArrayVector& expected, + const EqualOptions& options = TestingEqualOptions()); +// Like ChunkedEqual, but permits different chunk layout +ARROW_TESTING_EXPORT void AssertChunkedEquivalent( + const ChunkedArray& expected, const ChunkedArray& actual, + const EqualOptions& options = TestingEqualOptions()); +ARROW_TESTING_EXPORT void AssertChunkedApproxEquivalent( + const ChunkedArray& expected, const ChunkedArray& actual, + const EqualOptions& options = TestingEqualOptions()); +ARROW_TESTING_EXPORT void AssertBufferEqual(const Buffer& buffer, + const std::vector& expected); +ARROW_TESTING_EXPORT void AssertBufferEqual(const Buffer& buffer, + std::string_view expected); +ARROW_TESTING_EXPORT void AssertBufferEqual(const Buffer& buffer, const Buffer& expected); + +ARROW_TESTING_EXPORT void AssertTypeEqual(const DataType& lhs, const DataType& rhs, + bool check_metadata = false); +ARROW_TESTING_EXPORT void AssertTypeEqual(const std::shared_ptr& lhs, + const std::shared_ptr& rhs, + bool check_metadata = false); +ARROW_TESTING_EXPORT void AssertFieldEqual(const Field& lhs, const Field& rhs, + bool check_metadata = false); +ARROW_TESTING_EXPORT void AssertFieldEqual(const std::shared_ptr& lhs, + const std::shared_ptr& rhs, + bool check_metadata = false); +ARROW_TESTING_EXPORT void AssertSchemaEqual(const Schema& lhs, const Schema& rhs, + bool check_metadata = false); +ARROW_TESTING_EXPORT void AssertSchemaEqual(const std::shared_ptr& lhs, + const std::shared_ptr& rhs, + bool check_metadata = false); + +ARROW_TESTING_EXPORT void AssertTypeNotEqual(const DataType& lhs, const DataType& rhs, + bool check_metadata = false); +ARROW_TESTING_EXPORT void AssertTypeNotEqual(const std::shared_ptr& lhs, + const std::shared_ptr& rhs, + bool check_metadata = false); +ARROW_TESTING_EXPORT void AssertFieldNotEqual(const Field& lhs, const Field& rhs, + bool check_metadata = false); +ARROW_TESTING_EXPORT void AssertFieldNotEqual(const std::shared_ptr& lhs, + const std::shared_ptr& rhs, + bool check_metadata = false); +ARROW_TESTING_EXPORT void AssertSchemaNotEqual(const Schema& lhs, const Schema& rhs, + bool check_metadata = false); +ARROW_TESTING_EXPORT void AssertSchemaNotEqual(const std::shared_ptr& lhs, + const std::shared_ptr& rhs, + bool check_metadata = false); + +ARROW_TESTING_EXPORT Result> PrintArrayDiff( + const ChunkedArray& expected, const ChunkedArray& actual); + +ARROW_TESTING_EXPORT void AssertTablesEqual( + const Table& expected, const Table& actual, bool same_chunk_layout = true, + bool flatten = 
false, const EqualOptions& options = TestingEqualOptions()); + +ARROW_TESTING_EXPORT void AssertDatumsEqual( + const Datum& expected, const Datum& actual, bool verbose = false, + const EqualOptions& options = TestingEqualOptions()); +ARROW_TESTING_EXPORT void AssertDatumsApproxEqual( + const Datum& expected, const Datum& actual, bool verbose = false, + const EqualOptions& options = TestingEqualOptions()); + +template +void AssertNumericDataEqual(const C_TYPE* raw_data, + const std::vector& expected_values) { + for (auto expected : expected_values) { + ASSERT_EQ(expected, *raw_data); + ++raw_data; + } +} + +ARROW_TESTING_EXPORT void CompareBatch( + const RecordBatch& left, const RecordBatch& right, bool compare_metadata = true, + const EqualOptions& options = TestingEqualOptions()); + +ARROW_TESTING_EXPORT void ApproxCompareBatch( + const RecordBatch& left, const RecordBatch& right, bool compare_metadata = true, + const EqualOptions& options = TestingEqualOptions()); + +// Check if the padding of the buffers of the array is zero. +// Also cause valgrind warnings if the padding bytes are uninitialized. +ARROW_TESTING_EXPORT void AssertZeroPadded(const Array& array); + +// Check if the valid buffer bytes are initialized +// and cause valgrind warnings otherwise. +ARROW_TESTING_EXPORT void TestInitialized(const ArrayData& array); +ARROW_TESTING_EXPORT void TestInitialized(const Array& array); + +#define DECL_T() typedef typename TestFixture::T T; + +#define DECL_TYPE() typedef typename TestFixture::Type Type; + +// ArrayFromJSON: construct an Array from a simple JSON representation + +ARROW_TESTING_EXPORT +std::shared_ptr ArrayFromJSON(const std::shared_ptr&, + std::string_view json); + +ARROW_TESTING_EXPORT +std::shared_ptr DictArrayFromJSON(const std::shared_ptr& type, + std::string_view indices_json, + std::string_view dictionary_json); + +ARROW_TESTING_EXPORT +std::shared_ptr RecordBatchFromJSON(const std::shared_ptr&, + std::string_view); + +ARROW_TESTING_EXPORT +std::shared_ptr ChunkedArrayFromJSON(const std::shared_ptr&, + const std::vector& json); + +ARROW_TESTING_EXPORT +std::shared_ptr ScalarFromJSON(const std::shared_ptr&, + std::string_view json); + +ARROW_TESTING_EXPORT +std::shared_ptr DictScalarFromJSON(const std::shared_ptr&, + std::string_view index_json, + std::string_view dictionary_json); + +ARROW_TESTING_EXPORT +std::shared_ptr TableFromJSON(const std::shared_ptr&, + const std::vector& json); + +ARROW_TESTING_EXPORT +std::shared_ptr TensorFromJSON(const std::shared_ptr& type, + std::string_view data, std::string_view shape, + std::string_view strides = "[]", + std::string_view dim_names = "[]"); + +ARROW_TESTING_EXPORT +std::shared_ptr TensorFromJSON(const std::shared_ptr& type, + std::string_view data, + const std::vector& shape, + const std::vector& strides = {}, + const std::vector& dim_names = {}); + +ARROW_TESTING_EXPORT +Result> RunEndEncodeTableColumns( + const Table& table, const std::vector& column_indices); + +// Given an array, return a new identical array except for one validity bit +// set to a new value. +// This is useful to force the underlying "value" of null entries to otherwise +// invalid data and check that errors don't get reported. +ARROW_TESTING_EXPORT +std::shared_ptr TweakValidityBit(const std::shared_ptr& array, + int64_t index, bool validity); + +ARROW_TESTING_EXPORT +void SleepFor(double seconds); + +// Sleeps for a very small amount of time. The thread will be yielded +// at least once ensuring that context switches could happen. 
It is intended +// to be used for stress testing parallel code and shouldn't be assumed to do any +// reliable timing. +ARROW_TESTING_EXPORT +void SleepABit(); + +// Wait until predicate is true or timeout in seconds expires. +ARROW_TESTING_EXPORT +void BusyWait(double seconds, std::function predicate); + +// \see SleepABit +ARROW_TESTING_EXPORT +Future<> SleepABitAsync(); + +ARROW_TESTING_EXPORT bool FileIsClosed(int fd); + +template +std::vector IteratorToVector(Iterator iterator) { + EXPECT_OK_AND_ASSIGN(auto out, iterator.ToVector()); + return out; +} + +ARROW_TESTING_EXPORT +bool LocaleExists(const char* locale); + +#ifndef _WIN32 +ARROW_TESTING_EXPORT +void AssertChildExit(int child_pid, int expected_exit_status = 0); +#endif + +// A RAII-style object that switches to a new locale, and switches back +// to the old locale when going out of scope. Doesn't do anything if the +// new locale doesn't exist on the local machine. +// ATTENTION: may crash with an assertion failure on Windows debug builds. +// See ARROW-6108, also https://gerrit.libreoffice.org/#/c/54110/ +class ARROW_TESTING_EXPORT LocaleGuard { + public: + explicit LocaleGuard(const char* new_locale); + ~LocaleGuard(); + + protected: + class Impl; + std::unique_ptr impl_; +}; + +class ARROW_TESTING_EXPORT EnvVarGuard { + public: + EnvVarGuard(const std::string& name, const std::string& value); + ~EnvVarGuard(); + + protected: + const std::string name_; + std::string old_value_; + bool was_set_; +}; + +namespace internal { +class SignalHandler; +} + +class ARROW_TESTING_EXPORT SignalHandlerGuard { + public: + typedef void (*Callback)(int); + + SignalHandlerGuard(int signum, Callback cb); + SignalHandlerGuard(int signum, const internal::SignalHandler& handler); + ~SignalHandlerGuard(); + + protected: + struct Impl; + std::unique_ptr impl_; +}; + +#ifndef ARROW_LARGE_MEMORY_TESTS +#define LARGE_MEMORY_TEST(name) DISABLED_##name +#else +#define LARGE_MEMORY_TEST(name) name +#endif + +inline void PrintTo(const Status& st, std::ostream* os) { *os << st.ToString(); } + +template +void PrintTo(const Result& result, std::ostream* os) { + if (result.ok()) { + ::testing::internal::UniversalPrint(result.ValueOrDie(), os); + } else { + *os << result.status(); + } +} + +// A data type with only move constructors (no copy, no default). +struct MoveOnlyDataType { + explicit MoveOnlyDataType(int x) : data(new int(x)) {} + + MoveOnlyDataType(const MoveOnlyDataType& other) = delete; + MoveOnlyDataType& operator=(const MoveOnlyDataType& other) = delete; + + MoveOnlyDataType(MoveOnlyDataType&& other) { MoveFrom(&other); } + MoveOnlyDataType& operator=(MoveOnlyDataType&& other) { + MoveFrom(&other); + return *this; + } + + MoveOnlyDataType& operator=(int x) { + if (data != nullptr) { + delete data; + } + data = new int(x); + return *this; + } + + ~MoveOnlyDataType() { Destroy(); } + + void Destroy() { + if (data != nullptr) { + delete data; + data = nullptr; + moves = -1; + } + } + + void MoveFrom(MoveOnlyDataType* other) { + Destroy(); + data = other->data; + other->data = nullptr; + moves = other->moves + 1; + } + + int ToInt() const { return data == nullptr ? 
-42 : *data; } + + bool operator==(const MoveOnlyDataType& other) const { + return data != nullptr && other.data != nullptr && *data == *other.data; + } + bool operator<(const MoveOnlyDataType& other) const { + return data == nullptr || (other.data != nullptr && *data < *other.data); + } + + bool operator==(int other) const { return data != nullptr && *data == other; } + friend bool operator==(int left, const MoveOnlyDataType& right) { + return right == left; + } + + int* data = nullptr; + int moves = 0; +}; + +// A task that blocks until unlocked. Useful for timing tests. +class ARROW_TESTING_EXPORT GatingTask { + public: + explicit GatingTask(double timeout_seconds = 10); + /// \brief During destruction we wait for all pending tasks to finish + ~GatingTask(); + + /// \brief Creates a new waiting task (presumably to spawn on a thread). It will return + /// invalid if the timeout arrived before the unlock. The task will not complete until + /// unlocked or timed out + /// + /// Note: The GatingTask must outlive any Task instances + std::function Task(); + /// \brief Creates a new waiting task as a future. The future will not complete + /// until unlocked. + Future<> AsyncTask(); + /// \brief Waits until at least count tasks are running. + Status WaitForRunning(int count); + /// \brief Unlocks all waiting tasks. Returns an invalid status if any waiting task has + /// timed out + Status Unlock(); + + static std::shared_ptr Make(double timeout_seconds = 10); + + private: + class Impl; + std::shared_ptr impl_; +}; + +/// \brief create an exact copy of the data where each buffer has a max alignment of 1 +/// +/// This method does not recurse into the dictionary or children +ARROW_TESTING_EXPORT std::shared_ptr UnalignBuffers(const ArrayData& array); +/// \brief create an exact copy of the array where each buffer has a max alignment of 1 +/// +/// This method does not recurse into the dictionary or children +ARROW_TESTING_EXPORT std::shared_ptr UnalignBuffers(const Array& array); + +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/matchers.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/matchers.h new file mode 100644 index 0000000000000000000000000000000000000000..b4625b3922e86bc044e30c63c15ea5b1dbaca469 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/matchers.h @@ -0,0 +1,467 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
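+//
+// Usage sketch (illustrative only): the matchers declared below compose with
+// googletest/gmock via EXPECT_THAT, e.g. for a `Result<int> res` and a
+// `Future<int> fut`:
+//
+//   EXPECT_THAT(res, ResultWith(testing::Gt(0)));
+//   EXPECT_THAT(res, Raises(StatusCode::Invalid, testing::HasSubstr("oops")));
+//   EXPECT_THAT(fut, Finishes(ResultWith(testing::Eq(42))));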
+ +#pragma once + +#include + +#include + +#include "arrow/datum.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/stl_iterator.h" +#include "arrow/testing/future_util.h" +#include "arrow/testing/gtest_util.h" +#include "arrow/util/future.h" +#include "arrow/util/unreachable.h" + +namespace arrow { + +class PointeesEqualMatcher { + public: + template + operator testing::Matcher() const { // NOLINT runtime/explicit + struct Impl : testing::MatcherInterface { + void DescribeTo(::std::ostream* os) const override { *os << "pointees are equal"; } + + void DescribeNegationTo(::std::ostream* os) const override { + *os << "pointees are not equal"; + } + + bool MatchAndExplain(const PtrPair& pair, + testing::MatchResultListener* listener) const override { + const auto& first = *std::get<0>(pair); + const auto& second = *std::get<1>(pair); + const bool match = first.Equals(second); + *listener << "whose pointees " << testing::PrintToString(first) << " and " + << testing::PrintToString(second) + << (match ? " are equal" : " are not equal"); + return match; + } + }; + + return testing::Matcher(new Impl()); + } +}; + +// A matcher that checks that the values pointed to are Equals(). +// Useful in conjunction with other googletest matchers. +inline PointeesEqualMatcher PointeesEqual() { return {}; } + +class AnyOfJSONMatcher { + public: + AnyOfJSONMatcher(std::shared_ptr type, std::string array_json) + : type_(std::move(type)), array_json_(std::move(array_json)) {} + + template + operator testing::Matcher() const { // NOLINT runtime/explicit + struct Impl : testing::MatcherInterface { + static_assert(std::is_same>(), + "AnyOfJSON only supported for std::shared_ptr"); + Impl(std::shared_ptr type, std::string array_json) + : type_(std::move(type)), array_json_(std::move(array_json)) { + array = ArrayFromJSON(type_, array_json_); + } + void DescribeTo(std::ostream* os) const override { + *os << "matches at least one scalar from "; + *os << array->ToString(); + } + void DescribeNegationTo(::std::ostream* os) const override { + *os << "matches no scalar from "; + *os << array->ToString(); + } + bool MatchAndExplain( + const arg_type& arg, + ::testing::MatchResultListener* result_listener) const override { + for (int64_t i = 0; i < array->length(); ++i) { + std::shared_ptr scalar; + auto maybe_scalar = array->GetScalar(i); + if (maybe_scalar.ok()) { + scalar = maybe_scalar.ValueOrDie(); + } else { + *result_listener << "GetScalar() had status " + << maybe_scalar.status().ToString() << "at index " << i + << " in the input JSON Array"; + return false; + } + + if (scalar->Equals(*arg)) return true; + } + *result_listener << "Argument scalar: '" << arg->ToString() + << "' matches no scalar from " << array->ToString(); + return false; + } + const std::shared_ptr type_; + const std::string array_json_; + std::shared_ptr array; + }; + + return testing::Matcher(new Impl(type_, array_json_)); + } + + private: + const std::shared_ptr type_; + const std::string array_json_; +}; + +inline AnyOfJSONMatcher AnyOfJSON(std::shared_ptr type, + std::string array_json) { + return {std::move(type), std::move(array_json)}; +} + +template +class FutureMatcher { + public: + explicit FutureMatcher(ResultMatcher result_matcher, double wait_seconds) + : result_matcher_(std::move(result_matcher)), wait_seconds_(wait_seconds) {} + + template ::type::ValueType> + operator testing::Matcher() const { // NOLINT runtime/explicit + struct Impl : testing::MatcherInterface { + explicit Impl(const ResultMatcher& 
result_matcher, double wait_seconds) + : result_matcher_(testing::MatcherCast>(result_matcher)), + wait_seconds_(wait_seconds) {} + + void DescribeTo(::std::ostream* os) const override { + *os << "value "; + result_matcher_.DescribeTo(os); + } + + void DescribeNegationTo(::std::ostream* os) const override { + *os << "value "; + result_matcher_.DescribeNegationTo(os); + } + + bool MatchAndExplain(const Fut& fut, + testing::MatchResultListener* listener) const override { + if (!fut.Wait(wait_seconds_)) { + *listener << "which didn't finish within " << wait_seconds_ << " seconds"; + return false; + } + return result_matcher_.MatchAndExplain(fut.result(), listener); + } + + const testing::Matcher> result_matcher_; + const double wait_seconds_; + }; + + return testing::Matcher(new Impl(result_matcher_, wait_seconds_)); + } + + private: + const ResultMatcher result_matcher_; + const double wait_seconds_; +}; + +template +class ResultMatcher { + public: + explicit ResultMatcher(ValueMatcher value_matcher) + : value_matcher_(std::move(value_matcher)) {} + + template ::type::ValueType> + operator testing::Matcher() const { // NOLINT runtime/explicit + struct Impl : testing::MatcherInterface { + explicit Impl(const ValueMatcher& value_matcher) + : value_matcher_(testing::MatcherCast(value_matcher)) {} + + void DescribeTo(::std::ostream* os) const override { + *os << "value "; + value_matcher_.DescribeTo(os); + } + + void DescribeNegationTo(::std::ostream* os) const override { + *os << "value "; + value_matcher_.DescribeNegationTo(os); + } + + bool MatchAndExplain(const Res& maybe_value, + testing::MatchResultListener* listener) const override { + if (!maybe_value.status().ok()) { + *listener << "whose error " + << testing::PrintToString(maybe_value.status().ToString()) + << " doesn't match"; + return false; + } + const ValueType& value = maybe_value.ValueOrDie(); + testing::StringMatchResultListener value_listener; + const bool match = value_matcher_.MatchAndExplain(value, &value_listener); + *listener << "whose value " << testing::PrintToString(value) + << (match ? 
" matches" : " doesn't match"); + testing::internal::PrintIfNotEmpty(value_listener.str(), listener->stream()); + return match; + } + + const testing::Matcher value_matcher_; + }; + + return testing::Matcher(new Impl(value_matcher_)); + } + + private: + const ValueMatcher value_matcher_; +}; + +class ErrorMatcher { + public: + explicit ErrorMatcher(StatusCode code, + std::optional> message_matcher) + : code_(code), message_matcher_(std::move(message_matcher)) {} + + template + operator testing::Matcher() const { // NOLINT runtime/explicit + struct Impl : testing::MatcherInterface { + explicit Impl(StatusCode code, + std::optional> message_matcher) + : code_(code), message_matcher_(std::move(message_matcher)) {} + + void DescribeTo(::std::ostream* os) const override { + *os << "raises StatusCode::" << Status::CodeAsString(code_); + if (message_matcher_) { + *os << " and message "; + message_matcher_->DescribeTo(os); + } + } + + void DescribeNegationTo(::std::ostream* os) const override { + *os << "does not raise StatusCode::" << Status::CodeAsString(code_); + if (message_matcher_) { + *os << " or message "; + message_matcher_->DescribeNegationTo(os); + } + } + + bool MatchAndExplain(const Res& maybe_value, + testing::MatchResultListener* listener) const override { + const Status& status = internal::GenericToStatus(maybe_value); + testing::StringMatchResultListener value_listener; + + bool match = status.code() == code_; + if (message_matcher_) { + match = match && + message_matcher_->MatchAndExplain(status.message(), &value_listener); + } + + if (match) { + *listener << "whose error matches"; + } else if (status.ok()) { + *listener << "whose non-error doesn't match"; + } else { + *listener << "whose error doesn't match"; + } + + testing::internal::PrintIfNotEmpty(value_listener.str(), listener->stream()); + return match; + } + + const StatusCode code_; + const std::optional> message_matcher_; + }; + + return testing::Matcher(new Impl(code_, message_matcher_)); + } + + private: + const StatusCode code_; + const std::optional> message_matcher_; +}; + +class OkMatcher { + public: + template + operator testing::Matcher() const { // NOLINT runtime/explicit + struct Impl : testing::MatcherInterface { + void DescribeTo(::std::ostream* os) const override { *os << "is ok"; } + + void DescribeNegationTo(::std::ostream* os) const override { *os << "is not ok"; } + + bool MatchAndExplain(const Res& maybe_value, + testing::MatchResultListener* listener) const override { + const Status& status = internal::GenericToStatus(maybe_value); + + const bool match = status.ok(); + *listener << "whose " << (match ? "non-error matches" : "error doesn't match"); + return match; + } + }; + + return testing::Matcher(new Impl()); + } +}; + +// Returns a matcher that waits on a Future (by default for 16 seconds) +// then applies a matcher to the result. +template +FutureMatcher Finishes( + const ResultMatcher& result_matcher, + double wait_seconds = kDefaultAssertFinishesWaitSeconds) { + return FutureMatcher(result_matcher, wait_seconds); +} + +// Returns a matcher that matches the value of a successful Result. +template +ResultMatcher ResultWith(const ValueMatcher& value_matcher) { + return ResultMatcher(value_matcher); +} + +// Returns a matcher that matches an ok Status or Result. +inline OkMatcher Ok() { return {}; } + +// Returns a matcher that matches the StatusCode of a Status or Result. +// Do not use Raises(StatusCode::OK) to match a non error code. 
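+// For an ok Status or Result, use the Ok() matcher above instead. A sketch
+// (illustrative only):
+//
+//   EXPECT_THAT(status, Raises(StatusCode::IOError));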
+inline ErrorMatcher Raises(StatusCode code) { return ErrorMatcher(code, std::nullopt); } + +// Returns a matcher that matches the StatusCode and message of a Status or Result. +template +ErrorMatcher Raises(StatusCode code, const MessageMatcher& message_matcher) { + return ErrorMatcher(code, testing::MatcherCast(message_matcher)); +} + +class DataEqMatcher { + public: + // TODO(bkietz) support EqualOptions, ApproxEquals, etc + // Probably it's better to use something like config-through-key_value_metadata + // as with the random generators to decouple this from EqualOptions etc. + explicit DataEqMatcher(Datum expected) : expected_(std::move(expected)) {} + + template + operator testing::Matcher() const { // NOLINT runtime/explicit + struct Impl : testing::MatcherInterface { + explicit Impl(Datum expected) : expected_(std::move(expected)) {} + + void DescribeTo(::std::ostream* os) const override { + *os << "has data "; + PrintTo(expected_, os); + } + + void DescribeNegationTo(::std::ostream* os) const override { + *os << "doesn't have data "; + PrintTo(expected_, os); + } + + bool MatchAndExplain(const Data& data, + testing::MatchResultListener* listener) const override { + Datum boxed(data); + + if (boxed.kind() != expected_.kind()) { + *listener << "whose Datum::kind " << boxed.ToString() << " doesn't match " + << expected_.ToString(); + return false; + } + + if (const auto& boxed_type = boxed.type()) { + if (*boxed_type != *expected_.type()) { + *listener << "whose DataType " << boxed_type->ToString() << " doesn't match " + << expected_.type()->ToString(); + return false; + } + } else if (const auto& boxed_schema = boxed.schema()) { + if (*boxed_schema != *expected_.schema()) { + *listener << "whose Schema " << boxed_schema->ToString() << " doesn't match " + << expected_.schema()->ToString(); + return false; + } + } else { + Unreachable(); + } + + if (boxed == expected_) { + *listener << "whose value matches"; + return true; + } + + if (listener->IsInterested() && boxed.kind() == Datum::ARRAY) { + *listener << "whose value differs from the expected value by " + << boxed.make_array()->Diff(*expected_.make_array()); + } else { + *listener << "whose value doesn't match"; + } + return false; + } + + Datum expected_; + }; + + return testing::Matcher(new Impl(expected_)); + } + + private: + Datum expected_; +}; + +/// Constructs a datum against which arguments are matched +template +DataEqMatcher DataEq(Data&& dat) { + return DataEqMatcher(Datum(std::forward(dat))); +} + +/// Constructs an array with ArrayFromJSON against which arguments are matched +inline DataEqMatcher DataEqArray(const std::shared_ptr& type, + std::string_view json) { + return DataEq(ArrayFromJSON(type, json)); +} + +/// Constructs an array from a vector of optionals against which arguments are matched +template ::ArrayType, + typename BuilderType = typename TypeTraits::BuilderType, + typename ValueType = + typename ::arrow::stl::detail::DefaultValueAccessor::ValueType> +DataEqMatcher DataEqArray(T type, const std::vector>& values) { + // FIXME(bkietz) broken until DataType is move constructible + BuilderType builder(std::make_shared(std::move(type)), default_memory_pool()); + DCHECK_OK(builder.Reserve(static_cast(values.size()))); + + // pseudo constexpr: + static const bool need_safe_append = !is_fixed_width(T::type_id); + + for (auto value : values) { + if (need_safe_append) { + DCHECK_OK(builder.AppendOrNull(value)); + } else { + builder.UnsafeAppendOrNull(value); + } + } + + return 
DataEq(builder.Finish().ValueOrDie()); +} + +/// Constructs a scalar with ScalarFromJSON against which arguments are matched +inline DataEqMatcher DataEqScalar(const std::shared_ptr& type, + std::string_view json) { + return DataEq(ScalarFromJSON(type, json)); +} + +/// Constructs a scalar against which arguments are matched +template ::ScalarType, + typename ValueType = typename ScalarType::ValueType> +DataEqMatcher DataEqScalar(T type, std::optional value) { + ScalarType expected(std::make_shared(std::move(type))); + + if (value) { + expected.is_valid = true; + expected.value = std::move(*value); + } + + return DataEq(std::move(expected)); +} + +// HasType, HasSchema matchers + +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/pch.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/pch.h new file mode 100644 index 0000000000000000000000000000000000000000..e544ad806adc992691600b90ddd7174fb0447c4e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/pch.h @@ -0,0 +1,25 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Often-used headers, for precompiling. +// If updating this header, please make sure you check compilation speed +// before checking in. Adding headers which are not used extremely often +// may incur a slowdown, since it makes the precompiled header heavier to load. + +#include "arrow/pch.h" +#include "arrow/testing/gtest_util.h" +#include "arrow/testing/util.h" diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/visibility.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/visibility.h new file mode 100644 index 0000000000000000000000000000000000000000..1b2aa7cd86fc65f3a1ad1b332f7c295aa3cc9c25 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/visibility.h @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
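+//
+// The macros defined below mark declarations that cross the testing library's
+// shared-library boundary; for example, a class is annotated as
+// `class ARROW_TESTING_EXPORT GatingTask { ... };` and the macro expands to a
+// dllexport/dllimport declspec on Windows or a visibility attribute elsewhere.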
+ +#pragma once + +#if defined(_WIN32) || defined(__CYGWIN__) +#if defined(_MSC_VER) +#pragma warning(push) +#pragma warning(disable : 4251) +#else +#pragma GCC diagnostic ignored "-Wattributes" +#endif + +#ifdef ARROW_TESTING_STATIC +#define ARROW_TESTING_EXPORT +#elif defined(ARROW_TESTING_EXPORTING) +#define ARROW_TESTING_EXPORT __declspec(dllexport) +#else +#define ARROW_TESTING_EXPORT __declspec(dllimport) +#endif + +#define ARROW_TESTING_NO_EXPORT +#else // Not Windows +#ifndef ARROW_TESTING_EXPORT +#define ARROW_TESTING_EXPORT __attribute__((visibility("default"))) +#endif +#ifndef ARROW_TESTING_NO_EXPORT +#define ARROW_TESTING_NO_EXPORT __attribute__((visibility("hidden"))) +#endif +#endif // Non-Windows + +#if defined(_MSC_VER) +#pragma warning(pop) +#endif diff --git a/venv/lib/python3.10/site-packages/pyarrow/includes/__init__.pxd b/venv/lib/python3.10/site-packages/pyarrow/includes/__init__.pxd new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pyarrow/includes/common.pxd b/venv/lib/python3.10/site-packages/pyarrow/includes/common.pxd new file mode 100644 index 0000000000000000000000000000000000000000..044dd0333f323367dcba32a8fe013eccd0986e08 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/includes/common.pxd @@ -0,0 +1,175 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from libc.stdint cimport * +from libcpp cimport bool as c_bool, nullptr +from libcpp.functional cimport function +from libcpp.memory cimport shared_ptr, unique_ptr, make_shared +from libcpp.string cimport string as c_string +from libcpp.utility cimport pair +from libcpp.vector cimport vector +from libcpp.unordered_map cimport unordered_map +from libcpp.unordered_set cimport unordered_set + +from cpython cimport PyObject +from cpython.datetime cimport PyDateTime_DateTime +cimport cpython + + +cdef extern from * namespace "std" nogil: + cdef shared_ptr[T] static_pointer_cast[T, U](shared_ptr[U]) + + +cdef extern from "" namespace "std" nogil: + cdef cppclass optional[T]: + ctypedef T value_type + optional() + optional(nullopt_t) + optional(optional&) except + + optional(T&) except + + c_bool has_value() + T& value() + T& value_or[U](U& default_value) + void swap(optional&) + void reset() + T& emplace(...) 
+ T& operator*() + # T* operator->() # Not Supported + optional& operator=(optional&) + optional& operator=[U](U&) + + +# vendored from the cymove project https://github.com/ozars/cymove +cdef extern from * namespace "cymove" nogil: + """ + #include <type_traits> + #include <utility> + namespace cymove { + template <typename T> + inline typename std::remove_reference<T>::type&& cymove(T& t) { + return std::move(t); + } + template <typename T> + inline typename std::remove_reference<T>::type&& cymove(T&& t) { + return std::move(t); + } + }  // namespace cymove + """ + cdef T move" cymove::cymove"[T](T) + +cdef extern from * namespace "arrow::py" nogil: + """ + #include <memory> + #include <utility> + + namespace arrow { + namespace py { + template <typename T> + std::shared_ptr<T> to_shared(std::unique_ptr<T>& t) { + return std::move(t); + } + template <typename T> + std::shared_ptr<T> to_shared(std::unique_ptr<T>&& t) { + return std::move(t); + } + }  // namespace py + }  // namespace arrow + """ + cdef shared_ptr[T] to_shared" arrow::py::to_shared"[T](unique_ptr[T]) + +cdef extern from "arrow/python/platform.h": + pass + +cdef extern from "<Python.h>": + void Py_XDECREF(PyObject* o) + Py_ssize_t Py_REFCNT(PyObject* o) + +cdef extern from "numpy/halffloat.h": + ctypedef uint16_t npy_half + +cdef extern from "arrow/api.h" namespace "arrow" nogil: + # We can later add more of the common status factory methods as needed + cdef CStatus CStatus_OK "arrow::Status::OK"() + + cdef CStatus CStatus_Invalid "arrow::Status::Invalid"() + cdef CStatus CStatus_NotImplemented \ "arrow::Status::NotImplemented"(const c_string& msg) + cdef CStatus CStatus_UnknownError \ "arrow::Status::UnknownError"(const c_string& msg) + + cdef cppclass CStatus "arrow::Status": + CStatus() + + c_string ToString() + c_string message() + shared_ptr[CStatusDetail] detail() + + c_bool ok() + c_bool IsIOError() + c_bool IsOutOfMemory() + c_bool IsInvalid() + c_bool IsKeyError() + c_bool IsNotImplemented() + c_bool IsTypeError() + c_bool IsCapacityError() + c_bool IsIndexError() + c_bool IsSerializationError() + c_bool IsCancelled() + + void Warn() + + cdef cppclass CStatusDetail "arrow::StatusDetail": + c_string ToString() + + +cdef extern from "arrow/result.h" namespace "arrow" nogil: + cdef cppclass CResult "arrow::Result"[T]: + CResult() + CResult(CStatus) + CResult(T) + c_bool ok() + CStatus status() + CStatus Value(T*) + T operator*() + + +cdef extern from "arrow/util/future.h" namespace "arrow" nogil: + cdef cppclass CFuture "arrow::Future"[T]: + CFuture() + + +cdef extern from "arrow/python/async.h" namespace "arrow::py" nogil: + # BindFuture's third argument is really a C++ callable with + # the signature `object(T*)`, but Cython does not allow declaring that. + # We use an ellipsis as a workaround. + # Another possibility is to type-erase the argument by making it + # `object(void*)`, but it would lose compile-time C++ type safety. + void BindFuture[T](CFuture[T], object cb, ...) + + +cdef extern from "arrow/python/common.h" namespace "arrow::py" nogil: + T GetResultValue[T](CResult[T]) except * + cdef function[F] BindFunction[F](void* unbound, object bound, ...)
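+# [Editor's note, not part of the upstream .pxd] A minimal sketch (hypothetical
+# .pyx usage) of how the helpers declared above compose. GetResultValue unwraps
+# a CResult[T], raising a Python exception on a bad status; `move` (the
+# vendored cymove shim) stands in for std::move, which Cython lacks; and
+# to_shared transfers a unique_ptr into a shared_ptr without copying.
+# AllocateBuffer and c_default_memory_pool are declared later in libarrow.pxd:
+#
+#     cdef unique_ptr[CBuffer] buf = move(GetResultValue(
+#         AllocateBuffer(64, c_default_memory_pool())))
+#     cdef shared_ptr[CBuffer] shared_buf = to_shared(move(buf))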
+ + +cdef inline object PyObject_to_object(PyObject* o): + # Cast to "object" increments reference count + cdef object result = o + cpython.Py_DECREF(result) + return result diff --git a/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow.pxd b/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow.pxd new file mode 100644 index 0000000000000000000000000000000000000000..6dae45ab80b1c168742cb3e67eb55cccb8ba1bf8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow.pxd @@ -0,0 +1,3023 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from pyarrow.includes.common cimport * + + +cdef extern from "arrow/util/key_value_metadata.h" namespace "arrow" nogil: + cdef cppclass CKeyValueMetadata" arrow::KeyValueMetadata": + CKeyValueMetadata() + CKeyValueMetadata(const unordered_map[c_string, c_string]&) + CKeyValueMetadata(const vector[c_string]& keys, + const vector[c_string]& values) + + void reserve(int64_t n) + int64_t size() const + c_string key(int64_t i) const + c_string value(int64_t i) const + int FindKey(const c_string& key) const + + shared_ptr[CKeyValueMetadata] Copy() const + c_bool Equals(const CKeyValueMetadata& other) + void Append(const c_string& key, const c_string& value) + void ToUnorderedMap(unordered_map[c_string, c_string]*) const + c_string ToString() const + + CResult[c_string] Get(const c_string& key) const + CStatus Delete(const c_string& key) + CStatus Set(const c_string& key, const c_string& value) + c_bool Contains(const c_string& key) const + + +cdef extern from "arrow/util/decimal.h" namespace "arrow" nogil: + cdef cppclass CDecimal128" arrow::Decimal128": + c_string ToString(int32_t scale) const + + +cdef extern from "arrow/util/decimal.h" namespace "arrow" nogil: + cdef cppclass CDecimal256" arrow::Decimal256": + c_string ToString(int32_t scale) const + + +cdef extern from "arrow/config.h" namespace "arrow" nogil: + cdef cppclass CBuildInfo" arrow::BuildInfo": + int version + int version_major + int version_minor + int version_patch + c_string version_string + c_string so_version + c_string full_so_version + c_string compiler_id + c_string compiler_version + c_string compiler_flags + c_string git_id + c_string git_description + c_string package_kind + c_string build_type + + const CBuildInfo& GetBuildInfo() + + cdef cppclass CRuntimeInfo" arrow::RuntimeInfo": + c_string simd_level + c_string detected_simd_level + + CRuntimeInfo GetRuntimeInfo() + + cdef cppclass CGlobalOptions" arrow::GlobalOptions": + optional[c_string] timezone_db_path + + CStatus Initialize(const CGlobalOptions& options) + + +cdef extern from "arrow/util/future.h" namespace "arrow" nogil: + cdef cppclass CFuture_Void" arrow::Future<>": + CStatus status() + + +cdef extern from "arrow/api.h" 
namespace "arrow" nogil: + cdef enum Type" arrow::Type::type": + _Type_NA" arrow::Type::NA" + + _Type_BOOL" arrow::Type::BOOL" + + _Type_UINT8" arrow::Type::UINT8" + _Type_INT8" arrow::Type::INT8" + _Type_UINT16" arrow::Type::UINT16" + _Type_INT16" arrow::Type::INT16" + _Type_UINT32" arrow::Type::UINT32" + _Type_INT32" arrow::Type::INT32" + _Type_UINT64" arrow::Type::UINT64" + _Type_INT64" arrow::Type::INT64" + + _Type_HALF_FLOAT" arrow::Type::HALF_FLOAT" + _Type_FLOAT" arrow::Type::FLOAT" + _Type_DOUBLE" arrow::Type::DOUBLE" + + _Type_DECIMAL128" arrow::Type::DECIMAL128" + _Type_DECIMAL256" arrow::Type::DECIMAL256" + + _Type_DATE32" arrow::Type::DATE32" + _Type_DATE64" arrow::Type::DATE64" + _Type_TIMESTAMP" arrow::Type::TIMESTAMP" + _Type_TIME32" arrow::Type::TIME32" + _Type_TIME64" arrow::Type::TIME64" + _Type_DURATION" arrow::Type::DURATION" + _Type_INTERVAL_MONTH_DAY_NANO" arrow::Type::INTERVAL_MONTH_DAY_NANO" + + _Type_BINARY" arrow::Type::BINARY" + _Type_STRING" arrow::Type::STRING" + _Type_LARGE_BINARY" arrow::Type::LARGE_BINARY" + _Type_LARGE_STRING" arrow::Type::LARGE_STRING" + _Type_FIXED_SIZE_BINARY" arrow::Type::FIXED_SIZE_BINARY" + _Type_BINARY_VIEW" arrow::Type::BINARY_VIEW" + _Type_STRING_VIEW" arrow::Type::STRING_VIEW" + + _Type_LIST" arrow::Type::LIST" + _Type_LARGE_LIST" arrow::Type::LARGE_LIST" + _Type_FIXED_SIZE_LIST" arrow::Type::FIXED_SIZE_LIST" + _Type_LIST_VIEW" arrow::Type::LIST_VIEW" + _Type_LARGE_LIST_VIEW" arrow::Type::LARGE_LIST_VIEW" + _Type_STRUCT" arrow::Type::STRUCT" + _Type_SPARSE_UNION" arrow::Type::SPARSE_UNION" + _Type_DENSE_UNION" arrow::Type::DENSE_UNION" + _Type_DICTIONARY" arrow::Type::DICTIONARY" + _Type_RUN_END_ENCODED" arrow::Type::RUN_END_ENCODED" + _Type_MAP" arrow::Type::MAP" + + _Type_EXTENSION" arrow::Type::EXTENSION" + + cdef enum UnionMode" arrow::UnionMode::type": + _UnionMode_SPARSE" arrow::UnionMode::SPARSE" + _UnionMode_DENSE" arrow::UnionMode::DENSE" + + cdef enum TimeUnit" arrow::TimeUnit::type": + TimeUnit_SECOND" arrow::TimeUnit::SECOND" + TimeUnit_MILLI" arrow::TimeUnit::MILLI" + TimeUnit_MICRO" arrow::TimeUnit::MICRO" + TimeUnit_NANO" arrow::TimeUnit::NANO" + + cdef cppclass CBufferSpec" arrow::DataTypeLayout::BufferSpec": + pass + + cdef cppclass CDataTypeLayout" arrow::DataTypeLayout": + vector[CBufferSpec] buffers + c_bool has_dictionary + + cdef cppclass CDataType" arrow::DataType": + Type id() + + c_bool Equals(const CDataType& other, c_bool check_metadata) + c_bool Equals(const shared_ptr[CDataType]& other, c_bool check_metadata) + + shared_ptr[CField] field(int i) + const vector[shared_ptr[CField]] fields() + int num_fields() + CDataTypeLayout layout() + c_string ToString() + + c_bool is_primitive(Type type) + c_bool is_numeric(Type type) + + cdef cppclass CArrayData" arrow::ArrayData": + shared_ptr[CDataType] type + int64_t length + int64_t null_count + int64_t offset + vector[shared_ptr[CBuffer]] buffers + vector[shared_ptr[CArrayData]] child_data + shared_ptr[CArrayData] dictionary + + @staticmethod + shared_ptr[CArrayData] Make(const shared_ptr[CDataType]& type, + int64_t length, + vector[shared_ptr[CBuffer]]& buffers, + int64_t null_count, + int64_t offset) + + @staticmethod + shared_ptr[CArrayData] MakeWithChildren" Make"( + const shared_ptr[CDataType]& type, + int64_t length, + vector[shared_ptr[CBuffer]]& buffers, + vector[shared_ptr[CArrayData]]& child_data, + int64_t null_count, + int64_t offset) + + @staticmethod + shared_ptr[CArrayData] MakeWithChildrenAndDictionary" Make"( + const shared_ptr[CDataType]& type, 
+ int64_t length, + vector[shared_ptr[CBuffer]]& buffers, + vector[shared_ptr[CArrayData]]& child_data, + shared_ptr[CArrayData]& dictionary, + int64_t null_count, + int64_t offset) + + cdef cppclass CArray" arrow::Array": + shared_ptr[CDataType] type() + + int64_t length() + int64_t null_count() + int64_t offset() + Type type_id() + + int num_fields() + + CResult[shared_ptr[CScalar]] GetScalar(int64_t i) const + + c_string Diff(const CArray& other) + c_bool Equals(const CArray& arr) + c_bool IsNull(int i) + + shared_ptr[CArrayData] data() + + shared_ptr[CArray] Slice(int64_t offset) + shared_ptr[CArray] Slice(int64_t offset, int64_t length) + + CStatus Validate() const + CStatus ValidateFull() const + CResult[shared_ptr[CArray]] View(const shared_ptr[CDataType]& type) + + shared_ptr[CArray] MakeArray(const shared_ptr[CArrayData]& data) + CResult[shared_ptr[CArray]] MakeArrayOfNull( + const shared_ptr[CDataType]& type, int64_t length, CMemoryPool* pool) + + CResult[shared_ptr[CArray]] MakeArrayFromScalar( + const CScalar& scalar, int64_t length, CMemoryPool* pool) + + CStatus DebugPrint(const CArray& arr, int indent) + + cdef cppclass CFixedWidthType" arrow::FixedWidthType"(CDataType): + int bit_width() + int byte_width() + + cdef cppclass CNullArray" arrow::NullArray"(CArray): + CNullArray(int64_t length) + + cdef cppclass CDictionaryArray" arrow::DictionaryArray"(CArray): + CDictionaryArray(const shared_ptr[CDataType]& type, + const shared_ptr[CArray]& indices, + const shared_ptr[CArray]& dictionary) + CDictionaryArray(const shared_ptr[CArrayData]& data) + + @staticmethod + CResult[shared_ptr[CArray]] FromArrays( + const shared_ptr[CDataType]& type, + const shared_ptr[CArray]& indices, + const shared_ptr[CArray]& dictionary) + + shared_ptr[CArray] indices() + shared_ptr[CArray] dictionary() + + cdef cppclass CDate32Type" arrow::Date32Type"(CFixedWidthType): + pass + + cdef cppclass CDate64Type" arrow::Date64Type"(CFixedWidthType): + pass + + cdef cppclass CTimestampType" arrow::TimestampType"(CFixedWidthType): + CTimestampType(TimeUnit unit) + TimeUnit unit() + const c_string& timezone() + + cdef cppclass CTime32Type" arrow::Time32Type"(CFixedWidthType): + TimeUnit unit() + + cdef cppclass CTime64Type" arrow::Time64Type"(CFixedWidthType): + TimeUnit unit() + + shared_ptr[CDataType] ctime32" arrow::time32"(TimeUnit unit) + shared_ptr[CDataType] ctime64" arrow::time64"(TimeUnit unit) + + cdef cppclass CDurationType" arrow::DurationType"(CFixedWidthType): + TimeUnit unit() + + shared_ptr[CDataType] cduration" arrow::duration"(TimeUnit unit) + + cdef cppclass CDictionaryType" arrow::DictionaryType"(CFixedWidthType): + CDictionaryType(const shared_ptr[CDataType]& index_type, + const shared_ptr[CDataType]& value_type, + c_bool ordered) + + shared_ptr[CDataType] index_type() + shared_ptr[CDataType] value_type() + c_bool ordered() + + shared_ptr[CDataType] ctimestamp" arrow::timestamp"(TimeUnit unit) + shared_ptr[CDataType] ctimestamp" arrow::timestamp"( + TimeUnit unit, const c_string& timezone) + + cdef cppclass CMemoryPool" arrow::MemoryPool": + int64_t bytes_allocated() + int64_t max_memory() + c_string backend_name() + void ReleaseUnused() + + cdef cppclass CLoggingMemoryPool" arrow::LoggingMemoryPool"(CMemoryPool): + CLoggingMemoryPool(CMemoryPool*) + + cdef cppclass CProxyMemoryPool" arrow::ProxyMemoryPool"(CMemoryPool): + CProxyMemoryPool(CMemoryPool*) + + cdef cppclass CBuffer" arrow::Buffer": + CBuffer(const uint8_t* data, int64_t size) + const uint8_t* data() + uint8_t* mutable_data() 
+ uintptr_t address() + uintptr_t mutable_address() + int64_t size() + shared_ptr[CBuffer] parent() + c_bool is_cpu() const + c_bool is_mutable() const + c_string ToHexString() + c_bool Equals(const CBuffer& other) + + CResult[shared_ptr[CBuffer]] SliceBufferSafe( + const shared_ptr[CBuffer]& buffer, int64_t offset) + CResult[shared_ptr[CBuffer]] SliceBufferSafe( + const shared_ptr[CBuffer]& buffer, int64_t offset, int64_t length) + + cdef cppclass CMutableBuffer" arrow::MutableBuffer"(CBuffer): + CMutableBuffer(const uint8_t* data, int64_t size) + + cdef cppclass CResizableBuffer" arrow::ResizableBuffer"(CMutableBuffer): + CStatus Resize(const int64_t new_size, c_bool shrink_to_fit) + CStatus Reserve(const int64_t new_size) + + CResult[unique_ptr[CBuffer]] AllocateBuffer(const int64_t size, + CMemoryPool* pool) + + CResult[unique_ptr[CResizableBuffer]] AllocateResizableBuffer( + const int64_t size, CMemoryPool* pool) + + cdef cppclass CSyncEvent" arrow::Device::SyncEvent": + pass + + cdef cppclass CDevice" arrow::Device": + pass + + cdef CMemoryPool* c_default_memory_pool" arrow::default_memory_pool"() + cdef CMemoryPool* c_system_memory_pool" arrow::system_memory_pool"() + cdef CStatus c_jemalloc_memory_pool" arrow::jemalloc_memory_pool"( + CMemoryPool** out) + cdef CStatus c_mimalloc_memory_pool" arrow::mimalloc_memory_pool"( + CMemoryPool** out) + cdef vector[c_string] c_supported_memory_backends \ + " arrow::SupportedMemoryBackendNames"() + + CStatus c_jemalloc_set_decay_ms" arrow::jemalloc_set_decay_ms"(int ms) + + cdef cppclass CListType" arrow::ListType"(CDataType): + CListType(const shared_ptr[CDataType]& value_type) + CListType(const shared_ptr[CField]& field) + shared_ptr[CDataType] value_type() + shared_ptr[CField] value_field() + + cdef cppclass CLargeListType" arrow::LargeListType"(CDataType): + CLargeListType(const shared_ptr[CDataType]& value_type) + CLargeListType(const shared_ptr[CField]& field) + shared_ptr[CDataType] value_type() + shared_ptr[CField] value_field() + + cdef cppclass CListViewType" arrow::ListViewType"(CDataType): + CListViewType(const shared_ptr[CDataType]& value_type) + CListViewType(const shared_ptr[CField]& field) + shared_ptr[CDataType] value_type() + shared_ptr[CField] value_field() + + cdef cppclass CLargeListViewType" arrow::LargeListViewType"(CDataType): + CLargeListViewType(const shared_ptr[CDataType]& value_type) + CLargeListViewType(const shared_ptr[CField]& field) + shared_ptr[CDataType] value_type() + shared_ptr[CField] value_field() + + cdef cppclass CMapType" arrow::MapType"(CDataType): + CMapType(const shared_ptr[CField]& key_field, + const shared_ptr[CField]& item_field, c_bool keys_sorted) + shared_ptr[CDataType] key_type() + shared_ptr[CField] key_field() + shared_ptr[CDataType] item_type() + shared_ptr[CField] item_field() + c_bool keys_sorted() + + cdef cppclass CFixedSizeListType" arrow::FixedSizeListType"(CDataType): + CFixedSizeListType(const shared_ptr[CDataType]& value_type, + int32_t list_size) + CFixedSizeListType(const shared_ptr[CField]& field, int32_t list_size) + shared_ptr[CDataType] value_type() + shared_ptr[CField] value_field() + int32_t list_size() + + cdef cppclass CStringType" arrow::StringType"(CDataType): + pass + + cdef cppclass CFixedSizeBinaryType \ + " arrow::FixedSizeBinaryType"(CFixedWidthType): + CFixedSizeBinaryType(int byte_width) + int byte_width() + int bit_width() + + cdef cppclass CDecimal128Type \ + " arrow::Decimal128Type"(CFixedSizeBinaryType): + CDecimal128Type(int precision, int scale) + int 
precision() + int scale() + + cdef cppclass CDecimal256Type \ + " arrow::Decimal256Type"(CFixedSizeBinaryType): + CDecimal256Type(int precision, int scale) + int precision() + int scale() + + cdef cppclass CRunEndEncodedType " arrow::RunEndEncodedType"(CDataType): + CRunEndEncodedType(const shared_ptr[CDataType]& run_end_type, + const shared_ptr[CDataType]& value_type) + const shared_ptr[CDataType]& run_end_type() + const shared_ptr[CDataType]& value_type() + + cdef cppclass CField" arrow::Field": + cppclass CMergeOptions "MergeOptions": + CMergeOptions() + c_bool promote_nullability + + @staticmethod + CMergeOptions Defaults() + + @staticmethod + CMergeOptions Permissive() + + const c_string& name() + shared_ptr[CDataType] type() + c_bool nullable() + + c_string ToString() + c_bool Equals(const CField& other, c_bool check_metadata) + + shared_ptr[const CKeyValueMetadata] metadata() + + CField(const c_string& name, const shared_ptr[CDataType]& type, + c_bool nullable) + + CField(const c_string& name, const shared_ptr[CDataType]& type, + c_bool nullable, const shared_ptr[CKeyValueMetadata]& metadata) + + # Removed const in Cython so don't have to cast to get code to generate + shared_ptr[CField] AddMetadata( + const shared_ptr[CKeyValueMetadata]& metadata) + shared_ptr[CField] WithMetadata( + const shared_ptr[CKeyValueMetadata]& metadata) + shared_ptr[CField] RemoveMetadata() + shared_ptr[CField] WithType(const shared_ptr[CDataType]& type) + shared_ptr[CField] WithName(const c_string& name) + shared_ptr[CField] WithNullable(c_bool nullable) + vector[shared_ptr[CField]] Flatten() + + cdef cppclass CFieldRef" arrow::FieldRef": + CFieldRef() + CFieldRef(c_string name) + CFieldRef(int index) + CFieldRef(vector[CFieldRef]) + + @staticmethod + CResult[CFieldRef] FromDotPath(c_string& dot_path) + const c_string* name() const + + cdef cppclass CFieldRefHash" arrow::FieldRef::Hash": + pass + + cdef cppclass CStructType" arrow::StructType"(CDataType): + CStructType(const vector[shared_ptr[CField]]& fields) + + shared_ptr[CField] GetFieldByName(const c_string& name) + vector[shared_ptr[CField]] GetAllFieldsByName(const c_string& name) + int GetFieldIndex(const c_string& name) + vector[int] GetAllFieldIndices(const c_string& name) + + cdef cppclass CUnionType" arrow::UnionType"(CDataType): + UnionMode mode() + const vector[int8_t]& type_codes() + const vector[int]& child_ids() + + cdef shared_ptr[CDataType] CMakeSparseUnionType" arrow::sparse_union"( + vector[shared_ptr[CField]] fields, + vector[int8_t] type_codes) + + cdef shared_ptr[CDataType] CMakeDenseUnionType" arrow::dense_union"( + vector[shared_ptr[CField]] fields, + vector[int8_t] type_codes) + + cdef shared_ptr[CDataType] CMakeRunEndEncodedType" arrow::run_end_encoded"( + shared_ptr[CDataType] run_end_type, + shared_ptr[CDataType] value_type) + + cdef shared_ptr[CDataType] CMakeListViewType" arrow::list_view"( + shared_ptr[CField] value_type) + + cdef shared_ptr[CDataType] CMakeLargeListViewType" arrow::large_list_view"( + shared_ptr[CField] value_type) + + cdef cppclass CSchema" arrow::Schema": + CSchema(const vector[shared_ptr[CField]]& fields) + CSchema(const vector[shared_ptr[CField]]& fields, + const shared_ptr[const CKeyValueMetadata]& metadata) + + # Does not actually exist, but gets Cython to not complain + CSchema(const vector[shared_ptr[CField]]& fields, + const shared_ptr[CKeyValueMetadata]& metadata) + + c_bool Equals(const CSchema& other, c_bool check_metadata) + + shared_ptr[CField] field(int i) + shared_ptr[const 
CKeyValueMetadata] metadata() + shared_ptr[CField] GetFieldByName(const c_string& name) + vector[shared_ptr[CField]] GetAllFieldsByName(const c_string& name) + int GetFieldIndex(const c_string& name) + vector[int] GetAllFieldIndices(const c_string& name) + const vector[shared_ptr[CField]] fields() + int num_fields() + c_string ToString() + + CResult[shared_ptr[CSchema]] AddField(int i, + const shared_ptr[CField]& field) + CResult[shared_ptr[CSchema]] RemoveField(int i) + CResult[shared_ptr[CSchema]] SetField(int i, + const shared_ptr[CField]& field) + + # Removed const in Cython so don't have to cast to get code to generate + shared_ptr[CSchema] AddMetadata( + const shared_ptr[CKeyValueMetadata]& metadata) + shared_ptr[CSchema] WithMetadata( + const shared_ptr[CKeyValueMetadata]& metadata) + shared_ptr[CSchema] RemoveMetadata() + + CResult[shared_ptr[CSchema]] UnifySchemas( + const vector[shared_ptr[CSchema]]& schemas, + CField.CMergeOptions field_merge_options) + + cdef cppclass PrettyPrintOptions: + PrettyPrintOptions() + PrettyPrintOptions(int indent_arg) + PrettyPrintOptions(int indent_arg, int window_arg) + int indent + int indent_size + int window + int container_window + c_string null_rep + c_bool skip_new_lines + c_bool truncate_metadata + c_bool show_field_metadata + c_bool show_schema_metadata + + @staticmethod + PrettyPrintOptions Defaults() + + CStatus PrettyPrint(const CArray& schema, + const PrettyPrintOptions& options, + c_string* result) + CStatus PrettyPrint(const CChunkedArray& schema, + const PrettyPrintOptions& options, + c_string* result) + CStatus PrettyPrint(const CSchema& schema, + const PrettyPrintOptions& options, + c_string* result) + + cdef cppclass CBooleanArray" arrow::BooleanArray"(CArray): + c_bool Value(int i) + int64_t false_count() + int64_t true_count() + + cdef cppclass CUInt8Array" arrow::UInt8Array"(CArray): + uint8_t Value(int i) + + cdef cppclass CInt8Array" arrow::Int8Array"(CArray): + int8_t Value(int i) + + cdef cppclass CUInt16Array" arrow::UInt16Array"(CArray): + uint16_t Value(int i) + + cdef cppclass CInt16Array" arrow::Int16Array"(CArray): + int16_t Value(int i) + + cdef cppclass CUInt32Array" arrow::UInt32Array"(CArray): + uint32_t Value(int i) + + cdef cppclass CInt32Array" arrow::Int32Array"(CArray): + int32_t Value(int i) + + cdef cppclass CUInt64Array" arrow::UInt64Array"(CArray): + uint64_t Value(int i) + + cdef cppclass CInt64Array" arrow::Int64Array"(CArray): + int64_t Value(int i) + + cdef cppclass CDate32Array" arrow::Date32Array"(CArray): + int32_t Value(int i) + + cdef cppclass CDate64Array" arrow::Date64Array"(CArray): + int64_t Value(int i) + + cdef cppclass CTime32Array" arrow::Time32Array"(CArray): + int32_t Value(int i) + + cdef cppclass CTime64Array" arrow::Time64Array"(CArray): + int64_t Value(int i) + + cdef cppclass CTimestampArray" arrow::TimestampArray"(CArray): + int64_t Value(int i) + + cdef cppclass CDurationArray" arrow::DurationArray"(CArray): + int64_t Value(int i) + + cdef cppclass CMonthDayNanoIntervalArray \ + "arrow::MonthDayNanoIntervalArray"(CArray): + pass + + cdef cppclass CHalfFloatArray" arrow::HalfFloatArray"(CArray): + uint16_t Value(int i) + + cdef cppclass CFloatArray" arrow::FloatArray"(CArray): + float Value(int i) + + cdef cppclass CDoubleArray" arrow::DoubleArray"(CArray): + double Value(int i) + + cdef cppclass CFixedSizeBinaryArray" arrow::FixedSizeBinaryArray"(CArray): + const uint8_t* GetValue(int i) + + cdef cppclass CDecimal128Array" arrow::Decimal128Array"( + CFixedSizeBinaryArray + ): + 
c_string FormatValue(int i) + + cdef cppclass CDecimal256Array" arrow::Decimal256Array"( + CFixedSizeBinaryArray + ): + c_string FormatValue(int i) + + cdef cppclass CListArray" arrow::ListArray"(CArray): + @staticmethod + CResult[shared_ptr[CArray]] FromArrays( + const CArray& offsets, + const CArray& values, + CMemoryPool* pool, + shared_ptr[CBuffer] null_bitmap, + ) + + @staticmethod + CResult[shared_ptr[CArray]] FromArraysAndType" FromArrays"( + shared_ptr[CDataType], + const CArray& offsets, + const CArray& values, + CMemoryPool* pool, + shared_ptr[CBuffer] null_bitmap, + ) + + const int32_t* raw_value_offsets() + int32_t value_offset(int i) + int32_t value_length(int i) + shared_ptr[CArray] values() + shared_ptr[CArray] offsets() + shared_ptr[CDataType] value_type() + + cdef cppclass CLargeListArray" arrow::LargeListArray"(CArray): + @staticmethod + CResult[shared_ptr[CArray]] FromArrays( + const CArray& offsets, + const CArray& values, + CMemoryPool* pool, + shared_ptr[CBuffer] null_bitmap + ) + + @staticmethod + CResult[shared_ptr[CArray]] FromArraysAndType" FromArrays"( + shared_ptr[CDataType], + const CArray& offsets, + const CArray& values, + CMemoryPool* pool, + shared_ptr[CBuffer] null_bitmap + ) + + int64_t value_offset(int i) + int64_t value_length(int i) + shared_ptr[CArray] values() + shared_ptr[CArray] offsets() + shared_ptr[CDataType] value_type() + + cdef cppclass CFixedSizeListArray" arrow::FixedSizeListArray"(CArray): + @staticmethod + CResult[shared_ptr[CArray]] FromArrays( + const shared_ptr[CArray]& values, + int32_t list_size, + shared_ptr[CBuffer] null_bitmap) + + @staticmethod + CResult[shared_ptr[CArray]] FromArraysAndType" FromArrays"( + const shared_ptr[CArray]& values, + shared_ptr[CDataType], + shared_ptr[CBuffer] null_bitmap) + + int64_t value_offset(int i) + int64_t value_length(int i) + shared_ptr[CArray] values() + shared_ptr[CDataType] value_type() + + cdef cppclass CListViewArray" arrow::ListViewArray"(CArray): + @staticmethod + CResult[shared_ptr[CArray]] FromArrays( + const CArray& offsets, + const CArray& sizes, + const CArray& values, + CMemoryPool* pool, + shared_ptr[CBuffer] null_bitmap, + ) + + @staticmethod + CResult[shared_ptr[CArray]] FromArraysAndType" FromArrays"( + shared_ptr[CDataType], + const CArray& offsets, + const CArray& sizes, + const CArray& values, + CMemoryPool* pool, + shared_ptr[CBuffer] null_bitmap, + ) + + CResult[shared_ptr[CArray]] Flatten( + CMemoryPool* pool + ) + + const int32_t* raw_value_offsets() + const int32_t* raw_value_sizes() + int32_t value_offset(int i) + int32_t value_length(int i) + shared_ptr[CArray] values() + shared_ptr[CArray] offsets() + shared_ptr[CArray] sizes() + shared_ptr[CDataType] value_type() + + cdef cppclass CLargeListViewArray" arrow::LargeListViewArray"(CArray): + @staticmethod + CResult[shared_ptr[CArray]] FromArrays( + const CArray& offsets, + const CArray& sizes, + const CArray& values, + CMemoryPool* pool, + shared_ptr[CBuffer] null_bitmap, + ) + + @staticmethod + CResult[shared_ptr[CArray]] FromArraysAndType" FromArrays"( + shared_ptr[CDataType], + const CArray& offsets, + const CArray& sizes, + const CArray& values, + CMemoryPool* pool, + shared_ptr[CBuffer] null_bitmap, + ) + + CResult[shared_ptr[CArray]] Flatten( + CMemoryPool* pool + ) + + int64_t value_offset(int i) + int64_t value_length(int i) + shared_ptr[CArray] values() + shared_ptr[CArray] offsets() + shared_ptr[CArray] sizes() + shared_ptr[CDataType] value_type() + + cdef cppclass CMapArray" arrow::MapArray"(CArray): + 
@staticmethod + CResult[shared_ptr[CArray]] FromArrays( + const shared_ptr[CArray]& offsets, + const shared_ptr[CArray]& keys, + const shared_ptr[CArray]& items, + CMemoryPool* pool) + + @staticmethod + CResult[shared_ptr[CArray]] FromArraysAndType" FromArrays"( + shared_ptr[CDataType], + const shared_ptr[CArray]& offsets, + const shared_ptr[CArray]& keys, + const shared_ptr[CArray]& items, + CMemoryPool* pool) + + shared_ptr[CArray] keys() + shared_ptr[CArray] items() + CMapType* map_type() + int64_t value_offset(int i) + int64_t value_length(int i) + shared_ptr[CArray] values() + shared_ptr[CDataType] value_type() + + cdef cppclass CUnionArray" arrow::UnionArray"(CArray): + shared_ptr[CBuffer] type_codes() + int8_t* raw_type_codes() + int child_id(int64_t index) + shared_ptr[CArray] field(int pos) + const CArray* UnsafeField(int pos) + UnionMode mode() + + cdef cppclass CSparseUnionArray" arrow::SparseUnionArray"(CUnionArray): + @staticmethod + CResult[shared_ptr[CArray]] Make( + const CArray& type_codes, + const vector[shared_ptr[CArray]]& children, + const vector[c_string]& field_names, + const vector[int8_t]& type_codes) + + cdef cppclass CDenseUnionArray" arrow::DenseUnionArray"(CUnionArray): + @staticmethod + CResult[shared_ptr[CArray]] Make( + const CArray& type_codes, + const CArray& value_offsets, + const vector[shared_ptr[CArray]]& children, + const vector[c_string]& field_names, + const vector[int8_t]& type_codes) + + int32_t value_offset(int i) + shared_ptr[CBuffer] value_offsets() + + cdef cppclass CBinaryArray" arrow::BinaryArray"(CArray): + const uint8_t* GetValue(int i, int32_t* length) + shared_ptr[CBuffer] value_data() + int32_t value_offset(int64_t i) + int32_t value_length(int64_t i) + int32_t total_values_length() + + cdef cppclass CLargeBinaryArray" arrow::LargeBinaryArray"(CArray): + const uint8_t* GetValue(int i, int64_t* length) + shared_ptr[CBuffer] value_data() + int64_t value_offset(int64_t i) + int64_t value_length(int64_t i) + int64_t total_values_length() + + cdef cppclass CStringArray" arrow::StringArray"(CBinaryArray): + CStringArray(int64_t length, shared_ptr[CBuffer] value_offsets, + shared_ptr[CBuffer] data, + shared_ptr[CBuffer] null_bitmap, + int64_t null_count, + int64_t offset) + + c_string GetString(int i) + + cdef cppclass CLargeStringArray" arrow::LargeStringArray" \ + (CLargeBinaryArray): + CLargeStringArray(int64_t length, shared_ptr[CBuffer] value_offsets, + shared_ptr[CBuffer] data, + shared_ptr[CBuffer] null_bitmap, + int64_t null_count, + int64_t offset) + + c_string GetString(int i) + + cdef cppclass CStructArray" arrow::StructArray"(CArray): + CStructArray(shared_ptr[CDataType]& type, int64_t length, + vector[shared_ptr[CArray]]& children, + shared_ptr[CBuffer] null_bitmap=nullptr, + int64_t null_count=-1, + int64_t offset=0) + + # XXX Cython crashes if default argument values are declared here + # https://github.com/cython/cython/issues/2167 + @staticmethod + CResult[shared_ptr[CArray]] MakeFromFieldNames "Make"( + vector[shared_ptr[CArray]] children, + vector[c_string] field_names, + shared_ptr[CBuffer] null_bitmap, + int64_t null_count, + int64_t offset) + + @staticmethod + CResult[shared_ptr[CArray]] MakeFromFields "Make"( + vector[shared_ptr[CArray]] children, + vector[shared_ptr[CField]] fields, + shared_ptr[CBuffer] null_bitmap, + int64_t null_count, + int64_t offset) + + shared_ptr[CArray] field(int pos) + shared_ptr[CArray] GetFieldByName(const c_string& name) const + CResult[shared_ptr[CArray]] GetFlattenedField(int index, 
CMemoryPool* pool) const + + CResult[vector[shared_ptr[CArray]]] Flatten(CMemoryPool* pool) + + cdef cppclass CRunEndEncodedArray" arrow::RunEndEncodedArray"(CArray): + @staticmethod + CResult[shared_ptr[CRunEndEncodedArray]] Make( + const shared_ptr[CDataType]& type, + int64_t logical_length, + const shared_ptr[CArray]& run_ends, + const shared_ptr[CArray]& values, + int64_t logical_offset) + + @staticmethod + CResult[shared_ptr[CRunEndEncodedArray]] MakeFromArrays "Make"( + int64_t logical_length, + const shared_ptr[CArray]& run_ends, + const shared_ptr[CArray]& values, + int64_t logical_offset) + + shared_ptr[CArray]& run_ends() + shared_ptr[CArray]& values() + + int64_t FindPhysicalOffset() + int64_t FindPhysicalLength() + + cdef cppclass CChunkedArray" arrow::ChunkedArray": + CChunkedArray(const vector[shared_ptr[CArray]]& arrays) + CChunkedArray(const vector[shared_ptr[CArray]]& arrays, + const shared_ptr[CDataType]& type) + + @staticmethod + CResult[shared_ptr[CChunkedArray]] Make(vector[shared_ptr[CArray]] chunks, + shared_ptr[CDataType] type) + int64_t length() + int64_t null_count() + int num_chunks() + c_bool Equals(const CChunkedArray& other) + + shared_ptr[CArray] chunk(int i) + shared_ptr[CDataType] type() + CResult[shared_ptr[CScalar]] GetScalar(int64_t index) const + shared_ptr[CChunkedArray] Slice(int64_t offset, int64_t length) const + shared_ptr[CChunkedArray] Slice(int64_t offset) const + + CResult[vector[shared_ptr[CChunkedArray]]] Flatten(CMemoryPool* pool) + + CStatus Validate() const + CStatus ValidateFull() const + + cdef cppclass CRecordBatch" arrow::RecordBatch": + @staticmethod + shared_ptr[CRecordBatch] Make( + const shared_ptr[CSchema]& schema, int64_t num_rows, + const vector[shared_ptr[CArray]]& columns) + + CResult[shared_ptr[CStructArray]] ToStructArray() const + + @staticmethod + CResult[shared_ptr[CRecordBatch]] FromStructArray( + const shared_ptr[CArray]& array) + + c_bool Equals(const CRecordBatch& other, c_bool check_metadata) + + shared_ptr[CSchema] schema() + shared_ptr[CArray] column(int i) + const c_string& column_name(int i) + + CResult[shared_ptr[CRecordBatch]] AddColumn( + int i, shared_ptr[CField] field, shared_ptr[CArray] column) + CResult[shared_ptr[CRecordBatch]] RemoveColumn(int i) + CResult[shared_ptr[CRecordBatch]] SetColumn( + int i, shared_ptr[CField] field, shared_ptr[CArray] column) + + const vector[shared_ptr[CArray]]& columns() + + CResult[shared_ptr[CRecordBatch]] RenameColumns(const vector[c_string]&) + CResult[shared_ptr[CRecordBatch]] SelectColumns(const vector[int]&) + + int num_columns() + int64_t num_rows() + + CStatus Validate() const + CStatus ValidateFull() const + + shared_ptr[CRecordBatch] ReplaceSchemaMetadata( + const shared_ptr[CKeyValueMetadata]& metadata) + + shared_ptr[CRecordBatch] Slice(int64_t offset) + shared_ptr[CRecordBatch] Slice(int64_t offset, int64_t length) + + CResult[shared_ptr[CTensor]] ToTensor(c_bool null_to_nan, c_bool row_major, + CMemoryPool* pool) const + + cdef cppclass CRecordBatchWithMetadata" arrow::RecordBatchWithMetadata": + shared_ptr[CRecordBatch] batch + # The struct in C++ does not actually have these two `const` qualifiers, but + # adding `const` gets Cython to not complain + const shared_ptr[const CKeyValueMetadata] custom_metadata + + cdef cppclass CTable" arrow::Table": + CTable(const shared_ptr[CSchema]& schema, + const vector[shared_ptr[CChunkedArray]]& columns) + + @staticmethod + shared_ptr[CTable] Make( + const shared_ptr[CSchema]& schema, + const 
vector[shared_ptr[CChunkedArray]]& columns) + + @staticmethod + shared_ptr[CTable] MakeWithRows "Make"( + const shared_ptr[CSchema]& schema, + const vector[shared_ptr[CChunkedArray]]& columns, + int64_t num_rows) + + @staticmethod + shared_ptr[CTable] MakeFromArrays" Make"( + const shared_ptr[CSchema]& schema, + const vector[shared_ptr[CArray]]& arrays) + + @staticmethod + CResult[shared_ptr[CTable]] FromRecordBatchReader( + CRecordBatchReader *reader) + + @staticmethod + CResult[shared_ptr[CTable]] FromRecordBatches( + const shared_ptr[CSchema]& schema, + const vector[shared_ptr[CRecordBatch]]& batches) + + int num_columns() + int64_t num_rows() + + c_bool Equals(const CTable& other, c_bool check_metadata) + + shared_ptr[CSchema] schema() + shared_ptr[CChunkedArray] column(int i) + shared_ptr[CField] field(int i) + + CResult[shared_ptr[CTable]] AddColumn( + int i, shared_ptr[CField] field, shared_ptr[CChunkedArray] column) + CResult[shared_ptr[CTable]] RemoveColumn(int i) + CResult[shared_ptr[CTable]] SetColumn( + int i, shared_ptr[CField] field, shared_ptr[CChunkedArray] column) + + vector[c_string] ColumnNames() + CResult[shared_ptr[CTable]] RenameColumns(const vector[c_string]&) + CResult[shared_ptr[CTable]] SelectColumns(const vector[int]&) + + CResult[shared_ptr[CTable]] Flatten(CMemoryPool* pool) + + CResult[shared_ptr[CTable]] CombineChunks(CMemoryPool* pool) + + CStatus Validate() const + CStatus ValidateFull() const + + shared_ptr[CTable] ReplaceSchemaMetadata( + const shared_ptr[CKeyValueMetadata]& metadata) + + shared_ptr[CTable] Slice(int64_t offset) + shared_ptr[CTable] Slice(int64_t offset, int64_t length) + + cdef cppclass CRecordBatchReader" arrow::RecordBatchReader": + shared_ptr[CSchema] schema() + CStatus Close() + CResult[CRecordBatchWithMetadata] ReadNext() + CStatus ReadNext(shared_ptr[CRecordBatch]* batch) + CResult[shared_ptr[CTable]] ToTable() + + cdef cppclass TableBatchReader(CRecordBatchReader): + TableBatchReader(const CTable& table) + TableBatchReader(shared_ptr[CTable] table) + void set_chunksize(int64_t chunksize) + + cdef cppclass CTensor" arrow::Tensor": + shared_ptr[CDataType] type() + shared_ptr[CBuffer] data() + + const vector[int64_t]& shape() + const vector[int64_t]& strides() + int64_t size() + + int ndim() + const vector[c_string]& dim_names() + const c_string& dim_name(int i) + + c_bool is_mutable() + c_bool is_contiguous() + Type type_id() + c_bool Equals(const CTensor& other) + + cdef cppclass CSparseIndex" arrow::SparseIndex": + pass + + cdef cppclass CSparseCOOIndex" arrow::SparseCOOIndex": + c_bool is_canonical() + + cdef cppclass CSparseCOOTensor" arrow::SparseCOOTensor": + shared_ptr[CDataType] type() + shared_ptr[CBuffer] data() + CResult[shared_ptr[CTensor]] ToTensor() + + shared_ptr[CSparseIndex] sparse_index() + + const vector[int64_t]& shape() + int64_t size() + int64_t non_zero_length() + + int ndim() + const vector[c_string]& dim_names() + const c_string& dim_name(int i) + + c_bool is_mutable() + Type type_id() + c_bool Equals(const CSparseCOOTensor& other) + + cdef cppclass CSparseCSRMatrix" arrow::SparseCSRMatrix": + shared_ptr[CDataType] type() + shared_ptr[CBuffer] data() + CResult[shared_ptr[CTensor]] ToTensor() + + const vector[int64_t]& shape() + int64_t size() + int64_t non_zero_length() + + int ndim() + const vector[c_string]& dim_names() + const c_string& dim_name(int i) + + c_bool is_mutable() + Type type_id() + c_bool Equals(const CSparseCSRMatrix& other) + + cdef cppclass CSparseCSCMatrix" arrow::SparseCSCMatrix": + 
shared_ptr[CDataType] type() + shared_ptr[CBuffer] data() + CResult[shared_ptr[CTensor]] ToTensor() + + const vector[int64_t]& shape() + int64_t size() + int64_t non_zero_length() + + int ndim() + const vector[c_string]& dim_names() + const c_string& dim_name(int i) + + c_bool is_mutable() + Type type_id() + c_bool Equals(const CSparseCSCMatrix& other) + + cdef cppclass CSparseCSFTensor" arrow::SparseCSFTensor": + shared_ptr[CDataType] type() + shared_ptr[CBuffer] data() + CResult[shared_ptr[CTensor]] ToTensor() + + const vector[int64_t]& shape() + int64_t size() + int64_t non_zero_length() + + int ndim() + const vector[c_string]& dim_names() + const c_string& dim_name(int i) + + c_bool is_mutable() + Type type_id() + c_bool Equals(const CSparseCSFTensor& other) + + cdef cppclass CScalar" arrow::Scalar": + CScalar(shared_ptr[CDataType]) + + shared_ptr[CDataType] type + c_bool is_valid + + c_string ToString() const + c_bool Equals(const CScalar& other) const + CStatus Validate() const + CStatus ValidateFull() const + + cdef cppclass CScalarHash" arrow::Scalar::Hash": + size_t operator()(const shared_ptr[CScalar]& scalar) const + + cdef cppclass CNullScalar" arrow::NullScalar"(CScalar): + CNullScalar() + + cdef cppclass CBooleanScalar" arrow::BooleanScalar"(CScalar): + CBooleanScalar(c_bool value) + c_bool value + + cdef cppclass CInt8Scalar" arrow::Int8Scalar"(CScalar): + int8_t value + + cdef cppclass CUInt8Scalar" arrow::UInt8Scalar"(CScalar): + uint8_t value + + cdef cppclass CInt16Scalar" arrow::Int16Scalar"(CScalar): + int16_t value + + cdef cppclass CUInt16Scalar" arrow::UInt16Scalar"(CScalar): + uint16_t value + + cdef cppclass CInt32Scalar" arrow::Int32Scalar"(CScalar): + int32_t value + + cdef cppclass CUInt32Scalar" arrow::UInt32Scalar"(CScalar): + uint32_t value + + cdef cppclass CInt64Scalar" arrow::Int64Scalar"(CScalar): + int64_t value + + cdef cppclass CUInt64Scalar" arrow::UInt64Scalar"(CScalar): + uint64_t value + + cdef cppclass CHalfFloatScalar" arrow::HalfFloatScalar"(CScalar): + npy_half value + + cdef cppclass CFloatScalar" arrow::FloatScalar"(CScalar): + float value + + cdef cppclass CDoubleScalar" arrow::DoubleScalar"(CScalar): + double value + + cdef cppclass CDecimal128Scalar" arrow::Decimal128Scalar"(CScalar): + CDecimal128 value + + cdef cppclass CDecimal256Scalar" arrow::Decimal256Scalar"(CScalar): + CDecimal256 value + + cdef cppclass CDate32Scalar" arrow::Date32Scalar"(CScalar): + int32_t value + + cdef cppclass CDate64Scalar" arrow::Date64Scalar"(CScalar): + int64_t value + + cdef cppclass CTime32Scalar" arrow::Time32Scalar"(CScalar): + int32_t value + + cdef cppclass CTime64Scalar" arrow::Time64Scalar"(CScalar): + int64_t value + + cdef cppclass CTimestampScalar" arrow::TimestampScalar"(CScalar): + int64_t value + + cdef cppclass CDurationScalar" arrow::DurationScalar"(CScalar): + int64_t value + + cdef cppclass CMonthDayNanoIntervalScalar \ + "arrow::MonthDayNanoIntervalScalar"(CScalar): + pass + + cdef cppclass CBaseBinaryScalar" arrow::BaseBinaryScalar"(CScalar): + shared_ptr[CBuffer] value + + cdef cppclass CBaseListScalar" arrow::BaseListScalar"(CScalar): + shared_ptr[CArray] value + + cdef cppclass CListScalar" arrow::ListScalar"(CBaseListScalar): + pass + + cdef cppclass CListViewScalar" arrow::ListViewScalar"(CBaseListScalar): + pass + + cdef cppclass CLargeListViewScalar" arrow::LargeListViewScalar"(CBaseListScalar): + pass + + cdef cppclass CMapScalar" arrow::MapScalar"(CListScalar): + pass + + cdef cppclass CStructScalar" 
arrow::StructScalar"(CScalar): + vector[shared_ptr[CScalar]] value + CResult[shared_ptr[CScalar]] field(CFieldRef ref) const + + cdef cppclass CDictionaryScalarIndexAndDictionary \ + "arrow::DictionaryScalar::ValueType": + shared_ptr[CScalar] index + shared_ptr[CArray] dictionary + + cdef cppclass CDictionaryScalar" arrow::DictionaryScalar"(CScalar): + CDictionaryScalar(CDictionaryScalarIndexAndDictionary value, + shared_ptr[CDataType], c_bool is_valid) + CDictionaryScalarIndexAndDictionary value + + CResult[shared_ptr[CScalar]] GetEncodedValue() + + cdef cppclass CUnionScalar" arrow::UnionScalar"(CScalar): + int8_t type_code + + cdef cppclass CDenseUnionScalar" arrow::DenseUnionScalar"(CUnionScalar): + shared_ptr[CScalar] value + + cdef cppclass CSparseUnionScalar" arrow::SparseUnionScalar"(CUnionScalar): + vector[shared_ptr[CScalar]] value + int child_id + + cdef cppclass CRunEndEncodedScalar" arrow::RunEndEncodedScalar"(CScalar): + shared_ptr[CScalar] value + + cdef cppclass CExtensionScalar" arrow::ExtensionScalar"(CScalar): + CExtensionScalar(shared_ptr[CScalar] storage, + shared_ptr[CDataType], c_bool is_valid) + shared_ptr[CScalar] value + + shared_ptr[CScalar] MakeScalar[Value](Value value) + + cdef cppclass CConcatenateTablesOptions" arrow::ConcatenateTablesOptions": + c_bool unify_schemas + CField.CMergeOptions field_merge_options + + @staticmethod + CConcatenateTablesOptions Defaults() + + CResult[shared_ptr[CTable]] ConcatenateTables( + const vector[shared_ptr[CTable]]& tables, + CConcatenateTablesOptions options, + CMemoryPool* memory_pool) + + cdef cppclass CDictionaryUnifier" arrow::DictionaryUnifier": + @staticmethod + CResult[shared_ptr[CChunkedArray]] UnifyChunkedArray( + shared_ptr[CChunkedArray] array, CMemoryPool* pool) + + @staticmethod + CResult[shared_ptr[CTable]] UnifyTable( + const CTable& table, CMemoryPool* pool) + + shared_ptr[CScalar] MakeNullScalar(shared_ptr[CDataType] type) + + +cdef extern from "arrow/c/dlpack_abi.h" nogil: + ctypedef enum DLDeviceType: + kDLCPU = 1 + + ctypedef struct DLDevice: + DLDeviceType device_type + int32_t device_id + + ctypedef struct DLManagedTensor: + void (*deleter)(DLManagedTensor*) + + +cdef extern from "arrow/c/dlpack.h" namespace "arrow::dlpack" nogil: + CResult[DLManagedTensor*] ExportToDLPack" arrow::dlpack::ExportArray"( + const shared_ptr[CArray]& arr) + + CResult[DLDevice] ExportDevice(const shared_ptr[CArray]& arr) + + +cdef extern from "arrow/builder.h" namespace "arrow" nogil: + + cdef cppclass CArrayBuilder" arrow::ArrayBuilder": + CArrayBuilder(shared_ptr[CDataType], CMemoryPool* pool) + + int64_t length() + int64_t null_count() + CStatus AppendNull() + CStatus Finish(shared_ptr[CArray]* out) + CStatus Reserve(int64_t additional_capacity) + + cdef cppclass CBooleanBuilder" arrow::BooleanBuilder"(CArrayBuilder): + CBooleanBuilder(CMemoryPool* pool) + CStatus Append(const c_bool val) + CStatus Append(const uint8_t val) + + cdef cppclass CInt8Builder" arrow::Int8Builder"(CArrayBuilder): + CInt8Builder(CMemoryPool* pool) + CStatus Append(const int8_t value) + + cdef cppclass CInt16Builder" arrow::Int16Builder"(CArrayBuilder): + CInt16Builder(CMemoryPool* pool) + CStatus Append(const int16_t value) + + cdef cppclass CInt32Builder" arrow::Int32Builder"(CArrayBuilder): + CInt32Builder(CMemoryPool* pool) + CStatus Append(const int32_t value) + + cdef cppclass CInt64Builder" arrow::Int64Builder"(CArrayBuilder): + CInt64Builder(CMemoryPool* pool) + CStatus Append(const int64_t value) + + cdef cppclass CUInt8Builder" 
arrow::UInt8Builder"(CArrayBuilder): + CUInt8Builder(CMemoryPool* pool) + CStatus Append(const uint8_t value) + + cdef cppclass CUInt16Builder" arrow::UInt16Builder"(CArrayBuilder): + CUInt16Builder(CMemoryPool* pool) + CStatus Append(const uint16_t value) + + cdef cppclass CUInt32Builder" arrow::UInt32Builder"(CArrayBuilder): + CUInt32Builder(CMemoryPool* pool) + CStatus Append(const uint32_t value) + + cdef cppclass CUInt64Builder" arrow::UInt64Builder"(CArrayBuilder): + CUInt64Builder(CMemoryPool* pool) + CStatus Append(const uint64_t value) + + cdef cppclass CHalfFloatBuilder" arrow::HalfFloatBuilder"(CArrayBuilder): + CHalfFloatBuilder(CMemoryPool* pool) + + cdef cppclass CFloatBuilder" arrow::FloatBuilder"(CArrayBuilder): + CFloatBuilder(CMemoryPool* pool) + CStatus Append(const float value) + + cdef cppclass CDoubleBuilder" arrow::DoubleBuilder"(CArrayBuilder): + CDoubleBuilder(CMemoryPool* pool) + CStatus Append(const double value) + + cdef cppclass CBinaryBuilder" arrow::BinaryBuilder"(CArrayBuilder): + CArrayBuilder(shared_ptr[CDataType], CMemoryPool* pool) + CStatus Append(const char* value, int32_t length) + + cdef cppclass CStringBuilder" arrow::StringBuilder"(CBinaryBuilder): + CStringBuilder(CMemoryPool* pool) + CStatus Append(const c_string& value) + + cdef cppclass CBinaryViewBuilder" arrow::BinaryViewBuilder"(CArrayBuilder): + CBinaryViewBuilder(shared_ptr[CDataType], CMemoryPool* pool) + CStatus Append(const char* value, int32_t length) + + cdef cppclass CStringViewBuilder" arrow::StringViewBuilder"(CBinaryViewBuilder): + CStringViewBuilder(CMemoryPool* pool) + CStatus Append(const c_string& value) + + cdef cppclass CTimestampBuilder "arrow::TimestampBuilder"(CArrayBuilder): + CTimestampBuilder(const shared_ptr[CDataType] typ, CMemoryPool* pool) + CStatus Append(const int64_t value) + + cdef cppclass CDate32Builder "arrow::Date32Builder"(CArrayBuilder): + CDate32Builder(CMemoryPool* pool) + CStatus Append(const int32_t value) + + cdef cppclass CDate64Builder "arrow::Date64Builder"(CArrayBuilder): + CDate64Builder(CMemoryPool* pool) + CStatus Append(const int64_t value) + + +# Use typedef to emulate syntax for std::function +ctypedef void CallbackTransform(object, const shared_ptr[CBuffer]& src, + shared_ptr[CBuffer]* dest) + +ctypedef CResult[shared_ptr[CInputStream]] StreamWrapFunc( + shared_ptr[CInputStream]) + + +cdef extern from "arrow/util/cancel.h" namespace "arrow" nogil: + cdef cppclass CStopToken "arrow::StopToken": + CStatus Poll() + c_bool IsStopRequested() + + cdef cppclass CStopSource "arrow::StopSource": + CStopToken token() + + CResult[CStopSource*] SetSignalStopSource() + void ResetSignalStopSource() + + CStatus RegisterCancellingSignalHandler(vector[int] signals) + void UnregisterCancellingSignalHandler() + + +cdef extern from "arrow/io/api.h" namespace "arrow::io" nogil: + cdef enum FileMode" arrow::io::FileMode::type": + FileMode_READ" arrow::io::FileMode::READ" + FileMode_WRITE" arrow::io::FileMode::WRITE" + FileMode_READWRITE" arrow::io::FileMode::READWRITE" + + cdef enum ObjectType" arrow::io::ObjectType::type": + ObjectType_FILE" arrow::io::ObjectType::FILE" + ObjectType_DIRECTORY" arrow::io::ObjectType::DIRECTORY" + + cdef cppclass CIOContext" arrow::io::IOContext": + CIOContext() + CIOContext(CStopToken) + CIOContext(CMemoryPool*) + CIOContext(CMemoryPool*, CStopToken) + + CIOContext c_default_io_context "arrow::io::default_io_context"() + int GetIOThreadPoolCapacity() + CStatus SetIOThreadPoolCapacity(int threads) + + cdef cppclass 
FileStatistics: + int64_t size + ObjectType kind + + cdef cppclass FileInterface: + CStatus Close() + CResult[int64_t] Tell() + FileMode mode() + c_bool closed() + + cdef cppclass Readable: + # put overload under a different name to avoid cython bug with multiple + # layers of inheritance + CResult[shared_ptr[CBuffer]] ReadBuffer" Read"(int64_t nbytes) + CResult[int64_t] Read(int64_t nbytes, uint8_t* out) + + cdef cppclass Seekable: + CStatus Seek(int64_t position) + + cdef cppclass Writable: + CStatus WriteBuffer" Write"(shared_ptr[CBuffer] data) + CStatus Write(const uint8_t* data, int64_t nbytes) + CStatus Flush() + + cdef cppclass CCacheOptions "arrow::io::CacheOptions": + int64_t hole_size_limit + int64_t range_size_limit + c_bool lazy + int64_t prefetch_limit + c_bool Equals "operator==" (CCacheOptions other) + + @staticmethod + CCacheOptions MakeFromNetworkMetrics(int64_t time_to_first_byte_millis, + int64_t transfer_bandwidth_mib_per_sec, + double ideal_bandwidth_utilization_frac, + int64_t max_ideal_request_size_mib) + + @staticmethod + CCacheOptions LazyDefaults() + + cdef cppclass COutputStream" arrow::io::OutputStream"(FileInterface, + Writable): + pass + + cdef cppclass CInputStream" arrow::io::InputStream"(FileInterface, + Readable): + CResult[shared_ptr[const CKeyValueMetadata]] ReadMetadata() + + cdef cppclass CRandomAccessFile" arrow::io::RandomAccessFile"(CInputStream, + Seekable): + CResult[int64_t] GetSize() + + @staticmethod + CResult[shared_ptr[CInputStream]] GetStream( + shared_ptr[CRandomAccessFile] file, + int64_t file_offset, + int64_t nbytes) + + CResult[int64_t] ReadAt(int64_t position, int64_t nbytes, + uint8_t* buffer) + CResult[shared_ptr[CBuffer]] ReadAt(int64_t position, int64_t nbytes) + c_bool supports_zero_copy() + + cdef cppclass WritableFile(COutputStream, Seekable): + CStatus WriteAt(int64_t position, const uint8_t* data, + int64_t nbytes) + + cdef cppclass ReadWriteFileInterface(CRandomAccessFile, + WritableFile): + pass + + cdef cppclass CIOFileSystem" arrow::io::FileSystem": + CStatus Stat(const c_string& path, FileStatistics* stat) + + cdef cppclass FileOutputStream(COutputStream): + @staticmethod + CResult[shared_ptr[COutputStream]] Open(const c_string& path) + + @staticmethod + CResult[shared_ptr[COutputStream]] OpenWithAppend" Open"( + const c_string& path, c_bool append) + + int file_descriptor() + + cdef cppclass ReadableFile(CRandomAccessFile): + @staticmethod + CResult[shared_ptr[ReadableFile]] Open(const c_string& path) + + @staticmethod + CResult[shared_ptr[ReadableFile]] Open(const c_string& path, + CMemoryPool* memory_pool) + + int file_descriptor() + + cdef cppclass CMemoryMappedFile \ + " arrow::io::MemoryMappedFile"(ReadWriteFileInterface): + + @staticmethod + CResult[shared_ptr[CMemoryMappedFile]] Create(const c_string& path, + int64_t size) + + @staticmethod + CResult[shared_ptr[CMemoryMappedFile]] Open(const c_string& path, + FileMode mode) + + CStatus Resize(int64_t size) + + int file_descriptor() + + cdef cppclass CCompressedInputStream \ + " arrow::io::CompressedInputStream"(CInputStream): + @staticmethod + CResult[shared_ptr[CCompressedInputStream]] Make( + CCodec* codec, shared_ptr[CInputStream] raw) + + cdef cppclass CCompressedOutputStream \ + " arrow::io::CompressedOutputStream"(COutputStream): + @staticmethod + CResult[shared_ptr[CCompressedOutputStream]] Make( + CCodec* codec, shared_ptr[COutputStream] raw) + + cdef cppclass CBufferedInputStream \ + " arrow::io::BufferedInputStream"(CInputStream): + + @staticmethod + 
CResult[shared_ptr[CBufferedInputStream]] Create( + int64_t buffer_size, CMemoryPool* pool, + shared_ptr[CInputStream] raw) + + CResult[shared_ptr[CInputStream]] Detach() + + cdef cppclass CBufferedOutputStream \ + " arrow::io::BufferedOutputStream"(COutputStream): + + @staticmethod + CResult[shared_ptr[CBufferedOutputStream]] Create( + int64_t buffer_size, CMemoryPool* pool, + shared_ptr[COutputStream] raw) + + CResult[shared_ptr[COutputStream]] Detach() + + cdef cppclass CTransformInputStreamVTable \ + "arrow::py::TransformInputStreamVTable": + CTransformInputStreamVTable() + function[CallbackTransform] transform + + shared_ptr[CInputStream] MakeTransformInputStream \ + "arrow::py::MakeTransformInputStream"( + shared_ptr[CInputStream] wrapped, CTransformInputStreamVTable vtable, + object method_arg) + + shared_ptr[function[StreamWrapFunc]] MakeStreamTransformFunc \ + "arrow::py::MakeStreamTransformFunc"( + CTransformInputStreamVTable vtable, + object method_arg) + + # ---------------------------------------------------------------------- + # HDFS + + CStatus HaveLibHdfs() + CStatus HaveLibHdfs3() + + cdef enum HdfsDriver" arrow::io::HdfsDriver": + HdfsDriver_LIBHDFS" arrow::io::HdfsDriver::LIBHDFS" + HdfsDriver_LIBHDFS3" arrow::io::HdfsDriver::LIBHDFS3" + + cdef cppclass HdfsConnectionConfig: + c_string host + int port + c_string user + c_string kerb_ticket + unordered_map[c_string, c_string] extra_conf + HdfsDriver driver + + cdef cppclass HdfsPathInfo: + ObjectType kind + c_string name + c_string owner + c_string group + int32_t last_modified_time + int32_t last_access_time + int64_t size + int16_t replication + int64_t block_size + int16_t permissions + + cdef cppclass HdfsReadableFile(CRandomAccessFile): + pass + + cdef cppclass HdfsOutputStream(COutputStream): + pass + + cdef cppclass CIOHadoopFileSystem \ + "arrow::io::HadoopFileSystem"(CIOFileSystem): + @staticmethod + CStatus Connect(const HdfsConnectionConfig* config, + shared_ptr[CIOHadoopFileSystem]* client) + + CStatus MakeDirectory(const c_string& path) + + CStatus Delete(const c_string& path, c_bool recursive) + + CStatus Disconnect() + + c_bool Exists(const c_string& path) + + CStatus Chmod(const c_string& path, int mode) + CStatus Chown(const c_string& path, const char* owner, + const char* group) + + CStatus GetCapacity(int64_t* nbytes) + CStatus GetUsed(int64_t* nbytes) + + CStatus ListDirectory(const c_string& path, + vector[HdfsPathInfo]* listing) + + CStatus GetPathInfo(const c_string& path, HdfsPathInfo* info) + + CStatus Rename(const c_string& src, const c_string& dst) + + CStatus OpenReadable(const c_string& path, + shared_ptr[HdfsReadableFile]* handle) + + CStatus OpenWritable(const c_string& path, c_bool append, + int32_t buffer_size, int16_t replication, + int64_t default_block_size, + shared_ptr[HdfsOutputStream]* handle) + + cdef cppclass CBufferReader \ + " arrow::io::BufferReader"(CRandomAccessFile): + CBufferReader(const shared_ptr[CBuffer]& buffer) + CBufferReader(const uint8_t* data, int64_t nbytes) + + cdef cppclass CBufferOutputStream \ + " arrow::io::BufferOutputStream"(COutputStream): + CBufferOutputStream(const shared_ptr[CResizableBuffer]& buffer) + + cdef cppclass CMockOutputStream \ + " arrow::io::MockOutputStream"(COutputStream): + CMockOutputStream() + int64_t GetExtentBytesWritten() + + cdef cppclass CFixedSizeBufferWriter \ + " arrow::io::FixedSizeBufferWriter"(WritableFile): + CFixedSizeBufferWriter(const shared_ptr[CBuffer]& buffer) + + void set_memcopy_threads(int num_threads) + void 
set_memcopy_blocksize(int64_t blocksize) + void set_memcopy_threshold(int64_t threshold) + + +cdef extern from "arrow/ipc/api.h" namespace "arrow::ipc" nogil: + cdef enum MessageType" arrow::ipc::MessageType": + MessageType_SCHEMA" arrow::ipc::MessageType::SCHEMA" + MessageType_RECORD_BATCH" arrow::ipc::MessageType::RECORD_BATCH" + MessageType_DICTIONARY_BATCH \ + " arrow::ipc::MessageType::DICTIONARY_BATCH" + + # TODO: use "cpdef enum class" to automatically get a Python wrapper? + # See + # https://github.com/cython/cython/commit/2c7c22f51405299a4e247f78edf52957d30cf71d#diff-61c1365c0f761a8137754bb3a73bfbf7 + ctypedef enum CMetadataVersion" arrow::ipc::MetadataVersion": + CMetadataVersion_V1" arrow::ipc::MetadataVersion::V1" + CMetadataVersion_V2" arrow::ipc::MetadataVersion::V2" + CMetadataVersion_V3" arrow::ipc::MetadataVersion::V3" + CMetadataVersion_V4" arrow::ipc::MetadataVersion::V4" + CMetadataVersion_V5" arrow::ipc::MetadataVersion::V5" + + cdef cppclass CIpcWriteOptions" arrow::ipc::IpcWriteOptions": + c_bool allow_64bit + int max_recursion_depth + int32_t alignment + c_bool write_legacy_ipc_format + CMemoryPool* memory_pool + CMetadataVersion metadata_version + shared_ptr[CCodec] codec + c_bool use_threads + c_bool emit_dictionary_deltas + c_bool unify_dictionaries + + CIpcWriteOptions() + CIpcWriteOptions(CIpcWriteOptions) + + @staticmethod + CIpcWriteOptions Defaults() + + cdef cppclass CIpcReadOptions" arrow::ipc::IpcReadOptions": + int max_recursion_depth + CMemoryPool* memory_pool + vector[int] included_fields + c_bool use_threads + c_bool ensure_native_endian + + @staticmethod + CIpcReadOptions Defaults() + + cdef cppclass CIpcWriteStats" arrow::ipc::WriteStats": + int64_t num_messages + int64_t num_record_batches + int64_t num_dictionary_batches + int64_t num_dictionary_deltas + int64_t num_replaced_dictionaries + + cdef cppclass CIpcReadStats" arrow::ipc::ReadStats": + int64_t num_messages + int64_t num_record_batches + int64_t num_dictionary_batches + int64_t num_dictionary_deltas + int64_t num_replaced_dictionaries + + cdef cppclass CDictionaryMemo" arrow::ipc::DictionaryMemo": + pass + + cdef cppclass CIpcPayload" arrow::ipc::IpcPayload": + MessageType type + shared_ptr[CBuffer] metadata + vector[shared_ptr[CBuffer]] body_buffers + int64_t body_length + + cdef cppclass CMessage" arrow::ipc::Message": + CResult[unique_ptr[CMessage]] Open(shared_ptr[CBuffer] metadata, + shared_ptr[CBuffer] body) + + shared_ptr[CBuffer] body() + + c_bool Equals(const CMessage& other) + + shared_ptr[CBuffer] metadata() + CMetadataVersion metadata_version() + MessageType type() + + CStatus SerializeTo(COutputStream* stream, + const CIpcWriteOptions& options, + int64_t* output_length) + + c_string FormatMessageType(MessageType type) + + cdef cppclass CMessageReader" arrow::ipc::MessageReader": + @staticmethod + unique_ptr[CMessageReader] Open(const shared_ptr[CInputStream]& stream) + + CResult[unique_ptr[CMessage]] ReadNextMessage() + + cdef cppclass CRecordBatchWriter" arrow::ipc::RecordBatchWriter": + CStatus Close() + CStatus WriteRecordBatch(const CRecordBatch& batch) + CStatus WriteRecordBatch( + const CRecordBatch& batch, + const shared_ptr[const CKeyValueMetadata]& metadata) + CStatus WriteTable(const CTable& table, int64_t max_chunksize) + + CIpcWriteStats stats() + + cdef cppclass CRecordBatchStreamReader \ + " arrow::ipc::RecordBatchStreamReader"(CRecordBatchReader): + @staticmethod + CResult[shared_ptr[CRecordBatchReader]] Open( + const shared_ptr[CInputStream], const 
CIpcReadOptions&) + + @staticmethod + CResult[shared_ptr[CRecordBatchReader]] Open2" Open"( + unique_ptr[CMessageReader] message_reader, + const CIpcReadOptions& options) + + CIpcReadStats stats() + + cdef cppclass CRecordBatchFileReader \ + " arrow::ipc::RecordBatchFileReader": + @staticmethod + CResult[shared_ptr[CRecordBatchFileReader]] Open( + CRandomAccessFile* file, + const CIpcReadOptions& options) + + @staticmethod + CResult[shared_ptr[CRecordBatchFileReader]] Open2" Open"( + CRandomAccessFile* file, int64_t footer_offset, + const CIpcReadOptions& options) + + shared_ptr[CSchema] schema() + + int num_record_batches() + + CResult[shared_ptr[CRecordBatch]] ReadRecordBatch(int i) + + CResult[CRecordBatchWithMetadata] ReadRecordBatchWithCustomMetadata(int i) + + CIpcReadStats stats() + + CResult[shared_ptr[CRecordBatchWriter]] MakeStreamWriter( + shared_ptr[COutputStream] sink, const shared_ptr[CSchema]& schema, + CIpcWriteOptions& options) + + CResult[shared_ptr[CRecordBatchWriter]] MakeFileWriter( + shared_ptr[COutputStream] sink, const shared_ptr[CSchema]& schema, + CIpcWriteOptions& options) + + CResult[unique_ptr[CMessage]] ReadMessage(CInputStream* stream, + CMemoryPool* pool) + + CStatus GetRecordBatchSize(const CRecordBatch& batch, int64_t* size) + CStatus GetTensorSize(const CTensor& tensor, int64_t* size) + + CStatus WriteTensor(const CTensor& tensor, COutputStream* dst, + int32_t* metadata_length, + int64_t* body_length) + + CResult[shared_ptr[CTensor]] ReadTensor(CInputStream* stream) + + CResult[shared_ptr[CRecordBatch]] ReadRecordBatch( + const CMessage& message, const shared_ptr[CSchema]& schema, + CDictionaryMemo* dictionary_memo, + const CIpcReadOptions& options) + + CResult[shared_ptr[CBuffer]] SerializeSchema( + const CSchema& schema, CMemoryPool* pool) + + CResult[shared_ptr[CBuffer]] SerializeRecordBatch( + const CRecordBatch& schema, const CIpcWriteOptions& options) + + CResult[shared_ptr[CSchema]] ReadSchema(const CMessage& message, + CDictionaryMemo* dictionary_memo) + + CResult[shared_ptr[CSchema]] ReadSchema(CInputStream* stream, + CDictionaryMemo* dictionary_memo) + + CResult[shared_ptr[CRecordBatch]] ReadRecordBatch( + const shared_ptr[CSchema]& schema, + CDictionaryMemo* dictionary_memo, + const CIpcReadOptions& options, + CInputStream* stream) + + CStatus AlignStream(CInputStream* stream, int64_t alignment) + CStatus AlignStream(COutputStream* stream, int64_t alignment) + + cdef CStatus GetRecordBatchPayload \ + " arrow::ipc::GetRecordBatchPayload"( + const CRecordBatch& batch, + const CIpcWriteOptions& options, + CIpcPayload* out) + + +cdef extern from "arrow/util/value_parsing.h" namespace "arrow" nogil: + cdef cppclass CTimestampParser" arrow::TimestampParser": + const char* kind() const + const char* format() const + + @staticmethod + shared_ptr[CTimestampParser] MakeStrptime(c_string format) + + @staticmethod + shared_ptr[CTimestampParser] MakeISO8601() + + +cdef extern from "arrow/csv/api.h" namespace "arrow::csv" nogil: + + cdef cppclass CCSVInvalidRow" arrow::csv::InvalidRow": + int32_t expected_columns + int32_t actual_columns + int64_t number + c_string text + + ctypedef enum CInvalidRowResult" arrow::csv::InvalidRowResult": + CInvalidRowResult_Error" arrow::csv::InvalidRowResult::Error" + CInvalidRowResult_Skip" arrow::csv::InvalidRowResult::Skip" + + ctypedef CInvalidRowResult CInvalidRowHandler(const CCSVInvalidRow&) + + +cdef extern from "arrow/csv/api.h" namespace "arrow::csv" nogil: + + ctypedef enum CQuotingStyle 
"arrow::csv::QuotingStyle": + CQuotingStyle_Needed "arrow::csv::QuotingStyle::Needed" + CQuotingStyle_AllValid "arrow::csv::QuotingStyle::AllValid" + CQuotingStyle_None "arrow::csv::QuotingStyle::None" + + cdef cppclass CCSVParseOptions" arrow::csv::ParseOptions": + unsigned char delimiter + c_bool quoting + unsigned char quote_char + c_bool double_quote + c_bool escaping + unsigned char escape_char + c_bool newlines_in_values + c_bool ignore_empty_lines + function[CInvalidRowHandler] invalid_row_handler + + CCSVParseOptions() + CCSVParseOptions(CCSVParseOptions) + + @staticmethod + CCSVParseOptions Defaults() + + CStatus Validate() + + cdef cppclass CCSVConvertOptions" arrow::csv::ConvertOptions": + c_bool check_utf8 + unordered_map[c_string, shared_ptr[CDataType]] column_types + vector[c_string] null_values + vector[c_string] true_values + vector[c_string] false_values + c_bool strings_can_be_null + c_bool quoted_strings_can_be_null + vector[shared_ptr[CTimestampParser]] timestamp_parsers + + c_bool auto_dict_encode + int32_t auto_dict_max_cardinality + unsigned char decimal_point + + vector[c_string] include_columns + c_bool include_missing_columns + + CCSVConvertOptions() + CCSVConvertOptions(CCSVConvertOptions) + + @staticmethod + CCSVConvertOptions Defaults() + + CStatus Validate() + + cdef cppclass CCSVReadOptions" arrow::csv::ReadOptions": + c_bool use_threads + int32_t block_size + int32_t skip_rows + int32_t skip_rows_after_names + vector[c_string] column_names + c_bool autogenerate_column_names + + CCSVReadOptions() + CCSVReadOptions(CCSVReadOptions) + + @staticmethod + CCSVReadOptions Defaults() + + CStatus Validate() + + cdef cppclass CCSVWriteOptions" arrow::csv::WriteOptions": + c_bool include_header + int32_t batch_size + unsigned char delimiter + CQuotingStyle quoting_style + CIOContext io_context + + CCSVWriteOptions() + CCSVWriteOptions(CCSVWriteOptions) + + @staticmethod + CCSVWriteOptions Defaults() + + CStatus Validate() + + cdef cppclass CCSVReader" arrow::csv::TableReader": + @staticmethod + CResult[shared_ptr[CCSVReader]] Make( + CIOContext, shared_ptr[CInputStream], + CCSVReadOptions, CCSVParseOptions, CCSVConvertOptions) + + CResult[shared_ptr[CTable]] Read() + + cdef cppclass CCSVStreamingReader" arrow::csv::StreamingReader"( + CRecordBatchReader): + @staticmethod + CResult[shared_ptr[CCSVStreamingReader]] Make( + CIOContext, shared_ptr[CInputStream], + CCSVReadOptions, CCSVParseOptions, CCSVConvertOptions) + + cdef CStatus WriteCSV(CTable&, CCSVWriteOptions& options, COutputStream*) + cdef CStatus WriteCSV( + CRecordBatch&, CCSVWriteOptions& options, COutputStream*) + cdef CResult[shared_ptr[CRecordBatchWriter]] MakeCSVWriter( + shared_ptr[COutputStream], shared_ptr[CSchema], + CCSVWriteOptions& options) + + +cdef extern from "arrow/json/options.h" nogil: + + ctypedef enum CUnexpectedFieldBehavior \ + "arrow::json::UnexpectedFieldBehavior": + CUnexpectedFieldBehavior_Ignore \ + "arrow::json::UnexpectedFieldBehavior::Ignore" + CUnexpectedFieldBehavior_Error \ + "arrow::json::UnexpectedFieldBehavior::Error" + CUnexpectedFieldBehavior_InferType \ + "arrow::json::UnexpectedFieldBehavior::InferType" + + cdef cppclass CJSONReadOptions" arrow::json::ReadOptions": + c_bool use_threads + int32_t block_size + + @staticmethod + CJSONReadOptions Defaults() + + cdef cppclass CJSONParseOptions" arrow::json::ParseOptions": + shared_ptr[CSchema] explicit_schema + c_bool newlines_in_values + CUnexpectedFieldBehavior unexpected_field_behavior + + @staticmethod + 
CJSONParseOptions Defaults() + + +cdef extern from "arrow/json/reader.h" namespace "arrow::json" nogil: + + cdef cppclass CJSONReader" arrow::json::TableReader": + @staticmethod + CResult[shared_ptr[CJSONReader]] Make( + CMemoryPool*, shared_ptr[CInputStream], + CJSONReadOptions, CJSONParseOptions) + + CResult[shared_ptr[CTable]] Read() + + +cdef extern from "arrow/util/thread_pool.h" namespace "arrow::internal" nogil: + + cdef cppclass CExecutor "arrow::internal::Executor": + pass + + cdef cppclass CThreadPool "arrow::internal::ThreadPool"(CExecutor): + @staticmethod + CResult[shared_ptr[CThreadPool]] Make(int threads) + + CThreadPool* GetCpuThreadPool() + + +cdef extern from "arrow/compute/api.h" namespace "arrow::compute" nogil: + + cdef cppclass CExecBatch "arrow::compute::ExecBatch": + vector[CDatum] values + int64_t length + + cdef cppclass CExecContext" arrow::compute::ExecContext": + CExecContext() + CExecContext(CMemoryPool* pool) + CExecContext(CMemoryPool* pool, CExecutor* exc) + + CMemoryPool* memory_pool() const + CExecutor* executor() + + cdef cppclass CKernelSignature" arrow::compute::KernelSignature": + c_string ToString() const + + cdef cppclass CKernel" arrow::compute::Kernel": + shared_ptr[CKernelSignature] signature + + cdef cppclass CArrayKernel" arrow::compute::ArrayKernel"(CKernel): + pass + + cdef cppclass CScalarKernel" arrow::compute::ScalarKernel"(CArrayKernel): + pass + + cdef cppclass CVectorKernel" arrow::compute::VectorKernel"(CArrayKernel): + pass + + cdef cppclass CScalarAggregateKernel \ + " arrow::compute::ScalarAggregateKernel"(CKernel): + pass + + cdef cppclass CHashAggregateKernel \ + " arrow::compute::HashAggregateKernel"(CKernel): + pass + + cdef cppclass CArity" arrow::compute::Arity": + int num_args + c_bool is_varargs + + CArity() + + CArity(int num_args, c_bool is_varargs) + + cdef enum FunctionKind" arrow::compute::Function::Kind": + FunctionKind_SCALAR" arrow::compute::Function::SCALAR" + FunctionKind_VECTOR" arrow::compute::Function::VECTOR" + FunctionKind_SCALAR_AGGREGATE \ + " arrow::compute::Function::SCALAR_AGGREGATE" + FunctionKind_HASH_AGGREGATE \ + " arrow::compute::Function::HASH_AGGREGATE" + FunctionKind_META \ + " arrow::compute::Function::META" + + cdef cppclass CFunctionDoc" arrow::compute::FunctionDoc": + c_string summary + c_string description + vector[c_string] arg_names + c_string options_class + c_bool options_required + + cdef cppclass CFunctionOptionsType" arrow::compute::FunctionOptionsType": + const char* type_name() const + + cdef cppclass CFunctionOptions" arrow::compute::FunctionOptions": + const CFunctionOptionsType* options_type() const + const char* type_name() const + c_bool Equals(const CFunctionOptions& other) const + c_string ToString() const + unique_ptr[CFunctionOptions] Copy() const + CResult[shared_ptr[CBuffer]] Serialize() const + + @staticmethod + CResult[unique_ptr[CFunctionOptions]] Deserialize( + const c_string& type_name, const CBuffer& buffer) + + cdef cppclass CFunction" arrow::compute::Function": + const c_string& name() const + FunctionKind kind() const + const CArity& arity() const + const CFunctionDoc& doc() const + int num_kernels() const + CResult[CDatum] Execute(const vector[CDatum]& args, + const CFunctionOptions* options, + CExecContext* ctx) const + CResult[CDatum] Execute(const CExecBatch& args, + const CFunctionOptions* options, + CExecContext* ctx) const + + cdef cppclass CScalarFunction" arrow::compute::ScalarFunction"(CFunction): + vector[const CScalarKernel*] kernels() const + + cdef 
cppclass CVectorFunction" arrow::compute::VectorFunction"(CFunction): + vector[const CVectorKernel*] kernels() const + + cdef cppclass CScalarAggregateFunction \ + " arrow::compute::ScalarAggregateFunction"(CFunction): + vector[const CScalarAggregateKernel*] kernels() const + + cdef cppclass CHashAggregateFunction \ + " arrow::compute::HashAggregateFunction"(CFunction): + vector[const CHashAggregateKernel*] kernels() const + + cdef cppclass CMetaFunction" arrow::compute::MetaFunction"(CFunction): + pass + + cdef cppclass CFunctionRegistry" arrow::compute::FunctionRegistry": + CResult[shared_ptr[CFunction]] GetFunction( + const c_string& name) const + vector[c_string] GetFunctionNames() const + int num_functions() const + + CFunctionRegistry* GetFunctionRegistry() + + cdef cppclass CElementWiseAggregateOptions \ + "arrow::compute::ElementWiseAggregateOptions"(CFunctionOptions): + CElementWiseAggregateOptions(c_bool skip_nulls) + c_bool skip_nulls + + ctypedef enum CRoundMode \ + "arrow::compute::RoundMode": + CRoundMode_DOWN \ + "arrow::compute::RoundMode::DOWN" + CRoundMode_UP \ + "arrow::compute::RoundMode::UP" + CRoundMode_TOWARDS_ZERO \ + "arrow::compute::RoundMode::TOWARDS_ZERO" + CRoundMode_TOWARDS_INFINITY \ + "arrow::compute::RoundMode::TOWARDS_INFINITY" + CRoundMode_HALF_DOWN \ + "arrow::compute::RoundMode::HALF_DOWN" + CRoundMode_HALF_UP \ + "arrow::compute::RoundMode::HALF_UP" + CRoundMode_HALF_TOWARDS_ZERO \ + "arrow::compute::RoundMode::HALF_TOWARDS_ZERO" + CRoundMode_HALF_TOWARDS_INFINITY \ + "arrow::compute::RoundMode::HALF_TOWARDS_INFINITY" + CRoundMode_HALF_TO_EVEN \ + "arrow::compute::RoundMode::HALF_TO_EVEN" + CRoundMode_HALF_TO_ODD \ + "arrow::compute::RoundMode::HALF_TO_ODD" + + cdef cppclass CRoundOptions \ + "arrow::compute::RoundOptions"(CFunctionOptions): + CRoundOptions(int64_t ndigits, CRoundMode round_mode) + int64_t ndigits + CRoundMode round_mode + + cdef cppclass CRoundBinaryOptions \ + "arrow::compute::RoundBinaryOptions"(CFunctionOptions): + CRoundBinaryOptions(CRoundMode round_mode) + CRoundMode round_mode + + ctypedef enum CCalendarUnit \ + "arrow::compute::CalendarUnit": + CCalendarUnit_NANOSECOND \ + "arrow::compute::CalendarUnit::NANOSECOND" + CCalendarUnit_MICROSECOND \ + "arrow::compute::CalendarUnit::MICROSECOND" + CCalendarUnit_MILLISECOND \ + "arrow::compute::CalendarUnit::MILLISECOND" + CCalendarUnit_SECOND \ + "arrow::compute::CalendarUnit::SECOND" + CCalendarUnit_MINUTE \ + "arrow::compute::CalendarUnit::MINUTE" + CCalendarUnit_HOUR \ + "arrow::compute::CalendarUnit::HOUR" + CCalendarUnit_DAY \ + "arrow::compute::CalendarUnit::DAY" + CCalendarUnit_WEEK \ + "arrow::compute::CalendarUnit::WEEK" + CCalendarUnit_MONTH \ + "arrow::compute::CalendarUnit::MONTH" + CCalendarUnit_QUARTER \ + "arrow::compute::CalendarUnit::QUARTER" + CCalendarUnit_YEAR \ + "arrow::compute::CalendarUnit::YEAR" + + cdef cppclass CRoundTemporalOptions \ + "arrow::compute::RoundTemporalOptions"(CFunctionOptions): + CRoundTemporalOptions(int multiple, CCalendarUnit unit, + c_bool week_starts_monday, + c_bool ceil_is_strictly_greater, + c_bool calendar_based_origin) + int multiple + CCalendarUnit unit + c_bool week_starts_monday + c_bool ceil_is_strictly_greater + c_bool calendar_based_origin + + cdef cppclass CRoundToMultipleOptions \ + "arrow::compute::RoundToMultipleOptions"(CFunctionOptions): + CRoundToMultipleOptions(shared_ptr[CScalar] multiple, CRoundMode round_mode) + shared_ptr[CScalar] multiple + CRoundMode round_mode + + cdef enum CJoinNullHandlingBehavior \ + 
"arrow::compute::JoinOptions::NullHandlingBehavior": + CJoinNullHandlingBehavior_EMIT_NULL \ + "arrow::compute::JoinOptions::EMIT_NULL" + CJoinNullHandlingBehavior_SKIP \ + "arrow::compute::JoinOptions::SKIP" + CJoinNullHandlingBehavior_REPLACE \ + "arrow::compute::JoinOptions::REPLACE" + + cdef cppclass CJoinOptions \ + "arrow::compute::JoinOptions"(CFunctionOptions): + CJoinOptions(CJoinNullHandlingBehavior null_handling, + c_string null_replacement) + CJoinNullHandlingBehavior null_handling + c_string null_replacement + + cdef cppclass CMatchSubstringOptions \ + "arrow::compute::MatchSubstringOptions"(CFunctionOptions): + CMatchSubstringOptions(c_string pattern, c_bool ignore_case) + c_string pattern + c_bool ignore_case + + cdef cppclass CTrimOptions \ + "arrow::compute::TrimOptions"(CFunctionOptions): + CTrimOptions(c_string characters) + c_string characters + + cdef cppclass CPadOptions \ + "arrow::compute::PadOptions"(CFunctionOptions): + CPadOptions(int64_t width, c_string padding) + int64_t width + c_string padding + + cdef cppclass CSliceOptions \ + "arrow::compute::SliceOptions"(CFunctionOptions): + CSliceOptions(int64_t start, int64_t stop, int64_t step) + int64_t start + int64_t stop + int64_t step + + cdef cppclass CListSliceOptions \ + "arrow::compute::ListSliceOptions"(CFunctionOptions): + CListSliceOptions(int64_t start, optional[int64_t] stop, + int64_t step, + optional[c_bool] return_fixed_size_list) + int64_t start + optional[int64_t] stop + int64_t step + optional[c_bool] return_fixed_size_list + + cdef cppclass CSplitOptions \ + "arrow::compute::SplitOptions"(CFunctionOptions): + CSplitOptions(int64_t max_splits, c_bool reverse) + int64_t max_splits + c_bool reverse + + cdef cppclass CSplitPatternOptions \ + "arrow::compute::SplitPatternOptions"(CFunctionOptions): + CSplitPatternOptions(c_string pattern, int64_t max_splits, + c_bool reverse) + int64_t max_splits + c_bool reverse + c_string pattern + + cdef cppclass CReplaceSliceOptions \ + "arrow::compute::ReplaceSliceOptions"(CFunctionOptions): + CReplaceSliceOptions(int64_t start, int64_t stop, c_string replacement) + int64_t start + int64_t stop + c_string replacement + + cdef cppclass CReplaceSubstringOptions \ + "arrow::compute::ReplaceSubstringOptions"(CFunctionOptions): + CReplaceSubstringOptions(c_string pattern, c_string replacement, + int64_t max_replacements) + c_string pattern + c_string replacement + int64_t max_replacements + + cdef cppclass CExtractRegexOptions \ + "arrow::compute::ExtractRegexOptions"(CFunctionOptions): + CExtractRegexOptions(c_string pattern) + c_string pattern + + cdef cppclass CCastOptions" arrow::compute::CastOptions"(CFunctionOptions): + CCastOptions() + CCastOptions(c_bool safe) + CCastOptions(CCastOptions options) + + @staticmethod + CCastOptions Safe() + + @staticmethod + CCastOptions Unsafe() + shared_ptr[CDataType] to_type + c_bool allow_int_overflow + c_bool allow_time_truncate + c_bool allow_time_overflow + c_bool allow_decimal_truncate + c_bool allow_float_truncate + c_bool allow_invalid_utf8 + + cdef enum CFilterNullSelectionBehavior \ + "arrow::compute::FilterOptions::NullSelectionBehavior": + CFilterNullSelectionBehavior_DROP \ + "arrow::compute::FilterOptions::DROP" + CFilterNullSelectionBehavior_EMIT_NULL \ + "arrow::compute::FilterOptions::EMIT_NULL" + + cdef cppclass CFilterOptions \ + " arrow::compute::FilterOptions"(CFunctionOptions): + CFilterOptions() + CFilterOptions(CFilterNullSelectionBehavior null_selection_behavior) + CFilterNullSelectionBehavior 
null_selection_behavior + + cdef enum CDictionaryEncodeNullEncodingBehavior \ + "arrow::compute::DictionaryEncodeOptions::NullEncodingBehavior": + CDictionaryEncodeNullEncodingBehavior_ENCODE \ + "arrow::compute::DictionaryEncodeOptions::ENCODE" + CDictionaryEncodeNullEncodingBehavior_MASK \ + "arrow::compute::DictionaryEncodeOptions::MASK" + + cdef cppclass CDictionaryEncodeOptions \ + "arrow::compute::DictionaryEncodeOptions"(CFunctionOptions): + CDictionaryEncodeOptions( + CDictionaryEncodeNullEncodingBehavior null_encoding) + CDictionaryEncodeNullEncodingBehavior null_encoding + + cdef cppclass CRunEndEncodeOptions \ + "arrow::compute::RunEndEncodeOptions"(CFunctionOptions): + CRunEndEncodeOptions() + CRunEndEncodeOptions(shared_ptr[CDataType] run_end_type) + shared_ptr[CDataType] run_end_type + + cdef cppclass CTakeOptions \ + " arrow::compute::TakeOptions"(CFunctionOptions): + CTakeOptions(c_bool boundscheck) + c_bool boundscheck + + cdef cppclass CStrptimeOptions \ + "arrow::compute::StrptimeOptions"(CFunctionOptions): + CStrptimeOptions(c_string format, TimeUnit unit, c_bool raise_error) + c_string format + TimeUnit unit + c_bool raise_error + + cdef cppclass CStrftimeOptions \ + "arrow::compute::StrftimeOptions"(CFunctionOptions): + CStrftimeOptions(c_string format, c_string locale) + c_string format + c_string locale + + cdef cppclass CDayOfWeekOptions \ + "arrow::compute::DayOfWeekOptions"(CFunctionOptions): + CDayOfWeekOptions(c_bool count_from_zero, uint32_t week_start) + c_bool count_from_zero + uint32_t week_start + + cdef enum CAssumeTimezoneAmbiguous \ + "arrow::compute::AssumeTimezoneOptions::Ambiguous": + CAssumeTimezoneAmbiguous_AMBIGUOUS_RAISE \ + "arrow::compute::AssumeTimezoneOptions::AMBIGUOUS_RAISE" + CAssumeTimezoneAmbiguous_AMBIGUOUS_EARLIEST \ + "arrow::compute::AssumeTimezoneOptions::AMBIGUOUS_EARLIEST" + CAssumeTimezoneAmbiguous_AMBIGUOUS_LATEST \ + "arrow::compute::AssumeTimezoneOptions::AMBIGUOUS_LATEST" + + cdef enum CAssumeTimezoneNonexistent \ + "arrow::compute::AssumeTimezoneOptions::Nonexistent": + CAssumeTimezoneNonexistent_NONEXISTENT_RAISE \ + "arrow::compute::AssumeTimezoneOptions::NONEXISTENT_RAISE" + CAssumeTimezoneNonexistent_NONEXISTENT_EARLIEST \ + "arrow::compute::AssumeTimezoneOptions::NONEXISTENT_EARLIEST" + CAssumeTimezoneNonexistent_NONEXISTENT_LATEST \ + "arrow::compute::AssumeTimezoneOptions::NONEXISTENT_LATEST" + + cdef cppclass CAssumeTimezoneOptions \ + "arrow::compute::AssumeTimezoneOptions"(CFunctionOptions): + CAssumeTimezoneOptions(c_string timezone, + CAssumeTimezoneAmbiguous ambiguous, + CAssumeTimezoneNonexistent nonexistent) + c_string timezone + CAssumeTimezoneAmbiguous ambiguous + CAssumeTimezoneNonexistent nonexistent + + cdef cppclass CWeekOptions \ + "arrow::compute::WeekOptions"(CFunctionOptions): + CWeekOptions(c_bool week_starts_monday, c_bool count_from_zero, + c_bool first_week_is_fully_in_year) + c_bool week_starts_monday + c_bool count_from_zero + c_bool first_week_is_fully_in_year + + cdef cppclass CNullOptions \ + "arrow::compute::NullOptions"(CFunctionOptions): + CNullOptions(c_bool nan_is_null) + c_bool nan_is_null + + cdef cppclass CVarianceOptions \ + "arrow::compute::VarianceOptions"(CFunctionOptions): + CVarianceOptions(int ddof, c_bool skip_nulls, uint32_t min_count) + int ddof + c_bool skip_nulls + uint32_t min_count + + cdef cppclass CScalarAggregateOptions \ + "arrow::compute::ScalarAggregateOptions"(CFunctionOptions): + CScalarAggregateOptions(c_bool skip_nulls, uint32_t min_count) + c_bool skip_nulls 
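+        # (note, assumed from the Arrow C++ ScalarAggregateOptions docs:
+        # skip_nulls above controls whether null inputs are ignored, and
+        # min_count below is the minimum number of non-null values
+        # required for the aggregate result itself to be non-null)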
+ uint32_t min_count + + cdef enum CCountMode "arrow::compute::CountOptions::CountMode": + CCountMode_ONLY_VALID "arrow::compute::CountOptions::ONLY_VALID" + CCountMode_ONLY_NULL "arrow::compute::CountOptions::ONLY_NULL" + CCountMode_ALL "arrow::compute::CountOptions::ALL" + + cdef cppclass CCountOptions \ + "arrow::compute::CountOptions"(CFunctionOptions): + CCountOptions(CCountMode mode) + CCountMode mode + + cdef cppclass CModeOptions \ + "arrow::compute::ModeOptions"(CFunctionOptions): + CModeOptions(int64_t n, c_bool skip_nulls, uint32_t min_count) + int64_t n + c_bool skip_nulls + uint32_t min_count + + cdef cppclass CIndexOptions \ + "arrow::compute::IndexOptions"(CFunctionOptions): + CIndexOptions(shared_ptr[CScalar] value) + shared_ptr[CScalar] value + + cdef cppclass CAggregate "arrow::compute::Aggregate": + c_string function + shared_ptr[CFunctionOptions] options + vector[CFieldRef] target + c_string name + + cdef enum CMapLookupOccurrence \ + "arrow::compute::MapLookupOptions::Occurrence": + CMapLookupOccurrence_ALL "arrow::compute::MapLookupOptions::ALL" + CMapLookupOccurrence_FIRST "arrow::compute::MapLookupOptions::FIRST" + CMapLookupOccurrence_LAST "arrow::compute::MapLookupOptions::LAST" + + cdef cppclass CMapLookupOptions \ + "arrow::compute::MapLookupOptions"(CFunctionOptions): + CMapLookupOptions(shared_ptr[CScalar] query_key, + CMapLookupOccurrence occurrence) + CMapLookupOccurrence occurrence + shared_ptr[CScalar] query_key + + cdef cppclass CMakeStructOptions \ + "arrow::compute::MakeStructOptions"(CFunctionOptions): + CMakeStructOptions(vector[c_string] n, + vector[c_bool] r, + vector[shared_ptr[const CKeyValueMetadata]] m) + CMakeStructOptions(vector[c_string] n) + vector[c_string] field_names + vector[c_bool] field_nullability + vector[shared_ptr[const CKeyValueMetadata]] field_metadata + + cdef cppclass CStructFieldOptions \ + "arrow::compute::StructFieldOptions"(CFunctionOptions): + CStructFieldOptions(vector[int] indices) + CStructFieldOptions(CFieldRef field_ref) + vector[int] indices + CFieldRef field_ref + + ctypedef enum CSortOrder" arrow::compute::SortOrder": + CSortOrder_Ascending \ + "arrow::compute::SortOrder::Ascending" + CSortOrder_Descending \ + "arrow::compute::SortOrder::Descending" + + ctypedef enum CNullPlacement" arrow::compute::NullPlacement": + CNullPlacement_AtStart \ + "arrow::compute::NullPlacement::AtStart" + CNullPlacement_AtEnd \ + "arrow::compute::NullPlacement::AtEnd" + + cdef cppclass CPartitionNthOptions \ + "arrow::compute::PartitionNthOptions"(CFunctionOptions): + CPartitionNthOptions(int64_t pivot, CNullPlacement) + int64_t pivot + CNullPlacement null_placement + + cdef cppclass CCumulativeOptions \ + "arrow::compute::CumulativeOptions"(CFunctionOptions): + CCumulativeOptions(c_bool skip_nulls) + CCumulativeOptions(shared_ptr[CScalar] start, c_bool skip_nulls) + optional[shared_ptr[CScalar]] start + c_bool skip_nulls + + cdef cppclass CPairwiseOptions \ + "arrow::compute::PairwiseOptions"(CFunctionOptions): + CPairwiseOptions(int64_t period) + int64_t period + + cdef cppclass CArraySortOptions \ + "arrow::compute::ArraySortOptions"(CFunctionOptions): + CArraySortOptions(CSortOrder, CNullPlacement) + CSortOrder order + CNullPlacement null_placement + + cdef cppclass CSortKey" arrow::compute::SortKey": + CSortKey(CFieldRef target, CSortOrder order) + CFieldRef target + CSortOrder order + + cdef cppclass COrdering" arrow::compute::Ordering": + COrdering(vector[CSortKey] sort_keys, CNullPlacement null_placement) + + cdef cppclass 
CSortOptions \ + "arrow::compute::SortOptions"(CFunctionOptions): + CSortOptions(vector[CSortKey] sort_keys, CNullPlacement) + vector[CSortKey] sort_keys + CNullPlacement null_placement + + cdef cppclass CSelectKOptions \ + "arrow::compute::SelectKOptions"(CFunctionOptions): + CSelectKOptions(int64_t k, vector[CSortKey] sort_keys) + int64_t k + vector[CSortKey] sort_keys + + cdef enum CQuantileInterp \ + "arrow::compute::QuantileOptions::Interpolation": + CQuantileInterp_LINEAR "arrow::compute::QuantileOptions::LINEAR" + CQuantileInterp_LOWER "arrow::compute::QuantileOptions::LOWER" + CQuantileInterp_HIGHER "arrow::compute::QuantileOptions::HIGHER" + CQuantileInterp_NEAREST "arrow::compute::QuantileOptions::NEAREST" + CQuantileInterp_MIDPOINT "arrow::compute::QuantileOptions::MIDPOINT" + + cdef cppclass CQuantileOptions \ + "arrow::compute::QuantileOptions"(CFunctionOptions): + CQuantileOptions(vector[double] q, CQuantileInterp interpolation, + c_bool skip_nulls, uint32_t min_count) + vector[double] q + CQuantileInterp interpolation + c_bool skip_nulls + uint32_t min_count + + cdef cppclass CTDigestOptions \ + "arrow::compute::TDigestOptions"(CFunctionOptions): + CTDigestOptions(vector[double] q, + uint32_t delta, uint32_t buffer_size, + c_bool skip_nulls, uint32_t min_count) + vector[double] q + uint32_t delta + uint32_t buffer_size + c_bool skip_nulls + uint32_t min_count + + cdef enum CUtf8NormalizeForm \ + "arrow::compute::Utf8NormalizeOptions::Form": + CUtf8NormalizeForm_NFC "arrow::compute::Utf8NormalizeOptions::NFC" + CUtf8NormalizeForm_NFKC "arrow::compute::Utf8NormalizeOptions::NFKC" + CUtf8NormalizeForm_NFD "arrow::compute::Utf8NormalizeOptions::NFD" + CUtf8NormalizeForm_NFKD "arrow::compute::Utf8NormalizeOptions::NFKD" + + cdef cppclass CUtf8NormalizeOptions \ + "arrow::compute::Utf8NormalizeOptions"(CFunctionOptions): + CUtf8NormalizeOptions(CUtf8NormalizeForm form) + CUtf8NormalizeForm form + + cdef cppclass CSetLookupOptions \ + "arrow::compute::SetLookupOptions"(CFunctionOptions): + CSetLookupOptions(CDatum value_set, c_bool skip_nulls) + CDatum value_set + c_bool skip_nulls + + cdef cppclass CRandomOptions \ + "arrow::compute::RandomOptions"(CFunctionOptions): + CRandomOptions(CRandomOptions) + + @staticmethod + CRandomOptions FromSystemRandom() + + @staticmethod + CRandomOptions FromSeed(uint64_t seed) + + cdef enum CRankOptionsTiebreaker \ + "arrow::compute::RankOptions::Tiebreaker": + CRankOptionsTiebreaker_Min "arrow::compute::RankOptions::Min" + CRankOptionsTiebreaker_Max "arrow::compute::RankOptions::Max" + CRankOptionsTiebreaker_First "arrow::compute::RankOptions::First" + CRankOptionsTiebreaker_Dense "arrow::compute::RankOptions::Dense" + + cdef cppclass CRankOptions \ + "arrow::compute::RankOptions"(CFunctionOptions): + CRankOptions(vector[CSortKey] sort_keys, CNullPlacement, + CRankOptionsTiebreaker tiebreaker) + vector[CSortKey] sort_keys + CNullPlacement null_placement + CRankOptionsTiebreaker tiebreaker + + cdef enum DatumType" arrow::Datum::type": + DatumType_NONE" arrow::Datum::NONE" + DatumType_SCALAR" arrow::Datum::SCALAR" + DatumType_ARRAY" arrow::Datum::ARRAY" + DatumType_CHUNKED_ARRAY" arrow::Datum::CHUNKED_ARRAY" + DatumType_RECORD_BATCH" arrow::Datum::RECORD_BATCH" + DatumType_TABLE" arrow::Datum::TABLE" + DatumType_COLLECTION" arrow::Datum::COLLECTION" + + cdef cppclass CDatum" arrow::Datum": + CDatum() + CDatum(const shared_ptr[CArray]& value) + CDatum(const shared_ptr[CChunkedArray]& value) + CDatum(const shared_ptr[CScalar]& value) + CDatum(const 
shared_ptr[CRecordBatch]& value) + CDatum(const shared_ptr[CTable]& value) + + DatumType kind() const + c_string ToString() const + + const shared_ptr[CArrayData]& array() const + const shared_ptr[CChunkedArray]& chunked_array() const + const shared_ptr[CRecordBatch]& record_batch() const + const shared_ptr[CTable]& table() const + const shared_ptr[CScalar]& scalar() const + + cdef c_string ToString(DatumType kind) + + +cdef extern from * namespace "arrow::compute": + # inlined from compute/function_internal.h to avoid exposing + # implementation details + """ + #include "arrow/compute/function.h" + namespace arrow { + namespace compute { + namespace internal { + Result<std::unique_ptr<FunctionOptions>> DeserializeFunctionOptions( + const Buffer& buffer); + } // namespace internal + } // namespace compute + } // namespace arrow + """ + CResult[unique_ptr[CFunctionOptions]] DeserializeFunctionOptions \ + " arrow::compute::internal::DeserializeFunctionOptions"( + const CBuffer& buffer) + + +cdef extern from * namespace "arrow::compute": + # inlined from expression_internal.h to avoid + # proliferation of #include <unordered_map> + """ + #include <unordered_map> + + #include "arrow/type.h" + #include "arrow/datum.h" + + namespace arrow { + namespace compute { + struct KnownFieldValues { + std::unordered_map<FieldRef, Datum, FieldRef::Hash> map; + }; + } // namespace compute + } // namespace arrow + """ + cdef struct CKnownFieldValues "arrow::compute::KnownFieldValues": + unordered_map[CFieldRef, CDatum, CFieldRefHash] map + +cdef extern from "arrow/compute/expression.h" \ + namespace "arrow::compute" nogil: + + cdef cppclass CExpression "arrow::compute::Expression": + c_bool Equals(const CExpression& other) const + c_string ToString() const + CResult[CExpression] Bind(const CSchema&) + const CFieldRef* field_ref() const + + cdef CExpression CMakeScalarExpression \ + "arrow::compute::literal"(shared_ptr[CScalar] value) + + cdef CExpression CMakeFieldExpression \ + "arrow::compute::field_ref"(CFieldRef) + + cdef CExpression CMakeFieldExpressionByIndex \ + "arrow::compute::field_ref"(int idx) + + cdef CExpression CMakeCallExpression \ + "arrow::compute::call"(c_string function, + vector[CExpression] arguments, + shared_ptr[CFunctionOptions] options) + + cdef CResult[shared_ptr[CBuffer]] CSerializeExpression \ + "arrow::compute::Serialize"(const CExpression&) + + cdef CResult[CExpression] CDeserializeExpression \ + "arrow::compute::Deserialize"(shared_ptr[CBuffer]) + + cdef CResult[CKnownFieldValues] \ + CExtractKnownFieldValues "arrow::compute::ExtractKnownFieldValues"( + const CExpression& partition_expression) + + +cdef extern from "arrow/extension_type.h" namespace "arrow": + cdef cppclass CExtensionTypeRegistry" arrow::ExtensionTypeRegistry": + @staticmethod + shared_ptr[CExtensionTypeRegistry] GetGlobalRegistry() + + cdef cppclass CExtensionType" arrow::ExtensionType"(CDataType): + c_string extension_name() + shared_ptr[CDataType] storage_type() + + @staticmethod + shared_ptr[CArray] WrapArray(shared_ptr[CDataType] ext_type, + shared_ptr[CArray] storage) + + @staticmethod + shared_ptr[CChunkedArray] WrapArray(shared_ptr[CDataType] ext_type, + shared_ptr[CChunkedArray] storage) + + cdef cppclass CExtensionArray" arrow::ExtensionArray"(CArray): + CExtensionArray(shared_ptr[CDataType], shared_ptr[CArray] storage) + + shared_ptr[CArray] storage() + + +cdef extern from "arrow/extension/fixed_shape_tensor.h" namespace "arrow::extension" nogil: + cdef cppclass CFixedShapeTensorType \ + " arrow::extension::FixedShapeTensorType"(CExtensionType): + + CResult[shared_ptr[CTensor]] MakeTensor(const 
shared_ptr[CExtensionScalar]& scalar) const + + @staticmethod + CResult[shared_ptr[CDataType]] Make(const shared_ptr[CDataType]& value_type, + const vector[int64_t]& shape, + const vector[int64_t]& permutation, + const vector[c_string]& dim_names) + + const shared_ptr[CDataType] value_type() + const vector[int64_t] shape() + const vector[int64_t] permutation() + const vector[c_string] dim_names() + + cdef cppclass CFixedShapeTensorArray \ + " arrow::extension::FixedShapeTensorArray"(CExtensionArray): + const CResult[shared_ptr[CTensor]] ToTensor() const + +cdef extern from "arrow/util/compression.h" namespace "arrow" nogil: + cdef enum CCompressionType" arrow::Compression::type": + CCompressionType_UNCOMPRESSED" arrow::Compression::UNCOMPRESSED" + CCompressionType_SNAPPY" arrow::Compression::SNAPPY" + CCompressionType_GZIP" arrow::Compression::GZIP" + CCompressionType_BROTLI" arrow::Compression::BROTLI" + CCompressionType_ZSTD" arrow::Compression::ZSTD" + CCompressionType_LZ4" arrow::Compression::LZ4" + CCompressionType_LZ4_FRAME" arrow::Compression::LZ4_FRAME" + CCompressionType_BZ2" arrow::Compression::BZ2" + + cdef cppclass CCodec" arrow::util::Codec": + @staticmethod + CResult[unique_ptr[CCodec]] Create(CCompressionType codec) + + @staticmethod + CResult[unique_ptr[CCodec]] CreateWithLevel" Create"( + CCompressionType codec, + int compression_level) + + @staticmethod + c_bool SupportsCompressionLevel(CCompressionType codec) + + @staticmethod + CResult[int] MinimumCompressionLevel(CCompressionType codec) + + @staticmethod + CResult[int] MaximumCompressionLevel(CCompressionType codec) + + @staticmethod + CResult[int] DefaultCompressionLevel(CCompressionType codec) + + @staticmethod + c_bool IsAvailable(CCompressionType codec) + + CResult[int64_t] Decompress(int64_t input_len, const uint8_t* input, + int64_t output_len, + uint8_t* output_buffer) + CResult[int64_t] Compress(int64_t input_len, const uint8_t* input, + int64_t output_buffer_len, + uint8_t* output_buffer) + c_string name() const + int compression_level() const + int64_t MaxCompressedLen(int64_t input_len, const uint8_t* input) + + +cdef extern from "arrow/util/io_util.h" namespace "arrow::internal" nogil: + int ErrnoFromStatus(CStatus status) + int WinErrorFromStatus(CStatus status) + int SignalFromStatus(CStatus status) + + CStatus SendSignal(int signum) + CStatus SendSignalToThread(int signum, uint64_t thread_id) + + +cdef extern from "arrow/util/iterator.h" namespace "arrow" nogil: + cdef cppclass CIterator" arrow::Iterator"[T]: + CResult[T] Next() + CStatus Visit[Visitor](Visitor&& visitor) + cppclass RangeIterator: + CResult[T] operator*() + RangeIterator& operator++() + c_bool operator!=(RangeIterator) const + RangeIterator begin() + RangeIterator end() + CIterator[T] MakeVectorIterator[T](vector[T] v) + +cdef extern from "arrow/util/thread_pool.h" namespace "arrow" nogil: + int GetCpuThreadPoolCapacity() + CStatus SetCpuThreadPoolCapacity(int threads) + +cdef extern from "arrow/array/concatenate.h" namespace "arrow" nogil: + CResult[shared_ptr[CArray]] Concatenate( + const vector[shared_ptr[CArray]]& arrays, + CMemoryPool* pool) + +cdef extern from "arrow/c/abi.h": + cdef struct ArrowSchema: + void (*release)(ArrowSchema*) noexcept nogil + + cdef struct ArrowArray: + void (*release)(ArrowArray*) noexcept nogil + + cdef struct ArrowArrayStream: + void (*release)(ArrowArrayStream*) noexcept nogil + + cdef struct ArrowDeviceArray: + pass + +cdef extern from "arrow/c/bridge.h" namespace "arrow" nogil: + CStatus 
ExportType(CDataType&, ArrowSchema* out) + CResult[shared_ptr[CDataType]] ImportType(ArrowSchema*) + + CStatus ExportField(CField&, ArrowSchema* out) + CResult[shared_ptr[CField]] ImportField(ArrowSchema*) + + CStatus ExportSchema(CSchema&, ArrowSchema* out) + CResult[shared_ptr[CSchema]] ImportSchema(ArrowSchema*) + + CStatus ExportArray(CArray&, ArrowArray* out) + CStatus ExportArray(CArray&, ArrowArray* out, ArrowSchema* out_schema) + CResult[shared_ptr[CArray]] ImportArray(ArrowArray*, + shared_ptr[CDataType]) + CResult[shared_ptr[CArray]] ImportArray(ArrowArray*, ArrowSchema*) + + CStatus ExportRecordBatch(CRecordBatch&, ArrowArray* out) + CStatus ExportRecordBatch(CRecordBatch&, ArrowArray* out, + ArrowSchema* out_schema) + CResult[shared_ptr[CRecordBatch]] ImportRecordBatch(ArrowArray*, + shared_ptr[CSchema]) + CResult[shared_ptr[CRecordBatch]] ImportRecordBatch(ArrowArray*, + ArrowSchema*) + + CStatus ExportRecordBatchReader(shared_ptr[CRecordBatchReader], + ArrowArrayStream*) + CResult[shared_ptr[CRecordBatchReader]] ImportRecordBatchReader( + ArrowArrayStream*) + + CStatus ExportChunkedArray(shared_ptr[CChunkedArray], ArrowArrayStream*) + CResult[shared_ptr[CChunkedArray]] ImportChunkedArray(ArrowArrayStream*) + + CStatus ExportDeviceArray(const CArray&, shared_ptr[CSyncEvent], + ArrowDeviceArray* out, ArrowSchema*) + CResult[shared_ptr[CArray]] ImportDeviceArray( + ArrowDeviceArray*, shared_ptr[CDataType]) + CResult[shared_ptr[CArray]] ImportDeviceArray( + ArrowDeviceArray*, ArrowSchema*) + + CStatus ExportDeviceRecordBatch(const CRecordBatch&, shared_ptr[CSyncEvent], + ArrowDeviceArray* out, ArrowSchema*) + CResult[shared_ptr[CRecordBatch]] ImportDeviceRecordBatch( + ArrowDeviceArray*, shared_ptr[CSchema]) + CResult[shared_ptr[CRecordBatch]] ImportDeviceRecordBatch( + ArrowDeviceArray*, ArrowSchema*) + + +cdef extern from "arrow/util/byte_size.h" namespace "arrow::util" nogil: + CResult[int64_t] ReferencedBufferSize(const CArray& array_data) + CResult[int64_t] ReferencedBufferSize(const CRecordBatch& record_batch) + CResult[int64_t] ReferencedBufferSize(const CChunkedArray& chunked_array) + CResult[int64_t] ReferencedBufferSize(const CTable& table) + int64_t TotalBufferSize(const CArray& array) + int64_t TotalBufferSize(const CChunkedArray& array) + int64_t TotalBufferSize(const CRecordBatch& record_batch) + int64_t TotalBufferSize(const CTable& table) + +ctypedef PyObject* CallbackUdf(object user_function, const CUdfContext& context, object inputs) + + +cdef extern from "arrow/api.h" namespace "arrow" nogil: + + cdef cppclass CRecordBatchIterator "arrow::RecordBatchIterator"( + CIterator[shared_ptr[CRecordBatch]]): + pass + + +cdef extern from "arrow/python/udf.h" namespace "arrow::py" nogil: + cdef cppclass CUdfContext" arrow::py::UdfContext": + CMemoryPool *pool + int64_t batch_length + + cdef cppclass CUdfOptions" arrow::py::UdfOptions": + c_string func_name + CArity arity + CFunctionDoc func_doc + vector[shared_ptr[CDataType]] input_types + shared_ptr[CDataType] output_type + + CStatus RegisterScalarFunction(PyObject* function, + function[CallbackUdf] wrapper, const CUdfOptions& options, + CFunctionRegistry* registry) + + CStatus RegisterTabularFunction(PyObject* function, + function[CallbackUdf] wrapper, const CUdfOptions& options, + CFunctionRegistry* registry) + + CStatus RegisterAggregateFunction(PyObject* function, + function[CallbackUdf] wrapper, const CUdfOptions& options, + CFunctionRegistry* registry) + + CStatus RegisterVectorFunction(PyObject* function, + 
function[CallbackUdf] wrapper, const CUdfOptions& options, + CFunctionRegistry* registry) + + CResult[shared_ptr[CRecordBatchReader]] CallTabularFunction( + const c_string& func_name, const vector[CDatum]& args, CFunctionRegistry* registry) + +cdef extern from "arrow/compute/cast.h" namespace "arrow::compute": + CResult[CDatum] Cast(const CDatum& value, const CCastOptions& options) diff --git a/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_acero.pxd b/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_acero.pxd new file mode 100644 index 0000000000000000000000000000000000000000..dc9babee190e110583d9d2cc24b71567203290af --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_acero.pxd @@ -0,0 +1,118 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * + + +cdef extern from "arrow/acero/options.h" namespace "arrow::acero" nogil: + cdef enum CJoinType "arrow::acero::JoinType": + CJoinType_LEFT_SEMI "arrow::acero::JoinType::LEFT_SEMI" + CJoinType_RIGHT_SEMI "arrow::acero::JoinType::RIGHT_SEMI" + CJoinType_LEFT_ANTI "arrow::acero::JoinType::LEFT_ANTI" + CJoinType_RIGHT_ANTI "arrow::acero::JoinType::RIGHT_ANTI" + CJoinType_INNER "arrow::acero::JoinType::INNER" + CJoinType_LEFT_OUTER "arrow::acero::JoinType::LEFT_OUTER" + CJoinType_RIGHT_OUTER "arrow::acero::JoinType::RIGHT_OUTER" + CJoinType_FULL_OUTER "arrow::acero::JoinType::FULL_OUTER" + + cdef cppclass CExecNodeOptions "arrow::acero::ExecNodeOptions": + pass + + cdef cppclass CSourceNodeOptions "arrow::acero::SourceNodeOptions"(CExecNodeOptions): + pass + + cdef cppclass CTableSourceNodeOptions "arrow::acero::TableSourceNodeOptions"(CExecNodeOptions): + CTableSourceNodeOptions(shared_ptr[CTable] table) + CTableSourceNodeOptions(shared_ptr[CTable] table, int64_t max_batch_size) + + cdef cppclass CSinkNodeOptions "arrow::acero::SinkNodeOptions"(CExecNodeOptions): + pass + + cdef cppclass CFilterNodeOptions "arrow::acero::FilterNodeOptions"(CExecNodeOptions): + CFilterNodeOptions(CExpression) + + cdef cppclass CProjectNodeOptions "arrow::acero::ProjectNodeOptions"(CExecNodeOptions): + CProjectNodeOptions(vector[CExpression] expressions) + CProjectNodeOptions(vector[CExpression] expressions, + vector[c_string] names) + + cdef cppclass CAggregateNodeOptions "arrow::acero::AggregateNodeOptions"(CExecNodeOptions): + CAggregateNodeOptions(vector[CAggregate] aggregates, vector[CFieldRef] names) + + cdef cppclass COrderByNodeOptions "arrow::acero::OrderByNodeOptions"(CExecNodeOptions): + COrderByNodeOptions(COrdering ordering) + + cdef cppclass CHashJoinNodeOptions "arrow::acero::HashJoinNodeOptions"(CExecNodeOptions): + CHashJoinNodeOptions(CJoinType, 
vector[CFieldRef] in_left_keys, + vector[CFieldRef] in_right_keys) + CHashJoinNodeOptions(CJoinType, vector[CFieldRef] in_left_keys, + vector[CFieldRef] in_right_keys, + CExpression filter, + c_string output_suffix_for_left, + c_string output_suffix_for_right) + CHashJoinNodeOptions(CJoinType join_type, + vector[CFieldRef] left_keys, + vector[CFieldRef] right_keys, + vector[CFieldRef] left_output, + vector[CFieldRef] right_output, + CExpression filter, + c_string output_suffix_for_left, + c_string output_suffix_for_right) + + cdef struct CAsofJoinKeys "arrow::acero::AsofJoinNodeOptions::Keys": + CFieldRef on_key + vector[CFieldRef] by_key + + cdef cppclass CAsofJoinNodeOptions "arrow::acero::AsofJoinNodeOptions"(CExecNodeOptions): + CAsofJoinNodeOptions(vector[CAsofJoinKeys] keys, int64_t tolerance) + + +cdef extern from "arrow/acero/exec_plan.h" namespace "arrow::acero" nogil: + cdef cppclass CDeclaration "arrow::acero::Declaration": + cppclass Input: + Input(CExecNode*) + Input(CDeclaration) + + c_string label + vector[Input] inputs + + CDeclaration() + CDeclaration(c_string factory_name, CExecNodeOptions options) + CDeclaration(c_string factory_name, vector[Input] inputs, shared_ptr[CExecNodeOptions] options) + + @staticmethod + CDeclaration Sequence(vector[CDeclaration] decls) + + cdef cppclass CExecNode "arrow::acero::ExecNode": + const vector[CExecNode*]& inputs() const + const shared_ptr[CSchema]& output_schema() const + + CResult[shared_ptr[CTable]] DeclarationToTable( + CDeclaration declaration, c_bool use_threads + ) + CResult[shared_ptr[CTable]] DeclarationToTable( + CDeclaration declaration, c_bool use_threads, + CMemoryPool* memory_pool, CFunctionRegistry* function_registry + ) + CResult[unique_ptr[CRecordBatchReader]] DeclarationToReader( + CDeclaration declaration, c_bool use_threads + ) + + CResult[c_string] DeclarationToString(const CDeclaration& declaration) diff --git a/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_cuda.pxd b/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_cuda.pxd new file mode 100644 index 0000000000000000000000000000000000000000..3ac943cf941d8591483d4be2e2bceaac3e051292 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_cuda.pxd @@ -0,0 +1,107 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
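+
+# The declarations below back the optional ``pyarrow.cuda`` module. A
+# minimal usage sketch, assuming a CUDA-enabled build of pyarrow
+# (hypothetical session, shown only as orientation for these bindings):
+#
+#   from pyarrow import cuda
+#   ctx = cuda.Context(0)               # wraps CCudaContext for device 0
+#   cudabuf = ctx.new_buffer(64)        # device memory via CCudaContext::Allocate
+#   cudabuf.copy_from_host(b"x" * 64)   # host-to-device, CCudaBuffer::CopyFromHost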
+ +# distutils: language = c++ + +from pyarrow.includes.libarrow cimport * + +cdef extern from "arrow/gpu/cuda_api.h" namespace "arrow::cuda" nogil: + + cdef cppclass CCudaDeviceManager" arrow::cuda::CudaDeviceManager": + @staticmethod + CResult[CCudaDeviceManager*] Instance() + CResult[shared_ptr[CCudaContext]] GetContext(int gpu_number) + CResult[shared_ptr[CCudaContext]] GetSharedContext(int gpu_number, + void* handle) + CStatus AllocateHost(int device_number, int64_t nbytes, + shared_ptr[CCudaHostBuffer]* buffer) + int num_devices() const + + cdef cppclass CCudaContext" arrow::cuda::CudaContext": + CResult[shared_ptr[CCudaBuffer]] Allocate(int64_t nbytes) + CResult[shared_ptr[CCudaBuffer]] View(uint8_t* data, int64_t nbytes) + CResult[shared_ptr[CCudaBuffer]] OpenIpcBuffer( + const CCudaIpcMemHandle& ipc_handle) + CStatus Synchronize() + int64_t bytes_allocated() const + const void* handle() const + int device_number() const + CResult[uintptr_t] GetDeviceAddress(uintptr_t addr) + + cdef cppclass CCudaIpcMemHandle" arrow::cuda::CudaIpcMemHandle": + @staticmethod + CResult[shared_ptr[CCudaIpcMemHandle]] FromBuffer( + const void* opaque_handle) + CResult[shared_ptr[CBuffer]] Serialize(CMemoryPool* pool) const + + cdef cppclass CCudaBuffer" arrow::cuda::CudaBuffer"(CBuffer): + CCudaBuffer(uint8_t* data, int64_t size, + const shared_ptr[CCudaContext]& context, + c_bool own_data=false, c_bool is_ipc=false) + CCudaBuffer(const shared_ptr[CCudaBuffer]& parent, + const int64_t offset, const int64_t size) + + @staticmethod + CResult[shared_ptr[CCudaBuffer]] FromBuffer(shared_ptr[CBuffer] buf) + + CStatus CopyToHost(const int64_t position, const int64_t nbytes, + void* out) const + CStatus CopyFromHost(const int64_t position, const void* data, + int64_t nbytes) + CStatus CopyFromDevice(const int64_t position, const void* data, + int64_t nbytes) + CStatus CopyFromAnotherDevice(const shared_ptr[CCudaContext]& src_ctx, + const int64_t position, const void* data, + int64_t nbytes) + CResult[shared_ptr[CCudaIpcMemHandle]] ExportForIpc() + shared_ptr[CCudaContext] context() const + + cdef cppclass \ + CCudaHostBuffer" arrow::cuda::CudaHostBuffer"(CMutableBuffer): + pass + + cdef cppclass \ + CCudaBufferReader" arrow::cuda::CudaBufferReader"(CBufferReader): + CCudaBufferReader(const shared_ptr[CBuffer]& buffer) + CResult[int64_t] Read(int64_t nbytes, void* buffer) + CResult[shared_ptr[CBuffer]] Read(int64_t nbytes) + + cdef cppclass \ + CCudaBufferWriter" arrow::cuda::CudaBufferWriter"(WritableFile): + CCudaBufferWriter(const shared_ptr[CCudaBuffer]& buffer) + CStatus Close() + CStatus Write(const void* data, int64_t nbytes) + CStatus WriteAt(int64_t position, const void* data, int64_t nbytes) + CStatus SetBufferSize(const int64_t buffer_size) + int64_t buffer_size() + int64_t num_bytes_buffered() const + + CResult[shared_ptr[CCudaHostBuffer]] AllocateCudaHostBuffer( + int device_number, const int64_t size) + + # Cuda prefix is added to avoid picking up arrow::cuda functions + # from arrow namespace. 
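+    # (A brief note on the pattern, which holds throughout these files:
+    # the quoted string after a declared name is Cython's verbatim C++
+    # name and is emitted as-is into the generated C++, while the
+    # Cython-side alias, here prefixed with "Cuda", is the identifier
+    # that .pyx code refers to.)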
+ CResult[shared_ptr[CCudaBuffer]] \ + CudaSerializeRecordBatch" arrow::cuda::SerializeRecordBatch"\ + (const CRecordBatch& batch, + CCudaContext* ctx) + CResult[shared_ptr[CRecordBatch]] \ + CudaReadRecordBatch" arrow::cuda::ReadRecordBatch"\ + (const shared_ptr[CSchema]& schema, + CDictionaryMemo* dictionary_memo, + const shared_ptr[CCudaBuffer]& buffer, + CMemoryPool* pool) diff --git a/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset.pxd b/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset.pxd new file mode 100644 index 0000000000000000000000000000000000000000..fe96705a54b2ff3eb9e2ec4da998566a58767a81 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset.pxd @@ -0,0 +1,413 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from libcpp.unordered_map cimport unordered_map +from libcpp cimport bool as c_bool + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_acero cimport * +from pyarrow.includes.libarrow_fs cimport * + + +cdef extern from "arrow/dataset/plan.h" namespace "arrow::dataset::internal" nogil: + + cdef void Initialize() + + +ctypedef CStatus cb_writer_finish_internal(CFileWriter*) +ctypedef void cb_writer_finish(dict, CFileWriter*) + +cdef extern from "arrow/dataset/api.h" namespace "arrow::dataset" nogil: + + cdef enum ExistingDataBehavior" arrow::dataset::ExistingDataBehavior": + ExistingDataBehavior_DELETE_MATCHING" \ + arrow::dataset::ExistingDataBehavior::kDeleteMatchingPartitions" + ExistingDataBehavior_OVERWRITE_OR_IGNORE" \ + arrow::dataset::ExistingDataBehavior::kOverwriteOrIgnore" + ExistingDataBehavior_ERROR" \ + arrow::dataset::ExistingDataBehavior::kError" + + cdef cppclass CScanOptions "arrow::dataset::ScanOptions": + shared_ptr[CSchema] dataset_schema + shared_ptr[CSchema] projected_schema + c_bool use_threads + CExpression filter + + cdef cppclass CScanNodeOptions "arrow::dataset::ScanNodeOptions"(CExecNodeOptions): + CScanNodeOptions(shared_ptr[CDataset] dataset, shared_ptr[CScanOptions] scan_options) + + shared_ptr[CScanOptions] scan_options + + cdef cppclass CFragmentScanOptions "arrow::dataset::FragmentScanOptions": + c_string type_name() const + + ctypedef CIterator[shared_ptr[CScanTask]] CScanTaskIterator \ + "arrow::dataset::ScanTaskIterator" + + cdef cppclass CScanTask" arrow::dataset::ScanTask": + CResult[CRecordBatchIterator] Execute() + + cdef cppclass CFragment "arrow::dataset::Fragment": + CResult[shared_ptr[CSchema]] ReadPhysicalSchema() + CResult[CScanTaskIterator] Scan(shared_ptr[CScanOptions] options) + c_bool splittable() const + c_string type_name() const + const CExpression& partition_expression() const + + ctypedef 
vector[shared_ptr[CFragment]] CFragmentVector \ + "arrow::dataset::FragmentVector" + + ctypedef CIterator[shared_ptr[CFragment]] CFragmentIterator \ + "arrow::dataset::FragmentIterator" + + cdef cppclass CInMemoryFragment "arrow::dataset::InMemoryFragment"( + CFragment): + CInMemoryFragment(vector[shared_ptr[CRecordBatch]] record_batches, + CExpression partition_expression) + + cdef cppclass CTaggedRecordBatch "arrow::dataset::TaggedRecordBatch": + shared_ptr[CRecordBatch] record_batch + shared_ptr[CFragment] fragment + + ctypedef CIterator[CTaggedRecordBatch] CTaggedRecordBatchIterator \ + "arrow::dataset::TaggedRecordBatchIterator" + + cdef cppclass CScanner "arrow::dataset::Scanner": + CScanner(shared_ptr[CDataset], shared_ptr[CScanOptions]) + CScanner(shared_ptr[CFragment], shared_ptr[CScanOptions]) + CResult[CScanTaskIterator] Scan() + CResult[CTaggedRecordBatchIterator] ScanBatches() + CResult[shared_ptr[CTable]] ToTable() + CResult[shared_ptr[CTable]] TakeRows(const CArray& indices) + CResult[shared_ptr[CTable]] Head(int64_t num_rows) + CResult[int64_t] CountRows() + CResult[CFragmentIterator] GetFragments() + CResult[shared_ptr[CRecordBatchReader]] ToRecordBatchReader() + const shared_ptr[CScanOptions]& options() + + cdef cppclass CScannerBuilder "arrow::dataset::ScannerBuilder": + CScannerBuilder(shared_ptr[CDataset], + shared_ptr[CScanOptions] scan_options) + CScannerBuilder(shared_ptr[CSchema], shared_ptr[CFragment], + shared_ptr[CScanOptions] scan_options) + + @staticmethod + shared_ptr[CScannerBuilder] FromRecordBatchReader( + shared_ptr[CRecordBatchReader] reader) + CStatus ProjectColumns "Project"(const vector[c_string]& columns) + CStatus Project(vector[CExpression]& exprs, vector[c_string]& columns) + CStatus Filter(CExpression filter) + CStatus UseThreads(c_bool use_threads) + CStatus Pool(CMemoryPool* pool) + CStatus BatchSize(int64_t batch_size) + CStatus BatchReadahead(int32_t batch_readahead) + CStatus FragmentReadahead(int32_t fragment_readahead) + CStatus FragmentScanOptions( + shared_ptr[CFragmentScanOptions] fragment_scan_options) + CResult[shared_ptr[CScanOptions]] GetScanOptions() + CResult[shared_ptr[CScanner]] Finish() + shared_ptr[CSchema] schema() const + + ctypedef vector[shared_ptr[CDataset]] CDatasetVector \ + "arrow::dataset::DatasetVector" + + cdef cppclass CDataset "arrow::dataset::Dataset": + const shared_ptr[CSchema] & schema() + CResult[CFragmentIterator] GetFragments() + CResult[CFragmentIterator] GetFragments(CExpression predicate) + const CExpression & partition_expression() + c_string type_name() + + CResult[shared_ptr[CDataset]] ReplaceSchema(shared_ptr[CSchema]) + + CResult[shared_ptr[CScannerBuilder]] NewScan() + + cdef cppclass CInMemoryDataset "arrow::dataset::InMemoryDataset"( + CDataset): + CInMemoryDataset(shared_ptr[CRecordBatchReader]) + CInMemoryDataset(shared_ptr[CTable]) + + cdef cppclass CUnionDataset "arrow::dataset::UnionDataset"( + CDataset): + @staticmethod + CResult[shared_ptr[CUnionDataset]] Make(shared_ptr[CSchema] schema, + CDatasetVector children) + + const CDatasetVector& children() const + + cdef cppclass CInspectOptions "arrow::dataset::InspectOptions": + int fragments + + cdef cppclass CFinishOptions "arrow::dataset::FinishOptions": + shared_ptr[CSchema] schema + CInspectOptions inspect_options + c_bool validate_fragments + + cdef cppclass CDatasetFactory "arrow::dataset::DatasetFactory": + CResult[vector[shared_ptr[CSchema]]] InspectSchemas(CInspectOptions) + CResult[shared_ptr[CSchema]] Inspect(CInspectOptions) + 
CResult[shared_ptr[CDataset]] FinishWithSchema "Finish"( + const shared_ptr[CSchema]& schema) + CResult[shared_ptr[CDataset]] Finish() + const CExpression& root_partition() + CStatus SetRootPartition(CExpression partition) + + cdef cppclass CUnionDatasetFactory "arrow::dataset::UnionDatasetFactory": + @staticmethod + CResult[shared_ptr[CDatasetFactory]] Make( + vector[shared_ptr[CDatasetFactory]] factories) + + cdef cppclass CFileSource "arrow::dataset::FileSource": + const c_string& path() const + const shared_ptr[CFileSystem]& filesystem() const + const shared_ptr[CBuffer]& buffer() const + const int64_t size() const + # HACK: Cython can't handle all the overloads so don't declare them. + # This means invalid construction of CFileSource won't be caught in + # the C++ generation phase (though it will still be caught when + # the generated C++ is compiled). + CFileSource(...) + + cdef cppclass CFileWriteOptions \ + "arrow::dataset::FileWriteOptions": + const shared_ptr[CFileFormat]& format() const + c_string type_name() const + + cdef cppclass CFileWriter \ + "arrow::dataset::FileWriter": + const shared_ptr[CFileFormat]& format() const + const shared_ptr[CSchema]& schema() const + const shared_ptr[CFileWriteOptions]& options() const + const CFileLocator& destination() const + CResult[int64_t] GetBytesWritten() + + cdef cppclass CFileFormat "arrow::dataset::FileFormat": + shared_ptr[CFragmentScanOptions] default_fragment_scan_options + c_string type_name() const + CResult[shared_ptr[CSchema]] Inspect(const CFileSource&) const + CResult[shared_ptr[CFileFragment]] MakeFragment( + CFileSource source, + CExpression partition_expression, + shared_ptr[CSchema] physical_schema) + shared_ptr[CFileWriteOptions] DefaultWriteOptions() + + cdef cppclass CFileFragment "arrow::dataset::FileFragment"( + CFragment): + const CFileSource& source() const + const shared_ptr[CFileFormat]& format() const + + cdef cppclass CFileSystemDatasetWriteOptions \ + "arrow::dataset::FileSystemDatasetWriteOptions": + shared_ptr[CFileWriteOptions] file_write_options + shared_ptr[CFileSystem] filesystem + c_string base_dir + shared_ptr[CPartitioning] partitioning + int max_partitions + c_string basename_template + function[cb_writer_finish_internal] writer_pre_finish + function[cb_writer_finish_internal] writer_post_finish + ExistingDataBehavior existing_data_behavior + c_bool create_dir + uint32_t max_open_files + uint64_t max_rows_per_file + uint64_t min_rows_per_group + uint64_t max_rows_per_group + + cdef cppclass CFileSystemDataset \ + "arrow::dataset::FileSystemDataset"(CDataset): + @staticmethod + CResult[shared_ptr[CDataset]] Make( + shared_ptr[CSchema] schema, + CExpression source_partition, + shared_ptr[CFileFormat] format, + shared_ptr[CFileSystem] filesystem, + vector[shared_ptr[CFileFragment]] fragments) + + @staticmethod + CStatus Write( + const CFileSystemDatasetWriteOptions& write_options, + shared_ptr[CScanner] scanner) + + c_string type() + vector[c_string] files() + const shared_ptr[CFileFormat]& format() const + const shared_ptr[CFileSystem]& filesystem() const + const shared_ptr[CPartitioning]& partitioning() const + + cdef cppclass CIpcFileWriteOptions \ + "arrow::dataset::IpcFileWriteOptions"(CFileWriteOptions): + shared_ptr[CIpcWriteOptions] options + + cdef cppclass CIpcFileFormat "arrow::dataset::IpcFileFormat"( + CFileFormat): + pass + + cdef cppclass COrcFileFormat "arrow::dataset::OrcFileFormat"( + CFileFormat): + pass + + cdef cppclass CCsvFileWriteOptions \ + 
"arrow::dataset::CsvFileWriteOptions"(CFileWriteOptions): + shared_ptr[CCSVWriteOptions] write_options + CMemoryPool* pool + + cdef cppclass CCsvFileFormat "arrow::dataset::CsvFileFormat"( + CFileFormat): + CCSVParseOptions parse_options + + cdef cppclass CCsvFragmentScanOptions \ + "arrow::dataset::CsvFragmentScanOptions"(CFragmentScanOptions): + CCSVConvertOptions convert_options + CCSVReadOptions read_options + function[StreamWrapFunc] stream_transform_func + + cdef cppclass CJsonFileFormat "arrow::dataset::JsonFileFormat"(CFileFormat): + pass + + cdef cppclass CJsonFragmentScanOptions "arrow::dataset::JsonFragmentScanOptions"(CFragmentScanOptions): + CJSONParseOptions parse_options + CJSONReadOptions read_options + + cdef cppclass CPartitioning "arrow::dataset::Partitioning": + c_string type_name() const + CResult[CExpression] Parse(const c_string & path) const + const shared_ptr[CSchema] & schema() + c_bool Equals(const CPartitioning& other) const + + cdef cppclass CSegmentEncoding" arrow::dataset::SegmentEncoding": + bint operator==(CSegmentEncoding) + + CSegmentEncoding CSegmentEncoding_None\ + " arrow::dataset::SegmentEncoding::None" + CSegmentEncoding CSegmentEncoding_Uri\ + " arrow::dataset::SegmentEncoding::Uri" + + cdef cppclass CKeyValuePartitioningOptions \ + "arrow::dataset::KeyValuePartitioningOptions": + CSegmentEncoding segment_encoding + + cdef cppclass CHivePartitioningOptions \ + "arrow::dataset::HivePartitioningOptions": + CSegmentEncoding segment_encoding + c_string null_fallback + + cdef cppclass CPartitioningFactoryOptions \ + "arrow::dataset::PartitioningFactoryOptions": + c_bool infer_dictionary + shared_ptr[CSchema] schema + CSegmentEncoding segment_encoding + + cdef cppclass CHivePartitioningFactoryOptions \ + "arrow::dataset::HivePartitioningFactoryOptions": + c_bool infer_dictionary + c_string null_fallback + shared_ptr[CSchema] schema + CSegmentEncoding segment_encoding + + cdef cppclass CPartitioningFactory "arrow::dataset::PartitioningFactory": + c_string type_name() const + + cdef cppclass CKeyValuePartitioning \ + "arrow::dataset::KeyValuePartitioning"(CPartitioning): + CKeyValuePartitioning(shared_ptr[CSchema] schema, + vector[shared_ptr[CArray]] dictionaries, + CKeyValuePartitioningOptions options) + + vector[shared_ptr[CArray]] dictionaries() const + CSegmentEncoding segment_encoding() + + cdef cppclass CDirectoryPartitioning \ + "arrow::dataset::DirectoryPartitioning"(CPartitioning): + CDirectoryPartitioning(shared_ptr[CSchema] schema, + vector[shared_ptr[CArray]] dictionaries) + + @staticmethod + shared_ptr[CPartitioningFactory] MakeFactory( + vector[c_string] field_names, CPartitioningFactoryOptions) + + vector[shared_ptr[CArray]] dictionaries() const + + cdef cppclass CHivePartitioning \ + "arrow::dataset::HivePartitioning"(CPartitioning): + CHivePartitioning(shared_ptr[CSchema] schema, + vector[shared_ptr[CArray]] dictionaries, + CHivePartitioningOptions options) + + @staticmethod + shared_ptr[CPartitioningFactory] MakeFactory( + CHivePartitioningFactoryOptions) + + vector[shared_ptr[CArray]] dictionaries() const + c_string null_fallback() const + + cdef cppclass CFilenamePartitioning \ + "arrow::dataset::FilenamePartitioning"(CPartitioning): + CFilenamePartitioning(shared_ptr[CSchema] schema, + vector[shared_ptr[CArray]] dictionaries) + + @staticmethod + shared_ptr[CPartitioningFactory] MakeFactory( + vector[c_string] field_names, CPartitioningFactoryOptions) + + vector[shared_ptr[CArray]] dictionaries() const + + cdef cppclass 
CPartitioningOrFactory \ + "arrow::dataset::PartitioningOrFactory": + CPartitioningOrFactory(shared_ptr[CPartitioning]) + CPartitioningOrFactory(shared_ptr[CPartitioningFactory]) + CPartitioningOrFactory & operator = (shared_ptr[CPartitioning]) + CPartitioningOrFactory & operator = ( + shared_ptr[CPartitioningFactory]) + shared_ptr[CPartitioning] partitioning() const + shared_ptr[CPartitioningFactory] factory() const + + cdef cppclass CFileSystemFactoryOptions \ + "arrow::dataset::FileSystemFactoryOptions": + CPartitioningOrFactory partitioning + c_string partition_base_dir + c_bool exclude_invalid_files + vector[c_string] selector_ignore_prefixes + + cdef cppclass CFileSystemDatasetFactory \ + "arrow::dataset::FileSystemDatasetFactory"( + CDatasetFactory): + @staticmethod + CResult[shared_ptr[CDatasetFactory]] MakeFromPaths "Make"( + shared_ptr[CFileSystem] filesystem, + vector[c_string] paths, + shared_ptr[CFileFormat] format, + CFileSystemFactoryOptions options + ) + + @staticmethod + CResult[shared_ptr[CDatasetFactory]] MakeFromSelector "Make"( + shared_ptr[CFileSystem] filesystem, + CFileSelector, + shared_ptr[CFileFormat] format, + CFileSystemFactoryOptions options + ) + + @staticmethod + CResult[shared_ptr[CDatasetFactory]] MakeFromFileInfos "Make"( + shared_ptr[CFileSystem] filesystem, + vector[CFileInfo] files, + shared_ptr[CFileFormat] format, + CFileSystemFactoryOptions options + ) diff --git a/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset_parquet.pxd b/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset_parquet.pxd new file mode 100644 index 0000000000000000000000000000000000000000..e5389b3135faf472e9b1f7d85fee9dea5fd9f2b0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset_parquet.pxd @@ -0,0 +1,105 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
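The MakeFromPaths "Make", MakeFromSelector "Make", and MakeFromFileInfos "Make" declarations above use Cython's verbatim-name aliasing to split a single C++ FileSystemDatasetFactory::Make overload set into distinct Cython-level names, since Cython cannot itself dispatch on the overloads. A minimal sketch of driving one alias from a hypothetical .pyx module follows; the helper name make_path_factory and its surrounding setup are assumptions, while GetResultValue is the result-unwrapping helper declared in pyarrow.includes.common:

# Illustrative sketch only -- not part of the vendored pxd above.
# distutils: language = c++
from pyarrow.includes.common cimport *
from pyarrow.includes.libarrow_fs cimport CFileSystem
from pyarrow.includes.libarrow_dataset cimport (
    CDatasetFactory, CFileFormat, CFileSystemDatasetFactory,
    CFileSystemFactoryOptions)

cdef shared_ptr[CDatasetFactory] make_path_factory(
        shared_ptr[CFileSystem] fs,
        vector[c_string] paths,
        shared_ptr[CFileFormat] fmt) except *:
    # Stack-allocated options struct; unset fields keep their C++ defaults.
    cdef CFileSystemFactoryOptions options
    options.exclude_invalid_files = True
    # MakeFromPaths is only a Cython-side alias: the generated C++ calls
    # arrow::dataset::FileSystemDatasetFactory::Make(fs, paths, fmt, options).
    return GetResultValue(
        CFileSystemDatasetFactory.MakeFromPaths(fs, paths, fmt, options))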
+ +# distutils: language = c++ + +from pyarrow.includes.libarrow_dataset cimport * +from pyarrow.includes.libparquet_encryption cimport * + +from pyarrow._parquet cimport * + + +cdef extern from "arrow/dataset/parquet_encryption_config.h" namespace "arrow::dataset" nogil: + cdef cppclass CParquetEncryptionConfig "arrow::dataset::ParquetEncryptionConfig": + shared_ptr[CCryptoFactory] crypto_factory + shared_ptr[CKmsConnectionConfig] kms_connection_config + shared_ptr[CEncryptionConfiguration] encryption_config + + cdef cppclass CParquetDecryptionConfig "arrow::dataset::ParquetDecryptionConfig": + shared_ptr[CCryptoFactory] crypto_factory + shared_ptr[CKmsConnectionConfig] kms_connection_config + shared_ptr[CDecryptionConfiguration] decryption_config + + +cdef extern from "arrow/dataset/api.h" namespace "arrow::dataset" nogil: + + cdef cppclass CParquetFileWriter \ + "arrow::dataset::ParquetFileWriter"(CFileWriter): + const shared_ptr[FileWriter]& parquet_writer() const + + cdef cppclass CParquetFileWriteOptions \ + "arrow::dataset::ParquetFileWriteOptions"(CFileWriteOptions): + shared_ptr[WriterProperties] writer_properties + shared_ptr[ArrowWriterProperties] arrow_writer_properties + shared_ptr[CParquetEncryptionConfig] parquet_encryption_config + + cdef cppclass CParquetFileFragment "arrow::dataset::ParquetFileFragment"( + CFileFragment): + const vector[int]& row_groups() const + shared_ptr[CFileMetaData] metadata() const + CResult[vector[shared_ptr[CFragment]]] SplitByRowGroup( + CExpression predicate) + CResult[shared_ptr[CFragment]] SubsetWithFilter "Subset"( + CExpression predicate) + CResult[shared_ptr[CFragment]] SubsetWithIds "Subset"( + vector[int] row_group_ids) + CStatus EnsureCompleteMetadata() + + cdef cppclass CParquetFileFormatReaderOptions \ + "arrow::dataset::ParquetFileFormat::ReaderOptions": + unordered_set[c_string] dict_columns + TimeUnit coerce_int96_timestamp_unit + + cdef cppclass CParquetFileFormat "arrow::dataset::ParquetFileFormat"( + CFileFormat): + CParquetFileFormatReaderOptions reader_options + CResult[shared_ptr[CFileFragment]] MakeFragment( + CFileSource source, + CExpression partition_expression, + shared_ptr[CSchema] physical_schema, + vector[int] row_groups) + + cdef cppclass CParquetFragmentScanOptions \ + "arrow::dataset::ParquetFragmentScanOptions"(CFragmentScanOptions): + shared_ptr[CReaderProperties] reader_properties + shared_ptr[ArrowReaderProperties] arrow_reader_properties + shared_ptr[CParquetDecryptionConfig] parquet_decryption_config + + cdef cppclass CParquetFactoryOptions \ + "arrow::dataset::ParquetFactoryOptions": + CPartitioningOrFactory partitioning + c_string partition_base_dir + c_bool validate_column_chunk_paths + + cdef cppclass CParquetDatasetFactory \ + "arrow::dataset::ParquetDatasetFactory"(CDatasetFactory): + @staticmethod + CResult[shared_ptr[CDatasetFactory]] MakeFromMetaDataPath "Make"( + const c_string& metadata_path, + shared_ptr[CFileSystem] filesystem, + shared_ptr[CParquetFileFormat] format, + CParquetFactoryOptions options + ) + + @staticmethod + CResult[shared_ptr[CDatasetFactory]] MakeFromMetaDataSource "Make"( + const CFileSource& metadata_path, + const c_string& base_path, + shared_ptr[CFileSystem] filesystem, + shared_ptr[CParquetFileFormat] format, + CParquetFactoryOptions options + ) diff --git a/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_feather.pxd b/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_feather.pxd new file mode 100644 index 
0000000000000000000000000000000000000000..722e947bfeca238af0bd6ee002e7f3f9f1063ed6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_feather.pxd @@ -0,0 +1,50 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from pyarrow.includes.libarrow cimport (CCompressionType, CStatus, CTable, + COutputStream, CResult, shared_ptr, + vector, CRandomAccessFile, CSchema, + c_string, CIpcReadOptions) + + +cdef extern from "arrow/ipc/api.h" namespace "arrow::ipc" nogil: + int kFeatherV1Version" arrow::ipc::feather::kFeatherV1Version" + int kFeatherV2Version" arrow::ipc::feather::kFeatherV2Version" + + cdef cppclass CFeatherProperties" arrow::ipc::feather::WriteProperties": + int version + int chunksize + CCompressionType compression + int compression_level + + CStatus WriteFeather" arrow::ipc::feather::WriteTable" \ + (const CTable& table, COutputStream* out, + CFeatherProperties properties) + + cdef cppclass CFeatherReader" arrow::ipc::feather::Reader": + @staticmethod + CResult[shared_ptr[CFeatherReader]] Open( + const shared_ptr[CRandomAccessFile]& file, + const CIpcReadOptions& options) + int version() + shared_ptr[CSchema] schema() + + CStatus Read(shared_ptr[CTable]* out) + CStatus Read(const vector[int] indices, shared_ptr[CTable]* out) + CStatus Read(const vector[c_string] names, shared_ptr[CTable]* out) diff --git a/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_flight.pxd b/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_flight.pxd new file mode 100644 index 0000000000000000000000000000000000000000..c4cf5830c41286fde3af95c894b909e0aa34a72d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_flight.pxd @@ -0,0 +1,622 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
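CFeatherReader above binds arrow::ipc::feather::Reader: Open is a static factory returning a CResult-wrapped reader, and the Read overloads fill a CTable out-parameter while reporting a CStatus. A hedged sketch of a whole-table read from a hypothetical .pyx helper follows; check_status is pyarrow's status-raising helper cimported from pyarrow.lib, and the CRandomAccessFile plumbing is assumed to exist elsewhere:

# Illustrative sketch only -- not part of the vendored pxd above.
# distutils: language = c++
from pyarrow.includes.common cimport *
from pyarrow.includes.libarrow cimport *
from pyarrow.includes.libarrow_feather cimport CFeatherReader
from pyarrow.lib cimport check_status

cdef shared_ptr[CTable] read_feather_table(
        shared_ptr[CRandomAccessFile] source) except *:
    cdef:
        shared_ptr[CFeatherReader] reader
        shared_ptr[CTable] table
    # Open() detects the Feather version (V1 or V2/IPC) from the file.
    reader = GetResultValue(
        CFeatherReader.Open(source, CIpcReadOptions.Defaults()))
    # The zero-argument Read overload materializes every column.
    check_status(reader.get().Read(&table))
    return table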
+ +# distutils: language = c++ + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * + + +cdef extern from "arrow/flight/api.h" namespace "arrow" nogil: + cdef char* CTracingServerMiddlewareName\ + " arrow::flight::TracingServerMiddleware::kMiddlewareName" + + cdef cppclass CActionType" arrow::flight::ActionType": + c_string type + c_string description + bint operator==(CActionType) + CResult[c_string] SerializeToString() + + @staticmethod + CResult[CActionType] Deserialize(const c_string& serialized) + + cdef cppclass CAction" arrow::flight::Action": + c_string type + shared_ptr[CBuffer] body + bint operator==(CAction) + CResult[c_string] SerializeToString() + c_string ToString() + + @staticmethod + CResult[CAction] Deserialize(const c_string& serialized) + + cdef cppclass CFlightResult" arrow::flight::Result": + CFlightResult() + CFlightResult(CFlightResult) + shared_ptr[CBuffer] body + bint operator==(CFlightResult) + CResult[c_string] SerializeToString() + c_string ToString() + + @staticmethod + CResult[CFlightResult] Deserialize(const c_string& serialized) + + cdef cppclass CBasicAuth" arrow::flight::BasicAuth": + CBasicAuth() + CBasicAuth(CBuffer) + CBasicAuth(CBasicAuth) + c_string username + c_string password + bint operator==(CBasicAuth) + CResult[c_string] SerializeToString() + c_string ToString() + + @staticmethod + CResult[CBasicAuth] Deserialize(const c_string& serialized) + + cdef cppclass CResultStream" arrow::flight::ResultStream": + CResult[unique_ptr[CFlightResult]] Next() + + cdef cppclass CDescriptorType \ + " arrow::flight::FlightDescriptor::DescriptorType": + bint operator==(CDescriptorType) + + CDescriptorType CDescriptorTypeUnknown\ + " arrow::flight::FlightDescriptor::UNKNOWN" + CDescriptorType CDescriptorTypePath\ + " arrow::flight::FlightDescriptor::PATH" + CDescriptorType CDescriptorTypeCmd\ + " arrow::flight::FlightDescriptor::CMD" + + cdef cppclass CFlightDescriptor" arrow::flight::FlightDescriptor": + CDescriptorType type + c_string cmd + vector[c_string] path + bint operator==(CFlightDescriptor) + CResult[c_string] SerializeToString() + c_string ToString() + + @staticmethod + CResult[CFlightDescriptor] Deserialize(const c_string& serialized) + + cdef cppclass CTicket" arrow::flight::Ticket": + CTicket() + c_string ticket + bint operator==(CTicket) + CResult[c_string] SerializeToString() + c_string ToString() + + @staticmethod + CResult[CTicket] Deserialize(const c_string& serialized) + + cdef cppclass CCriteria" arrow::flight::Criteria": + CCriteria() + c_string expression + bint operator==(CCriteria) + CResult[c_string] SerializeToString() + + @staticmethod + CResult[CCriteria] Deserialize(const c_string& serialized) + + cdef cppclass CLocation" arrow::flight::Location": + CLocation() + c_string ToString() + c_bool Equals(const CLocation& other) + + @staticmethod + CResult[CLocation] Parse(const c_string& uri_string) + + @staticmethod + CResult[CLocation] ForGrpcTcp(const c_string& host, int port) + + @staticmethod + CResult[CLocation] ForGrpcTls(const c_string& host, int port) + + @staticmethod + CResult[CLocation] ForGrpcUnix(const c_string& path) + + cdef cppclass CFlightEndpoint" arrow::flight::FlightEndpoint": + CFlightEndpoint() + + CTicket ticket + vector[CLocation] locations + + bint operator==(CFlightEndpoint) + CResult[c_string] SerializeToString() + c_string ToString() + + @staticmethod + CResult[CFlightEndpoint] Deserialize(const c_string& serialized) + + cdef cppclass CFlightInfo" arrow::flight::FlightInfo": + 
CFlightInfo(CFlightInfo info) + int64_t total_records() + int64_t total_bytes() + CResult[shared_ptr[CSchema]] GetSchema(CDictionaryMemo* memo) + CFlightDescriptor& descriptor() + const vector[CFlightEndpoint]& endpoints() + CResult[c_string] SerializeToString() + c_string ToString() + bint operator==(CFlightInfo) + + @staticmethod + CResult[unique_ptr[CFlightInfo]] Deserialize( + const c_string& serialized) + + cdef cppclass CSchemaResult" arrow::flight::SchemaResult": + CSchemaResult() + CSchemaResult(CSchemaResult result) + CResult[shared_ptr[CSchema]] GetSchema(CDictionaryMemo* memo) + bint operator==(CSchemaResult) + CResult[c_string] SerializeToString() + c_string ToString() + + @staticmethod + CResult[CSchemaResult] Deserialize(const c_string& serialized) + + cdef cppclass CFlightListing" arrow::flight::FlightListing": + CResult[unique_ptr[CFlightInfo]] Next() + + cdef cppclass CSimpleFlightListing" arrow::flight::SimpleFlightListing": + # This doesn't work with Cython >= 3 + # CSimpleFlightListing(vector[CFlightInfo]&& info) + CSimpleFlightListing(const vector[CFlightInfo]& info) + + cdef cppclass CFlightPayload" arrow::flight::FlightPayload": + shared_ptr[CBuffer] descriptor + shared_ptr[CBuffer] app_metadata + CIpcPayload ipc_message + + cdef cppclass CFlightDataStream" arrow::flight::FlightDataStream": + shared_ptr[CSchema] schema() + CResult[CFlightPayload] Next() + + cdef cppclass CFlightStreamChunk" arrow::flight::FlightStreamChunk": + CFlightStreamChunk() + shared_ptr[CRecordBatch] data + shared_ptr[CBuffer] app_metadata + + cdef cppclass CMetadataRecordBatchReader \ + " arrow::flight::MetadataRecordBatchReader": + CResult[shared_ptr[CSchema]] GetSchema() + CResult[CFlightStreamChunk] Next() + CResult[shared_ptr[CTable]] ToTable() + + CResult[shared_ptr[CRecordBatchReader]] MakeRecordBatchReader\ + " arrow::flight::MakeRecordBatchReader"( + shared_ptr[CMetadataRecordBatchReader]) + + cdef cppclass CMetadataRecordBatchWriter \ + " arrow::flight::MetadataRecordBatchWriter"(CRecordBatchWriter): + CStatus Begin(shared_ptr[CSchema] schema, + const CIpcWriteOptions& options) + CStatus WriteMetadata(shared_ptr[CBuffer] app_metadata) + CStatus WriteWithMetadata(const CRecordBatch& batch, + shared_ptr[CBuffer] app_metadata) + + cdef cppclass CFlightStreamReader \ + " arrow::flight::FlightStreamReader"(CMetadataRecordBatchReader): + void Cancel() + CResult[shared_ptr[CTable]] ToTableWithStopToken" ToTable"\ + (const CStopToken& stop_token) + + cdef cppclass CFlightMessageReader \ + " arrow::flight::FlightMessageReader"(CMetadataRecordBatchReader): + CFlightDescriptor& descriptor() + + cdef cppclass CFlightMessageWriter \ + " arrow::flight::FlightMessageWriter"(CMetadataRecordBatchWriter): + pass + + cdef cppclass CFlightStreamWriter \ + " arrow::flight::FlightStreamWriter"(CMetadataRecordBatchWriter): + CStatus DoneWriting() + + cdef cppclass CRecordBatchStream \ + " arrow::flight::RecordBatchStream"(CFlightDataStream): + CRecordBatchStream(shared_ptr[CRecordBatchReader]& reader, + const CIpcWriteOptions& options) + + cdef cppclass CFlightMetadataReader" arrow::flight::FlightMetadataReader": + CStatus ReadMetadata(shared_ptr[CBuffer]* out) + + cdef cppclass CFlightMetadataWriter" arrow::flight::FlightMetadataWriter": + CStatus WriteMetadata(const CBuffer& message) + + cdef cppclass CServerAuthReader" arrow::flight::ServerAuthReader": + CStatus Read(c_string* token) + + cdef cppclass CServerAuthSender" arrow::flight::ServerAuthSender": + CStatus Write(c_string& token) + + cdef cppclass 
CClientAuthReader" arrow::flight::ClientAuthReader": + CStatus Read(c_string* token) + + cdef cppclass CClientAuthSender" arrow::flight::ClientAuthSender": + CStatus Write(c_string& token) + + cdef cppclass CServerAuthHandler" arrow::flight::ServerAuthHandler": + pass + + cdef cppclass CClientAuthHandler" arrow::flight::ClientAuthHandler": + pass + + cdef cppclass CServerCallContext" arrow::flight::ServerCallContext": + c_string& peer_identity() + c_string& peer() + c_bool is_cancelled() + void AddHeader(const c_string& key, const c_string& value) + void AddTrailer(const c_string& key, const c_string& value) + CServerMiddleware* GetMiddleware(const c_string& key) + + cdef cppclass CTimeoutDuration" arrow::flight::TimeoutDuration": + CTimeoutDuration(double) + + cdef cppclass CFlightCallOptions" arrow::flight::FlightCallOptions": + CFlightCallOptions() + CTimeoutDuration timeout + CIpcWriteOptions write_options + CIpcReadOptions read_options + vector[pair[c_string, c_string]] headers + CStopToken stop_token + + cdef cppclass CCertKeyPair" arrow::flight::CertKeyPair": + CCertKeyPair() + c_string pem_cert + c_string pem_key + + cdef cppclass CFlightMethod" arrow::flight::FlightMethod": + bint operator==(CFlightMethod) + + CFlightMethod CFlightMethodInvalid\ + " arrow::flight::FlightMethod::Invalid" + CFlightMethod CFlightMethodHandshake\ + " arrow::flight::FlightMethod::Handshake" + CFlightMethod CFlightMethodListFlights\ + " arrow::flight::FlightMethod::ListFlights" + CFlightMethod CFlightMethodGetFlightInfo\ + " arrow::flight::FlightMethod::GetFlightInfo" + CFlightMethod CFlightMethodGetSchema\ + " arrow::flight::FlightMethod::GetSchema" + CFlightMethod CFlightMethodDoGet\ + " arrow::flight::FlightMethod::DoGet" + CFlightMethod CFlightMethodDoPut\ + " arrow::flight::FlightMethod::DoPut" + CFlightMethod CFlightMethodDoAction\ + " arrow::flight::FlightMethod::DoAction" + CFlightMethod CFlightMethodListActions\ + " arrow::flight::FlightMethod::ListActions" + CFlightMethod CFlightMethodDoExchange\ + " arrow::flight::FlightMethod::DoExchange" + + cdef cppclass CCallInfo" arrow::flight::CallInfo": + CFlightMethod method + + # This is really std::unordered_multimap, but Cython has no + # bindings for it, so treat it as an opaque class and bind the + # methods we need + cdef cppclass CCallHeaders" arrow::flight::CallHeaders": + cppclass const_iterator: + pair[c_string, c_string] operator*() + # For Cython < 3 + const_iterator operator++() + # For Cython >= 3 + const_iterator operator++(int) + bint operator==(const_iterator) + bint operator!=(const_iterator) + const_iterator cbegin() + const_iterator cend() + + cdef cppclass CAddCallHeaders" arrow::flight::AddCallHeaders": + void AddHeader(const c_string& key, const c_string& value) + + cdef cppclass CServerMiddleware" arrow::flight::ServerMiddleware": + c_string name() + + cdef cppclass CServerMiddlewareFactory\ + " arrow::flight::ServerMiddlewareFactory": + pass + + cdef cppclass CClientMiddleware" arrow::flight::ClientMiddleware": + pass + + cdef cppclass CClientMiddlewareFactory\ + " arrow::flight::ClientMiddlewareFactory": + pass + + cpdef cppclass CTracingServerMiddlewareTraceKey\ + " arrow::flight::TracingServerMiddleware::TraceKey": + CTracingServerMiddlewareTraceKey() + c_string key + c_string value + + cdef cppclass CTracingServerMiddleware\ + " arrow::flight::TracingServerMiddleware"(CServerMiddleware): + vector[CTracingServerMiddlewareTraceKey] GetTraceContext() + + cdef shared_ptr[CServerMiddlewareFactory] \ + 
MakeTracingServerMiddlewareFactory\ + " arrow::flight::MakeTracingServerMiddlewareFactory"() + + cdef cppclass CFlightServerOptions" arrow::flight::FlightServerOptions": + CFlightServerOptions(const CLocation& location) + CLocation location + unique_ptr[CServerAuthHandler] auth_handler + vector[CCertKeyPair] tls_certificates + c_bool verify_client + c_string root_certificates + vector[pair[c_string, shared_ptr[CServerMiddlewareFactory]]] middleware + + cdef cppclass CFlightClientOptions" arrow::flight::FlightClientOptions": + c_string tls_root_certs + c_string cert_chain + c_string private_key + c_string override_hostname + vector[shared_ptr[CClientMiddlewareFactory]] middleware + int64_t write_size_limit_bytes + vector[pair[c_string, CIntStringVariant]] generic_options + c_bool disable_server_verification + + @staticmethod + CFlightClientOptions Defaults() + + cdef cppclass CDoPutResult" arrow::flight::FlightClient::DoPutResult": + unique_ptr[CFlightStreamWriter] writer + unique_ptr[CFlightMetadataReader] reader + + cdef cppclass CDoExchangeResult" arrow::flight::FlightClient::DoExchangeResult": + unique_ptr[CFlightStreamWriter] writer + unique_ptr[CFlightStreamReader] reader + + cdef cppclass CFlightClient" arrow::flight::FlightClient": + @staticmethod + CResult[unique_ptr[CFlightClient]] Connect(const CLocation& location, + const CFlightClientOptions& options) + + c_bool supports_async() + CStatus CheckAsyncSupport() + + CStatus Authenticate(CFlightCallOptions& options, + unique_ptr[CClientAuthHandler] auth_handler) + + CResult[pair[c_string, c_string]] AuthenticateBasicToken( + CFlightCallOptions& options, + const c_string& username, + const c_string& password) + + CResult[unique_ptr[CResultStream]] DoAction(CFlightCallOptions& options, CAction& action) + CResult[vector[CActionType]] ListActions(CFlightCallOptions& options) + + CResult[unique_ptr[CFlightListing]] ListFlights(CFlightCallOptions& options, CCriteria criteria) + CResult[unique_ptr[CFlightInfo]] GetFlightInfo(CFlightCallOptions& options, + CFlightDescriptor& descriptor) + CFuture[CFlightInfo] GetFlightInfoAsync(CFlightCallOptions& options, + CFlightDescriptor& descriptor) + CResult[unique_ptr[CSchemaResult]] GetSchema(CFlightCallOptions& options, + CFlightDescriptor& descriptor) + CResult[unique_ptr[CFlightStreamReader]] DoGet(CFlightCallOptions& options, CTicket& ticket) + CResult[CDoPutResult] DoPut(CFlightCallOptions& options, + CFlightDescriptor& descriptor, + shared_ptr[CSchema]& schema) + CResult[CDoExchangeResult] DoExchange(CFlightCallOptions& options, + CFlightDescriptor& descriptor) + CStatus Close() + + cdef cppclass CFlightStatusCode" arrow::flight::FlightStatusCode": + bint operator==(CFlightStatusCode) + + CFlightStatusCode CFlightStatusInternal \ + " arrow::flight::FlightStatusCode::Internal" + CFlightStatusCode CFlightStatusTimedOut \ + " arrow::flight::FlightStatusCode::TimedOut" + CFlightStatusCode CFlightStatusCancelled \ + " arrow::flight::FlightStatusCode::Cancelled" + CFlightStatusCode CFlightStatusUnauthenticated \ + " arrow::flight::FlightStatusCode::Unauthenticated" + CFlightStatusCode CFlightStatusUnauthorized \ + " arrow::flight::FlightStatusCode::Unauthorized" + CFlightStatusCode CFlightStatusUnavailable \ + " arrow::flight::FlightStatusCode::Unavailable" + CFlightStatusCode CFlightStatusFailed \ + " arrow::flight::FlightStatusCode::Failed" + + cdef cppclass FlightStatusDetail" arrow::flight::FlightStatusDetail": + CFlightStatusCode code() + c_string extra_info() + + @staticmethod + 
shared_ptr[FlightStatusDetail] UnwrapStatus(const CStatus& status) + + cdef cppclass FlightWriteSizeStatusDetail\ + " arrow::flight::FlightWriteSizeStatusDetail": + int64_t limit() + int64_t actual() + + @staticmethod + shared_ptr[FlightWriteSizeStatusDetail] UnwrapStatus( + const CStatus& status) + + cdef CStatus MakeFlightError" arrow::flight::MakeFlightError" \ + (CFlightStatusCode code, const c_string& message) + + cdef CStatus MakeFlightError" arrow::flight::MakeFlightError" \ + (CFlightStatusCode code, + const c_string& message, + const c_string& extra_info) + +# Callbacks for implementing Flight servers +# Use typedef to emulate syntax for std::function +ctypedef CStatus cb_list_flights(object, const CServerCallContext&, + const CCriteria*, + unique_ptr[CFlightListing]*) +ctypedef CStatus cb_get_flight_info(object, const CServerCallContext&, + const CFlightDescriptor&, + unique_ptr[CFlightInfo]*) +ctypedef CStatus cb_get_schema(object, const CServerCallContext&, + const CFlightDescriptor&, + unique_ptr[CSchemaResult]*) +ctypedef CStatus cb_do_put(object, const CServerCallContext&, + unique_ptr[CFlightMessageReader], + unique_ptr[CFlightMetadataWriter]) +ctypedef CStatus cb_do_get(object, const CServerCallContext&, + const CTicket&, + unique_ptr[CFlightDataStream]*) +ctypedef CStatus cb_do_exchange(object, const CServerCallContext&, + unique_ptr[CFlightMessageReader], + unique_ptr[CFlightMessageWriter]) +ctypedef CStatus cb_do_action(object, const CServerCallContext&, + const CAction&, + unique_ptr[CResultStream]*) +ctypedef CStatus cb_list_actions(object, const CServerCallContext&, + vector[CActionType]*) +ctypedef CStatus cb_result_next(object, unique_ptr[CFlightResult]*) +ctypedef CStatus cb_data_stream_next(object, CFlightPayload*) +ctypedef CStatus cb_server_authenticate(object, CServerAuthSender*, + CServerAuthReader*) +ctypedef CStatus cb_is_valid(object, const c_string&, c_string*) +ctypedef CStatus cb_client_authenticate(object, CClientAuthSender*, + CClientAuthReader*) +ctypedef CStatus cb_get_token(object, c_string*) + +ctypedef CStatus cb_middleware_sending_headers(object, CAddCallHeaders*) +ctypedef CStatus cb_middleware_call_completed(object, const CStatus&) +ctypedef CStatus cb_client_middleware_received_headers( + object, const CCallHeaders&) +ctypedef CStatus cb_server_middleware_start_call( + object, + const CCallInfo&, + const CCallHeaders&, + shared_ptr[CServerMiddleware]*) +ctypedef CStatus cb_client_middleware_start_call( + object, + const CCallInfo&, + unique_ptr[CClientMiddleware]*) + +cdef extern from "arrow/python/flight.h" namespace "arrow::py::flight" nogil: + cdef char* CPyServerMiddlewareName\ + " arrow::py::flight::kPyServerMiddlewareName" + + cdef cppclass PyFlightServerVtable: + PyFlightServerVtable() + function[cb_list_flights] list_flights + function[cb_get_flight_info] get_flight_info + function[cb_get_schema] get_schema + function[cb_do_put] do_put + function[cb_do_get] do_get + function[cb_do_exchange] do_exchange + function[cb_do_action] do_action + function[cb_list_actions] list_actions + + cdef cppclass PyServerAuthHandlerVtable: + PyServerAuthHandlerVtable() + function[cb_server_authenticate] authenticate + function[cb_is_valid] is_valid + + cdef cppclass PyClientAuthHandlerVtable: + PyClientAuthHandlerVtable() + function[cb_client_authenticate] authenticate + function[cb_get_token] get_token + + cdef cppclass PyFlightServer: + PyFlightServer(object server, PyFlightServerVtable vtable) + + CStatus Init(CFlightServerOptions& options) + int 
port() + CStatus ServeWithSignals() except * + CStatus Shutdown() + CStatus Wait() + + cdef cppclass PyServerAuthHandler\ + " arrow::py::flight::PyServerAuthHandler"(CServerAuthHandler): + PyServerAuthHandler(object handler, PyServerAuthHandlerVtable vtable) + + cdef cppclass PyClientAuthHandler\ + " arrow::py::flight::PyClientAuthHandler"(CClientAuthHandler): + PyClientAuthHandler(object handler, PyClientAuthHandlerVtable vtable) + + cdef cppclass CPyFlightResultStream\ + " arrow::py::flight::PyFlightResultStream"(CResultStream): + CPyFlightResultStream(object generator, + function[cb_result_next] callback) + + cdef cppclass CPyFlightDataStream\ + " arrow::py::flight::PyFlightDataStream"(CFlightDataStream): + CPyFlightDataStream(object data_source, + unique_ptr[CFlightDataStream] stream) + + cdef cppclass CPyGeneratorFlightDataStream\ + " arrow::py::flight::PyGeneratorFlightDataStream"\ + (CFlightDataStream): + CPyGeneratorFlightDataStream(object generator, + shared_ptr[CSchema] schema, + function[cb_data_stream_next] callback, + const CIpcWriteOptions& options) + + cdef cppclass PyServerMiddlewareVtable\ + " arrow::py::flight::PyServerMiddleware::Vtable": + PyServerMiddlewareVtable() + function[cb_middleware_sending_headers] sending_headers + function[cb_middleware_call_completed] call_completed + + cdef cppclass PyClientMiddlewareVtable\ + " arrow::py::flight::PyClientMiddleware::Vtable": + PyClientMiddlewareVtable() + function[cb_middleware_sending_headers] sending_headers + function[cb_client_middleware_received_headers] received_headers + function[cb_middleware_call_completed] call_completed + + cdef cppclass CPyServerMiddleware\ + " arrow::py::flight::PyServerMiddleware"(CServerMiddleware): + CPyServerMiddleware(object middleware, PyServerMiddlewareVtable vtable) + void* py_object() + + cdef cppclass CPyServerMiddlewareFactory\ + " arrow::py::flight::PyServerMiddlewareFactory"\ + (CServerMiddlewareFactory): + CPyServerMiddlewareFactory( + object factory, + function[cb_server_middleware_start_call] start_call) + + cdef cppclass CPyClientMiddleware\ + " arrow::py::flight::PyClientMiddleware"(CClientMiddleware): + CPyClientMiddleware(object middleware, PyClientMiddlewareVtable vtable) + + cdef cppclass CPyClientMiddlewareFactory\ + " arrow::py::flight::PyClientMiddlewareFactory"\ + (CClientMiddlewareFactory): + CPyClientMiddlewareFactory( + object factory, + function[cb_client_middleware_start_call] start_call) + + cdef CStatus CreateFlightInfo" arrow::py::flight::CreateFlightInfo"( + shared_ptr[CSchema] schema, + CFlightDescriptor& descriptor, + vector[CFlightEndpoint] endpoints, + int64_t total_records, + int64_t total_bytes, + unique_ptr[CFlightInfo]* out) + + cdef CStatus CreateSchemaResult" arrow::py::flight::CreateSchemaResult"( + shared_ptr[CSchema] schema, + unique_ptr[CSchemaResult]* out) + + +cdef extern from "<variant>" namespace "std" nogil: + cdef cppclass CIntStringVariant" std::variant<int, std::string>": + CIntStringVariant() + CIntStringVariant(int) + CIntStringVariant(c_string) diff --git a/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_fs.pxd b/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_fs.pxd new file mode 100644 index 0000000000000000000000000000000000000000..328b426a498db70bb5b1fca5765f8d0220559ccc --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_fs.pxd @@ -0,0 +1,366 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_python cimport CTimePoint + +cdef extern from "arrow/filesystem/api.h" namespace "arrow::fs" nogil: + + ctypedef enum CFileType "arrow::fs::FileType": + CFileType_NotFound "arrow::fs::FileType::NotFound" + CFileType_Unknown "arrow::fs::FileType::Unknown" + CFileType_File "arrow::fs::FileType::File" + CFileType_Directory "arrow::fs::FileType::Directory" + + cdef cppclass CFileInfo "arrow::fs::FileInfo": + CFileInfo() + CFileInfo(CFileInfo) + CFileInfo& operator=(CFileInfo) + CFileInfo(const CFileInfo&) + CFileInfo& operator=(const CFileInfo&) + + CFileType type() + void set_type(CFileType type) + c_string path() + void set_path(const c_string& path) + c_string base_name() + int64_t size() + void set_size(int64_t size) + c_string extension() + CTimePoint mtime() + void set_mtime(CTimePoint mtime) + + cdef cppclass CFileSelector "arrow::fs::FileSelector": + CFileSelector() + c_string base_dir + c_bool allow_not_found + c_bool recursive + + cdef cppclass CFileLocator "arrow::fs::FileLocator": + shared_ptr[CFileSystem] filesystem + c_string path + + cdef cppclass CFileSystem "arrow::fs::FileSystem": + shared_ptr[CFileSystem] shared_from_this() + c_string type_name() const + CResult[c_string] NormalizePath(c_string path) + CResult[CFileInfo] GetFileInfo(const c_string& path) + CResult[vector[CFileInfo]] GetFileInfo( + const vector[c_string]& paths) + CResult[vector[CFileInfo]] GetFileInfo(const CFileSelector& select) + CStatus CreateDir(const c_string& path, c_bool recursive) + CStatus DeleteDir(const c_string& path) + CStatus DeleteDirContents(const c_string& path, c_bool missing_dir_ok) + CStatus DeleteRootDirContents() + CStatus DeleteFile(const c_string& path) + CStatus DeleteFiles(const vector[c_string]& paths) + CStatus Move(const c_string& src, const c_string& dest) + CStatus CopyFile(const c_string& src, const c_string& dest) + CResult[shared_ptr[CInputStream]] OpenInputStream( + const c_string& path) + CResult[shared_ptr[CRandomAccessFile]] OpenInputFile( + const c_string& path) + CResult[shared_ptr[COutputStream]] OpenOutputStream( + const c_string& path, const shared_ptr[const CKeyValueMetadata]&) + CResult[shared_ptr[COutputStream]] OpenAppendStream( + const c_string& path, const shared_ptr[const CKeyValueMetadata]&) + c_bool Equals(const CFileSystem& other) + c_bool Equals(shared_ptr[CFileSystem] other) + + CResult[shared_ptr[CFileSystem]] CFileSystemFromUri \ + "arrow::fs::FileSystemFromUri"(const c_string& uri, c_string* out_path) + CResult[shared_ptr[CFileSystem]] CFileSystemFromUriOrPath \ + "arrow::fs::FileSystemFromUriOrPath"(const c_string& uri, + c_string* out_path) + + cdef cppclass CFileSystemGlobalOptions \ + "arrow::fs::FileSystemGlobalOptions": + 
c_string tls_ca_file_path + c_string tls_ca_dir_path + + CStatus CFileSystemsInitialize "arrow::fs::Initialize" \ + (const CFileSystemGlobalOptions& options) + + cdef cppclass CLocalFileSystemOptions "arrow::fs::LocalFileSystemOptions": + c_bool use_mmap + + @staticmethod + CLocalFileSystemOptions Defaults() + + c_bool Equals(const CLocalFileSystemOptions& other) + + cdef cppclass CLocalFileSystem "arrow::fs::LocalFileSystem"(CFileSystem): + CLocalFileSystem() + CLocalFileSystem(CLocalFileSystemOptions) + CLocalFileSystemOptions options() + + cdef cppclass CSubTreeFileSystem \ + "arrow::fs::SubTreeFileSystem"(CFileSystem): + CSubTreeFileSystem(const c_string& base_path, + shared_ptr[CFileSystem] base_fs) + c_string base_path() + shared_ptr[CFileSystem] base_fs() + + ctypedef enum CS3LogLevel "arrow::fs::S3LogLevel": + CS3LogLevel_Off "arrow::fs::S3LogLevel::Off" + CS3LogLevel_Fatal "arrow::fs::S3LogLevel::Fatal" + CS3LogLevel_Error "arrow::fs::S3LogLevel::Error" + CS3LogLevel_Warn "arrow::fs::S3LogLevel::Warn" + CS3LogLevel_Info "arrow::fs::S3LogLevel::Info" + CS3LogLevel_Debug "arrow::fs::S3LogLevel::Debug" + CS3LogLevel_Trace "arrow::fs::S3LogLevel::Trace" + + cdef struct CS3GlobalOptions "arrow::fs::S3GlobalOptions": + CS3LogLevel log_level + int num_event_loop_threads + + cdef cppclass CS3ProxyOptions "arrow::fs::S3ProxyOptions": + c_string scheme + c_string host + int port + c_string username + c_string password + c_bool Equals(const CS3ProxyOptions& other) + + @staticmethod + CResult[CS3ProxyOptions] FromUriString "FromUri"( + const c_string& uri_string) + + ctypedef enum CS3CredentialsKind "arrow::fs::S3CredentialsKind": + CS3CredentialsKind_Anonymous "arrow::fs::S3CredentialsKind::Anonymous" + CS3CredentialsKind_Default "arrow::fs::S3CredentialsKind::Default" + CS3CredentialsKind_Explicit "arrow::fs::S3CredentialsKind::Explicit" + CS3CredentialsKind_Role "arrow::fs::S3CredentialsKind::Role" + CS3CredentialsKind_WebIdentity \ + "arrow::fs::S3CredentialsKind::WebIdentity" + + cdef cppclass CS3RetryStrategy "arrow::fs::S3RetryStrategy": + @staticmethod + shared_ptr[CS3RetryStrategy] GetAwsDefaultRetryStrategy(int64_t max_attempts) + + @staticmethod + shared_ptr[CS3RetryStrategy] GetAwsStandardRetryStrategy(int64_t max_attempts) + + cdef cppclass CS3Options "arrow::fs::S3Options": + c_string region + double connect_timeout + double request_timeout + c_string endpoint_override + c_string scheme + c_bool background_writes + c_bool allow_bucket_creation + c_bool allow_bucket_deletion + c_bool force_virtual_addressing + shared_ptr[const CKeyValueMetadata] default_metadata + c_string role_arn + c_string session_name + c_string external_id + int load_frequency + CS3ProxyOptions proxy_options + CS3CredentialsKind credentials_kind + shared_ptr[CS3RetryStrategy] retry_strategy + void ConfigureDefaultCredentials() + void ConfigureAccessKey(const c_string& access_key, + const c_string& secret_key, + const c_string& session_token) + c_string GetAccessKey() + c_string GetSecretKey() + c_string GetSessionToken() + c_bool Equals(const CS3Options& other) + + @staticmethod + CS3Options Defaults() + + @staticmethod + CS3Options Anonymous() + + @staticmethod + CS3Options FromAccessKey(const c_string& access_key, + const c_string& secret_key, + const c_string& session_token) + + @staticmethod + CS3Options FromAssumeRole(const c_string& role_arn, + const c_string& session_name, + const c_string& external_id, + const int load_frequency) + + cdef cppclass CS3FileSystem "arrow::fs::S3FileSystem"(CFileSystem): 
+ @staticmethod + CResult[shared_ptr[CS3FileSystem]] Make(const CS3Options& options) + CS3Options options() + c_string region() + + cdef CStatus CInitializeS3 "arrow::fs::InitializeS3"( + const CS3GlobalOptions& options) + cdef CStatus CEnsureS3Initialized "arrow::fs::EnsureS3Initialized"() + cdef CStatus CFinalizeS3 "arrow::fs::FinalizeS3"() + cdef CStatus CEnsureS3Finalized "arrow::fs::EnsureS3Finalized"() + + cdef CResult[c_string] ResolveS3BucketRegion(const c_string& bucket) + + cdef cppclass CGcsCredentials "arrow::fs::GcsCredentials": + c_bool anonymous() + CTimePoint expiration() + c_string access_token() + c_string target_service_account() + + cdef cppclass CGcsOptions "arrow::fs::GcsOptions": + CGcsCredentials credentials + c_string endpoint_override + c_string scheme + c_string default_bucket_location + optional[c_string] project_id + optional[double] retry_limit_seconds + shared_ptr[const CKeyValueMetadata] default_metadata + c_bool Equals(const CGcsOptions& other) + + @staticmethod + CGcsOptions Defaults() + + @staticmethod + CGcsOptions Anonymous() + + @staticmethod + CGcsOptions FromAccessToken(const c_string& access_token, + CTimePoint expiration) + + @staticmethod + CGcsOptions FromImpersonatedServiceAccount(const CGcsCredentials& base_credentials, + c_string& target_service_account) + + cdef cppclass CGcsFileSystem "arrow::fs::GcsFileSystem": + @staticmethod + CResult[shared_ptr[CGcsFileSystem]] Make(const CGcsOptions& options) + CGcsOptions options() + + cdef cppclass CAzureOptions "arrow::fs::AzureOptions": + c_string account_name + c_string blob_storage_authority + c_string dfs_storage_authority + c_string blob_storage_scheme + c_string dfs_storage_scheme + + c_bool Equals(const CAzureOptions& other) + CStatus ConfigureDefaultCredential() + CStatus ConfigureAccountKeyCredential(c_string account_key) + + cdef cppclass CAzureFileSystem "arrow::fs::AzureFileSystem": + @staticmethod + CResult[shared_ptr[CAzureFileSystem]] Make(const CAzureOptions& options) + CAzureOptions options() + + cdef cppclass CHdfsOptions "arrow::fs::HdfsOptions": + HdfsConnectionConfig connection_config + int32_t buffer_size + int16_t replication + int64_t default_block_size + + @staticmethod + CResult[CHdfsOptions] FromUriString "FromUri"( + const c_string& uri_string) + void ConfigureEndPoint(c_string host, int port) + void ConfigureDriver(c_bool use_hdfs3) + void ConfigureReplication(int16_t replication) + void ConfigureUser(c_string user_name) + void ConfigureBufferSize(int32_t buffer_size) + void ConfigureBlockSize(int64_t default_block_size) + void ConfigureKerberosTicketCachePath(c_string path) + void ConfigureExtraConf(c_string key, c_string value) + + cdef cppclass CHadoopFileSystem "arrow::fs::HadoopFileSystem"(CFileSystem): + @staticmethod + CResult[shared_ptr[CHadoopFileSystem]] Make( + const CHdfsOptions& options) + CHdfsOptions options() + + cdef cppclass CMockFileSystem "arrow::fs::internal::MockFileSystem"( + CFileSystem): + CMockFileSystem(CTimePoint current_time) + + CStatus CCopyFiles "arrow::fs::CopyFiles"( + const vector[CFileLocator]& sources, + const vector[CFileLocator]& destinations, + const CIOContext& io_context, + int64_t chunk_size, c_bool use_threads) + CStatus CCopyFilesWithSelector "arrow::fs::CopyFiles"( + const shared_ptr[CFileSystem]& source_fs, + const CFileSelector& source_sel, + const shared_ptr[CFileSystem]& destination_fs, + const c_string& destination_base_dir, + const CIOContext& io_context, + int64_t chunk_size, c_bool use_threads) + + +# Callbacks for 
implementing Python filesystems +# Use typedef to emulate syntax for std::function +ctypedef void CallbackGetTypeName(object, c_string*) +ctypedef c_bool CallbackEquals(object, const CFileSystem&) + +ctypedef void CallbackGetFileInfo(object, const c_string&, CFileInfo*) +ctypedef void CallbackGetFileInfoVector(object, const vector[c_string]&, + vector[CFileInfo]*) +ctypedef void CallbackGetFileInfoSelector(object, const CFileSelector&, + vector[CFileInfo]*) +ctypedef void CallbackCreateDir(object, const c_string&, c_bool) +ctypedef void CallbackDeleteDir(object, const c_string&) +ctypedef void CallbackDeleteDirContents(object, const c_string&, c_bool) +ctypedef void CallbackDeleteRootDirContents(object) +ctypedef void CallbackDeleteFile(object, const c_string&) +ctypedef void CallbackMove(object, const c_string&, const c_string&) +ctypedef void CallbackCopyFile(object, const c_string&, const c_string&) + +ctypedef void CallbackOpenInputStream(object, const c_string&, + shared_ptr[CInputStream]*) +ctypedef void CallbackOpenInputFile(object, const c_string&, + shared_ptr[CRandomAccessFile]*) +ctypedef void CallbackOpenOutputStream( + object, const c_string&, const shared_ptr[const CKeyValueMetadata]&, + shared_ptr[COutputStream]*) +ctypedef void CallbackNormalizePath(object, const c_string&, c_string*) + +cdef extern from "arrow/python/filesystem.h" namespace "arrow::py::fs" nogil: + + cdef cppclass CPyFileSystemVtable "arrow::py::fs::PyFileSystemVtable": + CPyFileSystemVtable() + function[CallbackGetTypeName] get_type_name + function[CallbackEquals] equals + function[CallbackGetFileInfo] get_file_info + function[CallbackGetFileInfoVector] get_file_info_vector + function[CallbackGetFileInfoSelector] get_file_info_selector + function[CallbackCreateDir] create_dir + function[CallbackDeleteDir] delete_dir + function[CallbackDeleteDirContents] delete_dir_contents + function[CallbackDeleteRootDirContents] delete_root_dir_contents + function[CallbackDeleteFile] delete_file + function[CallbackMove] move + function[CallbackCopyFile] copy_file + function[CallbackOpenInputStream] open_input_stream + function[CallbackOpenInputFile] open_input_file + function[CallbackOpenOutputStream] open_output_stream + function[CallbackOpenOutputStream] open_append_stream + function[CallbackNormalizePath] normalize_path + + cdef cppclass CPyFileSystem "arrow::py::fs::PyFileSystem": + @staticmethod + shared_ptr[CPyFileSystem] Make(object handler, + CPyFileSystemVtable vtable) + + PyObject* handler() diff --git a/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_python.pxd b/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_python.pxd new file mode 100644 index 0000000000000000000000000000000000000000..136d6bc8b14cd7826cd51f46ea97bf325180e738 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_python.pxd @@ -0,0 +1,319 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * + + +ctypedef CInvalidRowResult PyInvalidRowCallback(object, + const CCSVInvalidRow&) + + +cdef extern from "arrow/python/csv.h" namespace "arrow::py::csv": + + function[CInvalidRowHandler] MakeInvalidRowHandler( + function[PyInvalidRowCallback], object handler) + + +cdef extern from "arrow/python/api.h" namespace "arrow::py": + # Requires GIL + CResult[shared_ptr[CDataType]] InferArrowType( + object obj, object mask, c_bool pandas_null_sentinels) + + +cdef extern from "arrow/python/api.h" namespace "arrow::py::internal": + object NewMonthDayNanoTupleType() + CResult[PyObject*] MonthDayNanoIntervalArrayToPyList( + const CMonthDayNanoIntervalArray& array) + CResult[PyObject*] MonthDayNanoIntervalScalarToPyObject( + const CMonthDayNanoIntervalScalar& scalar) + + +cdef extern from "arrow/python/arrow_to_pandas.h" namespace "arrow::py::MapConversionType": + cdef enum MapConversionType "arrow::py::MapConversionType": + DEFAULT, + LOSSY, + STRICT_ + + +cdef extern from "arrow/python/api.h" namespace "arrow::py" nogil: + shared_ptr[CDataType] GetPrimitiveType(Type type) + + object PyHalf_FromHalf(npy_half value) + + cdef cppclass PyConversionOptions: + PyConversionOptions() + + shared_ptr[CDataType] type + int64_t size + CMemoryPool* pool + c_bool from_pandas + c_bool ignore_timezone + c_bool strict + + # TODO Some functions below are not actually "nogil" + + CResult[shared_ptr[CChunkedArray]] ConvertPySequence( + object obj, object mask, const PyConversionOptions& options, + CMemoryPool* pool) + + CResult[shared_ptr[CDataType]] NumPyDtypeToArrow(object dtype) + + CStatus NdarrayToArrow(CMemoryPool* pool, object ao, object mo, + c_bool from_pandas, + const shared_ptr[CDataType]& type, + shared_ptr[CChunkedArray]* out) + + CStatus NdarrayToArrow(CMemoryPool* pool, object ao, object mo, + c_bool from_pandas, + const shared_ptr[CDataType]& type, + const CCastOptions& cast_options, + shared_ptr[CChunkedArray]* out) + + CStatus NdarrayToTensor(CMemoryPool* pool, object ao, + const vector[c_string]& dim_names, + shared_ptr[CTensor]* out) + + CStatus TensorToNdarray(const shared_ptr[CTensor]& tensor, object base, + PyObject** out) + + CStatus SparseCOOTensorToNdarray( + const shared_ptr[CSparseCOOTensor]& sparse_tensor, object base, + PyObject** out_data, PyObject** out_coords) + + CStatus SparseCSRMatrixToNdarray( + const shared_ptr[CSparseCSRMatrix]& sparse_tensor, object base, + PyObject** out_data, PyObject** out_indptr, PyObject** out_indices) + + CStatus SparseCSCMatrixToNdarray( + const shared_ptr[CSparseCSCMatrix]& sparse_tensor, object base, + PyObject** out_data, PyObject** out_indptr, PyObject** out_indices) + + CStatus SparseCSFTensorToNdarray( + const shared_ptr[CSparseCSFTensor]& sparse_tensor, object base, + PyObject** out_data, PyObject** out_indptr, PyObject** out_indices) + + CStatus NdarraysToSparseCOOTensor(CMemoryPool* pool, object data_ao, + object coords_ao, + const vector[int64_t]& shape, + const vector[c_string]& dim_names, + shared_ptr[CSparseCOOTensor]* 
out) + + CStatus NdarraysToSparseCSRMatrix(CMemoryPool* pool, object data_ao, + object indptr_ao, object indices_ao, + const vector[int64_t]& shape, + const vector[c_string]& dim_names, + shared_ptr[CSparseCSRMatrix]* out) + + CStatus NdarraysToSparseCSCMatrix(CMemoryPool* pool, object data_ao, + object indptr_ao, object indices_ao, + const vector[int64_t]& shape, + const vector[c_string]& dim_names, + shared_ptr[CSparseCSCMatrix]* out) + + CStatus NdarraysToSparseCSFTensor(CMemoryPool* pool, object data_ao, + object indptr_ao, object indices_ao, + const vector[int64_t]& shape, + const vector[int64_t]& axis_order, + const vector[c_string]& dim_names, + shared_ptr[CSparseCSFTensor]* out) + + CStatus TensorToSparseCOOTensor(shared_ptr[CTensor], + shared_ptr[CSparseCOOTensor]* out) + + CStatus TensorToSparseCSRMatrix(shared_ptr[CTensor], + shared_ptr[CSparseCSRMatrix]* out) + + CStatus TensorToSparseCSCMatrix(shared_ptr[CTensor], + shared_ptr[CSparseCSCMatrix]* out) + + CStatus TensorToSparseCSFTensor(shared_ptr[CTensor], + shared_ptr[CSparseCSFTensor]* out) + + CStatus ConvertArrayToPandas(const PandasOptions& options, + shared_ptr[CArray] arr, + object py_ref, PyObject** out) + + CStatus ConvertChunkedArrayToPandas(const PandasOptions& options, + shared_ptr[CChunkedArray] arr, + object py_ref, PyObject** out) + + CStatus ConvertTableToPandas(const PandasOptions& options, + shared_ptr[CTable] table, + PyObject** out) + + void c_set_default_memory_pool \ + " arrow::py::set_default_memory_pool"(CMemoryPool* pool)\ + + CMemoryPool* c_get_memory_pool \ + " arrow::py::get_memory_pool"() + + cdef cppclass PyBuffer(CBuffer): + @staticmethod + CResult[shared_ptr[CBuffer]] FromPyObject(object obj) + + cdef cppclass PyForeignBuffer(CBuffer): + @staticmethod + CStatus Make(const uint8_t* data, int64_t size, object base, + shared_ptr[CBuffer]* out) + + cdef cppclass PyReadableFile(CRandomAccessFile): + PyReadableFile(object fo) + + cdef cppclass PyOutputStream(COutputStream): + PyOutputStream(object fo) + + cdef cppclass PandasOptions: + CMemoryPool* pool + c_bool strings_to_categorical + c_bool zero_copy_only + c_bool integer_object_nulls + c_bool date_as_object + c_bool timestamp_as_object + c_bool use_threads + c_bool coerce_temporal_nanoseconds + c_bool ignore_timezone + c_bool deduplicate_objects + c_bool safe_cast + c_bool split_blocks + c_bool self_destruct + MapConversionType maps_as_pydicts + c_bool decode_dictionaries + unordered_set[c_string] categorical_columns + unordered_set[c_string] extension_columns + c_bool to_numpy + + cdef cppclass CSerializedPyObject" arrow::py::SerializedPyObject": + shared_ptr[CRecordBatch] batch + vector[shared_ptr[CTensor]] tensors + + CStatus WriteTo(COutputStream* dst) + CStatus GetComponents(CMemoryPool* pool, PyObject** dst) + + CStatus SerializeObject(object context, object sequence, + CSerializedPyObject* out) + + CStatus DeserializeObject(object context, + const CSerializedPyObject& obj, + PyObject* base, PyObject** out) + + CStatus ReadSerializedObject(CRandomAccessFile* src, + CSerializedPyObject* out) + + cdef cppclass SparseTensorCounts: + SparseTensorCounts() + int coo + int csr + int csc + int csf + int ndim_csf + int num_total_tensors() const + int num_total_buffers() const + + CStatus GetSerializedFromComponents( + int num_tensors, + const SparseTensorCounts& num_sparse_tensors, + int num_ndarrays, + int num_buffers, + object buffers, + CSerializedPyObject* out) + + +cdef extern from "arrow/python/api.h" namespace "arrow::py::internal" nogil: + 
cdef cppclass CTimePoint "arrow::py::internal::TimePoint": + pass + + CTimePoint PyDateTime_to_TimePoint(PyDateTime_DateTime* pydatetime) + int64_t TimePoint_to_ns(CTimePoint val) + CTimePoint TimePoint_from_s(double val) + CTimePoint TimePoint_from_ns(int64_t val) + + CResult[c_string] TzinfoToString(PyObject* pytzinfo) + CResult[PyObject*] StringToTzinfo(c_string) + + +cdef extern from "arrow/python/init.h": + int arrow_init_numpy() except -1 + + +cdef extern from "arrow/python/pyarrow.h" namespace "arrow::py": + int import_pyarrow() except -1 + + +cdef extern from "arrow/python/common.h" namespace "arrow::py": + c_bool IsPyError(const CStatus& status) + void RestorePyError(const CStatus& status) except * + + +cdef extern from "arrow/python/common.h" namespace "arrow::py" nogil: + cdef cppclass SharedPtrNoGIL[T](shared_ptr[T]): + # This looks like the only way to satisfy both Cython 2 and Cython 3 + SharedPtrNoGIL& operator=(...) + cdef cppclass UniquePtrNoGIL[T, DELETER=*](unique_ptr[T, DELETER]): + UniquePtrNoGIL& operator=(...) + + +cdef extern from "arrow/python/inference.h" namespace "arrow::py": + c_bool IsPyBool(object o) + c_bool IsPyInt(object o) + c_bool IsPyFloat(object o) + + +cdef extern from "arrow/python/ipc.h" namespace "arrow::py": + cdef cppclass CPyRecordBatchReader" arrow::py::PyRecordBatchReader" \ + (CRecordBatchReader): + @staticmethod + CResult[shared_ptr[CRecordBatchReader]] Make(shared_ptr[CSchema], + object) + + +cdef extern from "arrow/python/ipc.h" namespace "arrow::py" nogil: + cdef cppclass CCastingRecordBatchReader" arrow::py::CastingRecordBatchReader" \ + (CRecordBatchReader): + @staticmethod + CResult[shared_ptr[CRecordBatchReader]] Make(shared_ptr[CRecordBatchReader], + shared_ptr[CSchema]) + + +cdef extern from "arrow/python/extension_type.h" namespace "arrow::py": + cdef cppclass CPyExtensionType \ + " arrow::py::PyExtensionType"(CExtensionType): + @staticmethod + CStatus FromClass(const shared_ptr[CDataType] storage_type, + const c_string extension_name, object typ, + shared_ptr[CExtensionType]* out) + + @staticmethod + CStatus FromInstance(shared_ptr[CDataType] storage_type, + object inst, shared_ptr[CExtensionType]* out) + + object GetInstance() + CStatus SetInstance(object) + + c_string PyExtensionName() + CStatus RegisterPyExtensionType(shared_ptr[CDataType]) + CStatus UnregisterPyExtensionType(c_string type_name) + + +cdef extern from "arrow/python/benchmark.h" namespace "arrow::py::benchmark": + void Benchmark_PandasObjectIsNull(object lst) except * + + +cdef extern from "arrow/python/gdb.h" namespace "arrow::gdb" nogil: + void GdbTestSession "arrow::gdb::TestSession"() diff --git a/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_substrait.pxd b/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_substrait.pxd new file mode 100644 index 0000000000000000000000000000000000000000..c41f4c05d3a77ca4a3ba163b27d9df9f9f234767 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/includes/libarrow_substrait.pxd @@ -0,0 +1,77 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from libcpp.vector cimport vector as std_vector + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_acero cimport * + +ctypedef CResult[CDeclaration] CNamedTableProvider(const std_vector[c_string]&, const CSchema&) + +cdef extern from "arrow/engine/substrait/options.h" namespace "arrow::engine" nogil: + cdef enum ConversionStrictness \ + "arrow::engine::ConversionStrictness": + EXACT_ROUNDTRIP \ + "arrow::engine::ConversionStrictness::EXACT_ROUNDTRIP" + PRESERVE_STRUCTURE \ + "arrow::engine::ConversionStrictness::PRESERVE_STRUCTURE" + BEST_EFFORT \ + "arrow::engine::ConversionStrictness::BEST_EFFORT" + + cdef cppclass CConversionOptions \ + "arrow::engine::ConversionOptions": + CConversionOptions() + ConversionStrictness strictness + function[CNamedTableProvider] named_table_provider + c_bool allow_arrow_extensions + +cdef extern from "arrow/engine/substrait/extension_set.h" \ + namespace "arrow::engine" nogil: + + cdef cppclass ExtensionIdRegistry: + std_vector[c_string] GetSupportedSubstraitFunctions() + + ExtensionIdRegistry* default_extension_id_registry() + +cdef extern from "arrow/engine/substrait/relation.h" namespace "arrow::engine" nogil: + + cdef cppclass CNamedExpression "arrow::engine::NamedExpression": + CExpression expression + c_string name + + cdef cppclass CBoundExpressions "arrow::engine::BoundExpressions": + std_vector[CNamedExpression] named_expressions + shared_ptr[CSchema] schema + +cdef extern from "arrow/engine/substrait/serde.h" namespace "arrow::engine" nogil: + + CResult[shared_ptr[CBuffer]] SerializeExpressions( + const CBoundExpressions& bound_expressions, const CConversionOptions& conversion_options) + + CResult[CBoundExpressions] DeserializeExpressions( + const CBuffer& serialized_expressions) + +cdef extern from "arrow/engine/substrait/util.h" namespace "arrow::engine" nogil: + CResult[shared_ptr[CRecordBatchReader]] ExecuteSerializedPlan( + const CBuffer& substrait_buffer, const ExtensionIdRegistry* registry, + CFunctionRegistry* func_registry, const CConversionOptions& conversion_options, + c_bool use_threads) + + CResult[shared_ptr[CBuffer]] SerializeJsonPlan(const c_string& substrait_json) diff --git a/venv/lib/python3.10/site-packages/pyarrow/includes/libgandiva.pxd b/venv/lib/python3.10/site-packages/pyarrow/includes/libgandiva.pxd new file mode 100644 index 0000000000000000000000000000000000000000..7d76576bef2b9513f53c20c55ec92bb01c8b2766 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/includes/libgandiva.pxd @@ -0,0 +1,298 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from libcpp.string cimport string as c_string +from libcpp.unordered_set cimport unordered_set as c_unordered_set +from libc.stdint cimport int64_t, int32_t, uint8_t, uintptr_t + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * + +cdef extern from "gandiva/node.h" namespace "gandiva" nogil: + + cdef cppclass CNode" gandiva::Node": + c_string ToString() + shared_ptr[CDataType] return_type() + + cdef cppclass CGandivaExpression" gandiva::Expression": + c_string ToString() + shared_ptr[CNode] root() + shared_ptr[CField] result() + + ctypedef vector[shared_ptr[CNode]] CNodeVector" gandiva::NodeVector" + + ctypedef vector[shared_ptr[CGandivaExpression]] \ + CExpressionVector" gandiva::ExpressionVector" + +cdef extern from "gandiva/selection_vector.h" namespace "gandiva" nogil: + + cdef cppclass CSelectionVector" gandiva::SelectionVector": + + shared_ptr[CArray] ToArray() + + enum CSelectionVector_Mode" gandiva::SelectionVector::Mode": + CSelectionVector_Mode_NONE" gandiva::SelectionVector::Mode::MODE_NONE" + CSelectionVector_Mode_UINT16" \ + gandiva::SelectionVector::Mode::MODE_UINT16" + CSelectionVector_Mode_UINT32" \ + gandiva::SelectionVector::Mode::MODE_UINT32" + CSelectionVector_Mode_UINT64" \ + gandiva::SelectionVector::Mode::MODE_UINT64" + + cdef CStatus SelectionVector_MakeInt16\ + "gandiva::SelectionVector::MakeInt16"( + int64_t max_slots, CMemoryPool* pool, + shared_ptr[CSelectionVector]* selection_vector) + + cdef CStatus SelectionVector_MakeInt32\ + "gandiva::SelectionVector::MakeInt32"( + int64_t max_slots, CMemoryPool* pool, + shared_ptr[CSelectionVector]* selection_vector) + + cdef CStatus SelectionVector_MakeInt64\ + "gandiva::SelectionVector::MakeInt64"( + int64_t max_slots, CMemoryPool* pool, + shared_ptr[CSelectionVector]* selection_vector) + +cdef inline CSelectionVector_Mode _ensure_selection_mode(str name) except *: + uppercase = name.upper() + if uppercase == 'NONE': + return CSelectionVector_Mode_NONE + elif uppercase == 'UINT16': + return CSelectionVector_Mode_UINT16 + elif uppercase == 'UINT32': + return CSelectionVector_Mode_UINT32 + elif uppercase == 'UINT64': + return CSelectionVector_Mode_UINT64 + else: + raise ValueError('Invalid value for Selection Mode: {!r}'.format(name)) + +cdef inline str _selection_mode_name(CSelectionVector_Mode ctype): + if ctype == CSelectionVector_Mode_NONE: + return 'NONE' + elif ctype == CSelectionVector_Mode_UINT16: + return 'UINT16' + elif ctype == CSelectionVector_Mode_UINT32: + return 'UINT32' + elif ctype == CSelectionVector_Mode_UINT64: + return 'UINT64' + else: + raise RuntimeError('Unexpected CSelectionVector_Mode value') + +cdef extern from "gandiva/condition.h" namespace "gandiva" nogil: + + cdef cppclass CCondition" gandiva::Condition": + c_string ToString() + shared_ptr[CNode] root() + shared_ptr[CField] result() + +cdef extern from "gandiva/arrow.h" namespace "gandiva" nogil: + + ctypedef vector[shared_ptr[CArray]] CArrayVector" gandiva::ArrayVector" + + +cdef extern from "gandiva/tree_expr_builder.h" namespace "gandiva" nogil: + + cdef 
shared_ptr[CNode] TreeExprBuilder_MakeBoolLiteral \ + "gandiva::TreeExprBuilder::MakeLiteral"(c_bool value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeUInt8Literal \ + "gandiva::TreeExprBuilder::MakeLiteral"(uint8_t value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeUInt16Literal \ + "gandiva::TreeExprBuilder::MakeLiteral"(uint16_t value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeUInt32Literal \ + "gandiva::TreeExprBuilder::MakeLiteral"(uint32_t value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeUInt64Literal \ + "gandiva::TreeExprBuilder::MakeLiteral"(uint64_t value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInt8Literal \ + "gandiva::TreeExprBuilder::MakeLiteral"(int8_t value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInt16Literal \ + "gandiva::TreeExprBuilder::MakeLiteral"(int16_t value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInt32Literal \ + "gandiva::TreeExprBuilder::MakeLiteral"(int32_t value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInt64Literal \ + "gandiva::TreeExprBuilder::MakeLiteral"(int64_t value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeFloatLiteral \ + "gandiva::TreeExprBuilder::MakeLiteral"(float value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeDoubleLiteral \ + "gandiva::TreeExprBuilder::MakeLiteral"(double value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeStringLiteral \ + "gandiva::TreeExprBuilder::MakeStringLiteral"(const c_string& value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeBinaryLiteral \ + "gandiva::TreeExprBuilder::MakeBinaryLiteral"(const c_string& value) + + cdef shared_ptr[CGandivaExpression] TreeExprBuilder_MakeExpression\ + "gandiva::TreeExprBuilder::MakeExpression"( + shared_ptr[CNode] root_node, shared_ptr[CField] result_field) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeFunction \ + "gandiva::TreeExprBuilder::MakeFunction"( + const c_string& name, const CNodeVector& children, + shared_ptr[CDataType] return_type) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeField \ + "gandiva::TreeExprBuilder::MakeField"(shared_ptr[CField] field) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeIf \ + "gandiva::TreeExprBuilder::MakeIf"( + shared_ptr[CNode] condition, shared_ptr[CNode] this_node, + shared_ptr[CNode] else_node, shared_ptr[CDataType] return_type) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeAnd \ + "gandiva::TreeExprBuilder::MakeAnd"(const CNodeVector& children) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeOr \ + "gandiva::TreeExprBuilder::MakeOr"(const CNodeVector& children) + + cdef shared_ptr[CCondition] TreeExprBuilder_MakeCondition \ + "gandiva::TreeExprBuilder::MakeCondition"( + shared_ptr[CNode] condition) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionInt32 \ + "gandiva::TreeExprBuilder::MakeInExpressionInt32"( + shared_ptr[CNode] node, const c_unordered_set[int32_t]& values) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionInt64 \ + "gandiva::TreeExprBuilder::MakeInExpressionInt64"( + shared_ptr[CNode] node, const c_unordered_set[int64_t]& values) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionTime32 \ + "gandiva::TreeExprBuilder::MakeInExpressionTime32"( + shared_ptr[CNode] node, const c_unordered_set[int32_t]& values) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionTime64 \ + "gandiva::TreeExprBuilder::MakeInExpressionTime64"( + shared_ptr[CNode] node, const c_unordered_set[int64_t]& values) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionDate32 \ + "gandiva::TreeExprBuilder::MakeInExpressionDate32"( + shared_ptr[CNode] node, 
const c_unordered_set[int32_t]& values) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionDate64 \ + "gandiva::TreeExprBuilder::MakeInExpressionDate64"( + shared_ptr[CNode] node, const c_unordered_set[int64_t]& values) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionTimeStamp \ + "gandiva::TreeExprBuilder::MakeInExpressionTimeStamp"( + shared_ptr[CNode] node, const c_unordered_set[int64_t]& values) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionString \ + "gandiva::TreeExprBuilder::MakeInExpressionString"( + shared_ptr[CNode] node, const c_unordered_set[c_string]& values) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionBinary \ + "gandiva::TreeExprBuilder::MakeInExpressionBinary"( + shared_ptr[CNode] node, const c_unordered_set[c_string]& values) + +cdef extern from "gandiva/projector.h" namespace "gandiva" nogil: + + cdef cppclass CProjector" gandiva::Projector": + + CStatus Evaluate( + const CRecordBatch& batch, CMemoryPool* pool, + const CArrayVector* output) + + CStatus Evaluate( + const CRecordBatch& batch, + const CSelectionVector* selection, + CMemoryPool* pool, + const CArrayVector* output) + + c_string DumpIR() + + cdef CStatus Projector_Make \ + "gandiva::Projector::Make"( + shared_ptr[CSchema] schema, const CExpressionVector& children, + shared_ptr[CProjector]* projector) + + cdef CStatus Projector_Make \ + "gandiva::Projector::Make"( + shared_ptr[CSchema] schema, const CExpressionVector& children, + CSelectionVector_Mode mode, + shared_ptr[CConfiguration] configuration, + shared_ptr[CProjector]* projector) + +cdef extern from "gandiva/filter.h" namespace "gandiva" nogil: + + cdef cppclass CFilter" gandiva::Filter": + + CStatus Evaluate( + const CRecordBatch& batch, + shared_ptr[CSelectionVector] out_selection) + + c_string DumpIR() + + cdef CStatus Filter_Make \ + "gandiva::Filter::Make"( + shared_ptr[CSchema] schema, shared_ptr[CCondition] condition, + shared_ptr[CConfiguration] configuration, + shared_ptr[CFilter]* filter) + +cdef extern from "gandiva/function_signature.h" namespace "gandiva" nogil: + + cdef cppclass CFunctionSignature" gandiva::FunctionSignature": + + CFunctionSignature(const c_string& base_name, + vector[shared_ptr[CDataType]] param_types, + shared_ptr[CDataType] ret_type) + + shared_ptr[CDataType] ret_type() const + + const c_string& base_name() const + + vector[shared_ptr[CDataType]] param_types() const + + c_string ToString() const + +cdef extern from "gandiva/expression_registry.h" namespace "gandiva" nogil: + + cdef vector[shared_ptr[CFunctionSignature]] \ + GetRegisteredFunctionSignatures() + +cdef extern from "gandiva/configuration.h" namespace "gandiva" nogil: + + cdef cppclass CConfiguration" gandiva::Configuration": + + CConfiguration() + + CConfiguration(bint optimize, bint dump_ir) + + void set_optimize(bint optimize) + + void set_dump_ir(bint dump_ir) + + cdef cppclass CConfigurationBuilder \ + " gandiva::ConfigurationBuilder": + @staticmethod + shared_ptr[CConfiguration] DefaultConfiguration() + + CConfigurationBuilder() + + shared_ptr[CConfiguration] build() diff --git a/venv/lib/python3.10/site-packages/pyarrow/includes/libparquet_encryption.pxd b/venv/lib/python3.10/site-packages/pyarrow/includes/libparquet_encryption.pxd new file mode 100644 index 0000000000000000000000000000000000000000..2b40414ce538319dc66d5a2e7a58fc28cb93770e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/includes/libparquet_encryption.pxd @@ -0,0 +1,130 @@ +# Licensed to the Apache Software Foundation (ASF) 
under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from pyarrow.includes.common cimport * +from pyarrow._parquet cimport (ParquetCipher, + CFileEncryptionProperties, + CFileDecryptionProperties, + ParquetCipher_AES_GCM_V1, + ParquetCipher_AES_GCM_CTR_V1) + + +cdef extern from "parquet/encryption/kms_client.h" \ + namespace "parquet::encryption" nogil: + cdef cppclass CKmsClient" parquet::encryption::KmsClient": + c_string WrapKey(const c_string& key_bytes, + const c_string& master_key_identifier) except + + c_string UnwrapKey(const c_string& wrapped_key, + const c_string& master_key_identifier) except + + + cdef cppclass CKeyAccessToken" parquet::encryption::KeyAccessToken": + CKeyAccessToken(const c_string value) + void Refresh(const c_string& new_value) + const c_string& value() const + + cdef cppclass CKmsConnectionConfig \ + " parquet::encryption::KmsConnectionConfig": + CKmsConnectionConfig() + c_string kms_instance_id + c_string kms_instance_url + shared_ptr[CKeyAccessToken] refreshable_key_access_token + unordered_map[c_string, c_string] custom_kms_conf + +# Callbacks for implementing Python kms clients +# Use typedef to emulate syntax for std::function +ctypedef void CallbackWrapKey( + object, const c_string&, const c_string&, c_string*) +ctypedef void CallbackUnwrapKey( + object, const c_string&, const c_string&, c_string*) + +cdef extern from "parquet/encryption/kms_client_factory.h" \ + namespace "parquet::encryption" nogil: + cdef cppclass CKmsClientFactory" parquet::encryption::KmsClientFactory": + shared_ptr[CKmsClient] CreateKmsClient( + const CKmsConnectionConfig& kms_connection_config) except + + +# Callbacks for implementing Python kms client factories +# Use typedef to emulate syntax for std::function +ctypedef void CallbackCreateKmsClient( + object, + const CKmsConnectionConfig&, shared_ptr[CKmsClient]*) + +cdef extern from "parquet/encryption/crypto_factory.h" \ + namespace "parquet::encryption" nogil: + cdef cppclass CEncryptionConfiguration\ + " parquet::encryption::EncryptionConfiguration": + CEncryptionConfiguration(const c_string& footer_key) except + + c_string footer_key + c_string column_keys + ParquetCipher encryption_algorithm + c_bool plaintext_footer + c_bool double_wrapping + double cache_lifetime_seconds + c_bool internal_key_material + int32_t data_key_length_bits + + cdef cppclass CDecryptionConfiguration\ + " parquet::encryption::DecryptionConfiguration": + CDecryptionConfiguration() except + + double cache_lifetime_seconds + + cdef cppclass CCryptoFactory" parquet::encryption::CryptoFactory": + void RegisterKmsClientFactory( + shared_ptr[CKmsClientFactory] kms_client_factory) except + + shared_ptr[CFileEncryptionProperties] GetFileEncryptionProperties( + const CKmsConnectionConfig& kms_connection_config, + const CEncryptionConfiguration& 
encryption_config) except +* + shared_ptr[CFileDecryptionProperties] GetFileDecryptionProperties( + const CKmsConnectionConfig& kms_connection_config, + const CDecryptionConfiguration& decryption_config) except +* + void RemoveCacheEntriesForToken(const c_string& access_token) except + + void RemoveCacheEntriesForAllTokens() except + + +cdef extern from "arrow/python/parquet_encryption.h" \ + namespace "arrow::py::parquet::encryption" nogil: + cdef cppclass CPyKmsClientVtable \ + " arrow::py::parquet::encryption::PyKmsClientVtable": + CPyKmsClientVtable() + function[CallbackWrapKey] wrap_key + function[CallbackUnwrapKey] unwrap_key + + cdef cppclass CPyKmsClient\ + " arrow::py::parquet::encryption::PyKmsClient"(CKmsClient): + CPyKmsClient(object handler, CPyKmsClientVtable vtable) + + cdef cppclass CPyKmsClientFactoryVtable\ + " arrow::py::parquet::encryption::PyKmsClientFactoryVtable": + CPyKmsClientFactoryVtable() + function[CallbackCreateKmsClient] create_kms_client + + cdef cppclass CPyKmsClientFactory\ + " arrow::py::parquet::encryption::PyKmsClientFactory"( + CKmsClientFactory): + CPyKmsClientFactory(object handler, CPyKmsClientFactoryVtable vtable) + + cdef cppclass CPyCryptoFactory\ + " arrow::py::parquet::encryption::PyCryptoFactory"(CCryptoFactory): + CResult[shared_ptr[CFileEncryptionProperties]] \ + SafeGetFileEncryptionProperties( + const CKmsConnectionConfig& kms_connection_config, + const CEncryptionConfiguration& encryption_config) + CResult[shared_ptr[CFileDecryptionProperties]] \ + SafeGetFileDecryptionProperties( + const CKmsConnectionConfig& kms_connection_config, + const CDecryptionConfiguration& decryption_config) diff --git a/venv/lib/python3.10/site-packages/pyarrow/interchange/__init__.py b/venv/lib/python3.10/site-packages/pyarrow/interchange/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7ebe59b499c214dd82954bff84824cfea574b415 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/interchange/__init__.py @@ -0,0 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
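+
+# Usage sketch (illustrative only; ``pandas`` here is just an example
+# producer, any object implementing ``__dataframe__`` works):
+#
+#   import pandas as pd
+#   import pyarrow.interchange as pi
+#
+#   df = pd.DataFrame({"a": [1, 2, 3]})
+#   table = pi.from_dataframe(df)   # -> pa.Table with column "a"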
+ +# flake8: noqa + +from .from_dataframe import from_dataframe diff --git a/venv/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f7ce0acef2dd32afc2e1b1bbf1730031a7d1beb Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/buffer.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/buffer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d876e6cf819bbb70ab3101072e99604d8271c48f Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/buffer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/column.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/column.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0747b0b3e235a84d51822b78707cb2bc832eef04 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/column.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/dataframe.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/dataframe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51b257d2009497698d17f4feb3842cf1f53e49dc Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/dataframe.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/from_dataframe.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/from_dataframe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a3ac2e9ec75c5c153c8fa58951d8da241a999af Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/from_dataframe.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/interchange/buffer.py b/venv/lib/python3.10/site-packages/pyarrow/interchange/buffer.py new file mode 100644 index 0000000000000000000000000000000000000000..1f537798130b9a77bc50e1040ea8046557974894 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/interchange/buffer.py @@ -0,0 +1,107 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
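+
+# Illustrative sketch (assuming pyarrow is importable): ``_PyArrowBuffer``
+# below is a thin, zero-copy view over a ``pa.Buffer`` that exposes only the
+# size, the raw pointer and the DLPack device query, e.g.:
+#
+#   buf = _PyArrowBuffer(pa.py_buffer(b"abcd"))
+#   buf.bufsize               # -> 4
+#   buf.__dlpack_device__()   # -> (DlpackDeviceType.CPU, None)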
+
+from __future__ import annotations
+import enum
+
+import pyarrow as pa
+
+
+class DlpackDeviceType(enum.IntEnum):
+    """Integer enum for device type codes matching DLPack."""
+
+    CPU = 1
+    CUDA = 2
+    CPU_PINNED = 3
+    OPENCL = 4
+    VULKAN = 7
+    METAL = 8
+    VPI = 9
+    ROCM = 10
+
+
+class _PyArrowBuffer:
+    """
+    Data in the buffer is guaranteed to be contiguous in memory.
+
+    Note that there is no dtype attribute present; a buffer can be thought of
+    as simply a block of memory. However, if the column that the buffer is
+    attached to has a dtype that's supported by DLPack and ``__dlpack__`` is
+    implemented, then that dtype information will be contained in the return
+    value from ``__dlpack__``.
+
+    This distinction is useful to support both (a) data exchange via DLPack
+    on a buffer and (b) dtypes like variable-length strings which do not have
+    a fixed number of bytes per element.
+    """
+
+    def __init__(self, x: pa.Buffer, allow_copy: bool = True) -> None:
+        """
+        Handle PyArrow Buffers.
+        """
+        self._x = x
+
+    @property
+    def bufsize(self) -> int:
+        """
+        Buffer size in bytes.
+        """
+        return self._x.size
+
+    @property
+    def ptr(self) -> int:
+        """
+        Pointer to start of the buffer as an integer.
+        """
+        return self._x.address
+
+    def __dlpack__(self):
+        """
+        Produce DLPack capsule (see array API standard).
+
+        Raises:
+            - TypeError : if the buffer contains unsupported dtypes.
+            - NotImplementedError : if DLPack support is not implemented.
+
+        Useful for connecting to array libraries. Support is optional because
+        it's not completely trivial to implement for a Python-only library.
+        """
+        raise NotImplementedError("__dlpack__")
+
+    def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:
+        """
+        Device type and device ID for where the data in the buffer resides.
+        Uses device type codes matching DLPack.
+        Note: must be implemented even if ``__dlpack__`` is not.
+        """
+        if self._x.is_cpu:
+            return (DlpackDeviceType.CPU, None)
+        else:
+            raise NotImplementedError("__dlpack_device__")
+
+    def __repr__(self) -> str:
+        return (
+            "PyArrowBuffer("
+            + str(
+                {
+                    "bufsize": self.bufsize,
+                    "ptr": self.ptr,
+                    "device": self.__dlpack_device__()[0].name,
+                }
+            )
+            + ")"
+        )
diff --git a/venv/lib/python3.10/site-packages/pyarrow/interchange/column.py b/venv/lib/python3.10/site-packages/pyarrow/interchange/column.py
new file mode 100644
index 0000000000000000000000000000000000000000..e609e469b0ffa6a880f530757b72ed15d859571a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/interchange/column.py
@@ -0,0 +1,529 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
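+
+# Illustrative note: a column's interchange dtype is the 4-tuple
+# ``(kind, bit_width, format_string, endianness)`` assembled from
+# ``_PYARROW_KINDS`` below, e.g. an int64 column maps to
+# ``(DtypeKind.INT, 64, "l", "=")`` and a (variable-length) string column
+# to ``(DtypeKind.STRING, 8, "u", "=")``.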
+ +from __future__ import annotations + +import enum +from typing import ( + Any, + Dict, + Iterable, + Optional, + Tuple, +) + +import sys +if sys.version_info >= (3, 8): + from typing import TypedDict +else: + from typing_extensions import TypedDict + +import pyarrow as pa +import pyarrow.compute as pc +from pyarrow.interchange.buffer import _PyArrowBuffer + + +class DtypeKind(enum.IntEnum): + """ + Integer enum for data types. + + Attributes + ---------- + INT : int + Matches to signed integer data type. + UINT : int + Matches to unsigned integer data type. + FLOAT : int + Matches to floating point data type. + BOOL : int + Matches to boolean data type. + STRING : int + Matches to string data type (UTF-8 encoded). + DATETIME : int + Matches to datetime data type. + CATEGORICAL : int + Matches to categorical data type. + """ + + INT = 0 + UINT = 1 + FLOAT = 2 + BOOL = 20 + STRING = 21 # UTF-8 + DATETIME = 22 + CATEGORICAL = 23 + + +Dtype = Tuple[DtypeKind, int, str, str] # see Column.dtype + + +_PYARROW_KINDS = { + pa.int8(): (DtypeKind.INT, "c"), + pa.int16(): (DtypeKind.INT, "s"), + pa.int32(): (DtypeKind.INT, "i"), + pa.int64(): (DtypeKind.INT, "l"), + pa.uint8(): (DtypeKind.UINT, "C"), + pa.uint16(): (DtypeKind.UINT, "S"), + pa.uint32(): (DtypeKind.UINT, "I"), + pa.uint64(): (DtypeKind.UINT, "L"), + pa.float16(): (DtypeKind.FLOAT, "e"), + pa.float32(): (DtypeKind.FLOAT, "f"), + pa.float64(): (DtypeKind.FLOAT, "g"), + pa.bool_(): (DtypeKind.BOOL, "b"), + pa.string(): (DtypeKind.STRING, "u"), + pa.large_string(): (DtypeKind.STRING, "U"), +} + + +class ColumnNullType(enum.IntEnum): + """ + Integer enum for null type representation. + + Attributes + ---------- + NON_NULLABLE : int + Non-nullable column. + USE_NAN : int + Use explicit float NaN value. + USE_SENTINEL : int + Sentinel value besides NaN. + USE_BITMASK : int + The bit is set/unset representing a null on a certain position. + USE_BYTEMASK : int + The byte is set/unset representing a null on a certain position. + """ + + NON_NULLABLE = 0 + USE_NAN = 1 + USE_SENTINEL = 2 + USE_BITMASK = 3 + USE_BYTEMASK = 4 + + +class ColumnBuffers(TypedDict): + # first element is a buffer containing the column data; + # second element is the data buffer's associated dtype + data: Tuple[_PyArrowBuffer, Dtype] + + # first element is a buffer containing mask values indicating missing data; + # second element is the mask value buffer's associated dtype. + # None if the null representation is not a bit or byte mask + validity: Optional[Tuple[_PyArrowBuffer, Dtype]] + + # first element is a buffer containing the offset values for + # variable-size binary data (e.g., variable-length strings); + # second element is the offsets buffer's associated dtype. + # None if the data buffer does not have an associated offsets buffer + offsets: Optional[Tuple[_PyArrowBuffer, Dtype]] + + +class CategoricalDescription(TypedDict): + # whether the ordering of dictionary indices is semantically meaningful + is_ordered: bool + # whether a dictionary-style mapping of categorical values to other objects + # exists + is_dictionary: bool + # Python-level only (e.g. ``{int: str}``). + # None if not a dictionary-style categorical. 
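+    # (in this implementation: the dictionary values wrapped in a
+    # _PyArrowColumn, see describe_categorical below)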
+    categories: Optional[_PyArrowColumn]
+
+
+class Endianness:
+    """Enum indicating the byte-order of a data-type."""
+
+    LITTLE = "<"
+    BIG = ">"
+    NATIVE = "="
+    NA = "|"
+
+
+class NoBufferPresent(Exception):
+    """Exception to signal that there is no requested buffer."""
+
+
+class _PyArrowColumn:
+    """
+    A column object, with only the methods and properties required by the
+    interchange protocol defined.
+
+    A column can contain one or more chunks. Each chunk can contain up to three
+    buffers - a data buffer, a mask buffer (depending on null representation),
+    and an offsets buffer (if variable-size binary; e.g., variable-length
+    strings).
+
+    TBD: Arrow has a separate "null" dtype, and has no separate mask concept.
+         Instead, it seems to use "children" for both columns with a bit mask,
+         and for nested dtypes. Unclear whether this is elegant or confusing.
+         This design requires checking the null representation explicitly.
+
+         The Arrow design requires checking:
+         1. the ARROW_FLAG_NULLABLE (for sentinel values)
+         2. if a column has two children, combined with one of those children
+            having a null dtype.
+
+         Making the mask concept explicit seems useful. One null dtype would
+         not be enough to cover both bit and byte masks, so that would mean
+         even more checking if we did it the Arrow way.
+
+    TBD: there's also the "chunk" concept here, which is implicit in Arrow as
+         multiple buffers per array (= column here). Semantically it may make
+         sense to have both: chunks were meant for example for lazy evaluation
+         of data which doesn't fit in memory, while multiple buffers per column
+         could also come from doing a selection operation on a single
+         contiguous buffer.
+
+         Given these concepts, one would expect chunks to be all of the same
+         size (say a 10,000 row dataframe could have 10 chunks of 1,000 rows),
+         while multiple buffers could have data-dependent lengths. Not an issue
+         in pandas if one column is backed by a single NumPy array, but in
+         Arrow it seems possible.
+         Are multiple chunks *and* multiple buffers per column necessary for
+         the purposes of this interchange protocol, or must producers either
+         reuse the chunk concept for this or copy the data?
+
+    Note: this Column object can only be produced by ``__dataframe__``, so
+          doesn't need its own version or ``__column__`` protocol.
+    """
+
+    def __init__(
+        self, column: pa.Array | pa.ChunkedArray, allow_copy: bool = True
+    ) -> None:
+        """
+        Handles PyArrow Arrays and ChunkedArrays.
+        """
+        # Store the column as a private attribute
+        if isinstance(column, pa.ChunkedArray):
+            if column.num_chunks == 1:
+                column = column.chunk(0)
+            else:
+                if not allow_copy:
+                    raise RuntimeError(
+                        "Chunks will be combined and a copy is required which "
+                        "is forbidden by allow_copy=False"
+                    )
+                column = column.combine_chunks()
+
+        self._allow_copy = allow_copy
+
+        if pa.types.is_boolean(column.type):
+            if not allow_copy:
+                raise RuntimeError(
+                    "Boolean column will be cast to uint8 and a copy "
+                    "is required which is forbidden by allow_copy=False"
+                )
+            self._dtype = self._dtype_from_arrowdtype(column.type, 8)
+            self._col = pc.cast(column, pa.uint8())
+        else:
+            self._col = column
+            dtype = self._col.type
+            try:
+                bit_width = dtype.bit_width
+            except ValueError:
+                # in case of variable-length strings, considered as an
+                # array of bytes (8 bits)
+                bit_width = 8
+            self._dtype = self._dtype_from_arrowdtype(dtype, bit_width)
+
+    def size(self) -> int:
+        """
+        Size of the column, in elements.
+
+        Corresponds to DataFrame.num_rows() if column is a single chunk;
+        equal to size of this current chunk otherwise.
+
+        Is a method rather than a property because it may cause a (potentially
+        expensive) computation for some dataframe implementations.
+        """
+        return len(self._col)
+
+    @property
+    def offset(self) -> int:
+        """
+        Offset of first element.
+
+        May be > 0 if using chunks; for example for a column with N chunks of
+        equal size M (only the last chunk may be shorter),
+        ``offset = n * M``, ``n = 0 .. N-1``.
+        """
+        return self._col.offset
+
+    @property
+    def dtype(self) -> Tuple[DtypeKind, int, str, str]:
+        """
+        Dtype description as a tuple ``(kind, bit-width, format string,
+        endianness)``.
+
+        Bit-width : the number of bits as an integer
+        Format string : data type description format string in Apache Arrow C
+                        Data Interface format.
+        Endianness : currently only native endianness (``=``) is supported
+
+        Notes:
+            - Kind specifiers are aligned with DLPack where possible (hence the
+              jump to 20, leaving enough room for future extension)
+            - Masks must be specified as boolean with either bit width 1 (for
+              bit masks) or 8 (for byte masks).
+            - Dtype width in bits was preferred over bytes
+            - Endianness isn't too useful, but included now in case in the
+              future we need to support non-native endianness
+            - Went with Apache Arrow format strings over NumPy format strings
+              because they're more complete from a dataframe perspective
+            - Format strings are mostly useful for datetime specification, and
+              for categoricals.
+            - For categoricals, the format string describes the type of the
+              categorical in the data buffer. In case of a separate encoding of
+              the categorical (e.g. an integer to string mapping), this can
+              be derived from ``self.describe_categorical``.
+            - Data types not included: complex, Arrow-style null, binary,
+              decimal, and nested (list, struct, map, union) dtypes.
+        """
+        return self._dtype
+
+    def _dtype_from_arrowdtype(
+        self, dtype: pa.DataType, bit_width: int
+    ) -> Tuple[DtypeKind, int, str, str]:
+        """
+        See `self.dtype` for details.
+        """
+        # Note: 'c' (complex) not handled yet (not in array spec v1).
+        #       'b', 'B' (bytes), 'S', 'a' (old-style string), 'V' (void)
+        #       not handled; datetime and timedelta both map to datetime
+        #       (is timedelta handled?)
+
+        if pa.types.is_timestamp(dtype):
+            kind = DtypeKind.DATETIME
+            ts = dtype.unit[0]
+            tz = dtype.tz if dtype.tz else ""
+            f_string = "ts{ts}:{tz}".format(ts=ts, tz=tz)
+            return kind, bit_width, f_string, Endianness.NATIVE
+        elif pa.types.is_dictionary(dtype):
+            kind = DtypeKind.CATEGORICAL
+            arr = self._col
+            indices_dtype = arr.indices.type
+            _, f_string = _PYARROW_KINDS.get(indices_dtype)
+            return kind, bit_width, f_string, Endianness.NATIVE
+        else:
+            kind, f_string = _PYARROW_KINDS.get(dtype, (None, None))
+            if kind is None:
+                raise ValueError(
+                    f"Data type {dtype} not supported by interchange protocol")
+
+            return kind, bit_width, f_string, Endianness.NATIVE
+
+    @property
+    def describe_categorical(self) -> CategoricalDescription:
+        """
+        If the dtype is categorical, there are two options:
+        - There are only values in the data buffer.
+        - There is a separate non-categorical Column encoding categorical
+          values.
+
+        Raises TypeError if the dtype is not categorical.
+
+        Returns the dictionary with description on how to interpret the
+        data buffer:
+            - "is_ordered" : bool, whether the ordering of dictionary indices
+                             is semantically meaningful.
+            - "is_dictionary" : bool, whether a mapping of
+                                categorical values to other objects exists
+            - "categories" : Column representing the (implicit) mapping of
+                             indices to category values (e.g. an array of
+                             cat1, cat2, ...). None if not a dictionary-style
+                             categorical.
+
+        TBD: are there any other in-memory representations that are needed?
+        """
+        arr = self._col
+        if not pa.types.is_dictionary(arr.type):
+            raise TypeError(
+                "describe_categorical only works on a column with "
+                "categorical dtype!"
+            )
+
+        return {
+            "is_ordered": self._col.type.ordered,
+            "is_dictionary": True,
+            "categories": _PyArrowColumn(arr.dictionary),
+        }
+
+    @property
+    def describe_null(self) -> Tuple[ColumnNullType, Any]:
+        """
+        Return the missing value (or "null") representation the column dtype
+        uses, as a tuple ``(kind, value)``.
+
+        Value : if kind is "sentinel value", the actual value. If kind is a bit
+        mask or a byte mask, the value (0 or 1) indicating a missing value.
+        None otherwise.
+        """
+        # In case of no missing values, we need to set ColumnNullType to
+        # non-nullable, because in the current __dataframe__ protocol bit/byte
+        # masks cannot be None
+        if self.null_count == 0:
+            return ColumnNullType.NON_NULLABLE, None
+        else:
+            return ColumnNullType.USE_BITMASK, 0
+
+    @property
+    def null_count(self) -> int:
+        """
+        Number of null elements, if known.
+
+        Note: Arrow uses -1 to indicate "unknown", but None seems cleaner.
+        """
+        arrow_null_count = self._col.null_count
+        n = arrow_null_count if arrow_null_count != -1 else None
+        return n
+
+    @property
+    def metadata(self) -> Dict[str, Any]:
+        """
+        The metadata for the column. See `DataFrame.metadata` for more details.
+        """
+        pass
+
+    def num_chunks(self) -> int:
+        """
+        Return the number of chunks the column consists of.
+        """
+        return 1
+
+    def get_chunks(
+        self, n_chunks: Optional[int] = None
+    ) -> Iterable[_PyArrowColumn]:
+        """
+        Return an iterator yielding the chunks.
+
+        See `DataFrame.get_chunks` for details on ``n_chunks``.
+        """
+        if n_chunks and n_chunks > 1:
+            chunk_size = self.size() // n_chunks
+            if self.size() % n_chunks != 0:
+                chunk_size += 1
+
+            array = self._col
+            for start in range(0, chunk_size * n_chunks, chunk_size):
+                yield _PyArrowColumn(
+                    array.slice(start, chunk_size), self._allow_copy
+                )
+        else:
+            yield self
+
+    def get_buffers(self) -> ColumnBuffers:
+        """
+        Return a dictionary containing the underlying buffers.
+
+        The returned dictionary has the following contents:
+
+            - "data": a two-element tuple whose first element is a buffer
+                      containing the data and whose second element is the data
+                      buffer's associated dtype.
+            - "validity": a two-element tuple whose first element is a buffer
+                          containing mask values indicating missing data and
+                          whose second element is the mask value buffer's
+                          associated dtype. None if the null representation is
+                          not a bit or byte mask.
+            - "offsets": a two-element tuple whose first element is a buffer
+                         containing the offset values for variable-size binary
+                         data (e.g., variable-length strings) and whose second
+                         element is the offsets buffer's associated dtype. None
+                         if the data buffer does not have an associated offsets
+                         buffer.
+ """ + buffers: ColumnBuffers = { + "data": self._get_data_buffer(), + "validity": None, + "offsets": None, + } + + try: + buffers["validity"] = self._get_validity_buffer() + except NoBufferPresent: + pass + + try: + buffers["offsets"] = self._get_offsets_buffer() + except NoBufferPresent: + pass + + return buffers + + def _get_data_buffer( + self, + ) -> Tuple[_PyArrowBuffer, Any]: # Any is for self.dtype tuple + """ + Return the buffer containing the data and the buffer's + associated dtype. + """ + array = self._col + dtype = self.dtype + + # In case of dictionary arrays, use indices + # to define a buffer, codes are transferred through + # describe_categorical() + if pa.types.is_dictionary(array.type): + array = array.indices + dtype = _PyArrowColumn(array).dtype + + n = len(array.buffers()) + if n == 2: + return _PyArrowBuffer(array.buffers()[1]), dtype + elif n == 3: + return _PyArrowBuffer(array.buffers()[2]), dtype + + def _get_validity_buffer(self) -> Tuple[_PyArrowBuffer, Any]: + """ + Return the buffer containing the mask values indicating missing data + and the buffer's associated dtype. + Raises NoBufferPresent if null representation is not a bit or byte + mask. + """ + # Define the dtype of the returned buffer + dtype = (DtypeKind.BOOL, 1, "b", Endianness.NATIVE) + array = self._col + buff = array.buffers()[0] + if buff: + return _PyArrowBuffer(buff), dtype + else: + raise NoBufferPresent( + "There are no missing values so " + "does not have a separate mask") + + def _get_offsets_buffer(self) -> Tuple[_PyArrowBuffer, Any]: + """ + Return the buffer containing the offset values for variable-size binary + data (e.g., variable-length strings) and the buffer's associated dtype. + Raises NoBufferPresent if the data buffer does not have an associated + offsets buffer. + """ + array = self._col + n = len(array.buffers()) + if n == 2: + raise NoBufferPresent( + "This column has a fixed-length dtype so " + "it does not have an offsets buffer" + ) + elif n == 3: + # Define the dtype of the returned buffer + dtype = self._col.type + if pa.types.is_large_string(dtype): + dtype = (DtypeKind.INT, 64, "l", Endianness.NATIVE) + else: + dtype = (DtypeKind.INT, 32, "i", Endianness.NATIVE) + return _PyArrowBuffer(array.buffers()[1]), dtype diff --git a/venv/lib/python3.10/site-packages/pyarrow/interchange/dataframe.py b/venv/lib/python3.10/site-packages/pyarrow/interchange/dataframe.py new file mode 100644 index 0000000000000000000000000000000000000000..59ba765c175ad471274a99bf857c8880a072e0b8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/interchange/dataframe.py @@ -0,0 +1,217 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+
+from __future__ import annotations
+from typing import (
+    Any,
+    Iterable,
+    Optional,
+    Sequence,
+)
+
+import pyarrow as pa
+
+from pyarrow.interchange.column import _PyArrowColumn
+
+
+class _PyArrowDataFrame:
+    """
+    A data frame class, with only the methods required by the interchange
+    protocol defined.
+
+    A "data frame" represents an ordered collection of named columns.
+    A column's "name" must be a unique string.
+    Columns may be accessed by name or by position.
+
+    This could be a public data frame class, or an object with the methods and
+    attributes defined on this DataFrame class could be returned from the
+    ``__dataframe__`` method of a public data frame class in a library adhering
+    to the dataframe interchange protocol specification.
+    """
+
+    def __init__(
+        self, df: pa.Table | pa.RecordBatch,
+        nan_as_null: bool = False,
+        allow_copy: bool = True
+    ) -> None:
+        """
+        Constructor - an instance of this (private) class is returned from
+        `pa.Table.__dataframe__` or `pa.RecordBatch.__dataframe__`.
+        """
+        self._df = df
+        # ``nan_as_null`` is a keyword intended for the consumer to tell the
+        # producer to overwrite null values in the data with ``NaN`` (or
+        # ``NaT``).
+        if nan_as_null is True:
+            raise RuntimeError(
+                "nan_as_null=True currently has no effect, "
+                "use the default nan_as_null=False"
+            )
+        self._nan_as_null = nan_as_null
+        self._allow_copy = allow_copy
+
+    def __dataframe__(
+        self, nan_as_null: bool = False, allow_copy: bool = True
+    ) -> _PyArrowDataFrame:
+        """
+        Construct a new exchange object, potentially changing the parameters.
+        ``nan_as_null`` is a keyword intended for the consumer to tell the
+        producer to overwrite null values in the data with ``NaN``.
+        It is intended for cases where the consumer does not support the bit
+        mask or byte mask that is the producer's native representation.
+        ``allow_copy`` is a keyword that defines whether or not the library is
+        allowed to make a copy of the data. For example, copying data would be
+        necessary if a library supports strided buffers, given that this
+        protocol specifies contiguous buffers.
+        """
+        return _PyArrowDataFrame(self._df, nan_as_null, allow_copy)
+
+    @property
+    def metadata(self) -> dict[str, Any]:
+        """
+        The metadata for the data frame, as a dictionary with string keys. The
+        contents of `metadata` may be anything; they are meant for a library
+        to store information that it needs to, e.g., roundtrip losslessly or
+        for two implementations to share data that is not (yet) part of the
+        interchange protocol specification. To avoid collisions with other
+        entries, please name the keys with the name of the library
+        followed by a period and the desired name, e.g., ``pandas.indexcol``.
+        """
+        # The metadata for the data frame, as a dictionary with string keys.
+        # Add schema metadata here (pandas metadata or custom metadata)
+        if self._df.schema.metadata:
+            schema_metadata = {"pyarrow." + k.decode('utf8'): v.decode('utf8')
+                               for k, v in self._df.schema.metadata.items()}
+            return schema_metadata
+        else:
+            return {}
+
+    def num_columns(self) -> int:
+        """
+        Return the number of columns in the DataFrame.
+        """
+        return self._df.num_columns
+
+    def num_rows(self) -> int:
+        """
+        Return the number of rows in the DataFrame, if available.
+        """
+        return self._df.num_rows
+
+    def num_chunks(self) -> int:
+        """
+        Return the number of chunks the DataFrame consists of.
+ """ + if isinstance(self._df, pa.RecordBatch): + return 1 + else: + # pyarrow.Table can have columns with different number + # of chunks so we take the number of chunks that + # .to_batches() returns as it takes the min chunk size + # of all the columns (to_batches is a zero copy method) + batches = self._df.to_batches() + return len(batches) + + def column_names(self) -> Iterable[str]: + """ + Return an iterator yielding the column names. + """ + return self._df.schema.names + + def get_column(self, i: int) -> _PyArrowColumn: + """ + Return the column at the indicated position. + """ + return _PyArrowColumn(self._df.column(i), + allow_copy=self._allow_copy) + + def get_column_by_name(self, name: str) -> _PyArrowColumn: + """ + Return the column whose name is the indicated name. + """ + return _PyArrowColumn(self._df.column(name), + allow_copy=self._allow_copy) + + def get_columns(self) -> Iterable[_PyArrowColumn]: + """ + Return an iterator yielding the columns. + """ + return [ + _PyArrowColumn(col, allow_copy=self._allow_copy) + for col in self._df.columns + ] + + def select_columns(self, indices: Sequence[int]) -> _PyArrowDataFrame: + """ + Create a new DataFrame by selecting a subset of columns by index. + """ + return _PyArrowDataFrame( + self._df.select(list(indices)), self._nan_as_null, self._allow_copy + ) + + def select_columns_by_name( + self, names: Sequence[str] + ) -> _PyArrowDataFrame: + """ + Create a new DataFrame by selecting a subset of columns by name. + """ + return _PyArrowDataFrame( + self._df.select(list(names)), self._nan_as_null, self._allow_copy + ) + + def get_chunks( + self, n_chunks: Optional[int] = None + ) -> Iterable[_PyArrowDataFrame]: + """ + Return an iterator yielding the chunks. + + By default (None), yields the chunks that the data is stored as by the + producer. If given, ``n_chunks`` must be a multiple of + ``self.num_chunks()``, meaning the producer must subdivide each chunk + before yielding it. + + Note that the producer must ensure that all columns are chunked the + same way. + """ + # Subdivide chunks + if n_chunks and n_chunks > 1: + chunk_size = self.num_rows() // n_chunks + if self.num_rows() % n_chunks != 0: + chunk_size += 1 + if isinstance(self._df, pa.Table): + batches = self._df.to_batches(max_chunksize=chunk_size) + else: + batches = [] + for start in range(0, chunk_size * n_chunks, chunk_size): + batches.append(self._df.slice(start, chunk_size)) + # In case when the size of the chunk is such that the resulting + # list is one less chunk then n_chunks -> append an empty chunk + if len(batches) == n_chunks - 1: + batches.append(pa.record_batch([[]], schema=self._df.schema)) + # yields the chunks that the data is stored as + else: + if isinstance(self._df, pa.Table): + batches = self._df.to_batches() + else: + batches = [self._df] + + # Create an iterator of RecordBatches + iterator = [_PyArrowDataFrame(batch, + self._nan_as_null, + self._allow_copy) + for batch in batches] + return iterator diff --git a/venv/lib/python3.10/site-packages/pyarrow/interchange/from_dataframe.py b/venv/lib/python3.10/site-packages/pyarrow/interchange/from_dataframe.py new file mode 100644 index 0000000000000000000000000000000000000000..fcaec41e3dcdf982e19bd45ba4a1941fab5ec34e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/interchange/from_dataframe.py @@ -0,0 +1,614 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from __future__ import annotations + +from typing import ( + Any, + Tuple, +) + +from pyarrow.interchange.column import ( + DtypeKind, + ColumnBuffers, + ColumnNullType, +) + +import pyarrow as pa +import re + +import pyarrow.compute as pc +from pyarrow.interchange.column import Dtype + + +# A typing protocol could be added later to let Mypy validate code using +# `from_dataframe` better. +DataFrameObject = Any +ColumnObject = Any +BufferObject = Any + + +_PYARROW_DTYPES: dict[DtypeKind, dict[int, Any]] = { + DtypeKind.INT: {8: pa.int8(), + 16: pa.int16(), + 32: pa.int32(), + 64: pa.int64()}, + DtypeKind.UINT: {8: pa.uint8(), + 16: pa.uint16(), + 32: pa.uint32(), + 64: pa.uint64()}, + DtypeKind.FLOAT: {16: pa.float16(), + 32: pa.float32(), + 64: pa.float64()}, + DtypeKind.BOOL: {1: pa.bool_(), + 8: pa.uint8()}, + DtypeKind.STRING: {8: pa.string()}, +} + + +def from_dataframe(df: DataFrameObject, allow_copy=True) -> pa.Table: + """ + Build a ``pa.Table`` from any DataFrame supporting the interchange protocol. + + Parameters + ---------- + df : DataFrameObject + Object supporting the interchange protocol, i.e. `__dataframe__` + method. + allow_copy : bool, default: True + Whether to allow copying the memory to perform the conversion + (if false then zero-copy approach is requested). + + Returns + ------- + pa.Table + + Examples + -------- + >>> import pyarrow + >>> from pyarrow.interchange import from_dataframe + + Convert a pandas dataframe to a pyarrow table: + + >>> import pandas as pd + >>> df = pd.DataFrame({ + ... "n_attendees": [100, 10, 1], + ... "country": ["Italy", "Spain", "Slovenia"], + ... }) + >>> df + n_attendees country + 0 100 Italy + 1 10 Spain + 2 1 Slovenia + >>> from_dataframe(df) + pyarrow.Table + n_attendees: int64 + country: large_string + ---- + n_attendees: [[100,10,1]] + country: [["Italy","Spain","Slovenia"]] + """ + if isinstance(df, pa.Table): + return df + elif isinstance(df, pa.RecordBatch): + return pa.Table.from_batches([df]) + + if not hasattr(df, "__dataframe__"): + raise ValueError("`df` does not support __dataframe__") + + return _from_dataframe(df.__dataframe__(allow_copy=allow_copy), + allow_copy=allow_copy) + + +def _from_dataframe(df: DataFrameObject, allow_copy=True): + """ + Build a ``pa.Table`` from the DataFrame interchange object. + + Parameters + ---------- + df : DataFrameObject + Object supporting the interchange protocol, i.e. `__dataframe__` + method. + allow_copy : bool, default: True + Whether to allow copying the memory to perform the conversion + (if false then zero-copy approach is requested). 
+
+    Returns
+    -------
+    pa.Table
+    """
+    batches = []
+    for chunk in df.get_chunks():
+        batch = protocol_df_chunk_to_pyarrow(chunk, allow_copy)
+        batches.append(batch)
+
+    if not batches:
+        batch = protocol_df_chunk_to_pyarrow(df, allow_copy)
+        batches.append(batch)
+
+    return pa.Table.from_batches(batches)
+
+
+def protocol_df_chunk_to_pyarrow(
+    df: DataFrameObject,
+    allow_copy: bool = True
+) -> pa.RecordBatch:
+    """
+    Convert interchange protocol chunk to ``pa.RecordBatch``.
+
+    Parameters
+    ----------
+    df : DataFrameObject
+        Object supporting the interchange protocol, i.e. `__dataframe__`
+        method.
+    allow_copy : bool, default: True
+        Whether to allow copying the memory to perform the conversion
+        (if false then zero-copy approach is requested).
+
+    Returns
+    -------
+    pa.RecordBatch
+    """
+    # We need a dict of columns here, with each column being a pa.Array
+    columns: dict[str, pa.Array] = {}
+    for name in df.column_names():
+        if not isinstance(name, str):
+            raise ValueError(f"Column {name} is not a string")
+        if name in columns:
+            raise ValueError(f"Column {name} is not unique")
+        col = df.get_column_by_name(name)
+        dtype = col.dtype[0]
+        if dtype in (
+            DtypeKind.INT,
+            DtypeKind.UINT,
+            DtypeKind.FLOAT,
+            DtypeKind.STRING,
+            DtypeKind.DATETIME,
+        ):
+            columns[name] = column_to_array(col, allow_copy)
+        elif dtype == DtypeKind.BOOL:
+            columns[name] = bool_column_to_array(col, allow_copy)
+        elif dtype == DtypeKind.CATEGORICAL:
+            columns[name] = categorical_column_to_dictionary(col, allow_copy)
+        else:
+            raise NotImplementedError(f"Data type {dtype} not handled yet")
+
+    return pa.RecordBatch.from_pydict(columns)
+
+
+def column_to_array(
+    col: ColumnObject,
+    allow_copy: bool = True,
+) -> pa.Array:
+    """
+    Convert a column holding one of the primitive dtypes to a PyArrow array.
+    A primitive type is one of: int, uint, float, bool (1 bit).
+
+    Parameters
+    ----------
+    col : ColumnObject
+    allow_copy : bool, default: True
+        Whether to allow copying the memory to perform the conversion
+        (if false then zero-copy approach is requested).
+
+    Returns
+    -------
+    pa.Array
+    """
+    buffers = col.get_buffers()
+    data_type = col.dtype
+    data = buffers_to_array(buffers, data_type,
+                            col.size(),
+                            col.describe_null,
+                            col.offset,
+                            allow_copy)
+    return data
+
+
+def bool_column_to_array(
+    col: ColumnObject,
+    allow_copy: bool = True,
+) -> pa.Array:
+    """
+    Convert a column holding boolean dtype to a PyArrow array.
+
+    Parameters
+    ----------
+    col : ColumnObject
+    allow_copy : bool, default: True
+        Whether to allow copying the memory to perform the conversion
+        (if false then zero-copy approach is requested).
+
+    Returns
+    -------
+    pa.Array
+    """
+    buffers = col.get_buffers()
+    size = buffers["data"][1][1]
+
+    # If booleans are byte-packed, a copy to bit-packed will be made
+    if size == 8 and not allow_copy:
+        raise RuntimeError(
+            "Boolean column will be cast from uint8 and a copy "
+            "is required which is forbidden by allow_copy=False"
+        )
+
+    data_type = col.dtype
+    data = buffers_to_array(buffers, data_type,
+                            col.size(),
+                            col.describe_null,
+                            col.offset)
+    if size == 8:
+        data = pc.cast(data, pa.bool_())
+
+    return data
+
+
+def categorical_column_to_dictionary(
+    col: ColumnObject,
+    allow_copy: bool = True,
+) -> pa.DictionaryArray:
+    """
+    Convert a column holding categorical data to a pa.DictionaryArray.
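+
+    The dictionary values are rebuilt from ``describe_categorical`` and the
+    indices from the column's data buffer; the two are then combined with
+    ``pa.DictionaryArray.from_arrays``.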
+
+
+def categorical_column_to_dictionary(
+    col: ColumnObject,
+    allow_copy: bool = True,
+) -> pa.DictionaryArray:
+    """
+    Convert a column holding categorical data to a pa.DictionaryArray.
+
+    Parameters
+    ----------
+    col : ColumnObject
+    allow_copy : bool, default: True
+        Whether to allow copying the memory to perform the conversion
+        (if False, a zero-copy approach is requested).
+
+    Returns
+    -------
+    pa.DictionaryArray
+    """
+    if not allow_copy:
+        raise RuntimeError(
+            "Categorical column will be cast from uint8 and a copy "
+            "is required which is forbidden by allow_copy=False"
+        )
+
+    categorical = col.describe_categorical
+
+    if not categorical["is_dictionary"]:
+        raise NotImplementedError(
+            "Non-dictionary categoricals not supported yet")
+
+    # First convert the dictionary (categories) column
+    cat_column = categorical["categories"]
+    dictionary = column_to_array(cat_column)
+    # Then convert the indices; these must be built with the buffer's
+    # dtype, not the column's logical (categorical) dtype
+    buffers = col.get_buffers()
+    _, data_type = buffers["data"]
+    indices = buffers_to_array(buffers, data_type,
+                               col.size(),
+                               col.describe_null,
+                               col.offset)
+
+    # Construct a pa.DictionaryArray from the indices and the dictionary
+    dict_array = pa.DictionaryArray.from_arrays(indices, dictionary)
+
+    return dict_array
+
+
+def parse_datetime_format_str(format_str: str) -> Tuple[str, str]:
+    """Parse a datetime interchange format string into a (unit, tz) pair."""
+
+    # timestamp 'ts{unit}:tz'
+    timestamp_meta = re.match(r"ts([smun]):(.*)", format_str)
+    if timestamp_meta:
+        unit, tz = timestamp_meta.group(1), timestamp_meta.group(2)
+        if unit != "s":
+            # the format string describes only the first letter of the unit,
+            # so add one extra letter to convert the unit to numpy-style:
+            # 'm' -> 'ms', 'u' -> 'us', 'n' -> 'ns'
+            unit += "s"
+
+        return unit, tz
+
+    raise NotImplementedError(f"DateTime kind is not supported: {format_str}")
+
+
+def map_date_type(data_type):
+    """Map a column's interchange data type to a pyarrow data type."""
+    kind, bit_width, f_string, _ = data_type
+
+    if kind == DtypeKind.DATETIME:
+        unit, tz = parse_datetime_format_str(f_string)
+        return pa.timestamp(unit, tz=tz)
+    else:
+        pa_dtype = _PYARROW_DTYPES.get(kind, {}).get(bit_width, None)
+
+        if pa_dtype:
+            return pa_dtype
+        else:
+            # Error out if the dtype is not supported
+            raise NotImplementedError(
+                f"Conversion for {data_type} is not yet supported.")
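+
+
+# Editor's note: a small sketch showing how the two helpers above compose
+# for a microsecond, UTC timestamp column; the tuple values are
+# hypothetical example inputs, not part of the pyarrow API.
+def _example_map_timestamp() -> pa.DataType:
+    # parse_datetime_format_str("tsu:UTC") yields unit "us" and tz "UTC",
+    # so the mapped pyarrow type is timestamp("us", tz="UTC")
+    return map_date_type((DtypeKind.DATETIME, 64, "tsu:UTC", "="))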
+
+
+def buffers_to_array(
+    buffers: ColumnBuffers,
+    data_type: Tuple[DtypeKind, int, str, str],
+    length: int,
+    describe_null: ColumnNullType,
+    offset: int = 0,
+    allow_copy: bool = True,
+) -> pa.Array:
+    """
+    Build a PyArrow array from the passed buffers.
+
+    Parameters
+    ----------
+    buffers : ColumnBuffers
+        Dictionary containing tuples of underlying buffers and
+        their associated dtype.
+    data_type : Tuple[DtypeKind, int, str, str]
+        Dtype description of the column as a tuple ``(kind, bit-width,
+        format string, endianness)``.
+    length : int
+        The number of values in the array.
+    describe_null : ColumnNullType
+        Null representation the column dtype uses,
+        as a tuple ``(kind, value)``.
+    offset : int, default: 0
+        Number of elements to offset from the start of the buffer.
+    allow_copy : bool, default: True
+        Whether to allow copying the memory to perform the conversion
+        (if False, a zero-copy approach is requested).
+
+    Returns
+    -------
+    pa.Array
+
+    Notes
+    -----
+    The returned array doesn't own the memory. The caller of this function
+    is responsible for keeping the memory owner object alive as long as
+    the returned PyArrow array is being used.
+    """
+    data_buff, _ = buffers["data"]
+    # Unpacking a missing (None) entry raises TypeError
+    try:
+        validity_buff, validity_dtype = buffers["validity"]
+    except TypeError:
+        validity_buff = None
+    try:
+        offset_buff, offset_dtype = buffers["offsets"]
+    except TypeError:
+        offset_buff = None
+
+    # Construct a pyarrow Buffer backed by the interchange data buffer
+    # (zero-copy)
+    data_pa_buffer = pa.foreign_buffer(data_buff.ptr, data_buff.bufsize,
+                                       base=data_buff)
+
+    # Construct a validity pyarrow Buffer, if applicable
+    if validity_buff:
+        validity_pa_buff = validity_buffer_from_mask(validity_buff,
+                                                     validity_dtype,
+                                                     describe_null,
+                                                     length,
+                                                     offset,
+                                                     allow_copy)
+    else:
+        validity_pa_buff = validity_buffer_nan_sentinel(data_pa_buffer,
+                                                        data_type,
+                                                        describe_null,
+                                                        length,
+                                                        offset,
+                                                        allow_copy)
+
+    # Construct a pyarrow Array from buffers
+    data_dtype = map_date_type(data_type)
+
+    if offset_buff:
+        _, offset_bit_width, _, _ = offset_dtype
+        # If an offset buffer exists, construct an offset pyarrow Buffer
+        # and add it to the construction of an array
+        offset_pa_buffer = pa.foreign_buffer(offset_buff.ptr,
+                                             offset_buff.bufsize,
+                                             base=offset_buff)
+
+        # 'U' is the format string for a large (64-bit offsets) UTF-8 string
+        if data_type[2] == 'U' or offset_bit_width == 64:
+            string_type = pa.large_string()
+        else:
+            string_type = pa.string()
+        array = pa.Array.from_buffers(
+            string_type,
+            length,
+            [validity_pa_buff, offset_pa_buffer, data_pa_buffer],
+            offset=offset,
+        )
+    else:
+        array = pa.Array.from_buffers(
+            data_dtype,
+            length,
+            [validity_pa_buff, data_pa_buffer],
+            offset=offset,
+        )
+
+    return array
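+
+
+# Editor's note: a minimal sketch of the pa.Array.from_buffers call used
+# above, rebuilt here from an existing array's own buffers; illustrative
+# only, not part of the pyarrow API.
+def _example_from_buffers() -> pa.Array:
+    source = pa.array([10, 20, None], type=pa.int64())
+    validity, data = source.buffers()
+    # Rebuild the array from its validity bitmap and its data buffer
+    return pa.Array.from_buffers(pa.int64(), len(source), [validity, data])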
+
+
+def validity_buffer_from_mask(
+    validity_buff: BufferObject,
+    validity_dtype: Dtype,
+    describe_null: ColumnNullType,
+    length: int,
+    offset: int = 0,
+    allow_copy: bool = True,
+) -> pa.Buffer | None:
+    """
+    Build a PyArrow buffer from the passed mask buffer.
+
+    Parameters
+    ----------
+    validity_buff : BufferObject
+        The underlying validity (mask) buffer.
+    validity_dtype : Dtype
+        Dtype description as a tuple ``(kind, bit-width, format string,
+        endianness)``.
+    describe_null : ColumnNullType
+        Null representation the column dtype uses,
+        as a tuple ``(kind, value)``.
+    length : int
+        The number of values in the array.
+    offset : int, default: 0
+        Number of elements to offset from the start of the buffer.
+    allow_copy : bool, default: True
+        Whether to allow copying the memory to perform the conversion
+        (if False, a zero-copy approach is requested).
+
+    Returns
+    -------
+    pa.Buffer or None
+    """
+    null_kind, sentinel_val = describe_null
+    validity_kind, _, _, _ = validity_dtype
+    assert validity_kind == DtypeKind.BOOL
+
+    if null_kind == ColumnNullType.NON_NULLABLE:
+        # A sliced array can report NON_NULLABLE because its slice holds
+        # no missing values, even though a bitmask exists; the validity
+        # buffer must be set to None in that case
+        return None
+
+    elif null_kind == ColumnNullType.USE_BYTEMASK or (
+        null_kind == ColumnNullType.USE_BITMASK and sentinel_val == 1
+    ):
+        buff = pa.foreign_buffer(validity_buff.ptr,
+                                 validity_buff.bufsize,
+                                 base=validity_buff)
+
+        if null_kind == ColumnNullType.USE_BYTEMASK:
+            if not allow_copy:
+                raise RuntimeError(
+                    "To create a bitmask a copy of the data is "
+                    "required which is forbidden by allow_copy=False"
+                )
+            # Cast the byte mask to booleans, which requires a copy
+            mask = pa.Array.from_buffers(pa.int8(), length,
+                                         [None, buff],
+                                         offset=offset)
+            mask_bool = pc.cast(mask, pa.bool_())
+        else:
+            mask_bool = pa.Array.from_buffers(pa.bool_(), length,
+                                              [None, buff],
+                                              offset=offset)
+
+        if sentinel_val == 1:
+            # A sentinel of 1 marks nulls; Arrow's validity marks valid values
+            mask_bool = pc.invert(mask_bool)
+
+        return mask_bool.buffers()[1]
+
+    elif null_kind == ColumnNullType.USE_BITMASK and sentinel_val == 0:
+        # The mask already matches Arrow's validity bitmap; use it as is
+        return pa.foreign_buffer(validity_buff.ptr,
+                                 validity_buff.bufsize,
+                                 base=validity_buff)
+    else:
+        raise NotImplementedError(
+            f"{describe_null} null representation is not yet supported.")
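+
+
+# Editor's note: a small sketch of the mask inversion above, for the case
+# where set values mark *missing* entries; illustrative only.
+def _example_invert_mask() -> pa.Buffer:
+    null_mask = pa.array([True, False, True])  # True == value is null
+    valid_mask = pc.invert(null_mask)          # Arrow wants True == valid
+    return valid_mask.buffers()[1]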
+
+
+def validity_buffer_nan_sentinel(
+    data_pa_buffer: BufferObject,
+    data_type: Dtype,
+    describe_null: ColumnNullType,
+    length: int,
+    offset: int = 0,
+    allow_copy: bool = True,
+) -> pa.Buffer | None:
+    """
+    Build a PyArrow buffer from NaN or sentinel values.
+
+    Parameters
+    ----------
+    data_pa_buffer : pa.Buffer
+        PyArrow buffer for the column data.
+    data_type : Dtype
+        Dtype description as a tuple ``(kind, bit-width, format string,
+        endianness)``.
+    describe_null : ColumnNullType
+        Null representation the column dtype uses,
+        as a tuple ``(kind, value)``.
+    length : int
+        The number of values in the array.
+    offset : int, default: 0
+        Number of elements to offset from the start of the buffer.
+    allow_copy : bool, default: True
+        Whether to allow copying the memory to perform the conversion
+        (if False, a zero-copy approach is requested).
+
+    Returns
+    -------
+    pa.Buffer or None
+    """
+    kind, bit_width, _, _ = data_type
+    data_dtype = map_date_type(data_type)
+    null_kind, sentinel_val = describe_null
+
+    # Check for float NaN values
+    if null_kind == ColumnNullType.USE_NAN:
+        if not allow_copy:
+            raise RuntimeError(
+                "To create a bitmask a copy of the data is "
+                "required which is forbidden by allow_copy=False"
+            )
+
+        if kind == DtypeKind.FLOAT and bit_width == 16:
+            # 'pyarrow.compute.is_nan' kernel not yet implemented
+            # for float16
+            raise NotImplementedError(
+                f"{data_type} with {null_kind} is not yet supported.")
+        else:
+            pyarrow_data = pa.Array.from_buffers(
+                data_dtype,
+                length,
+                [None, data_pa_buffer],
+                offset=offset,
+            )
+            mask = pc.is_nan(pyarrow_data)
+            mask = pc.invert(mask)
+            return mask.buffers()[1]
+
+    # Check for sentinel values
+    elif null_kind == ColumnNullType.USE_SENTINEL:
+        if not allow_copy:
+            raise RuntimeError(
+                "To create a bitmask a copy of the data is "
+                "required which is forbidden by allow_copy=False"
+            )
+
+        if kind == DtypeKind.DATETIME:
+            # Datetime data is stored as int64 under the hood, so compare
+            # the sentinel against the raw integer values
+            sentinel_dtype = pa.int64()
+        else:
+            sentinel_dtype = data_dtype
+        pyarrow_data = pa.Array.from_buffers(sentinel_dtype,
+                                             length,
+                                             [None, data_pa_buffer],
+                                             offset=offset)
+        sentinel_arr = pc.equal(pyarrow_data, sentinel_val)
+        mask_bool = pc.invert(sentinel_arr)
+        return mask_bool.buffers()[1]
+
+    elif null_kind == ColumnNullType.NON_NULLABLE:
+        # Non-nullable columns need no validity buffer
+        return None
+    else:
+        raise NotImplementedError(
+            f"{describe_null} null representation is not yet supported.")
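+
+
+# Editor's note: a minimal sketch of the NaN-based validity computation
+# above; illustrative only, assuming NaN marks a missing float value.
+def _example_nan_validity() -> pa.Buffer:
+    data = pa.array([1.0, float("nan"), 3.0])
+    valid = pc.invert(pc.is_nan(data))  # True where the value is usable
+    return valid.buffers()[1]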