File size: 14,564 Bytes
0b5e147 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 |
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include "arrow/chunked_array.h" // IWYU pragma: keep
#include "arrow/record_batch.h"
#include "arrow/status.h"
#include "arrow/type.h"
#include "arrow/type_fwd.h"
#include "arrow/util/macros.h"
#include "arrow/util/visibility.h"
namespace arrow {
class Array;
class ChunkedArray;
class KeyValueMetadata;
class MemoryPool;
/// \class Table
/// \brief Logical table as sequence of chunked arrays
///
/// A Table pairs each field of its Schema with one ChunkedArray column;
/// all columns share the same logical length (num_rows()). Tables are
/// created through the static factory functions below; the mutating-style
/// operations (AddColumn, RemoveColumn, ...) return a new Table rather
/// than modifying this one.
class ARROW_EXPORT Table {
 public:
  virtual ~Table() = default;

  /// \brief Construct a Table from schema and columns
  ///
  /// If columns is zero-length, the table's number of rows is zero
  ///
  /// \param[in] schema The table schema (column types)
  /// \param[in] columns The table's columns as chunked arrays
  /// \param[in] num_rows number of rows in table, -1 (default) to infer from columns
  static std::shared_ptr<Table> Make(std::shared_ptr<Schema> schema,
                                     std::vector<std::shared_ptr<ChunkedArray>> columns,
                                     int64_t num_rows = -1);

  /// \brief Construct a Table from schema and arrays
  ///
  /// Each input array becomes a single-chunk column.
  ///
  /// \param[in] schema The table schema (column types)
  /// \param[in] arrays The table's columns as arrays
  /// \param[in] num_rows number of rows in table, -1 (default) to infer from columns
  static std::shared_ptr<Table> Make(std::shared_ptr<Schema> schema,
                                     const std::vector<std::shared_ptr<Array>>& arrays,
                                     int64_t num_rows = -1);

  /// \brief Create an empty Table of a given schema
  ///
  /// The output Table will be created with a single empty chunk per column.
  ///
  /// \param[in] schema the schema of the empty Table
  /// \param[in] pool the memory pool to allocate memory from
  /// \return the resulting Table
  static Result<std::shared_ptr<Table>> MakeEmpty(
      std::shared_ptr<Schema> schema, MemoryPool* pool = default_memory_pool());

  /// \brief Construct a Table from a RecordBatchReader.
  ///
  /// \param[in] reader the arrow::RecordBatchReader that produces batches
  static Result<std::shared_ptr<Table>> FromRecordBatchReader(RecordBatchReader* reader);

  /// \brief Construct a Table from RecordBatches, using schema supplied by the first
  /// RecordBatch.
  ///
  /// \param[in] batches a std::vector of record batches
  static Result<std::shared_ptr<Table>> FromRecordBatches(
      const std::vector<std::shared_ptr<RecordBatch>>& batches);

  /// \brief Construct a Table from RecordBatches, using supplied schema. There may be
  /// zero record batches
  ///
  /// \param[in] schema the arrow::Schema for each batch
  /// \param[in] batches a std::vector of record batches
  static Result<std::shared_ptr<Table>> FromRecordBatches(
      std::shared_ptr<Schema> schema,
      const std::vector<std::shared_ptr<RecordBatch>>& batches);

  /// \brief Construct a Table from a chunked StructArray. One column will be produced
  /// for each field of the StructArray.
  ///
  /// \param[in] array a chunked StructArray
  static Result<std::shared_ptr<Table>> FromChunkedStructArray(
      const std::shared_ptr<ChunkedArray>& array);

  /// \brief Return the table schema
  const std::shared_ptr<Schema>& schema() const { return schema_; }

  /// \brief Return a column by index
  virtual std::shared_ptr<ChunkedArray> column(int i) const = 0;

  /// \brief Return vector of all columns for table
  virtual const std::vector<std::shared_ptr<ChunkedArray>>& columns() const = 0;

  /// Return a column's field (name and type) by index
  std::shared_ptr<Field> field(int i) const { return schema_->field(i); }

  /// \brief Return vector of all fields for table
  std::vector<std::shared_ptr<Field>> fields() const;

  /// \brief Construct a zero-copy slice of the table with the
  /// indicated offset and length
  ///
  /// \param[in] offset the index of the first row in the constructed
  /// slice
  /// \param[in] length the number of rows of the slice. If there are not enough
  /// rows in the table, the length will be adjusted accordingly
  ///
  /// \return a new object wrapped in std::shared_ptr<Table>
  virtual std::shared_ptr<Table> Slice(int64_t offset, int64_t length) const = 0;

  /// \brief Slice from first row at offset until end of the table
  ///
  /// Passes num_rows_ as the length; the two-argument Slice clamps the
  /// length to the rows actually available (see above).
  std::shared_ptr<Table> Slice(int64_t offset) const { return Slice(offset, num_rows_); }

  /// \brief Return a column by name
  /// \param[in] name field name
  /// \return a ChunkedArray or null if no field was found
  ///
  /// If multiple fields share the name, schema_->GetFieldIndex decides
  /// which index is reported.
  std::shared_ptr<ChunkedArray> GetColumnByName(const std::string& name) const {
    auto i = schema_->GetFieldIndex(name);
    return i == -1 ? NULLPTR : column(i);
  }

  /// \brief Remove column from the table, producing a new Table
  ///
  /// \param[in] i index of the column to remove
  virtual Result<std::shared_ptr<Table>> RemoveColumn(int i) const = 0;

  /// \brief Add column to the table, producing a new Table
  ///
  /// \param[in] i index at which to insert the new column
  /// \param[in] field_arg field (name and type) describing the new column
  /// \param[in] column the column data
  virtual Result<std::shared_ptr<Table>> AddColumn(
      int i, std::shared_ptr<Field> field_arg,
      std::shared_ptr<ChunkedArray> column) const = 0;

  /// \brief Replace a column in the table, producing a new Table
  ///
  /// \param[in] i index of the column to replace
  /// \param[in] field_arg field (name and type) describing the replacement column
  /// \param[in] column the replacement column data
  virtual Result<std::shared_ptr<Table>> SetColumn(
      int i, std::shared_ptr<Field> field_arg,
      std::shared_ptr<ChunkedArray> column) const = 0;

  /// \brief Return names of all columns
  std::vector<std::string> ColumnNames() const;

  /// \brief Rename columns with provided names
  ///
  /// \param[in] names replacement names, one per existing column
  Result<std::shared_ptr<Table>> RenameColumns(
      const std::vector<std::string>& names) const;

  /// \brief Return new table with specified columns
  ///
  /// \param[in] indices column indices to keep, in the desired output order
  Result<std::shared_ptr<Table>> SelectColumns(const std::vector<int>& indices) const;

  /// \brief Replace schema key-value metadata with new metadata
  /// \since 0.5.0
  ///
  /// \param[in] metadata new KeyValueMetadata
  /// \return new Table
  virtual std::shared_ptr<Table> ReplaceSchemaMetadata(
      const std::shared_ptr<const KeyValueMetadata>& metadata) const = 0;

  /// \brief Flatten the table, producing a new Table. Any column with a
  /// struct type will be flattened into multiple columns
  ///
  /// \param[in] pool The pool for buffer allocations, if any
  virtual Result<std::shared_ptr<Table>> Flatten(
      MemoryPool* pool = default_memory_pool()) const = 0;

  /// \return PrettyPrint representation suitable for debugging
  std::string ToString() const;

  /// \brief Perform cheap validation checks to determine obvious inconsistencies
  /// within the table's schema and internal data.
  ///
  /// This is O(k*m) where k is the total number of field descendents,
  /// and m is the number of chunks.
  ///
  /// \return Status
  virtual Status Validate() const = 0;

  /// \brief Perform extensive validation checks to determine inconsistencies
  /// within the table's schema and internal data.
  ///
  /// This is O(k*n) where k is the total number of field descendents,
  /// and n is the number of rows.
  ///
  /// \return Status
  virtual Status ValidateFull() const = 0;

  /// \brief Return the number of columns in the table
  int num_columns() const { return schema_->num_fields(); }

  /// \brief Return the number of rows (equal to each column's logical length)
  int64_t num_rows() const { return num_rows_; }

  /// \brief Determine if tables are equal
  ///
  /// Two tables can be equal only if they have equal schemas.
  /// However, they may be equal even if they have different chunkings.
  ///
  /// \param[in] other the table to compare against
  /// \param[in] check_metadata if true, schema metadata must also match
  bool Equals(const Table& other, bool check_metadata = false) const;

  /// \brief Make a new table by combining the chunks this table has.
  ///
  /// All the underlying chunks in the ChunkedArray of each column are
  /// concatenated into zero or one chunk.
  ///
  /// \param[in] pool The pool for buffer allocations
  Result<std::shared_ptr<Table>> CombineChunks(
      MemoryPool* pool = default_memory_pool()) const;

  /// \brief Make a new record batch by combining the chunks this table has.
  ///
  /// All the underlying chunks in the ChunkedArray of each column are
  /// concatenated into a single chunk.
  ///
  /// \param[in] pool The pool for buffer allocations
  Result<std::shared_ptr<RecordBatch>> CombineChunksToBatch(
      MemoryPool* pool = default_memory_pool()) const;

 protected:
  // Subclasses populate schema_ and num_rows_; base class is not
  // directly constructible.
  Table();

  // Column types and names shared by all implementations.
  std::shared_ptr<Schema> schema_;
  // Logical length of every column.
  int64_t num_rows_;

 private:
  ARROW_DISALLOW_COPY_AND_ASSIGN(Table);
};
/// \brief Compute a stream of record batches from a (possibly chunked) Table
///
/// The conversion is zero-copy: each record batch is a view over a slice
/// of the table's columns.
class ARROW_EXPORT TableBatchReader : public RecordBatchReader {
 public:
  /// \brief Construct a TableBatchReader for the given table
  ///
  /// Does not take ownership: the table must outlive the reader.
  explicit TableBatchReader(const Table& table);

  /// \brief Construct a TableBatchReader that shares ownership of the table,
  /// keeping it alive for the reader's lifetime
  explicit TableBatchReader(std::shared_ptr<Table> table);

  /// \brief Return the schema of the batches produced (the table's schema)
  std::shared_ptr<Schema> schema() const override;

  /// \brief Read the next batch into *out; sets *out to null at end of stream
  Status ReadNext(std::shared_ptr<RecordBatch>* out) override;

  /// \brief Set the desired maximum number of rows for record batches
  ///
  /// The actual number of rows in each record batch may be smaller, depending
  /// on actual chunking characteristics of each table column.
  void set_chunksize(int64_t chunksize);

 private:
  // Non-null only when constructed from a shared_ptr; keeps the table alive.
  std::shared_ptr<Table> owned_table_;
  // The table being iterated (refers to *owned_table_ when that is set).
  const Table& table_;
  // Per-column iteration state: raw column pointers plus the current
  // chunk index and offset within that chunk for each column.
  std::vector<ChunkedArray*> column_data_;
  std::vector<int> chunk_numbers_;
  std::vector<int64_t> chunk_offsets_;
  // Number of table rows consumed so far.
  int64_t absolute_row_position_;
  // Upper bound on rows per emitted batch (see set_chunksize).
  int64_t max_chunksize_;
};
/// \defgroup concat-tables ConcatenateTables function.
///
/// ConcatenateTables function.
/// @{
/// \brief Controls the behavior of ConcatenateTables().
struct ARROW_EXPORT ConcatenateTablesOptions {
  /// If true, the schemas of the tables will be first unified with fields of
  /// the same name being merged, according to `field_merge_options`, then each
  /// table will be promoted to the unified schema before being concatenated.
  /// Otherwise, all tables should have the same schema. Each column in the output table
  /// is the result of concatenating the corresponding columns in all input tables.
  bool unify_schemas = false;

  /// options to control how fields are merged when unifying schemas
  ///
  /// This field will be ignored if unify_schemas is false
  Field::MergeOptions field_merge_options = Field::MergeOptions::Defaults();

  /// \brief Return default options: no schema unification
  static ConcatenateTablesOptions Defaults() { return {}; }
};
/// \brief Construct a new table from multiple input tables.
///
/// The new table is assembled from existing column chunks without copying,
/// if schemas are identical. If schemas do not match exactly and
/// unify_schemas is enabled in options (off by default), an attempt is
/// made to unify them, and then column chunks are converted to their
/// respective unified datatype, which will probably incur a copy.
/// :func:`arrow::PromoteTableToSchema` is used to unify schemas.
///
/// Tables are concatenated in order they are provided in and the order of
/// rows within tables will be preserved.
///
/// \param[in] tables a std::vector of Tables to be concatenated
/// \param[in] options specify how to unify schema of input tables
/// \param[in] memory_pool MemoryPool to be used if null-filled arrays need to
/// be created or if existing column chunks need to endure type conversion
/// \return new Table
ARROW_EXPORT
Result<std::shared_ptr<Table>> ConcatenateTables(
    const std::vector<std::shared_ptr<Table>>& tables,
    ConcatenateTablesOptions options = ConcatenateTablesOptions::Defaults(),
    MemoryPool* memory_pool = default_memory_pool());
// Forward declaration only; avoids pulling in the compute headers here.
namespace compute {
class CastOptions;
}  // namespace compute

/// \brief Promotes a table to conform to the given schema.
///
/// If a field in the schema does not have a corresponding column in
/// the table, a column of nulls will be added to the resulting table.
/// If the corresponding column is of type Null, it will be promoted
/// to the type specified by schema, with null values filled. The
/// column will be casted to the type specified by the schema.
///
/// Returns an error:
/// - if the corresponding column's type is not compatible with the
///   schema.
/// - if there is a column in the table that does not exist in the schema.
/// - if the cast fails or casting would be required but is not available.
///
/// \param[in] table the input Table
/// \param[in] schema the target schema to promote to
/// \param[in] pool The memory pool to be used if null-filled arrays need to
/// be created.
/// \return the promoted Table
ARROW_EXPORT
Result<std::shared_ptr<Table>> PromoteTableToSchema(
    const std::shared_ptr<Table>& table, const std::shared_ptr<Schema>& schema,
    MemoryPool* pool = default_memory_pool());
/// \brief Promotes a table to conform to the given schema.
///
/// Overload taking explicit cast options to control which type
/// promotions are permitted.
///
/// If a field in the schema does not have a corresponding column in
/// the table, a column of nulls will be added to the resulting table.
/// If the corresponding column is of type Null, it will be promoted
/// to the type specified by schema, with null values filled. The column
/// will be casted to the type specified by the schema.
///
/// Returns an error:
/// - if the corresponding column's type is not compatible with the
///   schema.
/// - if there is a column in the table that does not exist in the schema.
/// - if the cast fails or casting would be required but is not available.
///
/// \param[in] table the input Table
/// \param[in] schema the target schema to promote to
/// \param[in] options The cast options to allow promotion of types
/// \param[in] pool The memory pool to be used if null-filled arrays need to
/// be created.
/// \return the promoted Table
ARROW_EXPORT
Result<std::shared_ptr<Table>> PromoteTableToSchema(
    const std::shared_ptr<Table>& table, const std::shared_ptr<Schema>& schema,
    const compute::CastOptions& options, MemoryPool* pool = default_memory_pool());
} // namespace arrow
|