File size: 5,212 Bytes
3230c19
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <memory>

#include "arrow/io/type_fwd.h"
#include "arrow/json/options.h"
#include "arrow/record_batch.h"
#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/util/macros.h"
#include "arrow/util/type_fwd.h"
#include "arrow/util/visibility.h"

namespace arrow {
namespace json {

/// \brief A class that reads an entire JSON file into an Arrow Table
///
/// The file is expected to consist of individual line-separated JSON objects
class ARROW_EXPORT TableReader {
 public:
  virtual ~TableReader() = default;

  /// \brief Read the entire JSON file and convert it to an Arrow Table
  ///
  /// \return the parsed Table, or an error on invalid JSON input
  virtual Result<std::shared_ptr<Table>> Read() = 0;

  /// \brief Create a TableReader instance
  ///
  /// \param[in] pool memory pool for table allocations
  /// \param[in] input stream supplying the JSON data
  /// \return the initialized reader
  static Result<std::shared_ptr<TableReader>> Make(MemoryPool* pool,
                                                   std::shared_ptr<io::InputStream> input,
                                                   const ReadOptions&,
                                                   const ParseOptions&);
};

/// \brief Parse the contents of a single JSON buffer into one RecordBatch
///
/// \param[in] options options governing parsing and conversion
/// \param[in] json buffer holding the JSON data to parse
/// \return the resulting RecordBatch, or an error on invalid input
ARROW_EXPORT Result<std::shared_ptr<RecordBatch>> ParseOne(ParseOptions options,
                                                           std::shared_ptr<Buffer> json);

/// \brief A class that reads a JSON file incrementally
///
/// JSON data is read from a stream in fixed-size blocks (configurable with
/// `ReadOptions::block_size`). Each block is converted to a `RecordBatch`. Yielded
/// batches have a consistent schema but may differ in row count.
///
/// The supplied `ParseOptions` are used to determine a schema, based either on a
/// provided explicit schema or inferred from the first non-empty block.
/// Afterwards, the target schema is frozen. If `UnexpectedFieldBehavior::InferType` is
/// specified, unexpected fields will only be inferred for the first block. Afterwards
/// they'll be treated as errors.
///
/// If `ReadOptions::use_threads` is `true`, each block's parsing/decoding task will be
/// parallelized on the given `cpu_executor` (with readahead corresponding to the
/// executor's capacity). If an executor isn't provided, the global thread pool will be
/// used.
///
/// If `ReadOptions::use_threads` is `false`, computations will be run on the calling
/// thread and `cpu_executor` will be ignored.
class ARROW_EXPORT StreamingReader : public RecordBatchReader {
 public:
  virtual ~StreamingReader() = default;

  /// \brief Read the next `RecordBatch` asynchronously
  ///
  /// This function is async-reentrant (but not synchronously reentrant). However, if
  /// threading is disabled, this will block until completion.
  ///
  /// \return a future resolving to the next batch
  virtual Future<std::shared_ptr<RecordBatch>> ReadNextAsync() = 0;

  /// \brief Get the number of bytes which have been successfully converted to record
  /// batches and consumed
  [[nodiscard]] virtual int64_t bytes_processed() const = 0;

  /// \brief Create a `StreamingReader` from an `InputStream`
  ///
  /// Blocks until the initial batch is loaded.
  ///
  /// \param[in] stream JSON source stream
  /// \param[in] read_options Options for reading
  /// \param[in] parse_options Options for chunking, parsing, and conversion
  /// \param[in] io_context Context for IO operations (optional)
  /// \param[in] cpu_executor Executor for computation tasks (optional)
  /// \return The initialized reader
  static Result<std::shared_ptr<StreamingReader>> Make(
      std::shared_ptr<io::InputStream> stream, const ReadOptions& read_options,
      const ParseOptions& parse_options,
      const io::IOContext& io_context = io::default_io_context(),
      ::arrow::internal::Executor* cpu_executor = NULLPTR);

  /// \brief Create a `StreamingReader` from an `InputStream` asynchronously
  ///
  /// The returned future completes after loading the first batch.
  ///
  /// \param[in] stream JSON source stream
  /// \param[in] read_options Options for reading
  /// \param[in] parse_options Options for chunking, parsing, and conversion
  /// \param[in] io_context Context for IO operations (optional)
  /// \param[in] cpu_executor Executor for computation tasks (optional)
  /// \return Future for the initialized reader
  static Future<std::shared_ptr<StreamingReader>> MakeAsync(
      std::shared_ptr<io::InputStream> stream, const ReadOptions& read_options,
      const ParseOptions& parse_options,
      const io::IOContext& io_context = io::default_io_context(),
      ::arrow::internal::Executor* cpu_executor = NULLPTR);
};

}  // namespace json
}  // namespace arrow