File size: 14,560 Bytes
0b5e147
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <memory>
#include <string>
#include <vector>

#include "arrow/filesystem/filesystem.h"
#include "arrow/util/macros.h"
#include "arrow/util/uri.h"

namespace Azure::Core::Credentials {
class TokenCredential;
}

namespace Azure::Storage {
class StorageSharedKeyCredential;
}

namespace Azure::Storage::Blobs {
class BlobServiceClient;
}

namespace Azure::Storage::Files::DataLake {
class DataLakeFileSystemClient;
class DataLakeServiceClient;
}  // namespace Azure::Storage::Files::DataLake

namespace arrow::fs {

class TestAzureFileSystem;
class TestAzureOptions;

/// Options for the AzureFileSystem implementation.
///
/// By default, authentication is handled by the Azure SDK's credential chain
/// which may read from multiple environment variables, such as:
/// - `AZURE_TENANT_ID`
/// - `AZURE_CLIENT_ID`
/// - `AZURE_CLIENT_SECRET`
/// - `AZURE_AUTHORITY_HOST`
/// - `AZURE_CLIENT_CERTIFICATE_PATH`
/// - `AZURE_FEDERATED_TOKEN_FILE`
///
/// Functions are provided for explicit configuration of credentials if that is preferred.
struct ARROW_EXPORT AzureOptions {
  friend class TestAzureOptions;

  /// \brief The name of the Azure Storage Account being accessed.
  ///
  /// All service URLs will be constructed using this storage account name.
  /// `ConfigureAccountKeyCredential` assumes the user wants to authenticate
  /// this account.
  std::string account_name;

  /// \brief hostname[:port] of the Azure Blob Storage Service.
  ///
  /// If the hostname is a relative domain name (one that starts with a '.'), then storage
  /// account URLs will be constructed by prepending the account name to the hostname.
  /// If the hostname is a fully qualified domain name, then the hostname will be used
  /// as-is and the account name will follow the hostname in the URL path.
  ///
  /// Default: ".blob.core.windows.net"
  std::string blob_storage_authority = ".blob.core.windows.net";

  /// \brief hostname[:port] of the Azure Data Lake Storage Gen 2 Service.
  ///
  /// If the hostname is a relative domain name (one that starts with a '.'), then storage
  /// account URLs will be constructed by prepending the account name to the hostname.
  /// If the hostname is a fully qualified domain name, then the hostname will be used
  /// as-is and the account name will follow the hostname in the URL path.
  ///
  /// Default: ".dfs.core.windows.net"
  std::string dfs_storage_authority = ".dfs.core.windows.net";

  /// \brief Azure Blob Storage connection transport.
  ///
  /// Default: "https"
  std::string blob_storage_scheme = "https";

  /// \brief Azure Data Lake Storage Gen 2 connection transport.
  ///
  /// Default: "https"
  std::string dfs_storage_scheme = "https";

  // TODO(GH-38598): Add support for more auth methods.
  // std::string connection_string;
  // std::string sas_token;

  /// \brief Default metadata for OpenOutputStream.
  ///
  /// This will be ignored if non-empty metadata is passed to OpenOutputStream.
  std::shared_ptr<const KeyValueMetadata> default_metadata;

 private:
  /// \brief Which credential type was configured on these options.
  ///
  /// Set by the `Configure*Credential` functions below; `kDefault` means the
  /// Azure SDK's default credential chain will be used (see class comment).
  enum class CredentialKind {
    kDefault,
    kAnonymous,
    kStorageSharedKey,
    kClientSecret,
    kManagedIdentity,
    kWorkloadIdentity,
  } credential_kind_ = CredentialKind::kDefault;

  // Set only when credential_kind_ == kStorageSharedKey
  // (via ConfigureAccountKeyCredential).
  std::shared_ptr<Azure::Storage::StorageSharedKeyCredential>
      storage_shared_key_credential_;
  // Token-based credential (client secret, managed identity, workload identity,
  // or the default chain). `mutable` so it can be created lazily from const
  // contexts such as the Make*Client() const factory methods below.
  mutable std::shared_ptr<Azure::Core::Credentials::TokenCredential> token_credential_;

 public:
  // Out-of-line constructor/destructor so the forward-declared Azure SDK
  // credential types above don't need complete definitions in this header.
  AzureOptions();
  ~AzureOptions();

 private:
  // Parse the scheme and authority/path parts of an abfs[s]:// URI into these
  // options, writing the remaining filesystem path to *out_path.
  void ExtractFromUriSchemeAndHierPart(const Uri& uri, std::string* out_path);
  // Parse the URI query parameters documented on FromUri() below.
  Status ExtractFromUriQuery(const Uri& uri);

 public:
  /// \brief Construct a new AzureOptions from an URI.
  ///
  /// Supported formats:
  ///
  /// 1. abfs[s]://[:\<password\>@]\<account\>.blob.core.windows.net
  ///    [/\<container\>[/\<path\>]]
  /// 2. abfs[s]://\<container\>[:\<password\>]@\<account\>.dfs.core.windows.net
  ///     [/path]
  /// 3. abfs[s]://[\<account[:\<password\>]@]\<host[.domain]\>[\<:port\>]
  ///    [/\<container\>[/path]]
  /// 4. abfs[s]://[\<account[:\<password\>]@]\<container\>[/path]
  ///
  /// 1. and 2. are compatible with the Azure Data Lake Storage Gen2 URIs:
  /// https://learn.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-introduction-abfs-uri
  ///
  /// 3. is for Azure Blob Storage compatible service including Azurite.
  ///
  /// 4. is a shorter version of 1. and 2.
  ///
  /// Note that there is no difference between abfs and abfss. HTTPS is
  /// used with abfs by default. You can force to use HTTP by specifying
  /// "enable_tls=false" query.
  ///
  /// Supported query parameters:
  ///
  /// * blob_storage_authority: Set AzureOptions::blob_storage_authority
  /// * dfs_storage_authority: Set AzureOptions::dfs_storage_authority
  /// * enable_tls: If it's "false" or "0", HTTP not HTTPS is used.
  /// * credential_kind: One of "default", "anonymous",
  ///   "workload_identity". If "default" is specified, it's just
  ///   ignored.  If "anonymous" is specified,
  ///   AzureOptions::ConfigureAnonymousCredential() is called. If
  ///   "workload_identity" is specified,
  ///   AzureOptions::ConfigureWorkloadIdentityCredential() is called.
  /// * tenant_id: You must specify "client_id" and "client_secret"
  ///   too. AzureOptions::ConfigureClientSecretCredential() is called.
  /// * client_id: If you don't specify "tenant_id" and
  ///   "client_secret",
  ///   AzureOptions::ConfigureManagedIdentityCredential() is
  ///   called. If you specify "tenant_id" and "client_secret" too,
  ///   AzureOptions::ConfigureClientSecretCredential() is called.
  /// * client_secret: You must specify "tenant_id" and "client_id"
  ///   too. AzureOptions::ConfigureClientSecretCredential() is called.
  static Result<AzureOptions> FromUri(const Uri& uri, std::string* out_path);
  static Result<AzureOptions> FromUri(const std::string& uri, std::string* out_path);

  /// \brief Use the Azure SDK's default credential chain (see class comment).
  Status ConfigureDefaultCredential();
  /// \brief Use anonymous (unauthenticated) access; only public resources
  /// will be accessible.
  Status ConfigureAnonymousCredential();
  /// \brief Authenticate with a storage account shared key for `account_name`.
  Status ConfigureAccountKeyCredential(const std::string& account_key);
  /// \brief Authenticate with an Azure Active Directory application
  /// (client secret) credential.
  Status ConfigureClientSecretCredential(const std::string& tenant_id,
                                         const std::string& client_id,
                                         const std::string& client_secret);
  /// \brief Authenticate with an Azure managed identity; an empty `client_id`
  /// selects the system-assigned identity.
  Status ConfigureManagedIdentityCredential(const std::string& client_id = std::string());
  /// \brief Authenticate with an Azure workload identity (e.g. federated
  /// Kubernetes service account).
  Status ConfigureWorkloadIdentityCredential();

  /// \brief Compare these options (including the configured credential kind)
  /// with another instance for equality.
  bool Equals(const AzureOptions& other) const;

  /// \brief Build the Blob Storage service URL for the given account name,
  /// using blob_storage_scheme and blob_storage_authority.
  std::string AccountBlobUrl(const std::string& account_name) const;
  /// \brief Build the Data Lake Storage Gen 2 service URL for the given
  /// account name, using dfs_storage_scheme and dfs_storage_authority.
  std::string AccountDfsUrl(const std::string& account_name) const;

  /// \brief Create a Blob service client configured with the credential
  /// selected on these options.
  Result<std::unique_ptr<Azure::Storage::Blobs::BlobServiceClient>>
  MakeBlobServiceClient() const;

  /// \brief Create a Data Lake service client configured with the credential
  /// selected on these options.
  Result<std::unique_ptr<Azure::Storage::Files::DataLake::DataLakeServiceClient>>
  MakeDataLakeServiceClient() const;
};

/// \brief FileSystem implementation backed by Azure Blob Storage (ABS) [1] and
/// Azure Data Lake Storage Gen2 (ADLS Gen2) [2].
///
/// ADLS Gen2 isn't a dedicated service or account type. It's a set of capabilities that
/// support high throughput analytic workloads, built on Azure Blob Storage. All the data
/// ingested via the ADLS Gen2 APIs is persisted as blobs in the storage account.
/// ADLS Gen2 provides filesystem semantics, file-level security, and Hadoop
/// compatibility. ADLS Gen1 exists as a separate object that was retired on 2024-02-29;
/// new ADLS accounts use Gen2 instead.
///
/// ADLS Gen2 and Blob APIs can operate on the same data, but there are
/// some limitations [3]. The ones that are relevant to this
/// implementation are listed here:
///
/// - You can't use Blob APIs, and ADLS APIs to write to the same instance of a file. If
///   you write to a file by using ADLS APIs then that file's blocks won't be visible
///   to calls to the GetBlockList Blob API. The only exception is when you're
///   overwriting.
/// - When you use the ListBlobs operation without specifying a delimiter, the results
///   include both directories and blobs. If you choose to use a delimiter, use only a
///   forward slash (/) -- the only supported delimiter.
/// - If you use the DeleteBlob API to delete a directory, that directory is deleted only
///   if it's empty. This means that you can't use the Blob API delete directories
///   recursively.
///
/// [1]: https://azure.microsoft.com/en-us/products/storage/blobs
/// [2]: https://azure.microsoft.com/en-us/products/storage/data-lake-storage
/// [3]:
/// https://learn.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-known-issues
class ARROW_EXPORT AzureFileSystem : public FileSystem {
 private:
  // Pimpl: keeps Azure SDK types out of this public header.
  class Impl;
  std::unique_ptr<Impl> impl_;

  // Private: instances are created via Make() so construction failures can be
  // reported through Result instead of exceptions.
  explicit AzureFileSystem(std::unique_ptr<Impl>&& impl);

  friend class TestAzureFileSystem;
  // Test-only hook: force the cached "does this account have Hierarchical
  // Namespace support?" answer instead of detecting it from the service.
  void ForceCachedHierarchicalNamespaceSupport(int hns_support);

 public:
  ~AzureFileSystem() override = default;

  /// \brief Create an AzureFileSystem from the given options.
  static Result<std::shared_ptr<AzureFileSystem>> Make(
      const AzureOptions& options, const io::IOContext& = io::default_io_context());

  std::string type_name() const override { return "abfs"; }

  /// Return the original Azure options when constructing the filesystem
  const AzureOptions& options() const;

  bool Equals(const FileSystem& other) const override;

  /// \cond FALSE
  using FileSystem::CreateDir;
  using FileSystem::DeleteDirContents;
  using FileSystem::GetFileInfo;
  using FileSystem::OpenAppendStream;
  using FileSystem::OpenOutputStream;
  /// \endcond

  Result<FileInfo> GetFileInfo(const std::string& path) override;

  Result<FileInfoVector> GetFileInfo(const FileSelector& select) override;

  Status CreateDir(const std::string& path, bool recursive) override;

  /// \brief Delete a directory and its contents recursively.
  ///
  /// Atomicity is guaranteed only on Hierarchical Namespace Storage accounts.
  Status DeleteDir(const std::string& path) override;

  /// \brief Non-atomically deletes the contents of a directory.
  ///
  /// This function can return a bad Status after only partially deleting the
  /// contents of the directory.
  Status DeleteDirContents(const std::string& path, bool missing_dir_ok) override;

  /// \brief Deletion of all the containers in the storage account (not
  /// implemented for safety reasons).
  ///
  /// \return Status::NotImplemented
  Status DeleteRootDirContents() override;

  /// \brief Deletes a file.
  ///
  /// Supported on both flat namespace and Hierarchical Namespace storage
  /// accounts. A check is made to guarantee the parent directory doesn't
  /// disappear after the blob is deleted and while this operation is running,
  /// no other client can delete the parent directory due to the use of leases.
  ///
  /// This means applications can safely retry this operation without coordination to
  /// guarantee only one client/process is trying to delete the same file.
  Status DeleteFile(const std::string& path) override;

  /// \brief Move/rename a file or directory.
  ///
  /// There are no files immediately at the root directory, so paths like
  /// "/segment" always refer to a container of the storage account and are
  /// treated as directories.
  ///
  /// If `dest` exists but the operation fails for some reason, `Move`
  /// guarantees `dest` is not lost.
  ///
  /// Conditions for a successful move:
  ///
  /// 1. `src` must exist.
  /// 2. `dest` can't contain a strict path prefix of `src`. More generally,
  ///    a directory can't be made a subdirectory of itself.
  /// 3. If `dest` already exists and it's a file, `src` must also be a file.
  ///    `dest` is then replaced by `src`.
  /// 4. All components of `dest` must exist, except for the last.
  /// 5. If `dest` already exists and it's a directory, `src` must also be a
  ///    directory and `dest` must be empty. `dest` is then replaced by `src`
  ///    and its contents.
  ///
  /// Leases are used to guarantee the pre-condition checks and the rename
  /// operation are atomic: other clients can't invalidate the pre-condition in
  /// the time between the checks and the actual rename operation.
  ///
  /// This is possible because Move() is only supported on storage accounts with
  /// Hierarchical Namespace Support enabled.
  ///
  /// ## Limitations
  ///
  /// - Moves are not supported on storage accounts without
  ///   Hierarchical Namespace support enabled
  /// - Moves across different containers are not supported
  /// - Moving a path of the form `/container` is not supported as it would
  ///   require moving all the files in a container to another container.
  ///   The only exception is a `Move("/container_a", "/container_b")` where
  ///   both containers are empty or `container_b` doesn't even exist.
  ///   The atomicity of the emptiness checks followed by the renaming operation
  ///   is guaranteed by the use of leases.
  Status Move(const std::string& src, const std::string& dest) override;

  Status CopyFile(const std::string& src, const std::string& dest) override;

  Result<std::shared_ptr<io::InputStream>> OpenInputStream(
      const std::string& path) override;

  /// \brief Open an input stream using an already-fetched FileInfo, avoiding
  /// an extra metadata lookup.
  Result<std::shared_ptr<io::InputStream>> OpenInputStream(const FileInfo& info) override;

  Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
      const std::string& path) override;

  /// \brief Open a random-access file using an already-fetched FileInfo,
  /// avoiding an extra metadata lookup.
  Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
      const FileInfo& info) override;

  /// \brief Open an output stream; `metadata`, when non-empty, overrides
  /// AzureOptions::default_metadata.
  Result<std::shared_ptr<io::OutputStream>> OpenOutputStream(
      const std::string& path,
      const std::shared_ptr<const KeyValueMetadata>& metadata) override;

  Result<std::shared_ptr<io::OutputStream>> OpenAppendStream(
      const std::string& path,
      const std::shared_ptr<const KeyValueMetadata>& metadata) override;
};

}  // namespace arrow::fs