applied-ai-018 committed
Commit ef521bb · verified · 1 parent: b60b1e0

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full changeset.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/basic/factory.h +23 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/cma/factory.h +23 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/context.h +108 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/error.h +40 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/mpt/factory.h +27 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/xth/factory.h +23 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/buffer.h +135 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/cpu_buffer.h +23 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/cuda_buffer.h +24 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/device.h +64 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/error.h +127 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/optional.h +1020 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/config.h +14 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/config_cuda.h +12 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/core/context.h +96 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/core/error.h +48 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/core/listener.h +96 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/core/message.h +104 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/core/pipe.h +98 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/context.h +78 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/error.h +47 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/ibv/error.h +48 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/ibv/factory.h +23 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/ibv/utility.h +26 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/shm/factory.h +23 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/uv/error.h +38 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/uv/factory.h +23 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/uv/utility.h +36 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/CudaIPCTypes.h +144 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/DataLoader.h +6 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Device.h +21 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Dtype.h +30 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/DynamicTypes.h +36 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Exceptions.h +414 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Export.h +9 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Generator.h +28 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Layout.h +25 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/MemoryFormat.h +27 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Module.h +6 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/PyInterpreter.h +7 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/QScheme.h +25 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Size.h +15 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Storage.h +56 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/StorageMethods.h +8 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/StorageSharing.h +8 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Stream.h +23 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/THConcat.h +19 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/THP.h +30 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/TypeInfo.h +26 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Types.h +13 -0
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/basic/factory.h ADDED
@@ -0,0 +1,23 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #include <memory>
+
+ #include <tensorpipe/channel/context.h>
+
+ namespace tensorpipe {
+ namespace channel {
+ namespace basic {
+
+ std::shared_ptr<Context> create();
+
+ } // namespace basic
+ } // namespace channel
+ } // namespace tensorpipe
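
For orientation, this is how such a factory is typically consumed: the returned channel context is handed to the core context. A minimal sketch, assuming the core tensorpipe::Context exposes registerChannel (priority, name, context) as in upstream TensorPipe; the umbrella header and the name "basic" are assumptions, not part of this diff:

// Illustrative wiring-up of the basic channel (assumed upstream core API).
#include <tensorpipe/tensorpipe.h>  // assumed umbrella header

void setUpChannels() {
  auto context = std::make_shared<tensorpipe::Context>();
  // Priority 0, name "basic"; the core context keeps the channel context alive.
  context->registerChannel(0, "basic", tensorpipe::channel::basic::create());
}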
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/cma/factory.h ADDED
@@ -0,0 +1,23 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #include <memory>
+
+ #include <tensorpipe/channel/context.h>
+
+ namespace tensorpipe {
+ namespace channel {
+ namespace cma {
+
+ std::shared_ptr<Context> create();
+
+ } // namespace cma
+ } // namespace channel
+ } // namespace tensorpipe
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/context.h ADDED
@@ -0,0 +1,108 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #include <memory>
+ #include <string>
+ #include <unordered_map>
+ #include <vector>
+
+ #include <tensorpipe/common/buffer.h>
+ #include <tensorpipe/transport/context.h>
+
+ namespace tensorpipe {
+ namespace channel {
+
+ enum class Endpoint : bool { kConnect, kListen };
+
+ class Channel;
+
+ // Abstract base class for channel context classes.
+ //
+ // Instances of these classes are expected to be registered with a
+ // context. All registered instances are assumed to be eligible
+ // channels for all pairs.
+ //
+ class Context {
+  public:
+   // Return whether the context is able to operate correctly.
+   //
+   // Some channel types may be unable to perform as intended under some
+   // circumstances (e.g., specialized hardware unavailable, lack of
+   // permissions). They can report it through this method in order for
+   // the core context to avoid registering them in the first place.
+   //
+   virtual bool isViable() const = 0;
+
+   // Return the number of control connections needed to create an instance of
+   // this channel.
+   //
+   // Most channels require only one, but some require more (cuda_basic), and
+   // some might require none.
+   //
+   virtual size_t numConnectionsNeeded() const = 0;
+
+   // Return a map from supported devices to strings describing the device from
+   // the channel's perspective.
+   //
+   // Two processes with a channel context of the same type can leverage this
+   // channel to make two devices communicate if one side's device descriptor is
+   // "accepted" by the other one, using the canCommunicateWithRemote method
+   // below. That method must be symmetric, and unless overridden defaults to
+   // string comparison.
+   //
+   virtual const std::unordered_map<Device, std::string>& deviceDescriptors()
+       const = 0;
+
+   // Compare local and remote device descriptors for compatibility.
+   //
+   // Determine whether a channel can be opened between a local device and
+   // a remote one that has the given device descriptor. This function
+   // needs to be symmetric: if we called this method on the remote
+   // context with the local descriptor we should get the same answer.
+   // Unless overridden it defaults to string comparison.
+   //
+   virtual bool canCommunicateWithRemote(
+       const std::string& localDeviceDescriptor,
+       const std::string& remoteDeviceDescriptor) const = 0;
+
+   // Return newly created channel using the specified connections.
+   //
+   // It is up to the channel to either use these connections for further
+   // initialization, or use them directly. Either way, the returned
+   // channel should be immediately usable. If the channel isn't fully
+   // initialized yet, take care to queue these operations to execute
+   // as soon as initialization has completed.
+   //
+   virtual std::shared_ptr<Channel> createChannel(
+       std::vector<std::shared_ptr<transport::Connection>>,
+       Endpoint) = 0;
+
+   // Tell the context what its identifier is.
+   //
+   // This is only supposed to be called from the high-level context. It will
+   // only be used for logging and debugging purposes.
+   virtual void setId(std::string id) = 0;
+
+   // Put the channel context in a terminal state, in turn closing all of its
+   // channels, and release its resources. This may be done asynchronously, in
+   // background.
+   virtual void close() = 0;
+
+   // Wait for all resources to be released and all background activity to stop.
+   virtual void join() = 0;
+
+   virtual ~Context() = default;
+
+  private:
+   std::string name_;
+ };
+
+ } // namespace channel
+ } // namespace tensorpipe
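
To make the contract above concrete, here is a hypothetical skeleton of a subclass (illustrative only, not part of this diff). It shows the behavior the comments describe: descriptors are matched by symmetric string comparison, setId is logging-only, and close()/join() tear the context down:

// Hypothetical channel context implementing the pure-virtual interface above.
#include <tensorpipe/channel/context.h>

class MyChannelContext : public tensorpipe::channel::Context {
 public:
  bool isViable() const override {
    return true;  // a real channel would probe hardware/permissions here
  }

  size_t numConnectionsNeeded() const override {
    return 1;  // most channels need a single control connection
  }

  const std::unordered_map<tensorpipe::Device, std::string>&
  deviceDescriptors() const override {
    return deviceDescriptors_;
  }

  bool canCommunicateWithRemote(
      const std::string& localDeviceDescriptor,
      const std::string& remoteDeviceDescriptor) const override {
    // The documented default: symmetric string comparison.
    return localDeviceDescriptor == remoteDeviceDescriptor;
  }

  std::shared_ptr<tensorpipe::channel::Channel> createChannel(
      std::vector<std::shared_ptr<tensorpipe::transport::Connection>> conns,
      tensorpipe::channel::Endpoint endpoint) override {
    (void)conns;
    (void)endpoint;
    return nullptr;  // a real implementation builds a Channel over conns
  }

  void setId(std::string id) override {
    id_ = std::move(id);  // used for logging/debugging only
  }

  void close() override {}  // close all channels, possibly asynchronously
  void join() override {}   // block until background activity has stopped

 private:
  std::unordered_map<tensorpipe::Device, std::string> deviceDescriptors_{
      {tensorpipe::Device{tensorpipe::kCpuDeviceType, 0}, "my-channel:cpu"}};
  std::string id_;
};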
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/error.h ADDED
@@ -0,0 +1,40 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #include <string>
+
+ #include <tensorpipe/common/error.h>
+
+ namespace tensorpipe {
+ namespace channel {
+
+ class ContextClosedError final : public BaseError {
+  public:
+   ContextClosedError() {}
+
+   std::string what() const override;
+ };
+
+ class ChannelClosedError final : public BaseError {
+  public:
+   ChannelClosedError() {}
+
+   std::string what() const override;
+ };
+
+ class ContextNotViableError final : public BaseError {
+  public:
+   ContextNotViableError() {}
+
+   std::string what() const override;
+ };
+
+ } // namespace channel
+ } // namespace tensorpipe
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/mpt/factory.h ADDED
@@ -0,0 +1,27 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #include <memory>
+ #include <vector>
+
+ #include <tensorpipe/channel/context.h>
+ #include <tensorpipe/transport/context.h>
+
+ namespace tensorpipe {
+ namespace channel {
+ namespace mpt {
+
+ std::shared_ptr<Context> create(
+     std::vector<std::shared_ptr<transport::Context>> contexts,
+     std::vector<std::shared_ptr<transport::Listener>> listeners);
+
+ } // namespace mpt
+ } // namespace channel
+ } // namespace tensorpipe
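
Unlike the single-argument factories, mpt ("multiplexed transport") takes one transport context and one listener per lane. A sketch of how it might be invoked, assuming transport::Context exposes a listen(addr) method as in upstream TensorPipe and that the uv factory (also in this diff) is available; the lane count and address format are illustrative assumptions:

// Hypothetical construction of an mpt channel over four uv lanes.
#include <tensorpipe/channel/mpt/factory.h>
#include <tensorpipe/transport/uv/factory.h>

std::shared_ptr<tensorpipe::channel::Context> makeMptChannel() {
  std::vector<std::shared_ptr<tensorpipe::transport::Context>> contexts;
  std::vector<std::shared_ptr<tensorpipe::transport::Listener>> listeners;
  for (int lane = 0; lane < 4; ++lane) {
    auto ctx = tensorpipe::transport::uv::create();
    // Address format is transport-specific (assumed here).
    listeners.push_back(ctx->listen("127.0.0.1"));
    contexts.push_back(std::move(ctx));
  }
  return tensorpipe::channel::mpt::create(
      std::move(contexts), std::move(listeners));
}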
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/xth/factory.h ADDED
@@ -0,0 +1,23 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #include <memory>
+
+ #include <tensorpipe/channel/context.h>
+
+ namespace tensorpipe {
+ namespace channel {
+ namespace xth {
+
+ std::shared_ptr<Context> create();
+
+ } // namespace xth
+ } // namespace channel
+ } // namespace tensorpipe
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/buffer.h ADDED
@@ -0,0 +1,135 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #include <cstddef>
+ #include <stdexcept>
+ #include <type_traits>
+ #include <utility>
+
+ #include <tensorpipe/common/cpu_buffer.h>
+ #include <tensorpipe/common/device.h>
+
+ namespace tensorpipe {
+
+ class Buffer {
+   class AbstractBufferWrapper {
+    public:
+     virtual Device device() const = 0;
+     virtual void copyConstructInto(void* ptr) const = 0;
+     virtual void moveConstructInto(void* ptr) = 0;
+     virtual ~AbstractBufferWrapper() = default;
+   };
+
+   template <typename TBuffer>
+   class BufferWrapper : public AbstractBufferWrapper {
+     static_assert(
+         std::is_trivially_copyable<TBuffer>::value,
+         "wrapping non-trivially copyable class");
+
+    public:
+     TBuffer buffer;
+
+     explicit BufferWrapper(TBuffer buffer) : buffer(std::move(buffer)) {}
+
+     Device device() const override {
+       return buffer.getDevice();
+     }
+
+     void copyConstructInto(void* ptr) const override {
+       new (ptr) BufferWrapper(*this);
+     }
+
+     void moveConstructInto(void* ptr) override {
+       new (ptr) BufferWrapper(std::move(*this));
+     }
+   };
+
+  public:
+   template <typename TBuffer>
+   /* implicit */ Buffer(TBuffer b) {
+     static_assert(
+         sizeof(BufferWrapper<TBuffer>) <= kStructSize, "kStructSize too small");
+     static_assert(
+         alignof(BufferWrapper<TBuffer>) <= kStructAlign,
+         "kStructAlign too small");
+     new (&raw_) BufferWrapper<TBuffer>(std::move(b));
+   }
+
+   Buffer() : Buffer(CpuBuffer{}) {}
+
+   Buffer(const Buffer& other) {
+     other.ptr()->copyConstructInto(&raw_);
+   }
+
+   Buffer& operator=(const Buffer& other) {
+     if (this != &other) {
+       ptr()->~AbstractBufferWrapper();
+       other.ptr()->copyConstructInto(&raw_);
+     }
+     return *this;
+   }
+
+   Buffer(Buffer&& other) noexcept {
+     other.ptr()->moveConstructInto(&raw_);
+   }
+
+   Buffer& operator=(Buffer&& other) {
+     if (this != &other) {
+       ptr()->~AbstractBufferWrapper();
+       other.ptr()->moveConstructInto(&raw_);
+     }
+     return *this;
+   }
+
+   ~Buffer() {
+     ptr()->~AbstractBufferWrapper();
+   }
+
+   template <typename TBuffer>
+   TBuffer& unwrap() {
+     BufferWrapper<TBuffer>* wrapperPtr =
+         dynamic_cast<BufferWrapper<TBuffer>*>(ptr());
+     if (wrapperPtr == nullptr) {
+       throw std::runtime_error("Invalid unwrapping of tensorpipe::Buffer");
+     }
+     return wrapperPtr->buffer;
+   }
+
+   template <typename TBuffer>
+   const TBuffer& unwrap() const {
+     const BufferWrapper<TBuffer>* wrapperPtr =
+         dynamic_cast<const BufferWrapper<TBuffer>*>(ptr());
+     if (wrapperPtr == nullptr) {
+       throw std::runtime_error("Invalid unwrapping of tensorpipe::Buffer");
+     }
+     return wrapperPtr->buffer;
+   }
+
+   Device device() const {
+     return ptr()->device();
+   }
+
+  private:
+   static constexpr int kStructSize = 32;
+   static constexpr int kStructAlign = 8;
+   std::aligned_storage<kStructSize, kStructAlign>::type raw_{};
+
+   const AbstractBufferWrapper* ptr() const {
+     // FIXME: Once we go C++17, use std::launder on the returned pointer.
+     return reinterpret_cast<const AbstractBufferWrapper*>(&raw_);
+   }
+
+   AbstractBufferWrapper* ptr() {
+     // FIXME: Once we go C++17, use std::launder on the returned pointer.
+     return reinterpret_cast<AbstractBufferWrapper*>(&raw_);
+   }
+ };
+
+ } // namespace tensorpipe
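
Buffer is a small-buffer type-erasure: any trivially-copyable *Buffer struct is placement-constructed into 32 bytes of inline storage, and unwrap<T>() recovers it with a checked dynamic_cast. A minimal usage sketch, using only what the header above defines:

// Wrapping and unwrapping a CpuBuffer (illustrative).
#include <tensorpipe/common/buffer.h>

void bufferDemo() {
  int payload = 42;
  tensorpipe::Buffer b = tensorpipe::CpuBuffer{&payload};  // implicit wrap
  std::string dev = b.device().toString();  // dispatches to CpuBuffer: "cpu:0"
  void* p = b.unwrap<tensorpipe::CpuBuffer>().ptr;  // ok: stored type matches
  // b.unwrap<tensorpipe::CudaBuffer>();  // would throw std::runtime_error
  (void)dev;
  (void)p;
}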
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/cpu_buffer.h ADDED
@@ -0,0 +1,23 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #include <tensorpipe/common/device.h>
+
+ namespace tensorpipe {
+
+ struct CpuBuffer {
+   void* ptr{nullptr};
+
+   Device getDevice() const {
+     return Device{kCpuDeviceType, 0};
+   }
+ };
+
+ } // namespace tensorpipe
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/cuda_buffer.h ADDED
@@ -0,0 +1,24 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #include <cuda_runtime.h>
+
+ #include <tensorpipe/common/device.h>
+
+ namespace tensorpipe {
+
+ struct CudaBuffer {
+   void* ptr{nullptr};
+   cudaStream_t stream{cudaStreamDefault};
+
+   Device getDevice() const;
+ };
+
+ } // namespace tensorpipe
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/device.h ADDED
@@ -0,0 +1,64 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #include <sstream>
+ #include <stdexcept>
+ #include <string>
+
+ namespace tensorpipe {
+
+ const std::string kCpuDeviceType{"cpu"};
+ const std::string kCudaDeviceType{"cuda"};
+
+ struct Device {
+   std::string type;
+   int index;
+
+   // This pointless constructor is needed to work around a bug in GCC 5.5 (and
+   // possibly other versions). It appears to be needed in the nop types that
+   // are used inside nop::Optional.
+   Device() {}
+
+   Device(std::string type, int index) : type(std::move(type)), index(index) {}
+
+   std::string toString() const {
+     std::stringstream ss;
+     ss << type << ":" << index;
+     return ss.str();
+   }
+
+   bool operator==(const Device& other) const {
+     return type == other.type && index == other.index;
+   }
+ };
+
+ } // namespace tensorpipe
+
+ namespace std {
+
+ template <>
+ struct hash<::tensorpipe::Device> {
+   size_t operator()(const ::tensorpipe::Device& device) const noexcept {
+     return std::hash<std::string>{}(device.toString());
+   }
+ };
+
+ template <>
+ struct hash<std::pair<::tensorpipe::Device, ::tensorpipe::Device>> {
+   size_t operator()(const std::pair<::tensorpipe::Device, ::tensorpipe::Device>&
+                         p) const noexcept {
+     size_t h1 = std::hash<::tensorpipe::Device>{}(p.first);
+     size_t h2 = std::hash<::tensorpipe::Device>{}(p.second);
+     // Shifting one hash to avoid collisions between (a, b) and (b, a).
+     return h1 ^ (h2 << 1);
+   }
+ };
+
+ } // namespace std
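
The two std::hash specializations are what let Device, and ordered device pairs, serve as keys in unordered containers (as deviceDescriptors() above requires). A short usage sketch built only from this header:

// Device as a hash key, and the asymmetric pair hash (illustrative).
#include <unordered_map>
#include <tensorpipe/common/device.h>

void deviceDemo() {
  tensorpipe::Device cpu{tensorpipe::kCpuDeviceType, 0};
  tensorpipe::Device cuda0{tensorpipe::kCudaDeviceType, 0};

  std::unordered_map<tensorpipe::Device, std::string> descriptors;
  descriptors[cpu] = "some-descriptor";  // hashes via toString(): "cpu:0"

  std::unordered_map<std::pair<tensorpipe::Device, tensorpipe::Device>, int>
      linkCost;
  // (cpu, cuda0) and (cuda0, cpu) hash differently thanks to the h2 << 1 shift.
  linkCost[{cpu, cuda0}] = 1;
  linkCost[{cuda0, cpu}] = 2;
}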
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/error.h ADDED
@@ -0,0 +1,127 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #include <memory>
+ #include <string>
+
+ namespace tensorpipe {
+
+ // Base class for actual errors.
+ class BaseError {
+  public:
+   virtual ~BaseError() = default;
+
+   // Returns an explanatory string.
+   // Like `std::exception` but returns a `std::string`.
+   virtual std::string what() const = 0;
+ };
+
+ // Wrapper class for errors.
+ //
+ // Background: we wish to not use exceptions yet need an error
+ // representation that can propagate across function and thread
+ // boundaries. This representation must be copyable (so we can store
+ // and return it at a later point in time) and retain downstream type
+ // information. This implies a heap allocation because it's the
+ // easiest way to deal with variable size objects (barring a union of
+ // all downstream error classes and a lot of custom code). Instead of
+ // passing a shared_ptr around directly, we use this wrapper class to
+ // keep implementation details hidden from calling code.
+ //
+ class Error final {
+  public:
+   // Constant instance that indicates success.
+   static const Error kSuccess;
+
+   // Default constructor for error that is not an error.
+   Error() {}
+
+   Error(std::shared_ptr<BaseError> error, std::string file, int line)
+       : error_(std::move(error)), file_(std::move(file)), line_(line) {}
+
+   virtual ~Error() = default;
+
+   // Converting to boolean means checking if there is an error. This
+   // means we don't need to use an `std::optional` and allows for a
+   // snippet like the following:
+   //
+   //   if (error) {
+   //     // Deal with it.
+   //   }
+   //
+   operator bool() const {
+     return static_cast<bool>(error_);
+   }
+
+   template <typename T>
+   std::shared_ptr<T> castToType() const {
+     return std::dynamic_pointer_cast<T>(error_);
+   }
+
+   template <typename T>
+   bool isOfType() const {
+     return castToType<T>() != nullptr;
+   }
+
+   // Like `std::exception` but returns a `std::string`.
+   std::string what() const;
+
+  private:
+   std::shared_ptr<BaseError> error_;
+   std::string file_;
+   int line_;
+ };
+
+ class SystemError final : public BaseError {
+  public:
+   explicit SystemError(const char* syscall, int error)
+       : syscall_(syscall), error_(error) {}
+
+   std::string what() const override;
+
+   int errorCode() const;
+
+  private:
+   const char* syscall_;
+   const int error_;
+ };
+
+ class ShortReadError final : public BaseError {
+  public:
+   ShortReadError(ssize_t expected, ssize_t actual)
+       : expected_(expected), actual_(actual) {}
+
+   std::string what() const override;
+
+  private:
+   const ssize_t expected_;
+   const ssize_t actual_;
+ };
+
+ class ShortWriteError final : public BaseError {
+  public:
+   ShortWriteError(ssize_t expected, ssize_t actual)
+       : expected_(expected), actual_(actual) {}
+
+   std::string what() const override;
+
+  private:
+   const ssize_t expected_;
+   const ssize_t actual_;
+ };
+
+ class EOFError final : public BaseError {
+  public:
+   EOFError() {}
+
+   std::string what() const override;
+ };
+
+ } // namespace tensorpipe
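
The Error wrapper is the library's exception-free error channel: falsy on success, queryable by concrete type. A usage sketch built only from this header (the library's error-creation macros live elsewhere, so this constructs an Error by hand):

// Consuming and producing tensorpipe::Error values (illustrative).
#include <tensorpipe/common/error.h>

void handle(const tensorpipe::Error& error) {
  if (!error) {
    return;  // default-constructed Error / Error::kSuccess converts to false
  }
  if (error.isOfType<tensorpipe::EOFError>()) {
    return;  // orderly shutdown of the peer; often not worth surfacing
  }
  std::string message = error.what();  // explanatory string with file/line
  (void)message;
}

tensorpipe::Error makeEofError() {
  return tensorpipe::Error(
      std::make_shared<tensorpipe::EOFError>(), __FILE__, __LINE__);
}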
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/optional.h ADDED
@@ -0,0 +1,1020 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (C) 2011 - 2012 Andrzej Krzemienski.
2
+ //
3
+ // Use, modification, and distribution is subject to the Boost Software
4
+ // License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
5
+ // http://www.boost.org/LICENSE_1_0.txt)
6
+ //
7
+ // The idea and interface is based on Boost.Optional library
8
+ // authored by Fernando Luis Cacciola Carballal
9
+
10
+ #pragma once
11
+
12
+ #include <cassert>
13
+ #include <functional>
14
+ #include <initializer_list>
15
+ #include <stdexcept>
16
+ #include <string>
17
+ #include <type_traits>
18
+ #include <utility>
19
+
20
+ #define TR2_OPTIONAL_REQUIRES(...) \
21
+ typename std::enable_if<__VA_ARGS__::value, bool>::type = false
22
+
23
+ #if defined __GNUC__ // NOTE: GNUC is also defined for Clang
24
+ #if (__GNUC__ == 4) && (__GNUC_MINOR__ >= 8)
25
+ #define TR2_OPTIONAL_GCC_4_8_AND_HIGHER___
26
+ #elif (__GNUC__ > 4)
27
+ #define TR2_OPTIONAL_GCC_4_8_AND_HIGHER___
28
+ #endif
29
+ #
30
+ #if (__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)
31
+ #define TR2_OPTIONAL_GCC_4_7_AND_HIGHER___
32
+ #elif (__GNUC__ > 4)
33
+ #define TR2_OPTIONAL_GCC_4_7_AND_HIGHER___
34
+ #endif
35
+ #
36
+ #if (__GNUC__ == 4) && (__GNUC_MINOR__ == 8) && (__GNUC_PATCHLEVEL__ >= 1)
37
+ #define TR2_OPTIONAL_GCC_4_8_1_AND_HIGHER___
38
+ #elif (__GNUC__ == 4) && (__GNUC_MINOR__ >= 9)
39
+ #define TR2_OPTIONAL_GCC_4_8_1_AND_HIGHER___
40
+ #elif (__GNUC__ > 4)
41
+ #define TR2_OPTIONAL_GCC_4_8_1_AND_HIGHER___
42
+ #endif
43
+ #endif
44
+ #
45
+ #if defined __clang_major__
46
+ #if (__clang_major__ == 3 && __clang_minor__ >= 5)
47
+ #define TR2_OPTIONAL_CLANG_3_5_AND_HIGHTER_
48
+ #elif (__clang_major__ > 3)
49
+ #define TR2_OPTIONAL_CLANG_3_5_AND_HIGHTER_
50
+ #endif
51
+ #if defined TR2_OPTIONAL_CLANG_3_5_AND_HIGHTER_
52
+ #define TR2_OPTIONAL_CLANG_3_4_2_AND_HIGHER_
53
+ #elif ( \
54
+ __clang_major__ == 3 && __clang_minor__ == 4 && __clang_patchlevel__ >= 2)
55
+ #define TR2_OPTIONAL_CLANG_3_4_2_AND_HIGHER_
56
+ #endif
57
+ #endif
58
+ #
59
+ #if defined _MSC_VER
60
+ #if (_MSC_VER >= 1900)
61
+ #define TR2_OPTIONAL_MSVC_2015_AND_HIGHER___
62
+ #endif
63
+ #endif
64
+
65
+ #if defined __clang__
66
+ #if (__clang_major__ > 2) || (__clang_major__ == 2) && (__clang_minor__ >= 9)
67
+ #define OPTIONAL_HAS_THIS_RVALUE_REFS 1
68
+ #else
69
+ #define OPTIONAL_HAS_THIS_RVALUE_REFS 0
70
+ #endif
71
+ #elif defined TR2_OPTIONAL_GCC_4_8_1_AND_HIGHER___
72
+ #define OPTIONAL_HAS_THIS_RVALUE_REFS 1
73
+ #elif defined TR2_OPTIONAL_MSVC_2015_AND_HIGHER___
74
+ #define OPTIONAL_HAS_THIS_RVALUE_REFS 1
75
+ #else
76
+ #define OPTIONAL_HAS_THIS_RVALUE_REFS 0
77
+ #endif
78
+
79
+ #if defined TR2_OPTIONAL_GCC_4_8_1_AND_HIGHER___
80
+ #define OPTIONAL_HAS_CONSTEXPR_INIT_LIST 1
81
+ #define OPTIONAL_CONSTEXPR_INIT_LIST constexpr
82
+ #else
83
+ #define OPTIONAL_HAS_CONSTEXPR_INIT_LIST 0
84
+ #define OPTIONAL_CONSTEXPR_INIT_LIST
85
+ #endif
86
+
87
+ #if defined TR2_OPTIONAL_CLANG_3_5_AND_HIGHTER_ && (defined __cplusplus) && \
88
+ (__cplusplus != 201103L)
89
+ #define OPTIONAL_HAS_MOVE_ACCESSORS 1
90
+ #else
91
+ #define OPTIONAL_HAS_MOVE_ACCESSORS 0
92
+ #endif
93
+
94
+ #// In C++11 constexpr implies const, so we need to make non-const members also non-constexpr
95
+ #if (defined __cplusplus) && (__cplusplus == 201103L)
96
+ #define OPTIONAL_MUTABLE_CONSTEXPR
97
+ #else
98
+ #define OPTIONAL_MUTABLE_CONSTEXPR constexpr
99
+ #endif
100
+
101
+ namespace tensorpipe {
102
+
103
+ // 20.5.4, optional for object types
104
+ template <class T>
105
+ class optional;
106
+
107
+ // 20.5.5, optional for lvalue reference types
108
+ template <class T>
109
+ class optional<T&>;
110
+
111
+ // workaround: std utility functions aren't constexpr yet
112
+ template <class T>
113
+ inline constexpr T&& constexpr_forward(
114
+ typename std::remove_reference<T>::type& t) noexcept {
115
+ return static_cast<T&&>(t);
116
+ }
117
+
118
+ template <class T>
119
+ inline constexpr T&& constexpr_forward(
120
+ typename std::remove_reference<T>::type&& t) noexcept {
121
+ static_assert(!std::is_lvalue_reference<T>::value, "!!");
122
+ return static_cast<T&&>(t);
123
+ }
124
+
125
+ template <class T>
126
+ inline constexpr typename std::remove_reference<T>::type&& constexpr_move(
127
+ T&& t) noexcept {
128
+ return static_cast<typename std::remove_reference<T>::type&&>(t);
129
+ }
130
+
131
+ #if defined NDEBUG
132
+ #define TR2_OPTIONAL_ASSERTED_EXPRESSION(CHECK, EXPR) (EXPR)
133
+ #else
134
+ #define TR2_OPTIONAL_ASSERTED_EXPRESSION(CHECK, EXPR) \
135
+ ((CHECK) ? (EXPR) : ([] { assert(!#CHECK); }(), (EXPR)))
136
+ #endif
137
+
138
+ namespace detail_ {
139
+
140
+ // static_addressof: a constexpr version of addressof
141
+ template <typename T>
142
+ struct has_overloaded_addressof {
143
+ template <class X>
144
+ constexpr static bool has_overload(...) {
145
+ return false;
146
+ }
147
+
148
+ template <class X, size_t S = sizeof(std::declval<X&>().operator&())>
149
+ constexpr static bool has_overload(bool) {
150
+ return true;
151
+ }
152
+
153
+ constexpr static bool value = has_overload<T>(true);
154
+ };
155
+
156
+ template <typename T, TR2_OPTIONAL_REQUIRES(!has_overloaded_addressof<T>)>
157
+ constexpr T* static_addressof(T& ref) {
158
+ return &ref;
159
+ }
160
+
161
+ template <typename T, TR2_OPTIONAL_REQUIRES(has_overloaded_addressof<T>)>
162
+ T* static_addressof(T& ref) {
163
+ return std::addressof(ref);
164
+ }
165
+
166
+ // the call to convert<A>(b) has return type A and converts b to type A iff b
167
+ // decltype(b) is implicitly convertible to A
168
+ template <class U>
169
+ constexpr U convert(U v) {
170
+ return v;
171
+ }
172
+
173
+ } // namespace detail_
174
+
175
+ constexpr struct trivial_init_t {
176
+ } trivial_init{};
177
+
178
+ // 20.5.6, In-place construction
179
+ constexpr struct in_place_t {
180
+ } in_place{};
181
+
182
+ // 20.5.7, Disengaged state indicator
183
+ struct nullopt_t {
184
+ struct init {};
185
+ constexpr explicit nullopt_t(init) {}
186
+ };
187
+ constexpr nullopt_t nullopt{nullopt_t::init()};
188
+
189
+ // 20.5.8, class bad_optional_access
190
+ class bad_optional_access : public std::logic_error {
191
+ public:
192
+ explicit bad_optional_access(const std::string& what_arg)
193
+ : logic_error{what_arg} {}
194
+ explicit bad_optional_access(const char* what_arg) : logic_error{what_arg} {}
195
+ };
196
+
197
+ template <class T>
198
+ union storage_t {
199
+ unsigned char dummy_;
200
+ T value_;
201
+
202
+ constexpr storage_t(trivial_init_t) noexcept : dummy_(){};
203
+
204
+ template <class... Args>
205
+ constexpr storage_t(Args&&... args)
206
+ : value_(constexpr_forward<Args>(args)...) {}
207
+
208
+ ~storage_t() {}
209
+ };
210
+
211
+ template <class T>
212
+ union constexpr_storage_t {
213
+ unsigned char dummy_;
214
+ T value_;
215
+
216
+ constexpr constexpr_storage_t(trivial_init_t) noexcept : dummy_(){};
217
+
218
+ template <class... Args>
219
+ constexpr constexpr_storage_t(Args&&... args)
220
+ : value_(constexpr_forward<Args>(args)...) {}
221
+
222
+ ~constexpr_storage_t() = default;
223
+ };
224
+
225
+ template <class T>
226
+ struct optional_base {
227
+ bool init_;
228
+ storage_t<T> storage_;
229
+
230
+ constexpr optional_base() noexcept : init_(false), storage_(trivial_init){};
231
+
232
+ explicit constexpr optional_base(const T& v) : init_(true), storage_(v) {}
233
+
234
+ explicit constexpr optional_base(T&& v)
235
+ : init_(true), storage_(constexpr_move(v)) {}
236
+
237
+ template <class... Args>
238
+ explicit optional_base(in_place_t, Args&&... args)
239
+ : init_(true), storage_(constexpr_forward<Args>(args)...) {}
240
+
241
+ template <
242
+ class U,
243
+ class... Args,
244
+ TR2_OPTIONAL_REQUIRES(std::is_constructible<T, std::initializer_list<U>>)>
245
+ explicit optional_base(
246
+ in_place_t,
247
+ std::initializer_list<U> il,
248
+ Args&&... args)
249
+ : init_(true), storage_(il, std::forward<Args>(args)...) {}
250
+
251
+ ~optional_base() {
252
+ if (init_)
253
+ storage_.value_.T::~T();
254
+ }
255
+ };
256
+
257
+ template <class T>
258
+ struct constexpr_optional_base {
259
+ bool init_;
260
+ constexpr_storage_t<T> storage_;
261
+
262
+ constexpr constexpr_optional_base() noexcept
263
+ : init_(false), storage_(trivial_init){};
264
+
265
+ explicit constexpr constexpr_optional_base(const T& v)
266
+ : init_(true), storage_(v) {}
267
+
268
+ explicit constexpr constexpr_optional_base(T&& v)
269
+ : init_(true), storage_(constexpr_move(v)) {}
270
+
271
+ template <class... Args>
272
+ explicit constexpr constexpr_optional_base(in_place_t, Args&&... args)
273
+ : init_(true), storage_(constexpr_forward<Args>(args)...) {}
274
+
275
+ template <
276
+ class U,
277
+ class... Args,
278
+ TR2_OPTIONAL_REQUIRES(std::is_constructible<T, std::initializer_list<U>>)>
279
+ OPTIONAL_CONSTEXPR_INIT_LIST explicit constexpr_optional_base(
280
+ in_place_t,
281
+ std::initializer_list<U> il,
282
+ Args&&... args)
283
+ : init_(true), storage_(il, std::forward<Args>(args)...) {}
284
+
285
+ ~constexpr_optional_base() = default;
286
+ };
287
+
288
+ template <class T>
289
+ using OptionalBase = typename std::conditional<
290
+ std::is_trivially_destructible<T>::value, // if possible
291
+ constexpr_optional_base<typename std::remove_const<
292
+ T>::type>, // use base with trivial destructor
293
+ optional_base<typename std::remove_const<T>::type>>::type;
294
+
295
+ template <class T>
296
+ class optional : private OptionalBase<T> {
297
+ static_assert(
298
+ !std::is_same<typename std::decay<T>::type, nullopt_t>::value,
299
+ "bad T");
300
+ static_assert(
301
+ !std::is_same<typename std::decay<T>::type, in_place_t>::value,
302
+ "bad T");
303
+
304
+ constexpr bool initialized() const noexcept {
305
+ return OptionalBase<T>::init_;
306
+ }
307
+ typename std::remove_const<T>::type* dataptr() {
308
+ return std::addressof(OptionalBase<T>::storage_.value_);
309
+ }
310
+ constexpr const T* dataptr() const {
311
+ return detail_::static_addressof(OptionalBase<T>::storage_.value_);
312
+ }
313
+
314
+ #if OPTIONAL_HAS_THIS_RVALUE_REFS == 1
315
+ constexpr const T& contained_val() const& {
316
+ return OptionalBase<T>::storage_.value_;
317
+ }
318
+ #if OPTIONAL_HAS_MOVE_ACCESSORS == 1
319
+ OPTIONAL_MUTABLE_CONSTEXPR T&& contained_val() && {
320
+ return std::move(OptionalBase<T>::storage_.value_);
321
+ }
322
+ OPTIONAL_MUTABLE_CONSTEXPR T& contained_val() & {
323
+ return OptionalBase<T>::storage_.value_;
324
+ }
325
+ #else
326
+ T& contained_val() & {
327
+ return OptionalBase<T>::storage_.value_;
328
+ }
329
+ T&& contained_val() && {
330
+ return std::move(OptionalBase<T>::storage_.value_);
331
+ }
332
+ #endif
333
+ #else
334
+ constexpr const T& contained_val() const {
335
+ return OptionalBase<T>::storage_.value_;
336
+ }
337
+ T& contained_val() {
338
+ return OptionalBase<T>::storage_.value_;
339
+ }
340
+ #endif
341
+
342
+ void clear() noexcept {
343
+ if (initialized())
344
+ dataptr()->T::~T();
345
+ OptionalBase<T>::init_ = false;
346
+ }
347
+
348
+ template <class... Args>
349
+ void initialize(Args&&... args) noexcept(
350
+ noexcept(T(std::forward<Args>(args)...))) {
351
+ assert(!OptionalBase<T>::init_);
352
+ ::new (static_cast<void*>(dataptr())) T(std::forward<Args>(args)...);
353
+ OptionalBase<T>::init_ = true;
354
+ }
355
+
356
+ template <class U, class... Args>
357
+ void initialize(std::initializer_list<U> il, Args&&... args) noexcept(
358
+ noexcept(T(il, std::forward<Args>(args)...))) {
359
+ assert(!OptionalBase<T>::init_);
360
+ ::new (static_cast<void*>(dataptr())) T(il, std::forward<Args>(args)...);
361
+ OptionalBase<T>::init_ = true;
362
+ }
363
+
364
+ public:
365
+ typedef T value_type;
366
+
367
+ // 20.5.5.1, constructors
368
+ constexpr optional() noexcept : OptionalBase<T>(){};
369
+ constexpr optional(nullopt_t) noexcept : OptionalBase<T>(){};
370
+
371
+ optional(const optional& rhs) : OptionalBase<T>() {
372
+ if (rhs.initialized()) {
373
+ ::new (static_cast<void*>(dataptr())) T(*rhs);
374
+ OptionalBase<T>::init_ = true;
375
+ }
376
+ }
377
+
378
+ optional(optional&& rhs) noexcept(
379
+ std::is_nothrow_move_constructible<T>::value)
380
+ : OptionalBase<T>() {
381
+ if (rhs.initialized()) {
382
+ ::new (static_cast<void*>(dataptr())) T(std::move(*rhs));
383
+ OptionalBase<T>::init_ = true;
384
+ }
385
+ }
386
+
387
+ constexpr optional(const T& v) : OptionalBase<T>(v) {}
388
+
389
+ constexpr optional(T&& v) : OptionalBase<T>(constexpr_move(v)) {}
390
+
391
+ template <class... Args>
392
+ explicit constexpr optional(in_place_t, Args&&... args)
393
+ : OptionalBase<T>(in_place_t{}, constexpr_forward<Args>(args)...) {}
394
+
395
+ template <
396
+ class U,
397
+ class... Args,
398
+ TR2_OPTIONAL_REQUIRES(std::is_constructible<T, std::initializer_list<U>>)>
399
+ OPTIONAL_CONSTEXPR_INIT_LIST explicit optional(
400
+ in_place_t,
401
+ std::initializer_list<U> il,
402
+ Args&&... args)
403
+ : OptionalBase<T>(in_place_t{}, il, constexpr_forward<Args>(args)...) {}
404
+
405
+ // 20.5.4.2, Destructor
406
+ ~optional() = default;
407
+
408
+ // 20.5.4.3, assignment
409
+ optional& operator=(nullopt_t) noexcept {
410
+ clear();
411
+ return *this;
412
+ }
413
+
414
+ optional& operator=(const optional& rhs) {
415
+ if (initialized() == true && rhs.initialized() == false)
416
+ clear();
417
+ else if (initialized() == false && rhs.initialized() == true)
418
+ initialize(*rhs);
419
+ else if (initialized() == true && rhs.initialized() == true)
420
+ contained_val() = *rhs;
421
+ return *this;
422
+ }
423
+
424
+ optional& operator=(optional&& rhs) noexcept(
425
+ std::is_nothrow_move_assignable<T>::value&&
426
+ std::is_nothrow_move_constructible<T>::value) {
427
+ if (initialized() == true && rhs.initialized() == false)
428
+ clear();
429
+ else if (initialized() == false && rhs.initialized() == true)
430
+ initialize(std::move(*rhs));
431
+ else if (initialized() == true && rhs.initialized() == true)
432
+ contained_val() = std::move(*rhs);
433
+ return *this;
434
+ }
435
+
436
+ template <class U>
437
+ auto operator=(U&& v) -> typename std::enable_if<
438
+ std::is_same<typename std::decay<U>::type, T>::value,
439
+ optional&>::type {
440
+ if (initialized()) {
441
+ contained_val() = std::forward<U>(v);
442
+ } else {
443
+ initialize(std::forward<U>(v));
444
+ }
445
+ return *this;
446
+ }
447
+
448
+ template <class... Args>
449
+ void emplace(Args&&... args) {
450
+ clear();
451
+ initialize(std::forward<Args>(args)...);
452
+ }
453
+
454
+ template <class U, class... Args>
455
+ void emplace(std::initializer_list<U> il, Args&&... args) {
456
+ clear();
457
+ initialize<U, Args...>(il, std::forward<Args>(args)...);
458
+ }
459
+
460
+ // 20.5.4.4, Swap
461
+ void swap(optional<T>& rhs) noexcept(
462
+ std::is_nothrow_move_constructible<T>::value&& noexcept(
463
+ std::swap(std::declval<T&>(), std::declval<T&>()))) {
464
+ if (initialized() == true && rhs.initialized() == false) {
465
+ rhs.initialize(std::move(**this));
466
+ clear();
467
+ } else if (initialized() == false && rhs.initialized() == true) {
468
+ initialize(std::move(*rhs));
469
+ rhs.clear();
470
+ } else if (initialized() == true && rhs.initialized() == true) {
471
+ using std::swap;
472
+ swap(**this, *rhs);
473
+ }
474
+ }
475
+
476
+ // 20.5.4.5, Observers
477
+
478
+ explicit constexpr operator bool() const noexcept {
479
+ return initialized();
480
+ }
481
+ constexpr bool has_value() const noexcept {
482
+ return initialized();
483
+ }
484
+
485
+ constexpr T const* operator->() const {
486
+ return TR2_OPTIONAL_ASSERTED_EXPRESSION(initialized(), dataptr());
487
+ }
488
+
489
+ #if OPTIONAL_HAS_MOVE_ACCESSORS == 1
490
+
491
+ OPTIONAL_MUTABLE_CONSTEXPR T* operator->() {
492
+ assert(initialized());
493
+ return dataptr();
494
+ }
495
+
496
+ constexpr T const& operator*() const& {
497
+ return TR2_OPTIONAL_ASSERTED_EXPRESSION(initialized(), contained_val());
498
+ }
499
+
500
+ OPTIONAL_MUTABLE_CONSTEXPR T& operator*() & {
501
+ assert(initialized());
502
+ return contained_val();
503
+ }
504
+
505
+ OPTIONAL_MUTABLE_CONSTEXPR T&& operator*() && {
506
+ assert(initialized());
507
+ return constexpr_move(contained_val());
508
+ }
509
+
510
+ constexpr T const& value() const& {
511
+ return initialized()
512
+ ? contained_val()
513
+ : (throw bad_optional_access("bad optional access"), contained_val());
514
+ }
515
+
516
+ OPTIONAL_MUTABLE_CONSTEXPR T& value() & {
517
+ return initialized()
518
+ ? contained_val()
519
+ : (throw bad_optional_access("bad optional access"), contained_val());
520
+ }
521
+
522
+ OPTIONAL_MUTABLE_CONSTEXPR T&& value() && {
523
+ if (!initialized())
524
+ throw bad_optional_access("bad optional access");
525
+ return std::move(contained_val());
526
+ }
527
+
528
+ #else
529
+
530
+ T* operator->() {
531
+ assert(initialized());
532
+ return dataptr();
533
+ }
534
+
535
+ constexpr T const& operator*() const {
536
+ return TR2_OPTIONAL_ASSERTED_EXPRESSION(initialized(), contained_val());
537
+ }
538
+
539
+ T& operator*() {
540
+ assert(initialized());
541
+ return contained_val();
542
+ }
543
+
544
+ constexpr T const& value() const {
545
+ return initialized()
546
+ ? contained_val()
547
+ : (throw bad_optional_access("bad optional access"), contained_val());
548
+ }
549
+
550
+ T& value() {
551
+ return initialized()
552
+ ? contained_val()
553
+ : (throw bad_optional_access("bad optional access"), contained_val());
554
+ }
555
+
556
+ #endif
557
+
558
+ #if OPTIONAL_HAS_THIS_RVALUE_REFS == 1
559
+
560
+ template <class V>
561
+ constexpr T value_or(V&& v) const& {
562
+ return *this ? **this : detail_::convert<T>(constexpr_forward<V>(v));
563
+ }
564
+
565
+ #if OPTIONAL_HAS_MOVE_ACCESSORS == 1
566
+
567
+ template <class V>
568
+ OPTIONAL_MUTABLE_CONSTEXPR T value_or(V&& v) && {
569
+ return *this
570
+ ? constexpr_move(const_cast<optional<T>&>(*this).contained_val())
571
+ : detail_::convert<T>(constexpr_forward<V>(v));
572
+ }
573
+
574
+ #else
575
+
576
+ template <class V>
577
+ T value_or(V&& v) && {
578
+ return *this
579
+ ? constexpr_move(const_cast<optional<T>&>(*this).contained_val())
580
+ : detail_::convert<T>(constexpr_forward<V>(v));
581
+ }
582
+
583
+ #endif
584
+
585
+ #else
586
+
587
+ template <class V>
588
+ constexpr T value_or(V&& v) const {
589
+ return *this ? **this : detail_::convert<T>(constexpr_forward<V>(v));
590
+ }
591
+
592
+ #endif
593
+
594
+ // 20.6.3.6, modifiers
595
+ void reset() noexcept {
596
+ clear();
597
+ }
598
+ };
599
+
600
+ template <class T>
601
+ class optional<T&> {
602
+ static_assert(!std::is_same<T, nullopt_t>::value, "bad T");
603
+ static_assert(!std::is_same<T, in_place_t>::value, "bad T");
604
+ T* ref;
605
+
606
+ public:
607
+ // 20.5.5.1, construction/destruction
608
+ constexpr optional() noexcept : ref(nullptr) {}
609
+
610
+ constexpr optional(nullopt_t) noexcept : ref(nullptr) {}
611
+
612
+ constexpr optional(T& v) noexcept : ref(detail_::static_addressof(v)) {}
613
+
614
+ optional(T&&) = delete;
615
+
616
+ constexpr optional(const optional& rhs) noexcept : ref(rhs.ref) {}
617
+
618
+ explicit constexpr optional(in_place_t, T& v) noexcept
619
+ : ref(detail_::static_addressof(v)) {}
620
+
621
+ explicit optional(in_place_t, T&&) = delete;
622
+
623
+ ~optional() = default;
624
+
625
+ // 20.5.5.2, mutation
626
+ optional& operator=(nullopt_t) noexcept {
627
+ ref = nullptr;
628
+ return *this;
629
+ }
630
+
631
+ // optional& operator=(const optional& rhs) noexcept {
632
+ // ref = rhs.ref;
633
+ // return *this;
634
+ // }
635
+
636
+ // optional& operator=(optional&& rhs) noexcept {
637
+ // ref = rhs.ref;
638
+ // return *this;
639
+ // }
640
+
641
+ template <typename U>
642
+ auto operator=(U&& rhs) noexcept -> typename std::enable_if<
643
+ std::is_same<typename std::decay<U>::type, optional<T&>>::value,
644
+ optional&>::type {
645
+ ref = rhs.ref;
646
+ return *this;
647
+ }
648
+
649
+ template <typename U>
650
+ auto operator=(U&& rhs) noexcept -> typename std::enable_if<
651
+ !std::is_same<typename std::decay<U>::type, optional<T&>>::value,
652
+ optional&>::type = delete;
653
+
654
+ void emplace(T& v) noexcept {
655
+ ref = detail_::static_addressof(v);
656
+ }
657
+
658
+ void emplace(T&&) = delete;
659
+
660
+ void swap(optional<T&>& rhs) noexcept {
661
+ std::swap(ref, rhs.ref);
662
+ }
663
+
664
+ // 20.5.5.3, observers
665
+ constexpr T* operator->() const {
666
+ return TR2_OPTIONAL_ASSERTED_EXPRESSION(ref, ref);
667
+ }
668
+
669
+ constexpr T& operator*() const {
670
+ return TR2_OPTIONAL_ASSERTED_EXPRESSION(ref, *ref);
671
+ }
672
+
673
+ constexpr T& value() const {
674
+ return ref ? *ref
675
+ : (throw bad_optional_access("bad optional access"), *ref);
676
+ }
677
+
678
+ explicit constexpr operator bool() const noexcept {
679
+ return ref != nullptr;
680
+ }
681
+
682
+ constexpr bool has_value() const noexcept {
683
+ return ref != nullptr;
684
+ }
685
+
686
+ template <class V>
687
+ constexpr typename std::decay<T>::type value_or(V&& v) const {
688
+ return *this ? **this
689
+ : detail_::convert<typename std::decay<T>::type>(
690
+ constexpr_forward<V>(v));
691
+ }
692
+
693
+ // x.x.x.x, modifiers
694
+ void reset() noexcept {
695
+ ref = nullptr;
696
+ }
697
+ };
698
+
699
+ template <class T>
700
+ class optional<T&&> {
701
+ static_assert(sizeof(T) == 0, "optional rvalue references disallowed");
702
+ };
703
+
704
+ // 20.5.8, Relational operators
705
+ template <class T>
706
+ constexpr bool operator==(const optional<T>& x, const optional<T>& y) {
707
+ return bool(x) != bool(y) ? false : bool(x) == false ? true : *x == *y;
708
+ }
709
+
710
+ template <class T>
711
+ constexpr bool operator!=(const optional<T>& x, const optional<T>& y) {
712
+ return !(x == y);
713
+ }
714
+
715
+ template <class T>
716
+ constexpr bool operator<(const optional<T>& x, const optional<T>& y) {
717
+ return (!y) ? false : (!x) ? true : *x < *y;
718
+ }
719
+
720
+ template <class T>
721
+ constexpr bool operator>(const optional<T>& x, const optional<T>& y) {
722
+ return (y < x);
723
+ }
724
+
725
+ template <class T>
726
+ constexpr bool operator<=(const optional<T>& x, const optional<T>& y) {
727
+ return !(y < x);
728
+ }
729
+
730
+ template <class T>
731
+ constexpr bool operator>=(const optional<T>& x, const optional<T>& y) {
732
+ return !(x < y);
733
+ }
734
+
735
+ // 20.5.9, Comparison with nullopt
736
+ template <class T>
737
+ constexpr bool operator==(const optional<T>& x, nullopt_t) noexcept {
738
+ return (!x);
739
+ }
740
+
741
+ template <class T>
742
+ constexpr bool operator==(nullopt_t, const optional<T>& x) noexcept {
743
+ return (!x);
744
+ }
745
+
746
+ template <class T>
747
+ constexpr bool operator!=(const optional<T>& x, nullopt_t) noexcept {
748
+ return bool(x);
749
+ }
750
+
751
+ template <class T>
752
+ constexpr bool operator!=(nullopt_t, const optional<T>& x) noexcept {
753
+ return bool(x);
754
+ }
755
+
756
+ template <class T>
757
+ constexpr bool operator<(const optional<T>&, nullopt_t) noexcept {
758
+ return false;
759
+ }
760
+
761
+ template <class T>
762
+ constexpr bool operator<(nullopt_t, const optional<T>& x) noexcept {
763
+ return bool(x);
764
+ }
765
+
766
+ template <class T>
767
+ constexpr bool operator<=(const optional<T>& x, nullopt_t) noexcept {
768
+ return (!x);
769
+ }
770
+
771
+ template <class T>
772
+ constexpr bool operator<=(nullopt_t, const optional<T>&) noexcept {
773
+ return true;
774
+ }
775
+
776
+ template <class T>
777
+ constexpr bool operator>(const optional<T>& x, nullopt_t) noexcept {
778
+ return bool(x);
779
+ }
780
+
781
+ template <class T>
782
+ constexpr bool operator>(nullopt_t, const optional<T>&) noexcept {
783
+ return false;
784
+ }
785
+
786
+ template <class T>
787
+ constexpr bool operator>=(const optional<T>&, nullopt_t) noexcept {
788
+ return true;
789
+ }
790
+
791
+ template <class T>
792
+ constexpr bool operator>=(nullopt_t, const optional<T>& x) noexcept {
793
+ return (!x);
794
+ }
795
+
796
+ // 20.5.10, Comparison with T
797
+ template <class T>
798
+ constexpr bool operator==(const optional<T>& x, const T& v) {
799
+ return bool(x) ? *x == v : false;
800
+ }
801
+
802
+ template <class T>
803
+ constexpr bool operator==(const T& v, const optional<T>& x) {
804
+ return bool(x) ? v == *x : false;
805
+ }
806
+
807
+ template <class T>
808
+ constexpr bool operator!=(const optional<T>& x, const T& v) {
809
+ return bool(x) ? *x != v : true;
810
+ }
811
+
812
+ template <class T>
813
+ constexpr bool operator!=(const T& v, const optional<T>& x) {
814
+ return bool(x) ? v != *x : true;
815
+ }
816
+
817
+ template <class T>
818
+ constexpr bool operator<(const optional<T>& x, const T& v) {
819
+ return bool(x) ? *x < v : true;
820
+ }
821
+
822
+ template <class T>
823
+ constexpr bool operator>(const T& v, const optional<T>& x) {
824
+ return bool(x) ? v > *x : true;
825
+ }
826
+
827
+ template <class T>
828
+ constexpr bool operator>(const optional<T>& x, const T& v) {
829
+ return bool(x) ? *x > v : false;
830
+ }
831
+
832
+ template <class T>
833
+ constexpr bool operator<(const T& v, const optional<T>& x) {
834
+ return bool(x) ? v < *x : false;
835
+ }
836
+
837
+ template <class T>
838
+ constexpr bool operator>=(const optional<T>& x, const T& v) {
839
+ return bool(x) ? *x >= v : false;
840
+ }
841
+
842
+ template <class T>
843
+ constexpr bool operator<=(const T& v, const optional<T>& x) {
844
+ return bool(x) ? v <= *x : false;
845
+ }
846
+
847
+ template <class T>
848
+ constexpr bool operator<=(const optional<T>& x, const T& v) {
849
+ return bool(x) ? *x <= v : true;
850
+ }
851
+
852
+ template <class T>
853
+ constexpr bool operator>=(const T& v, const optional<T>& x) {
854
+ return bool(x) ? v >= *x : true;
855
+ }
856
+
857
+ // Comparison of optional<T&> with T
858
+ template <class T>
859
+ constexpr bool operator==(const optional<T&>& x, const T& v) {
+   return bool(x) ? *x == v : false;
+ }
+
+ template <class T>
+ constexpr bool operator==(const T& v, const optional<T&>& x) {
+   return bool(x) ? v == *x : false;
+ }
+
+ template <class T>
+ constexpr bool operator!=(const optional<T&>& x, const T& v) {
+   return bool(x) ? *x != v : true;
+ }
+
+ template <class T>
+ constexpr bool operator!=(const T& v, const optional<T&>& x) {
+   return bool(x) ? v != *x : true;
+ }
+
+ template <class T>
+ constexpr bool operator<(const optional<T&>& x, const T& v) {
+   return bool(x) ? *x < v : true;
+ }
+
+ template <class T>
+ constexpr bool operator>(const T& v, const optional<T&>& x) {
+   return bool(x) ? v > *x : true;
+ }
+
+ template <class T>
+ constexpr bool operator>(const optional<T&>& x, const T& v) {
+   return bool(x) ? *x > v : false;
+ }
+
+ template <class T>
+ constexpr bool operator<(const T& v, const optional<T&>& x) {
+   return bool(x) ? v < *x : false;
+ }
+
+ template <class T>
+ constexpr bool operator>=(const optional<T&>& x, const T& v) {
+   return bool(x) ? *x >= v : false;
+ }
+
+ template <class T>
+ constexpr bool operator<=(const T& v, const optional<T&>& x) {
+   return bool(x) ? v <= *x : false;
+ }
+
+ template <class T>
+ constexpr bool operator<=(const optional<T&>& x, const T& v) {
+   return bool(x) ? *x <= v : true;
+ }
+
+ template <class T>
+ constexpr bool operator>=(const T& v, const optional<T&>& x) {
+   return bool(x) ? v >= *x : true;
+ }
+
+ // Comparison of optional<T const&> with T
+ template <class T>
+ constexpr bool operator==(const optional<const T&>& x, const T& v) {
+   return bool(x) ? *x == v : false;
+ }
+
+ template <class T>
+ constexpr bool operator==(const T& v, const optional<const T&>& x) {
+   return bool(x) ? v == *x : false;
+ }
+
+ template <class T>
+ constexpr bool operator!=(const optional<const T&>& x, const T& v) {
+   return bool(x) ? *x != v : true;
+ }
+
+ template <class T>
+ constexpr bool operator!=(const T& v, const optional<const T&>& x) {
+   return bool(x) ? v != *x : true;
+ }
+
+ template <class T>
+ constexpr bool operator<(const optional<const T&>& x, const T& v) {
+   return bool(x) ? *x < v : true;
+ }
+
+ template <class T>
+ constexpr bool operator>(const T& v, const optional<const T&>& x) {
+   return bool(x) ? v > *x : true;
+ }
+
+ template <class T>
+ constexpr bool operator>(const optional<const T&>& x, const T& v) {
+   return bool(x) ? *x > v : false;
+ }
+
+ template <class T>
+ constexpr bool operator<(const T& v, const optional<const T&>& x) {
+   return bool(x) ? v < *x : false;
+ }
+
+ template <class T>
+ constexpr bool operator>=(const optional<const T&>& x, const T& v) {
+   return bool(x) ? *x >= v : false;
+ }
+
+ template <class T>
+ constexpr bool operator<=(const T& v, const optional<const T&>& x) {
+   return bool(x) ? v <= *x : false;
+ }
+
+ template <class T>
+ constexpr bool operator<=(const optional<const T&>& x, const T& v) {
+   return bool(x) ? *x <= v : true;
+ }
+
+ template <class T>
+ constexpr bool operator>=(const T& v, const optional<const T&>& x) {
+   return bool(x) ? v >= *x : true;
+ }
+
+ // 20.5.12, Specialized algorithms
+ template <class T>
+ void swap(optional<T>& x, optional<T>& y) noexcept(noexcept(x.swap(y))) {
+   x.swap(y);
+ }
+
+ template <class T>
+ constexpr optional<typename std::decay<T>::type> make_optional(T&& v) {
+   return optional<typename std::decay<T>::type>(constexpr_forward<T>(v));
+ }
+
+ template <class X>
+ constexpr optional<X&> make_optional(std::reference_wrapper<X> v) {
+   return optional<X&>(v.get());
+ }
+
+ } // namespace tensorpipe
+
+ namespace std {
+ template <typename T>
+ struct hash<tensorpipe::optional<T>> {
+   typedef typename hash<T>::result_type result_type;
+   typedef tensorpipe::optional<T> argument_type;
+
+   constexpr result_type operator()(argument_type const& arg) const {
+     return arg ? std::hash<T>{}(*arg) : result_type{};
+   }
+ };
+
+ template <typename T>
+ struct hash<tensorpipe::optional<T&>> {
+   typedef typename hash<T>::result_type result_type;
+   typedef tensorpipe::optional<T&> argument_type;
+
+   constexpr result_type operator()(argument_type const& arg) const {
+     return arg ? std::hash<T>{}(*arg) : result_type{};
+   }
+ };
+ } // namespace std
+
+ #undef TR2_OPTIONAL_REQUIRES
+ #undef TR2_OPTIONAL_ASSERTED_EXPRESSION
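The overloads above make an engaged optional compare against a bare value through its contained value, while a disengaged optional compares unequal to (and orders before) any value. A minimal usage sketch, assuming the analogous optional<T>-vs-T overloads defined earlier in this same header:

    #include <cassert>
    #include <cstddef>
    #include <functional>
    #include <tensorpipe/common/optional.h>

    int main() {
      // make_optional deduces optional<int> from the decayed argument type.
      auto engaged = tensorpipe::make_optional(42);
      assert(engaged == 42);

      tensorpipe::optional<int> empty;
      assert(empty != 42); // disengaged: != against a value is always true
      assert(empty < 42);  // disengaged: orders before any value

      // A reference_wrapper produces an optional<T&> bound to the referee.
      int x = 7;
      auto ref = tensorpipe::make_optional(std::ref(x));
      assert(ref == 7); // uses the optional<T&> overloads above

      // The std::hash specialization hashes the contained value when engaged.
      std::size_t h = std::hash<tensorpipe::optional<int>>{}(engaged);
      (void)h;
      return 0;
    }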
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/config.h ADDED
@@ -0,0 +1,14 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #define TENSORPIPE_HAS_SHM_TRANSPORT 1
+ #define TENSORPIPE_HAS_IBV_TRANSPORT 1
+
+ #define TENSORPIPE_HAS_CMA_CHANNEL 1
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/config_cuda.h ADDED
@@ -0,0 +1,12 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #define TENSORPIPE_HAS_CUDA_IPC_CHANNEL 1
+ #define TENSORPIPE_HAS_CUDA_GDR_CHANNEL 1
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ * All rights reserved.
4
+ *
5
+ * This source code is licensed under the BSD-style license found in the
6
+ * LICENSE file in the root directory of this source tree.
7
+ */
8
+
9
+ #pragma once
10
+
11
+ #include <memory>
12
+ #include <string>
13
+ #include <utility>
14
+ #include <vector>
15
+
16
+ #include <tensorpipe/transport/context.h>
17
+
18
+ #include <tensorpipe/channel/context.h>
19
+
20
+ namespace tensorpipe {
21
+
22
+ class ContextImpl;
23
+ class Listener;
24
+ class Pipe;
25
+
26
+ class ContextOptions {
27
+ public:
28
+ // The name should be a semantically meaningful description of this context.
29
+ // It will only be used for logging and debugging purposes, to identify the
30
+ // endpoints of a pipe.
31
+ ContextOptions&& name(std::string name) && {
32
+ name_ = std::move(name);
33
+ return std::move(*this);
34
+ }
35
+
36
+ private:
37
+ std::string name_;
38
+
39
+ friend ContextImpl;
40
+ };
41
+
42
+ class PipeOptions {
43
+ public:
44
+ // The name should be a semantically meaningful description of the context
45
+ // that the pipe is connecting to. It will only be used for logging and
46
+ // debugging purposes, to identify the endpoints of a pipe.
47
+ PipeOptions&& remoteName(std::string remoteName) && {
48
+ remoteName_ = std::move(remoteName);
49
+ return std::move(*this);
50
+ }
51
+
52
+ private:
53
+ std::string remoteName_;
54
+
55
+ friend ContextImpl;
56
+ };
57
+
58
+ class Context final {
59
+ public:
60
+ explicit Context(ContextOptions opts = ContextOptions());
61
+
62
+ void registerTransport(
63
+ int64_t priority,
64
+ std::string transport,
65
+ std::shared_ptr<transport::Context> context);
66
+
67
+ void registerChannel(
68
+ int64_t priority,
69
+ std::string channel,
70
+ std::shared_ptr<channel::Context> context);
71
+
72
+ std::shared_ptr<Listener> listen(const std::vector<std::string>& urls);
73
+
74
+ std::shared_ptr<Pipe> connect(
75
+ const std::string& url,
76
+ PipeOptions opts = PipeOptions());
77
+
78
+ // Put the context in a terminal state, in turn closing all of its pipes and
79
+ // listeners, and release its resources. This may be done asynchronously, in
80
+ // background.
81
+ void close();
82
+
83
+ // Wait for all resources to be released and all background activity to stop.
84
+ void join();
85
+
86
+ ~Context();
87
+
88
+ private:
89
+ // The implementation is managed by a shared_ptr because each child object
90
+ // will also hold a shared_ptr to it. However, its lifetime is tied to the one
91
+ // of this public object since when the latter is destroyed the implementation
92
+ // is closed and joined.
93
+ const std::shared_ptr<ContextImpl> impl_;
94
+ };
95
+
96
+ } // namespace tensorpipe
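A sketch of typical setup against this API, using the uv transport and basic channel factories that appear elsewhere in this diff; the priorities, names, and address below are illustrative, not prescribed values:

    #include <memory>
    #include <tensorpipe/channel/basic/factory.h>
    #include <tensorpipe/core/context.h>
    #include <tensorpipe/core/listener.h>
    #include <tensorpipe/core/pipe.h>
    #include <tensorpipe/transport/uv/factory.h>

    int main() {
      auto context = std::make_shared<tensorpipe::Context>(
          tensorpipe::ContextOptions().name("worker-0"));

      // Register one transport and one channel under user-chosen names.
      context->registerTransport(0, "tcp", tensorpipe::transport::uv::create());
      context->registerChannel(0, "basic", tensorpipe::channel::basic::create());

      // Listen on an auto-assigned port, then connect to the materialized URL.
      auto listener = context->listen({"tcp://127.0.0.1"});
      auto pipe = context->connect(
          listener->url("tcp"),
          tensorpipe::PipeOptions().remoteName("worker-0"));

      // ... accept on the listener and exchange messages ...

      context->close();
      context->join();
      return 0;
    }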
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/core/error.h ADDED
@@ -0,0 +1,48 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #include <string>
+
+ #include <tensorpipe/common/error.h>
+
+ namespace tensorpipe {
+
+ class LogicError final : public BaseError {
+  public:
+   explicit LogicError(std::string reason) : reason_(std::move(reason)) {}
+
+   std::string what() const override;
+
+  private:
+   const std::string reason_;
+ };
+
+ class ContextClosedError final : public BaseError {
+  public:
+   explicit ContextClosedError() {}
+
+   std::string what() const override;
+ };
+
+ class ListenerClosedError final : public BaseError {
+  public:
+   explicit ListenerClosedError() {}
+
+   std::string what() const override;
+ };
+
+ class PipeClosedError final : public BaseError {
+  public:
+   explicit PipeClosedError() {}
+
+   std::string what() const override;
+ };
+
+ } // namespace tensorpipe
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/core/listener.h ADDED
@@ -0,0 +1,96 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #include <functional>
+ #include <map>
+ #include <memory>
+ #include <string>
+ #include <vector>
+
+ #include <tensorpipe/common/error.h>
+
+ namespace tensorpipe {
+
+ class ContextImpl;
+ class ListenerImpl;
+ class Pipe;
+
+ // The listener.
+ //
+ // Listeners are used to produce pipes. Depending on the type of the
+ // context, listeners may use a variety of addresses to listen on. For
+ // example, for TCP/IP sockets they listen on an IPv4 or IPv6 address,
+ // for Unix domain sockets they listen on a path, etcetera.
+ //
+ // A pipe can only be accepted from this listener after it has been
+ // fully established. This means that both its connection and all its
+ // side channels have been established.
+ //
+ class Listener final {
+   // Use the passkey idiom to allow make_shared to call what should be a private
+   // constructor. See https://abseil.io/tips/134 for more information.
+   struct ConstructorToken {};
+
+  public:
+   Listener(
+       ConstructorToken token,
+       std::shared_ptr<ContextImpl> context,
+       std::string id,
+       const std::vector<std::string>& urls);
+
+   //
+   // Entry points for user code
+   //
+
+   using accept_callback_fn =
+       std::function<void(const Error&, std::shared_ptr<Pipe>)>;
+
+   void accept(accept_callback_fn fn);
+
+   // Returns map with the materialized address of listeners by transport.
+   //
+   // If you don't bind a transport listener to a specific port or address, it
+   // may generate its address automatically. Then, in order to connect to the
+   // listener, the user must use a separate mechanism to communicate the
+   // materialized address to whoever wants to connect.
+   //
+   const std::map<std::string, std::string>& addresses() const;
+
+   // Returns materialized address for specific transport.
+   //
+   // See `addresses()` for more information.
+   //
+   const std::string& address(const std::string& transport) const;
+
+   // Returns URL with materialized address for specific transport.
+   //
+   // See `addresses()` for more information.
+   //
+   std::string url(const std::string& transport) const;
+
+   // Put the listener in a terminal state, aborting its pending operations and
+   // rejecting future ones, and releasing its resources. This may be carried
+   // out asynchronously, in the background. Since the pipes may occasionally
+   // use the listener to open new connections, closing a listener may trigger
+   // errors in the pipes.
+   void close();
+
+   ~Listener();
+
+  private:
+   // Using a shared_ptr allows us to detach the lifetime of the implementation
+   // from the public object's one and perform the destruction asynchronously.
+   const std::shared_ptr<ListenerImpl> impl_;
+
+   // Allow context to access constructor token.
+   friend ContextImpl;
+ };
+
+ } // namespace tensorpipe
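A sketch of the accept flow: the callback receives either an error (e.g. a ListenerClosedError once close() has been called) or a fully established pipe, and may be invoked from a background thread. The re-arming loop below is one common pattern, not the only one:

    #include <iostream>
    #include <memory>
    #include <tensorpipe/common/error.h>
    #include <tensorpipe/core/listener.h>
    #include <tensorpipe/core/pipe.h>

    // Sketch: `listener` is assumed to come from Context::listen().
    void acceptLoop(std::shared_ptr<tensorpipe::Listener> listener) {
      listener->accept([listener](
                           const tensorpipe::Error& error,
                           std::shared_ptr<tensorpipe::Pipe> pipe) {
        if (error) {
          std::cerr << "accept failed: " << error.what() << std::endl;
          return;
        }
        std::cout << "pipe from " << pipe->getRemoteName() << std::endl;
        // Re-arm to accept the next incoming pipe.
        acceptLoop(listener);
      });
    }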
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/core/message.h ADDED
@@ -0,0 +1,104 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #include <cstddef>
+ #include <string>
+ #include <vector>
+
+ #include <tensorpipe/common/buffer.h>
+ #include <tensorpipe/common/optional.h>
+
+ namespace tensorpipe {
+
+ // Messages consist of a primary buffer and zero or more separate
+ // buffers. The primary buffer is always a host-side memory region that
+ // contains a serialized version of the message we're dealing with. This
+ // serialized message, in turn, may have references to the separate
+ // buffers that accompany the primary buffer. These separate buffers may
+ // point to any type of memory, host-side or device-side.
+ //
+ class Message final {
+  public:
+   std::string metadata;
+
+   struct Payload {
+     void* data{nullptr};
+     size_t length{0};
+
+     // Users may include arbitrary metadata in the following fields.
+     // This may contain allocation hints for the receiver, for example.
+     std::string metadata;
+   };
+
+   // Holds the payloads that are transferred over the primary connection.
+   std::vector<Payload> payloads;
+
+   struct Tensor {
+     tensorpipe::Buffer buffer;
+     size_t length{0};
+
+     // Users may optionally specify the target device, on which the receiver
+     // should allocate memory for this tensor. If left unset, the receiver will
+     // choose one at their convenience.
+     optional<Device> targetDevice;
+
+     // Users may include arbitrary metadata in the following field.
+     // This may contain allocation hints for the receiver, for example.
+     std::string metadata;
+   };
+
+   // Holds the tensors that are offered to the side channels.
+   std::vector<Tensor> tensors;
+ };
+
+ // Descriptors consist of metadata required by the receiver to allocate memory
+ // for an incoming message.
+ class Descriptor final {
+  public:
+   std::string metadata;
+
+   struct Payload {
+     size_t length{0};
+     std::string metadata;
+   };
+   std::vector<Payload> payloads;
+
+   struct Tensor {
+     size_t length{0};
+
+     // This is the sender-side device from which this tensor is being sent.
+     Device sourceDevice;
+
+     // The sender may optionally specify a target device, in which case the
+     // receiver must allocate memory for this tensor on the specified device.
+     optional<Device> targetDevice;
+
+     std::string metadata;
+   };
+   std::vector<Tensor> tensors;
+ };
+
+ // Allocations consist of actual memory allocations provided by the receiver for
+ // an incoming message. They must match the length and target devices specified
+ // in the corresponding Descriptor.
+ class Allocation final {
+  public:
+   struct Payload {
+     void* data{nullptr};
+   };
+   std::vector<Payload> payloads;
+
+   struct Tensor {
+     tensorpipe::Buffer buffer;
+   };
+   std::vector<Tensor> tensors;
+ };
+
+ } // namespace tensorpipe
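A sketch of filling in a Message on the send side, with one payload and one CPU tensor. The structs do not own the memory they point to, so the caller must keep the buffers alive until the write callback fires. CpuBuffer comes from tensorpipe/common/cpu_buffer.h in this same diff, and its implicit conversion to Buffer is assumed:

    #include <cstddef>
    #include <utility>
    #include <tensorpipe/common/cpu_buffer.h>
    #include <tensorpipe/core/message.h>

    // Sketch: wrap caller-owned memory into an outgoing message.
    tensorpipe::Message makeMessage(char* header, size_t headerLen,
                                    float* data, size_t numel) {
      tensorpipe::Message message;
      message.metadata = "example";

      tensorpipe::Message::Payload payload;
      payload.data = header;
      payload.length = headerLen;
      message.payloads.push_back(std::move(payload));

      tensorpipe::Message::Tensor tensor;
      tensor.buffer = tensorpipe::CpuBuffer{data};
      tensor.length = numel * sizeof(float);
      // targetDevice left unset: the receiver picks where to allocate.
      message.tensors.push_back(std::move(tensor));
      return message;
    }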
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/core/pipe.h ADDED
@@ -0,0 +1,98 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #include <functional>
+ #include <memory>
+ #include <string>
+
+ #include <tensorpipe/common/error.h>
+ #include <tensorpipe/core/message.h>
+ #include <tensorpipe/transport/context.h>
+
+ namespace tensorpipe {
+
+ class ContextImpl;
+ class ListenerImpl;
+ class PipeImpl;
+
+ // The pipe.
+ //
+ // Pipes represent a set of connections between a pair of processes.
+ // Unlike POSIX pipes, they are message oriented instead of byte
+ // oriented. Messages that are sent through the pipe may use whatever
+ // channels are at their disposal to make it happen. If the pair of
+ // processes happen to be colocated on the same machine, they may
+ // leverage a region of shared memory to communicate the primary
+ // buffer of a message. Secondary buffers may use shared memory as
+ // well, if they're located in CPU memory, or use a CUDA device to
+ // device copy if they're located in NVIDIA GPU memory. If the pair is
+ // located across the world, they may simply use a set of TCP
+ // connections to communicate.
+ //
+ class Pipe final {
+   // Use the passkey idiom to allow make_shared to call what should be a private
+   // constructor. See https://abseil.io/tips/134 for more information.
+   struct ConstructorToken {};
+
+  public:
+   //
+   // Initialization
+   //
+
+   Pipe(
+       ConstructorToken token,
+       std::shared_ptr<ContextImpl> context,
+       std::string id,
+       std::string remoteName,
+       const std::string& url);
+
+   Pipe(ConstructorToken token, std::shared_ptr<PipeImpl> impl);
+
+   //
+   // Entry points for user code
+   //
+
+   using read_descriptor_callback_fn =
+       std::function<void(const Error&, Descriptor)>;
+
+   void readDescriptor(read_descriptor_callback_fn fn);
+
+   using read_callback_fn = std::function<void(const Error&)>;
+
+   void read(Allocation allocation, read_callback_fn fn);
+
+   using write_callback_fn = std::function<void(const Error&)>;
+
+   void write(Message message, write_callback_fn fn);
+
+   // Retrieve the user-defined name that was given to the constructor of the
+   // context on the remote side, if any (if not, this will be the empty string).
+   // This is intended to help in logging and debugging only.
+   const std::string& getRemoteName();
+
+   // Put the pipe in a terminal state, aborting its pending operations and
+   // rejecting future ones, and releasing its resources. This may be carried
+   // out asynchronously, in the background.
+   void close();
+
+   ~Pipe();
+
+  private:
+   // Using a shared_ptr allows us to detach the lifetime of the implementation
+   // from the public object's one and perform the destruction asynchronously.
+   const std::shared_ptr<PipeImpl> impl_;
+
+   // Allow context to access constructor token.
+   friend ContextImpl;
+   // Allow listener to access constructor token.
+   friend ListenerImpl;
+ };
+
+ } // namespace tensorpipe
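A sketch of the three-step receive flow this API implies: readDescriptor announces an incoming message, the caller allocates buffers matching the descriptor, and read fills them. For brevity this naively allocates everything on the host with malloc (and leaks on the error paths), whereas real code would honor each tensor's targetDevice and manage ownership:

    #include <cstdlib>
    #include <memory>
    #include <utility>
    #include <tensorpipe/common/cpu_buffer.h>
    #include <tensorpipe/core/pipe.h>

    void receiveOne(std::shared_ptr<tensorpipe::Pipe> pipe) {
      pipe->readDescriptor([pipe](const tensorpipe::Error& error,
                                  tensorpipe::Descriptor descriptor) {
        if (error) {
          return;
        }
        // Allocate exactly what the descriptor announces.
        tensorpipe::Allocation allocation;
        for (const auto& payload : descriptor.payloads) {
          allocation.payloads.push_back({std::malloc(payload.length)});
        }
        for (const auto& tensor : descriptor.tensors) {
          allocation.tensors.push_back(
              {tensorpipe::CpuBuffer{std::malloc(tensor.length)}});
        }
        pipe->read(std::move(allocation), [](const tensorpipe::Error& error) {
          // Buffers are filled (or an error occurred); consume/free them here.
        });
      });
    }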
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/context.h ADDED
@@ -0,0 +1,78 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #include <memory>
+ #include <string>
+
+ namespace tensorpipe {
+ namespace transport {
+
+ class Connection;
+ class Listener;
+
+ class Context {
+  public:
+   virtual std::shared_ptr<Connection> connect(std::string addr) = 0;
+
+   virtual std::shared_ptr<Listener> listen(std::string addr) = 0;
+
+   // Return whether the context is able to operate correctly.
+   //
+   // Some transport types may be unable to perform as intended under
+   // some circumstances (e.g., specialized hardware unavailable, lack
+   // of permissions). They can report it through this method in order
+   // for the core context to avoid registering them in the first place.
+   //
+   virtual bool isViable() const = 0;
+
+   // Return string to describe the domain for this context.
+   //
+   // Two processes with a context of the same type can connect to each
+   // other if one side's domain descriptor is "accepted" by the other
+   // one, using the canCommunicateWithRemote method below. That method
+   // must be symmetric, and unless overridden defaults to string
+   // comparison.
+   //
+   // For example, for a transport that leverages TCP/IP, this may be
+   // as simple as the address family (assuming we can route between
+   // any two processes). For a transport that leverages shared memory,
+   // this descriptor must uniquely identify the machine, such that
+   // only co-located processes generate the same domain descriptor.
+   //
+   virtual const std::string& domainDescriptor() const = 0;
+
+   // Compare local and remote domain descriptor for compatibility.
+   //
+   // Determine whether a connection can be opened between this context
+   // and a remote one that has the given domain descriptor. This
+   // function needs to be symmetric: if we called this method on the
+   // remote context with the local descriptor we should get the same
+   // answer. Unless overridden it defaults to string comparison.
+   //
+   virtual bool canCommunicateWithRemote(
+       const std::string& remoteDomainDescriptor) const {
+     return domainDescriptor() == remoteDomainDescriptor;
+   }
+
+   // Tell the context what its identifier is.
+   //
+   // This is only supposed to be called from the high-level context or from
+   // channel contexts. It will only be used for logging and debugging purposes.
+   virtual void setId(std::string id) = 0;
+
+   virtual void close() = 0;
+
+   virtual void join() = 0;
+
+   virtual ~Context() = default;
+ };
+
+ } // namespace transport
+ } // namespace tensorpipe
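A sketch of what a transport might do with the viability and domain-descriptor contract. Everything here is hypothetical (the class name, the boot-ID scheme, the descriptor format), and the remaining pure virtual methods are intentionally left out, so the class below compiles but stays abstract:

    #include <string>
    #include <utility>
    #include <tensorpipe/transport/context.h>

    // Hypothetical shared-memory-style transport context: the domain
    // descriptor embeds a machine-unique ID, so canCommunicateWithRemote's
    // default string-equality check only matches co-located processes.
    class MyShmContextSketch : public tensorpipe::transport::Context {
     public:
      explicit MyShmContextSketch(std::string bootId)
          : domainDescriptor_("myshm:" + std::move(bootId)) {}

      bool isViable() const override {
        // A real transport would probe for e.g. /dev/shm access here.
        return true;
      }

      const std::string& domainDescriptor() const override {
        return domainDescriptor_;
      }

      // connect(), listen(), setId(), close(), join() omitted in this sketch.

     private:
      std::string domainDescriptor_;
    };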
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/error.h ADDED
@@ -0,0 +1,47 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #include <string>
+
+ #include <tensorpipe/common/error.h>
+
+ namespace tensorpipe {
+ namespace transport {
+
+ class ContextClosedError final : public BaseError {
+  public:
+   ContextClosedError() {}
+
+   std::string what() const override;
+ };
+
+ class ListenerClosedError final : public BaseError {
+  public:
+   ListenerClosedError() {}
+
+   std::string what() const override;
+ };
+
+ class ConnectionClosedError final : public BaseError {
+  public:
+   ConnectionClosedError() {}
+
+   std::string what() const override;
+ };
+
+ class ContextNotViableError final : public BaseError {
+  public:
+   ContextNotViableError() {}
+
+   std::string what() const override;
+ };
+
+ } // namespace transport
+ } // namespace tensorpipe
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/ibv/error.h ADDED
@@ -0,0 +1,48 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #include <string>
+
+ #include <tensorpipe/transport/error.h>
+
+ namespace tensorpipe {
+ namespace transport {
+ namespace ibv {
+
+ class IbvError final : public BaseError {
+  public:
+   explicit IbvError(std::string error) : error_(error) {}
+
+   std::string what() const override;
+
+  private:
+   std::string error_;
+ };
+
+ class GetaddrinfoError final : public BaseError {
+  public:
+   explicit GetaddrinfoError(int error) : error_(error) {}
+
+   std::string what() const override;
+
+  private:
+   int error_;
+ };
+
+ class NoAddrFoundError final : public BaseError {
+  public:
+   NoAddrFoundError() {}
+
+   std::string what() const override;
+ };
+
+ } // namespace ibv
+ } // namespace transport
+ } // namespace tensorpipe
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/ibv/factory.h ADDED
@@ -0,0 +1,23 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #include <memory>
+
+ #include <tensorpipe/transport/context.h>
+
+ namespace tensorpipe {
+ namespace transport {
+ namespace ibv {
+
+ std::shared_ptr<Context> create();
+
+ } // namespace ibv
+ } // namespace transport
+ } // namespace tensorpipe
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/ibv/utility.h ADDED
@@ -0,0 +1,26 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #include <string>
+ #include <tuple>
+
+ #include <tensorpipe/common/error.h>
+
+ namespace tensorpipe {
+ namespace transport {
+ namespace ibv {
+
+ std::tuple<Error, std::string> lookupAddrForIface(std::string iface);
+
+ std::tuple<Error, std::string> lookupAddrForHostname();
+
+ } // namespace ibv
+ } // namespace transport
+ } // namespace tensorpipe
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/shm/factory.h ADDED
@@ -0,0 +1,23 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #include <memory>
+
+ #include <tensorpipe/transport/context.h>
+
+ namespace tensorpipe {
+ namespace transport {
+ namespace shm {
+
+ std::shared_ptr<Context> create();
+
+ } // namespace shm
+ } // namespace transport
+ } // namespace tensorpipe
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/uv/error.h ADDED
@@ -0,0 +1,38 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #include <string>
+
+ #include <tensorpipe/transport/error.h>
+
+ namespace tensorpipe {
+ namespace transport {
+ namespace uv {
+
+ class UVError final : public BaseError {
+  public:
+   explicit UVError(int error) : error_(error) {}
+
+   std::string what() const override;
+
+  private:
+   int error_;
+ };
+
+ class NoAddrFoundError final : public BaseError {
+  public:
+   NoAddrFoundError() {}
+
+   std::string what() const override;
+ };
+
+ } // namespace uv
+ } // namespace transport
+ } // namespace tensorpipe
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/uv/factory.h ADDED
@@ -0,0 +1,23 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #include <memory>
+
+ #include <tensorpipe/transport/context.h>
+
+ namespace tensorpipe {
+ namespace transport {
+ namespace uv {
+
+ std::shared_ptr<Context> create();
+
+ } // namespace uv
+ } // namespace transport
+ } // namespace tensorpipe
env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/uv/utility.h ADDED
@@ -0,0 +1,36 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ #pragma once
+
+ #include <string>
+ #include <tuple>
+
+ #include <sys/socket.h>
+
+ #include <tensorpipe/common/error.h>
+ #include <tensorpipe/common/optional.h>
+
+ namespace tensorpipe {
+ namespace transport {
+ namespace uv {
+
+ std::tuple<Error, std::string> lookupAddrForIface(std::string iface);
+
+ std::tuple<Error, std::string> lookupAddrForHostname();
+
+ // Try to replicate the same logic used by NCCL to find a node's own address.
+ // Roughly, it returns the "first" usable address it can find, and prioritizes
+ // the interfaces with an `ib` prefix and de-prioritizes those with a `docker`
+ // or `lo` prefix. It can optionally be restricted to returning only IPv4 or
+ // IPv6 addresses.
+ std::tuple<Error, std::string> lookupAddrLikeNccl(
+     optional<sa_family_t> familyFilter = nullopt);
+
+ } // namespace uv
+ } // namespace transport
+ } // namespace tensorpipe
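A sketch of calling the NCCL-style lookup declared above, first unfiltered and then restricted to IPv4 via the sa_family_t filter:

    #include <iostream>
    #include <string>
    #include <tuple>
    #include <sys/socket.h>
    #include <tensorpipe/transport/uv/utility.h>

    int main() {
      // Unfiltered: first usable address, NCCL-style interface ordering.
      tensorpipe::Error error;
      std::string addr;
      std::tie(error, addr) = tensorpipe::transport::uv::lookupAddrLikeNccl();
      if (error) {
        std::cerr << "lookup failed: " << error.what() << std::endl;
        return 1;
      }
      std::cout << "own address: " << addr << std::endl;

      // Restrict the search to IPv4 addresses only.
      std::tie(error, addr) =
          tensorpipe::transport::uv::lookupAddrLikeNccl(AF_INET);
      return 0;
    }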
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/CudaIPCTypes.h ADDED
@@ -0,0 +1,144 @@
+ #pragma once
+ #ifdef USE_CUDA
+ #include <c10/core/Allocator.h>
+ #include <c10/cuda/CUDACachingAllocator.h>
+ #include <c10/cuda/CUDAException.h>
+ #include <c10/util/Logging.h>
+ #include <cuda_runtime_api.h>
+ #include <torch/csrc/Export.h>
+ #include <cstddef>
+ namespace torch {
+
+ TORCH_CUDA_CU_API bool CudaIPCCollect();
+
+ struct CudaIPCReceivedData final {
+   CudaIPCReceivedData() = default;
+   explicit CudaIPCReceivedData(std::shared_ptr<void> shared_ptr)
+       : shared_ptr_(std::move(shared_ptr)) {}
+   std::shared_ptr<void> shared_ptr_;
+ };
+
+ struct CudaIPCSentData final {
+   std::string handle_;
+   uint64_t offset_;
+   uint64_t* counter_ptr_; // Reference counter shared memory block
+   at::DataPtr original_ptr_; // Original mem allocation
+   cudaEvent_t event_; // Sync cuEventDestroy
+   bool event_sync_required_;
+   at::Device device_;
+
+   CudaIPCSentData(
+       std::string handle,
+       uint64_t offset,
+       uint64_t* counter_ptr,
+       at::Device device);
+   ~CudaIPCSentData();
+
+   uint64_t counter_value();
+   std::string handle() {
+     return handle_;
+   }
+   uint64_t offset() {
+     return offset_;
+   }
+   void set_original_ptr(at::DataPtr data_ptr) {
+     original_ptr_ = std::move(data_ptr);
+   }
+ };
+
+ TORCH_CUDA_CU_API at::DataPtr GetNewRefCountedSentData(
+     void* data,
+     at::Device device);
+
+ namespace {
+
+ inline constexpr int64_t CUDA_IPC_REF_COUNTER_FILE_SIZE = 10000;
+ inline constexpr int64_t CUDA_IPC_WARN_AFTER_X_BLOCKS_IN_LIMBO = 1000;
+ // It was determined empirically that CUDA (v10.1 and below) has a limit on
+ // the number of recorded blocking interprocess events, around ~22,000. To
+ // give us leeway, we picked 1000, as it gives us enough events to share
+ // tensors effectively.
+ inline constexpr int64_t CUDA_IPC_MAXIMUM_EVENTS_TO_USE = 1000;
+
+ // All data blocks pending deletion with a non-zero reference counter go here
+ struct CudaIPCSentDataLimbo final {
+   ~CudaIPCSentDataLimbo();
+   bool collect();
+   void add(std::unique_ptr<CudaIPCSentData> shared_block);
+   uint64_t size();
+
+  private:
+   // TODO: Can be changed to FIFO in order to avoid full traverse on every
+   // collect()
+   std::vector<std::unique_ptr<CudaIPCSentData>> shared_blocks_;
+   std::mutex limbo_mutex_;
+ };
+
+ struct CudaIPCRefCountersFile final {
+   CudaIPCRefCountersFile(
+       std::string handle,
+       uint64_t size,
+       at::DataPtr data_ptr)
+       : next_offset_(0),
+         size_(size),
+         used_slots_(0),
+         handle_(std::move(handle)),
+         refcounted_shared_mem_(std::move(data_ptr)) {}
+
+   uint64_t* counter_ptr() {
+     return static_cast<uint64_t*>(refcounted_shared_mem_.get()) + next_offset_;
+   }
+
+   void set_counter(uint64_t value) {
+     *counter_ptr() = value;
+   }
+
+   bool have_offsets() {
+     return next_offset_ < size_;
+   }
+
+   bool offsets_in_use() {
+     return used_slots_;
+   }
+
+   uint64_t get_offset() {
+     return next_offset_;
+   }
+
+   void rotate_offset() {
+     next_offset_++;
+     used_slots_++;
+   }
+
+   void return_offset(uint64_t offset /* unused */) {
+     used_slots_--;
+   }
+
+   std::string handle() {
+     return handle_;
+   }
+
+  private:
+   uint64_t next_offset_;
+   uint64_t size_;
+   uint64_t used_slots_;
+   std::string handle_;
+   at::DataPtr refcounted_shared_mem_;
+ };
+
+ } // namespace
+ } // namespace torch
+
+ namespace c10 {
+ namespace {
+ class CudaIPCCollectCallback : public FreeMemoryCallback {
+  public:
+   bool Execute() override {
+     return torch::CudaIPCCollect();
+   }
+ };
+ } // namespace
+
+ } // namespace c10
+
+ #endif
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/DataLoader.h ADDED
@@ -0,0 +1,6 @@
+ #pragma once
+
+ #include <torch/csrc/python_headers.h>
+
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-avoid-non-const-global-variables,modernize-avoid-c-arrays)
+ extern PyMethodDef DataLoaderMethods[];
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Device.h ADDED
@@ -0,0 +1,21 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/python_headers.h>
+
+ #include <ATen/Device.h>
+
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+ struct TORCH_API THPDevice {
+   PyObject_HEAD at::Device device;
+ };
+
+ TORCH_API extern PyTypeObject THPDeviceType;
+
+ inline bool THPDevice_Check(PyObject* obj) {
+   return Py_TYPE(obj) == &THPDeviceType;
+ }
+
+ TORCH_API PyObject* THPDevice_New(const at::Device& device);
+
+ TORCH_API void THPDevice_init(PyObject* module);
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Dtype.h ADDED
@@ -0,0 +1,30 @@
+ #pragma once
+
+ #include <c10/core/ScalarType.h>
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/python_headers.h>
+
+ const int DTYPE_NAME_LEN = 64;
+
+ struct TORCH_API THPDtype {
+   PyObject_HEAD at::ScalarType scalar_type;
+   // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
+   char name[DTYPE_NAME_LEN + 1];
+ };
+
+ TORCH_API extern PyTypeObject THPDtypeType;
+
+ inline bool THPDtype_Check(PyObject* obj) {
+   return Py_TYPE(obj) == &THPDtypeType;
+ }
+
+ inline bool THPPythonScalarType_Check(PyObject* obj) {
+   return obj == (PyObject*)(&PyFloat_Type) ||
+       obj == (PyObject*)(&PyBool_Type) || obj == (PyObject*)(&PyLong_Type);
+ }
+
+ TORCH_API PyObject* THPDtype_New(
+     at::ScalarType scalar_type,
+     const std::string& name);
+
+ void THPDtype_init(PyObject* module);
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/DynamicTypes.h ADDED
@@ -0,0 +1,36 @@
+ #pragma once
+
+ // Provides conversions between Python tensor objects and at::Tensor.
+
+ #include <torch/csrc/python_headers.h>
+
+ #include <ATen/Device.h>
+ #include <c10/core/Backend.h>
+ #include <c10/core/Layout.h>
+ #include <c10/core/ScalarType.h>
+ #include <c10/core/ScalarTypeToTypeMeta.h>
+ #include <torch/csrc/Export.h>
+
+ #include <memory>
+ #include <string>
+
+ struct THPDtype;
+ struct THPLayout;
+
+ namespace c10 {
+ struct Storage;
+ }
+
+ namespace torch {
+ void registerDtypeObject(THPDtype* dtype, at::ScalarType scalarType);
+ void registerLayoutObject(THPLayout* thp_layout, at::Layout layout);
+
+ TORCH_PYTHON_API PyObject* createPyObject(const at::Storage& storage);
+ at::Storage createStorage(PyObject* obj);
+ std::tuple<at::Storage, at::ScalarType, bool> createStorageGetType(
+     PyObject* obj);
+ bool isStorage(PyObject* obj);
+
+ TORCH_PYTHON_API THPDtype* getTHPDtype(at::ScalarType scalarType);
+ THPLayout* getTHPLayout(at::Layout layout);
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Exceptions.h ADDED
@@ -0,0 +1,414 @@
+ #pragma once
+
+ #include <exception>
+ #include <memory>
+ #include <mutex>
+ #include <queue>
+ #include <string>
+ #include <system_error>
+
+ #include <ATen/detail/FunctionTraits.h>
+ #include <c10/util/C++17.h>
+ #include <c10/util/Exception.h>
+ #include <c10/util/StringUtil.h>
+ #include <pybind11/pybind11.h>
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/runtime/jit_exception.h>
+ #include <torch/csrc/utils/cpp_stacktraces.h>
+ #include <torch/csrc/utils/pybind.h>
+
+ #if defined(USE_DISTRIBUTED) && defined(USE_C10D)
+ #include <torch/csrc/distributed/c10d/exception.h>
+ #endif
+
+ static inline void PyErr_SetString(PyObject* type, const std::string& message) {
+   PyErr_SetString(type, message.c_str());
+ }
+ /// NOTE [ Conversion Cpp Python Warning ]
+ /// The warning handler cannot set python warnings immediately
+ /// as it requires acquiring the GIL (potential deadlock)
+ /// and would need to cleanly exit if the warning raised a
+ /// python error. To solve this, we buffer the warnings and
+ /// process them when we go back to python.
+ /// This requires the two try/catch blocks below to handle the
+ /// following cases:
+ ///   - If there is no Error raised in the inner try/catch, the
+ ///     buffered warnings are processed as python warnings.
+ ///     - If they don't raise an error, the function proceeds with the
+ ///       original return code.
+ ///     - If any of them raise an error, the error is set (PyErr_*) and
+ ///       the destructor will raise a cpp exception python_error() that
+ ///       will be caught by the outer try/catch that will be able to change
+ ///       the return value of the function to reflect the error.
+ ///   - If an Error was raised in the inner try/catch, the inner try/catch
+ ///     must set the python error. The buffered warnings are then
+ ///     processed as cpp warnings as we cannot predict beforehand
+ ///     whether a python warning will raise an error or not and we
+ ///     cannot handle two errors at the same time.
+ /// This advanced handler will only be used in the current thread.
+ /// If any other thread is used, warnings will be processed as
+ /// cpp warnings.
+ #define HANDLE_TH_ERRORS \
+   try { \
+     torch::PyWarningHandler __enforce_warning_buffer; \
+     try {
+ #define _CATCH_GENERIC_ERROR(ErrorType, PythonErrorType, retstmnt) \
+   catch (const c10::ErrorType& e) { \
+     auto msg = torch::get_cpp_stacktraces_enabled() \
+         ? e.what() \
+         : e.what_without_backtrace(); \
+     PyErr_SetString(PythonErrorType, torch::processErrorMsg(msg)); \
+     retstmnt; \
+   }
+
+ // Only catch torch-specific exceptions
+ #define CATCH_CORE_ERRORS(retstmnt) \
+   catch (python_error & e) { \
+     e.restore(); \
+     retstmnt; \
+   } \
+   catch (py::error_already_set & e) { \
+     e.restore(); \
+     retstmnt; \
+   } \
+   _CATCH_GENERIC_ERROR(IndexError, PyExc_IndexError, retstmnt) \
+   _CATCH_GENERIC_ERROR(ValueError, PyExc_ValueError, retstmnt) \
+   _CATCH_GENERIC_ERROR(TypeError, PyExc_TypeError, retstmnt) \
+   _CATCH_GENERIC_ERROR( \
+       NotImplementedError, PyExc_NotImplementedError, retstmnt) \
+   _CATCH_GENERIC_ERROR(LinAlgError, THPException_LinAlgError, retstmnt) \
+   _CATCH_GENERIC_ERROR( \
+       OutOfMemoryError, THPException_OutOfMemoryError, retstmnt) \
+   _CATCH_GENERIC_ERROR( \
+       DistBackendError, THPException_DistBackendError, retstmnt) \
+   _CATCH_GENERIC_ERROR( \
+       DistNetworkError, THPException_DistNetworkError, retstmnt) \
+   _CATCH_GENERIC_ERROR(DistStoreError, THPException_DistStoreError, retstmnt) \
+   _CATCH_GENERIC_ERROR(DistError, THPException_DistError, retstmnt) \
+   _CATCH_GENERIC_ERROR(Error, PyExc_RuntimeError, retstmnt) \
+   catch (torch::PyTorchError & e) { \
+     auto msg = torch::processErrorMsg(e.what()); \
+     PyErr_SetString(e.python_type(), msg); \
+     retstmnt; \
+   }
+
+ #define CATCH_TH_ERRORS(retstmnt) CATCH_CORE_ERRORS(retstmnt)
+
+ #define CATCH_ALL_ERRORS(retstmnt) \
+   CATCH_TH_ERRORS(retstmnt) \
+   catch (const std::exception& e) { \
+     auto msg = torch::processErrorMsg(e.what()); \
+     PyErr_SetString(PyExc_RuntimeError, msg); \
+     retstmnt; \
+   }
+
+ #define END_HANDLE_TH_ERRORS_PYBIND \
+   } \
+   catch (...) { \
+     __enforce_warning_buffer.set_in_exception(); \
+     throw; \
+   } \
+   } \
+   catch (py::error_already_set & e) { \
+     throw; \
+   } \
+   catch (py::builtin_exception & e) { \
+     throw; \
+   } \
+   catch (torch::jit::JITException & e) { \
+     throw; \
+   } \
+   catch (const std::exception& e) { \
+     torch::translate_exception_to_python(std::current_exception()); \
+     throw py::error_already_set(); \
+   }
+
+ #define END_HANDLE_TH_ERRORS_RET(retval) \
+   } \
+   catch (...) { \
+     __enforce_warning_buffer.set_in_exception(); \
+     throw; \
+   } \
+   } \
+   catch (const std::exception& e) { \
+     torch::translate_exception_to_python(std::current_exception()); \
+     return retval; \
+   }
+
+ #define END_HANDLE_TH_ERRORS END_HANDLE_TH_ERRORS_RET(nullptr)
+
+ extern PyObject *THPException_FatalError, *THPException_LinAlgError,
+     *THPException_OutOfMemoryError, *THPException_DistError,
+     *THPException_DistBackendError, *THPException_DistNetworkError,
+     *THPException_DistStoreError;
+
+ // Throwing this exception means that the python error flags have been already
+ // set and control should be immediately returned to the interpreter.
+ struct python_error : public std::exception {
+   python_error() : type(nullptr), value(nullptr), traceback(nullptr) {}
+
+   python_error(const python_error& other)
+       : type(other.type),
+         value(other.value),
+         traceback(other.traceback),
+         message(other.message) {
+     pybind11::gil_scoped_acquire gil;
+     Py_XINCREF(type);
+     Py_XINCREF(value);
+     Py_XINCREF(traceback);
+   }
+
+   python_error(python_error&& other) noexcept
+       : type(other.type),
+         value(other.value),
+         traceback(other.traceback),
+         message(std::move(other.message)) {
+     other.type = nullptr;
+     other.value = nullptr;
+     other.traceback = nullptr;
+   }
+
+   ~python_error() override {
+     if (type || value || traceback) {
+       pybind11::gil_scoped_acquire gil;
+       Py_XDECREF(type);
+       Py_XDECREF(value);
+       Py_XDECREF(traceback);
+     }
+   }
+
+   const char* what() const noexcept override {
+     return message.c_str();
+   }
+
+   void build_message() {
+     // Ensure we have the GIL.
+     pybind11::gil_scoped_acquire gil;
+
+     // No errors should be set when we enter the function since PyErr_Fetch
+     // clears the error indicator.
+     TORCH_INTERNAL_ASSERT(!PyErr_Occurred());
+
+     // Default message.
+     message = "python_error";
+
+     // Try to retrieve the error message from the value.
+     if (value != nullptr) {
+       // Reference count should not be zero.
+       TORCH_INTERNAL_ASSERT(Py_REFCNT(value) > 0);
+
+       PyObject* pyStr = PyObject_Str(value);
+       if (pyStr != nullptr) {
+         PyObject* encodedString =
+             PyUnicode_AsEncodedString(pyStr, "utf-8", "strict");
+         if (encodedString != nullptr) {
+           char* bytes = PyBytes_AS_STRING(encodedString);
+           if (bytes != nullptr) {
+             // Set the message.
+             message = std::string(bytes);
+           }
+           Py_XDECREF(encodedString);
+         }
+         Py_XDECREF(pyStr);
+       }
+     }
+
+     // Clear any errors since we don't want to propagate errors for functions
+     // that are trying to build a string for the error message.
+     PyErr_Clear();
+   }
+
+   /** Saves the exception so that it can be re-thrown on a different thread */
+   inline void persist() {
+     if (type)
+       return; // Don't overwrite exceptions
+     // PyErr_Fetch overwrites the pointers
+     pybind11::gil_scoped_acquire gil;
+     Py_XDECREF(type);
+     Py_XDECREF(value);
+     Py_XDECREF(traceback);
+     PyErr_Fetch(&type, &value, &traceback);
+     build_message();
+   }
+
+   /** Sets the current Python error from this exception */
+   inline void restore() {
+     if (!type)
+       return;
+     // PyErr_Restore steals references
+     pybind11::gil_scoped_acquire gil;
+     Py_XINCREF(type);
+     Py_XINCREF(value);
+     Py_XINCREF(traceback);
+     PyErr_Restore(type, value, traceback);
+   }
+
+   PyObject* type;
+   PyObject* value;
+   PyObject* traceback;
+
+   // Message to return to the user when 'what()' is invoked.
+   std::string message;
+ };
+
+ bool THPException_init(PyObject* module);
+
+ namespace torch {
+
+ // Set python current exception from a C++ exception
+ TORCH_PYTHON_API void translate_exception_to_python(const std::exception_ptr&);
+
+ TORCH_PYTHON_API std::string processErrorMsg(std::string str);
+
+ // Abstract base class for exceptions which translate to specific Python types
+ struct PyTorchError : public std::exception {
+   PyTorchError() = default;
+   PyTorchError(std::string msg_) : msg(std::move(msg_)) {}
+   virtual PyObject* python_type() = 0;
+   const char* what() const noexcept override {
+     return msg.c_str();
+   }
+   std::string msg;
+ };
+
+ // Declare a printf-like function on gcc & clang
+ // The compiler can then warn on invalid format specifiers
+ #ifdef __GNUC__
+ #define TORCH_FORMAT_FUNC(FORMAT_INDEX, VA_ARGS_INDEX) \
+   __attribute__((format(printf, FORMAT_INDEX, VA_ARGS_INDEX)))
+ #else
+ #define TORCH_FORMAT_FUNC(FORMAT_INDEX, VA_ARGS_INDEX)
+ #endif
+
+ // Translates to Python IndexError
+ struct IndexError : public PyTorchError {
+   using PyTorchError::PyTorchError;
+   IndexError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3);
+   PyObject* python_type() override {
+     return PyExc_IndexError;
+   }
+ };
+
+ // Translates to Python TypeError
+ struct TypeError : public PyTorchError {
+   using PyTorchError::PyTorchError;
+   TORCH_PYTHON_API TypeError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3);
+   PyObject* python_type() override {
+     return PyExc_TypeError;
+   }
+ };
+
+ // Translates to Python ValueError
+ struct ValueError : public PyTorchError {
+   using PyTorchError::PyTorchError;
+   TORCH_PYTHON_API ValueError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3);
+   PyObject* python_type() override {
+     return PyExc_ValueError;
+   }
+ };
+
+ // Translates to Python NotImplementedError
+ struct NotImplementedError : public PyTorchError {
+   NotImplementedError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3);
+   NotImplementedError() = default;
+   PyObject* python_type() override {
+     return PyExc_NotImplementedError;
+   }
+ };
+
+ // Translates to Python AttributeError
+ struct AttributeError : public PyTorchError {
+   AttributeError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3);
+   PyObject* python_type() override {
+     return PyExc_AttributeError;
+   }
+ };
+
+ // Translates to Python LinAlgError
+ struct LinAlgError : public PyTorchError {
+   LinAlgError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3);
+   PyObject* python_type() override {
+     return THPException_LinAlgError;
+   }
+ };
+
+ // ATen warning handler for Python
+ struct PyWarningHandler {
+   // Move actual handler into a separate class with a noexcept
+   // destructor. Otherwise, we need to force all WarningHandler
+   // subclasses to have a noexcept(false) destructor.
+   struct InternalHandler : at::WarningHandler {
+     ~InternalHandler() override = default;
+     void process(const c10::Warning& warning) override;
+
+     std::vector<c10::Warning> warning_buffer_;
+   };
+
+  public:
+   /// See NOTE [ Conversion Cpp Python Warning ] for noexcept justification
+   TORCH_PYTHON_API PyWarningHandler() noexcept(true);
+   // NOLINTNEXTLINE(bugprone-exception-escape)
+   TORCH_PYTHON_API ~PyWarningHandler() noexcept(false);
+
+   /** Call if an exception has been thrown
+
+    * Necessary to determine if it is safe to throw from the destructor since
+    * std::uncaught_exception is buggy on some platforms and generally
+    * unreliable across dynamic library calls.
+    */
+   void set_in_exception() {
+     in_exception_ = true;
+   }
+
+  private:
+   InternalHandler internal_handler_;
+   at::WarningHandler* prev_handler_;
+   bool in_exception_;
+ };
+
+ namespace detail {
+ template <typename Func, size_t i>
+ using Arg = typename invoke_traits<Func>::template arg<i>::type;
+
+ template <typename Func, size_t... Is>
+ auto wrap_pybind_function_impl_(
+     Func&& f,
+     std::index_sequence<Is...>,
+     bool release_gil) {
+   using result_type = typename invoke_traits<Func>::result_type;
+   namespace py = pybind11;
+
+   // f=f is needed to handle function references on older compilers
+   return [f = std::forward<Func>(f),
+           release_gil](Arg<Func, Is>... args) -> result_type {
+     HANDLE_TH_ERRORS
+     if (release_gil) {
+       py::gil_scoped_release no_gil;
+       return c10::guts::invoke(f, std::forward<Arg<Func, Is>>(args)...);
+     } else {
+       return c10::guts::invoke(f, std::forward<Arg<Func, Is>>(args)...);
+     }
+     END_HANDLE_TH_ERRORS_PYBIND
+   };
+ }
+ } // namespace detail
+
+ // Wrap a function with TH error and warning handling.
+ // Returns a function object suitable for registering with pybind11.
+ template <typename Func>
+ auto wrap_pybind_function(Func&& f) {
+   using traits = invoke_traits<Func>;
+   return torch::detail::wrap_pybind_function_impl_(
+       std::forward<Func>(f), std::make_index_sequence<traits::arity>{}, false);
+ }
+
+ // Wrap a function with TH error, warning handling and releases the GIL.
+ // Returns a function object suitable for registering with pybind11.
+ template <typename Func>
+ auto wrap_pybind_function_no_gil(Func&& f) {
+   using traits = invoke_traits<Func>;
+   return torch::detail::wrap_pybind_function_impl_(
+       std::forward<Func>(f), std::make_index_sequence<traits::arity>{}, true);
+ }
+
+ } // namespace torch
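A sketch of a hand-written CPython binding using the macros and exception types above; the function name is made up. Throwing python_error() after a failed CPython call leaves the already-set Python error in place (restore() is a no-op when no exception was captured), while torch::ValueError is translated to a Python ValueError by CATCH_CORE_ERRORS. For pybind11 bindings, wrap_pybind_function / wrap_pybind_function_no_gil apply the same handling automatically:

    #include <cmath>
    #include <torch/csrc/Exceptions.h>
    #include <torch/csrc/python_headers.h>

    // Hypothetical binding: square root with argument checking.
    static PyObject* THPExample_sqrt(PyObject* /*self*/, PyObject* arg) {
      HANDLE_TH_ERRORS
      double value = PyFloat_AsDouble(arg);
      if (PyErr_Occurred()) {
        // The Python error is already set; python_error carries control out.
        throw python_error();
      }
      if (value < 0) {
        // Becomes a Python ValueError on the way out.
        throw torch::ValueError("expected a non-negative value, got %f", value);
      }
      return PyFloat_FromDouble(std::sqrt(value));
      END_HANDLE_TH_ERRORS // returns nullptr after setting the Python error
    }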
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Export.h ADDED
@@ -0,0 +1,9 @@
+ #pragma once
+
+ #include <c10/macros/Export.h>
+
+ #ifdef THP_BUILD_MAIN_LIB
+ #define TORCH_PYTHON_API C10_EXPORT
+ #else
+ #define TORCH_PYTHON_API C10_IMPORT
+ #endif
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Generator.h ADDED
@@ -0,0 +1,28 @@
+ #pragma once
+
+ #include <ATen/core/Generator.h>
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/python_headers.h>
+
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+ struct THPGenerator {
+   PyObject_HEAD at::Generator cdata;
+ };
+
+ // Creates a new Python object wrapping the default at::Generator. The reference
+ // is borrowed. The caller should ensure that the at::Generator object lifetime
+ // lasts at least as long as the Python wrapper.
+ TORCH_PYTHON_API PyObject* THPGenerator_initDefaultGenerator(
+     at::Generator cdata);
+
+ #define THPGenerator_Check(obj) PyObject_IsInstance(obj, THPGeneratorClass)
+
+ TORCH_PYTHON_API extern PyObject* THPGeneratorClass;
+
+ bool THPGenerator_init(PyObject* module);
+
+ TORCH_PYTHON_API PyObject* THPGenerator_Wrap(at::Generator gen);
+
+ // Creates a new Python object for a Generator. The Generator must not already
+ // have a PyObject* associated with it.
+ PyObject* THPGenerator_NewWithVar(PyTypeObject* type, at::Generator gen);
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Layout.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/python_headers.h>
4
+
5
+ #include <ATen/Layout.h>
6
+
7
+ #include <string>
8
+
9
+ const int LAYOUT_NAME_LEN = 64;
10
+
11
+ struct THPLayout {
12
+ PyObject_HEAD at::Layout layout;
13
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
14
+ char name[LAYOUT_NAME_LEN + 1];
15
+ };
16
+
17
+ extern PyTypeObject THPLayoutType;
18
+
19
+ inline bool THPLayout_Check(PyObject* obj) {
20
+ return Py_TYPE(obj) == &THPLayoutType;
21
+ }
22
+
23
+ PyObject* THPLayout_New(at::Layout layout, const std::string& name);
24
+
25
+ void THPLayout_init(PyObject* module);
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/MemoryFormat.h ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/python_headers.h>
4
+
5
+ #include <c10/core/MemoryFormat.h>
6
+
7
+ #include <string>
8
+
9
+ const int MEMORY_FORMAT_NAME_LEN = 64;
10
+
11
+ struct THPMemoryFormat {
12
+ PyObject_HEAD at::MemoryFormat memory_format;
13
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
14
+ char name[MEMORY_FORMAT_NAME_LEN + 1];
15
+ };
16
+
17
+ extern PyTypeObject THPMemoryFormatType;
18
+
19
+ inline bool THPMemoryFormat_Check(PyObject* obj) {
20
+ return Py_TYPE(obj) == &THPMemoryFormatType;
21
+ }
22
+
23
+ PyObject* THPMemoryFormat_New(
24
+ at::MemoryFormat memory_format,
25
+ const std::string& name);
26
+
27
+ void THPMemoryFormat_init(PyObject* module);
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Module.h ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ #ifndef THP_MODULE_INC
2
+ #define THP_MODULE_INC
3
+
4
+ #define THP_STATELESS_ATTRIBUTE_NAME "_torch"
5
+
6
+ #endif
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/PyInterpreter.h ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/impl/PyInterpreter.h>
4
+ #include <torch/csrc/Export.h>
5
+
6
+ TORCH_PYTHON_API c10::impl::PyInterpreter* getPyInterpreter();
7
+ TORCH_PYTHON_API bool isMainPyInterpreter();
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/QScheme.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/python_headers.h>
4
+
5
+ #include <c10/core/QScheme.h>
6
+
7
+ #include <string>
8
+
9
+ constexpr int QSCHEME_NAME_LEN = 64;
10
+
11
+ struct THPQScheme {
12
+ PyObject_HEAD at::QScheme qscheme;
13
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
14
+ char name[QSCHEME_NAME_LEN + 1];
15
+ };
16
+
17
+ extern PyTypeObject THPQSchemeType;
18
+
19
+ inline bool THPQScheme_Check(PyObject* obj) {
20
+ return Py_TYPE(obj) == &THPQSchemeType;
21
+ }
22
+
23
+ PyObject* THPQScheme_New(at::QScheme qscheme, const std::string& name);
24
+
25
+ void THPQScheme_init(PyObject* module);
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Size.h ADDED
@@ -0,0 +1,15 @@
+ #pragma once
+
+ #include <torch/csrc/autograd/variable.h>
+ #include <torch/csrc/python_headers.h>
+ #include <cstdint>
+
+ extern PyTypeObject THPSizeType;
+
+ #define THPSize_Check(obj) (Py_TYPE(obj) == &THPSizeType)
+
+ PyObject* THPSize_New(const torch::autograd::Variable& t);
+ PyObject* THPSize_NewFromSizes(int64_t dim, const int64_t* sizes);
+ PyObject* THPSize_NewFromSymSizes(const at::Tensor& t);
+
+ void THPSize_init(PyObject* module);
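A minimal sketch of THPSize_NewFromSizes with arbitrary example dimensions; make_example_size is hypothetical.

// Hypothetical helper: builds a torch.Size for the shape (2, 3, 5).
PyObject* make_example_size() {
  static const int64_t dims[] = {2, 3, 5};
  return THPSize_NewFromSizes(3, dims);  // new reference, or nullptr on error
}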
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Storage.h ADDED
@@ -0,0 +1,56 @@
+ #ifndef THP_STORAGE_INC
+ #define THP_STORAGE_INC
+
+ #include <torch/csrc/Types.h>
+
+ #define THPStorageStr "torch.UntypedStorage"
+
+ struct THPStorage {
+   PyObject_HEAD;
+   c10::MaybeOwned<c10::Storage> cdata;
+   bool is_hermetic;
+ };
+
+ TORCH_PYTHON_API PyObject* THPStorage_Wrap(c10::Storage storage);
+ TORCH_PYTHON_API PyObject* THPStorage_NewWithStorage(
+     PyTypeObject* type,
+     c10::Storage _storage,
+     c10::impl::PyInterpreterStatus status,
+     bool allow_preexisting_pyobj = false);
+ extern PyTypeObject* THPStorageClass;
+
+ static inline bool THPStorage_CheckTypeExact(PyTypeObject* tp) {
+   return tp == THPStorageClass;
+ }
+
+ static inline bool THPStorage_CheckExact(PyObject* obj) {
+   return THPStorage_CheckTypeExact(Py_TYPE(obj));
+ }
+
+ inline bool THPStorage_Check(PyObject* obj) {
+   if (!THPStorageClass)
+     return false;
+
+   const auto result = PyObject_IsInstance(obj, (PyObject*)THPStorageClass);
+   if (result == -1)
+     throw python_error();
+   return result;
+ }
+
+ bool THPStorage_init(PyObject* module);
+ void THPStorage_postInit(PyObject* module);
+
+ void THPStorage_assertNotNull(THPStorage* storage);
+ void THPStorage_assertNotNull(PyObject* obj);
+
+ extern PyTypeObject THPStorageType;
+
+ inline const c10::Storage& THPStorage_Unpack(THPStorage* storage) {
+   return *storage->cdata;
+ }
+
+ inline const c10::Storage& THPStorage_Unpack(PyObject* obj) {
+   return THPStorage_Unpack(reinterpret_cast<THPStorage*>(obj));
+ }
+
+ #endif
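A short sketch of the check-then-unpack pattern above; storage_nbytes is hypothetical, and note that THPStorage_Check itself can throw python_error if PyObject_IsInstance fails.

#include <stdexcept>  // for the example's error path

// Hypothetical helper: validate and unpack a storage argument.
size_t storage_nbytes(PyObject* obj) {
  if (!THPStorage_Check(obj)) {
    throw std::runtime_error("expected a " THPStorageStr " object");
  }
  const c10::Storage& storage = THPStorage_Unpack(obj);
  return storage.nbytes();
}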
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/StorageMethods.h ADDED
@@ -0,0 +1,8 @@
+ #ifndef THP_STORAGE_METHODS_INC
+ #define THP_STORAGE_METHODS_INC
+
+ #include <Python.h>
+
+ PyMethodDef* THPStorage_getMethods();
+
+ #endif
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/StorageSharing.h ADDED
@@ -0,0 +1,8 @@
+ #ifndef THP_STORAGE_SHARING_INC
+ #define THP_STORAGE_SHARING_INC
+
+ #include <Python.h>
+
+ PyMethodDef* THPStorage_getSharingMethods();
+
+ #endif
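This header and StorageMethods.h each return a method table; below is a sketch of walking both, assuming the usual null-terminated PyMethodDef convention. dump_storage_method_names is hypothetical.

#include <cstdio>  // printf for the sketch

// Illustrative only: enumerate both tables, assuming NULL-terminated arrays.
void dump_storage_method_names() {
  for (PyMethodDef* def = THPStorage_getMethods(); def->ml_name; ++def) {
    printf("method: %s\n", def->ml_name);
  }
  for (PyMethodDef* def = THPStorage_getSharingMethods(); def->ml_name; ++def) {
    printf("sharing method: %s\n", def->ml_name);
  }
}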
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Stream.h ADDED
@@ -0,0 +1,23 @@
+ #ifndef THP_STREAM_INC
+ #define THP_STREAM_INC
+
+ #include <c10/core/Stream.h>
+ #include <c10/macros/Export.h>
+ #include <torch/csrc/python_headers.h>
+
+ struct THPStream {
+   PyObject_HEAD int64_t stream_id;
+   int64_t device_type;
+   int64_t device_index;
+ };
+ extern TORCH_API PyTypeObject* THPStreamClass;
+
+ void THPStream_init(PyObject* module);
+
+ inline bool THPStream_Check(PyObject* obj) {
+   return THPStreamClass && PyObject_IsInstance(obj, (PyObject*)THPStreamClass);
+ }
+
+ PyObject* THPStream_Wrap(const c10::Stream& stream);
+
+ #endif // THP_STREAM_INC
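A minimal sketch of wrapping a stream for Python; wrap_default_cpu_stream is hypothetical and uses c10::Stream's DEFAULT constructor with an example CPU device.

// Hypothetical helper, for illustration only.
PyObject* wrap_default_cpu_stream() {
  c10::Stream stream(
      c10::Stream::DEFAULT, c10::Device(c10::DeviceType::CPU, -1));
  return THPStream_Wrap(stream);
}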
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/THConcat.h ADDED
@@ -0,0 +1,19 @@
+ #pragma once
+
+ #define TH_CONCAT_STRING_2(x, y) TH_CONCAT_STRING_2_EXPAND(x, y)
+ #define TH_CONCAT_STRING_2_EXPAND(x, y) #x #y
+
+ #define TH_CONCAT_STRING_3(x, y, z) TH_CONCAT_STRING_3_EXPAND(x, y, z)
+ #define TH_CONCAT_STRING_3_EXPAND(x, y, z) #x #y #z
+
+ #define TH_CONCAT_STRING_4(x, y, z, w) TH_CONCAT_STRING_4_EXPAND(x, y, z, w)
+ #define TH_CONCAT_STRING_4_EXPAND(x, y, z, w) #x #y #z #w
+
+ #define TH_CONCAT_2(x, y) TH_CONCAT_2_EXPAND(x, y)
+ #define TH_CONCAT_2_EXPAND(x, y) x##y
+
+ #define TH_CONCAT_3(x, y, z) TH_CONCAT_3_EXPAND(x, y, z)
+ #define TH_CONCAT_3_EXPAND(x, y, z) x##y##z
+
+ #define TH_CONCAT_4_EXPAND(x, y, z, w) x##y##z##w
+ #define TH_CONCAT_4(x, y, z, w) TH_CONCAT_4_EXPAND(x, y, z, w)
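The *_EXPAND indirection forces arguments to be macro-expanded before pasting or stringizing; PREFIX and SUFFIX below are example macros, not part of the header.

#define PREFIX THP
#define SUFFIX Storage
// TH_CONCAT_2(PREFIX, SUFFIX)        -> THPStorage   (token pasting)
// TH_CONCAT_STRING_2(PREFIX, SUFFIX) -> "THP" "Storage" == "THPStorage"
// Without the extra expansion step, ## and # would paste/stringize the
// literal tokens PREFIX and SUFFIX instead of their replacements.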
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/THP.h ADDED
@@ -0,0 +1,30 @@
+ #ifndef THP_H
+ #define THP_H
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/python_headers.h>
+
+ // Back-compatibility macros, Thanks to http://cx-oracle.sourceforge.net/
+ // define PyInt_* macros for Python 3.x. NB: We must include Python.h first,
+ // otherwise we'll incorrectly conclude PyInt_Check isn't defined!
+ #ifndef PyInt_Check
+ #define PyInt_Check PyLong_Check
+ #define PyInt_FromLong PyLong_FromLong
+ #define PyInt_AsLong PyLong_AsLong
+ #define PyInt_Type PyLong_Type
+ #endif
+
+ #include <torch/csrc/Exceptions.h>
+ #include <torch/csrc/Generator.h>
+ #include <torch/csrc/Module.h>
+ #include <torch/csrc/Size.h>
+ #include <torch/csrc/Storage.h>
+ #include <torch/csrc/Types.h>
+ #include <torch/csrc/utils.h> // This requires defined Storage and Tensor types
+ #include <torch/csrc/utils/byte_order.h>
+
+ #include <torch/csrc/serialization.h>
+
+ #include <torch/csrc/autograd/python_autograd.h>
+
+ #endif
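With the PyInt_* shims above, Python-2-era call sites keep compiling on Python 3; a small sketch, where as_long is hypothetical.

// Hypothetical helper: on Python 3 these macros resolve to PyLong_Check
// and PyLong_AsLong.
long as_long(PyObject* obj) {
  if (PyInt_Check(obj)) {
    return PyInt_AsLong(obj);
  }
  return -1;  // example fallback only
}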
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/TypeInfo.h ADDED
@@ -0,0 +1,26 @@
+ #pragma once
+
+ #include <torch/csrc/python_headers.h>
+
+ #include <ATen/ATen.h>
+
+ struct THPDTypeInfo {
+   PyObject_HEAD at::ScalarType type;
+ };
+
+ struct THPFInfo : THPDTypeInfo {};
+
+ struct THPIInfo : THPDTypeInfo {};
+
+ extern PyTypeObject THPFInfoType;
+ extern PyTypeObject THPIInfoType;
+
+ inline bool THPFInfo_Check(PyObject* obj) {
+   return Py_TYPE(obj) == &THPFInfoType;
+ }
+
+ inline bool THPIInfo_Check(PyObject* obj) {
+   return Py_TYPE(obj) == &THPIInfoType;
+ }
+
+ void THPDTypeInfo_init(PyObject* module);
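A sketch of reading the underlying scalar type from either info object; dtype_of_info is hypothetical.

#include <stdexcept>  // for the example's error path

// Hypothetical helper: works for both torch.finfo and torch.iinfo objects,
// since both derive from THPDTypeInfo.
at::ScalarType dtype_of_info(PyObject* obj) {
  if (THPFInfo_Check(obj) || THPIInfo_Check(obj)) {
    return reinterpret_cast<THPDTypeInfo*>(obj)->type;
  }
  throw std::runtime_error("expected a torch.finfo or torch.iinfo object");
}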
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Types.h ADDED
@@ -0,0 +1,13 @@
+ #ifndef THP_TYPES_INC
+ #define THP_TYPES_INC
+
+ #include <cstddef>
+
+ #ifndef INT64_MAX
+ #include <cstdint>
+ #endif
+
+ template <typename T>
+ struct THPTypeInfo {};
+
+ #endif