RMM 23.12 — RAPIDS Memory Manager
Doxygen source listing for cuda_async_view_memory_resource.hpp
(See the generated documentation page for this file.)
1 /*
2  * Copyright (c) 2021, NVIDIA CORPORATION.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 #pragma once
17 
18 #include <rmm/cuda_device.hpp>
19 #include <rmm/cuda_stream_view.hpp>
20 #include <rmm/detail/cuda_util.hpp>
21 #include <rmm/detail/dynamic_load_runtime.hpp>
22 #include <rmm/detail/error.hpp>
24 
25 #include <rmm/detail/thrust_namespace.h>
26 #include <thrust/optional.h>
27 
28 #include <cuda_runtime_api.h>
29 
30 #include <cstddef>
31 #include <limits>
32 
33 #if CUDART_VERSION >= 11020 // 11.2 introduced cudaMallocAsync
34 #define RMM_CUDA_MALLOC_ASYNC_SUPPORT
35 #endif
36 
37 namespace rmm::mr {
49  public:
50 #ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
61  cuda_async_view_memory_resource(cudaMemPool_t valid_pool_handle)
62  : cuda_pool_handle_{[valid_pool_handle]() {
63  RMM_EXPECTS(nullptr != valid_pool_handle, "Unexpected null pool handle.");
64  return valid_pool_handle;
65  }()}
66  {
67  // Check if cudaMallocAsync Memory pool supported
68  auto const device = rmm::get_current_cuda_device();
69  int cuda_pool_supported{};
70  auto result =
71  cudaDeviceGetAttribute(&cuda_pool_supported, cudaDevAttrMemoryPoolsSupported, device.value());
72  RMM_EXPECTS(result == cudaSuccess && cuda_pool_supported,
73  "cudaMallocAsync not supported with this CUDA driver/runtime version");
74  }
75 #endif
76 
77 #ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
82  [[nodiscard]] cudaMemPool_t pool_handle() const noexcept { return cuda_pool_handle_; }
83 #endif
84 
87  default;
89  default;
91  default;
93  default;
94 
101  [[nodiscard]] bool supports_streams() const noexcept override { return true; }
102 
108  [[nodiscard]] bool supports_get_mem_info() const noexcept override { return false; }
109 
 private:
#ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
  // Non-owning handle to the CUDA memory pool; validated non-null by the
  // converting constructor. The pool's lifetime is managed by the caller.
  cudaMemPool_t cuda_pool_handle_{};
#endif
114 
124  void* do_allocate(std::size_t bytes, rmm::cuda_stream_view stream) override
125  {
126  void* ptr{nullptr};
127 #ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
128  if (bytes > 0) {
129  RMM_CUDA_TRY_ALLOC(rmm::detail::async_alloc::cudaMallocFromPoolAsync(
130  &ptr, bytes, pool_handle(), stream.value()));
131  }
132 #else
133  (void)bytes;
134  (void)stream;
135 #endif
136  return ptr;
137  }
138 
147  void do_deallocate(void* ptr,
148  [[maybe_unused]] std::size_t bytes,
149  rmm::cuda_stream_view stream) override
150  {
151 #ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
152  if (ptr != nullptr) {
153  RMM_ASSERT_CUDA_SUCCESS(rmm::detail::async_alloc::cudaFreeAsync(ptr, stream.value()));
154  }
155 #else
156  (void)ptr;
157  (void)bytes;
158  (void)stream;
159 #endif
160  }
161 
169  [[nodiscard]] bool do_is_equal(device_memory_resource const& other) const noexcept override
170  {
171  return dynamic_cast<cuda_async_view_memory_resource const*>(&other) != nullptr;
172  }
173 
181  [[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(
182  rmm::cuda_stream_view) const override
183  {
184  return std::make_pair(0, 0);
185  }
186 };
187  // end of group
189 } // namespace rmm::mr
Strongly-typed non-owning wrapper for CUDA streams with default constructor.
Definition: cuda_stream_view.hpp:41
constexpr cudaStream_t value() const noexcept
Get the wrapped stream.
Definition: cuda_stream_view.hpp:75
device_memory_resource derived class that uses cudaMallocAsync/cudaFreeAsync for allocation/deallocat...
Definition: cuda_async_view_memory_resource.hpp:48
bool supports_streams() const noexcept override
Query whether the resource supports use of non-null CUDA streams for allocation/deallocation....
Definition: cuda_async_view_memory_resource.hpp:101
bool supports_get_mem_info() const noexcept override
Query whether the resource supports the get_mem_info API.
Definition: cuda_async_view_memory_resource.hpp:108
cuda_async_view_memory_resource & operator=(cuda_async_view_memory_resource &&)=default
Default move assignment operator.
cuda_async_view_memory_resource(cuda_async_view_memory_resource &&)=default
Default move constructor.
cuda_async_view_memory_resource(cuda_async_view_memory_resource const &)=default
Default copy constructor.
cuda_async_view_memory_resource & operator=(cuda_async_view_memory_resource const &)=default
Default copy assignment operator.
Base class for all libcudf device memory allocation.
Definition: device_memory_resource.hpp:89
cuda_device_id get_current_cuda_device()
Returns a cuda_device_id for the current device.
Definition: cuda_device.hpp:86