RMM  23.12
RAPIDS Memory Manager
binning_memory_resource.hpp
/*
 * Copyright (c) 2020-2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <rmm/detail/aligned.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/fixed_size_memory_resource.hpp>

#include <cuda_runtime_api.h>

#include <algorithm>
#include <cassert>
#include <map>
#include <memory>
#include <vector>

namespace rmm::mr {

/**
 * @brief Allocates memory from upstream resources associated with bin sizes.
 *
 * Allocation requests are satisfied by the bin with the smallest size that is at least as large
 * as the requested size; requests larger than the largest bin fall back to the upstream resource.
 *
 * @tparam Upstream Type of the upstream memory resource used for the bins and for fallback
 * allocations.
 */
template <typename Upstream>
class binning_memory_resource final : public device_memory_resource {
 public:
  /**
   * @brief Construct a new binning memory resource object.
   *
   * The resource initially has no bins; add bins with `add_bin()`.
   *
   * @throws rmm::logic_error if `upstream_resource == nullptr`
   *
   * @param upstream_resource The upstream memory resource used when no bin fits a request.
   */
  explicit binning_memory_resource(Upstream* upstream_resource)
    : upstream_mr_{[upstream_resource]() {
        RMM_EXPECTS(nullptr != upstream_resource, "Unexpected null upstream pointer.");
        return upstream_resource;
      }()}
  {
  }

  /**
   * @brief Construct a new binning memory resource object with a range of initial bins.
   *
   * Adds a bin for every power of two from `2^min_size_exponent` to `2^max_size_exponent`,
   * inclusive.
   *
   * @throws rmm::logic_error if `upstream_resource == nullptr`
   *
   * @param upstream_resource The upstream memory resource used to create the bins.
   * @param min_size_exponent The base-2 exponent of the minimum bin size.
   * @param max_size_exponent The base-2 exponent of the maximum bin size.
   */
  binning_memory_resource(Upstream* upstream_resource,
                          int8_t min_size_exponent,  // NOLINT(bugprone-easily-swappable-parameters)
                          int8_t max_size_exponent)
    : upstream_mr_{[upstream_resource]() {
        RMM_EXPECTS(nullptr != upstream_resource, "Unexpected null upstream pointer.");
        return upstream_resource;
      }()}
  {
    for (auto i = min_size_exponent; i <= max_size_exponent; i++) {
      add_bin(std::size_t{1} << i);  // widen to std::size_t to avoid int overflow for exponents >= 31
    }
  }

  /**
   * @brief Destroy the binning_memory_resource and free all memory allocated from the upstream
   * resource.
   */
  ~binning_memory_resource() override = default;

  binning_memory_resource()                                          = delete;
  binning_memory_resource(binning_memory_resource const&)            = delete;
  binning_memory_resource(binning_memory_resource&&)                 = delete;
  binning_memory_resource& operator=(binning_memory_resource const&) = delete;
  binning_memory_resource& operator=(binning_memory_resource&&)      = delete;

  /**
   * @brief Query whether the resource supports use of non-null streams for
   * allocation/deallocation.
   *
   * @return true
   */
  [[nodiscard]] bool supports_streams() const noexcept override { return true; }

  /**
   * @brief Query whether the resource supports the get_mem_info API.
   *
   * @return false
   */
  [[nodiscard]] bool supports_get_mem_info() const noexcept override { return false; }

  /**
   * @brief Get the upstream memory_resource object.
   *
   * @return Upstream* pointer to the upstream memory resource.
   */
  [[nodiscard]] Upstream* get_upstream() const noexcept { return upstream_mr_; }

  /**
   * @brief Add a bin allocator to this resource.
   *
   * The bin size is rounded up to the CUDA allocation alignment. If `bin_resource` is provided it
   * handles allocations for this bin; otherwise a fixed_size_memory_resource of that size is
   * created on the upstream resource and owned by this resource. Does nothing if a bin of this
   * size already exists.
   *
   * @param allocation_size The maximum size that this bin allocates.
   * @param bin_resource Optional resource to use for this bin.
   */
  void add_bin(std::size_t allocation_size, device_memory_resource* bin_resource = nullptr)
  {
    allocation_size =
      rmm::detail::align_up(allocation_size, rmm::detail::CUDA_ALLOCATION_ALIGNMENT);

    if (nullptr != bin_resource) {
      resource_bins_.insert({allocation_size, bin_resource});
    } else if (resource_bins_.count(allocation_size) == 0) {  // do nothing if bin already exists
      owned_bin_resources_.push_back(
        std::make_unique<fixed_size_memory_resource<Upstream>>(upstream_mr_, allocation_size));
      resource_bins_.insert({allocation_size, owned_bin_resources_.back().get()});
    }
  }

 private:
  /**
   * @brief Get the resource to use for an allocation of the given size.
   *
   * Selects the bin with the smallest allocation size that is at least `bytes`; if no such bin
   * exists, returns the upstream resource.
   *
   * @param bytes Requested allocation size in bytes.
   * @return device_memory_resource* The resource to allocate from.
   */
  device_memory_resource* get_resource(std::size_t bytes)
  {
    auto iter = resource_bins_.lower_bound(bytes);
    return (iter != resource_bins_.cend()) ? iter->second
                                           : static_cast<device_memory_resource*>(get_upstream());
  }

  /**
   * @brief Allocates memory of size at least `bytes` on `stream`, using the bin resource selected
   * by `get_resource`.
   *
   * @param bytes The size of the allocation in bytes.
   * @param stream Stream on which to perform the allocation.
   * @return void* Pointer to the newly allocated memory.
   */
  void* do_allocate(std::size_t bytes, cuda_stream_view stream) override
  {
    if (bytes == 0) { return nullptr; }  // bytes is unsigned, so only the zero case is special
    return get_resource(bytes)->allocate(bytes, stream);
  }

  /**
   * @brief Deallocates memory pointed to by `ptr` of size `bytes`, returning it to the bin
   * resource that an allocation of `bytes` would have come from.
   *
   * @param ptr Pointer to be deallocated.
   * @param bytes The size in bytes of the allocation.
   * @param stream Stream on which to perform the deallocation.
   */
  void do_deallocate(void* ptr, std::size_t bytes, cuda_stream_view stream) override
  {
    auto res = get_resource(bytes);
    if (res != nullptr) { res->deallocate(ptr, bytes, stream); }
  }

  /**
   * @brief Get free and available memory for the resource.
   *
   * Not supported by this resource; always returns `{0, 0}`.
   *
   * @param stream The stream (ignored).
   * @return std::pair of free and total memory, both reported as 0.
   */
  [[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(
    [[maybe_unused]] cuda_stream_view stream) const override
  {
    return std::make_pair(0, 0);
  }

  Upstream* upstream_mr_;  // The upstream memory_resource from which to allocate blocks.

  // Bin resources created and owned by this resource (via add_bin() without an explicit resource).
  std::vector<std::unique_ptr<fixed_size_memory_resource<Upstream>>> owned_bin_resources_;

  // Map of bin allocation size to the resource that handles allocations of that size.
  std::map<std::size_t, device_memory_resource*> resource_bins_;
};

}  // namespace rmm::mr
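
Not part of the header, but for orientation: a minimal usage sketch, assuming the companion RMM headers from the same release (cuda_memory_resource.hpp, device_buffer.hpp, cuda_stream_view.hpp). It wraps a cuda_memory_resource upstream with power-of-two bins from 2^18 (256 KiB) to 2^22 (4 MiB); requests larger than the largest bin fall through to the upstream resource.

#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/mr/device/binning_memory_resource.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>

int main()
{
  // Upstream resource that performs raw cudaMalloc/cudaFree allocations.
  rmm::mr::cuda_memory_resource upstream;

  // Bins for 256 KiB, 512 KiB, ..., 4 MiB allocations; anything larger goes upstream.
  rmm::mr::binning_memory_resource<rmm::mr::cuda_memory_resource> mr{&upstream, 18, 22};

  // Optionally add a custom-sized bin (rounded up to the CUDA allocation alignment).
  mr.add_bin(3u << 20);  // 3 MiB bin backed by an owned fixed_size_memory_resource

  // Allocate through the binning resource: a 1 MiB request is served by the 2^20 bin.
  rmm::device_buffer buf(1 << 20, rmm::cuda_stream_view{}, &mr);

  return 0;
}

Each owned bin is a fixed_size_memory_resource pool built on the upstream, so repeated allocations of similar sizes are served from reused blocks rather than individual upstream allocations.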