45 #ifndef KOKKOS_MEMORYPOOL_HPP 46 #define KOKKOS_MEMORYPOOL_HPP 48 #include <Kokkos_Core_fwd.hpp> 51 #include <impl/Kokkos_ConcurrentBitset.hpp> 52 #include <impl/Kokkos_Error.hpp> 53 #include <impl/Kokkos_SharedAlloc.hpp> 67 void memory_pool_bounds_verification(
size_t min_block_alloc_size,
68 size_t max_block_alloc_size,
69 size_t min_superblock_size,
70 size_t max_superblock_size,
71 size_t max_block_per_superblock,
72 size_t min_total_alloc_size);
/* Debugging aid: print a human-readable dump of a pool's superblock
 * state array to the given stream.  Declared here, defined out-of-line.
 * The leading underscore marks it as an implementation detail. */
void _print_memory_pool_state(std::ostream &s, uint32_t const *sb_state_ptr,
                              int32_t sb_count, uint32_t sb_size_lg2,
                              uint32_t sb_state_size, uint32_t state_shift,
                              uint32_t state_used_mask);
// ---------------------------------------------------------------------------
// MemoryPool<DeviceType> — pool allocator built on a lock-free concurrent
// bitset.  NOTE(review): this excerpt is garbled; the `class MemoryPool {`
// line introduced by this template header is not visible here and several
// interior lines are elided.  Code text below is kept byte-identical.
// ---------------------------------------------------------------------------
87 template <
typename DeviceType>
// Shorthand for the lock-free bitset that tracks per-block claim state.
90 using CB = Kokkos::Impl::concurrent_bitset;
// Bit-layout constants re-exported from the concurrent bitset.
92 enum : uint32_t { bits_per_int_lg2 = CB::bits_per_int_lg2 };
93 enum : uint32_t { state_shift = CB::state_shift };
94 enum : uint32_t { state_used_mask = CB::state_used_mask };
95 enum : uint32_t { state_header_mask = CB::state_header_mask };
96 enum : uint32_t { max_bit_count_lg2 = CB::max_bit_count_lg2 };
97 enum : uint32_t { max_bit_count = CB::max_bit_count };
// Two hint entries are stored per block size (used by allocate()'s
// superblock-hint table).
99 enum : uint32_t { HINT_PER_BLOCK_SIZE = 2 };
// Memory space that holds the pool's backing buffer.
121 using base_memory_space =
typename DeviceType::memory_space;
// NOTE(review): the declaration this `accessible` expression belongs to is
// truncated in this excerpt — presumably a host-accessibility trait check
// on base_memory_space; confirm against the full header.
125 base_memory_space>::accessible
// Shared-allocation bookkeeping types for the tracked backing buffer.
128 using Tracker = Kokkos::Impl::SharedAllocationTracker;
129 using Record = Kokkos::Impl::SharedAllocationRecord<base_memory_space>;
// Superblock state array plus pool geometry (sizes kept as log2 values).
132 uint32_t *m_sb_state_array;
133 uint32_t m_sb_state_size;
134 uint32_t m_sb_size_lg2;
135 uint32_t m_max_block_size_lg2;
136 uint32_t m_min_block_size_lg2;
// Offsets (in uint32_t units) within m_sb_state_array: start of the hint
// table and start of the data region.
138 int32_t m_hint_offset;
139 int32_t m_data_offset;
140 int32_t m_unused_padding;
// Public alias for the device's memory space.
143 using memory_space =
typename DeviceType::memory_space;
// Hard limits: a superblock is at most 2^31 bytes and holds at most
// max_bit_count blocks (the bitset's capacity).
146 enum : uint32_t { max_superblock_size = 1LU << 31 };
147 enum : uint32_t { max_block_per_superblock = max_bit_count };
151 KOKKOS_INLINE_FUNCTION
152 bool operator==(MemoryPool
const &other)
const {
153 return m_sb_state_array == other.m_sb_state_array;
156 KOKKOS_INLINE_FUNCTION
157 size_t capacity() const noexcept {
158 return size_t(m_sb_count) << m_sb_size_lg2;
161 KOKKOS_INLINE_FUNCTION
162 size_t min_block_size() const noexcept {
163 return (1LU << m_min_block_size_lg2);
166 KOKKOS_INLINE_FUNCTION
167 size_t max_block_size() const noexcept {
168 return (1LU << m_max_block_size_lg2);
/// Aggregate usage counters filled in by get_usage_statistics().
struct usage_statistics {
  size_t capacity_bytes;       ///< Total pool capacity in bytes
  size_t superblock_bytes;     ///< Size of one superblock in bytes
  size_t max_block_bytes;      ///< Largest allocatable block, in bytes
  size_t min_block_bytes;      ///< Smallest allocatable block, in bytes
  size_t capacity_superblocks; ///< Number of superblocks in the pool
  size_t consumed_superblocks; ///< Superblocks with an assigned block size
  size_t consumed_blocks;      ///< Blocks currently allocated
  size_t consumed_bytes;       ///< Bytes currently allocated
  size_t reserved_blocks;      ///< Free blocks inside assigned superblocks
  size_t reserved_bytes;       ///< Free bytes inside assigned superblocks
};
// Fill `stats` with a snapshot of pool usage.  Host-only: when the state
// array is not host-accessible it is staged through a host-side copy.
// NOTE(review): several original lines are elided in this excerpt (host
// buffer declaration, copy fence, loop close, host deallocation) — text
// below is kept byte-identical to the garbled source.
184 void get_usage_statistics(usage_statistics &stats)
const {
// Bytes of state to snapshot: everything up to the hint table.
187 const size_t alloc_size = m_hint_offset *
sizeof(uint32_t);
// Use the device array directly if host-accessible, else a host staging copy.
189 uint32_t *
const sb_state_array =
190 accessible ? m_sb_state_array : (uint32_t *)host.
allocate(alloc_size);
193 Kokkos::Impl::DeepCopy<Kokkos::HostSpace, base_memory_space>(
194 sb_state_array, m_sb_state_array, alloc_size);
// Static geometry is computed directly from the log2 members.
197 stats.superblock_bytes = (1LU << m_sb_size_lg2);
198 stats.max_block_bytes = (1LU << m_max_block_size_lg2);
199 stats.min_block_bytes = (1LU << m_min_block_size_lg2);
200 stats.capacity_bytes = stats.superblock_bytes * m_sb_count;
201 stats.capacity_superblocks = m_sb_count;
202 stats.consumed_superblocks = 0;
203 stats.consumed_blocks = 0;
204 stats.consumed_bytes = 0;
205 stats.reserved_blocks = 0;
206 stats.reserved_bytes = 0;
// Walk every superblock's state word and accumulate usage.
208 const uint32_t *sb_state_ptr = sb_state_array;
210 for (int32_t i = 0; i < m_sb_count; ++i, sb_state_ptr += m_sb_state_size) {
// High bits hold log2(block count); zero means the superblock is unassigned.
211 const uint32_t block_count_lg2 = (*sb_state_ptr) >> state_shift;
213 if (block_count_lg2) {
214 const uint32_t block_count = 1u << block_count_lg2;
215 const uint32_t block_size_lg2 = m_sb_size_lg2 - block_count_lg2;
216 const uint32_t block_size = 1u << block_size_lg2;
// Low bits count blocks currently claimed in this superblock.
217 const uint32_t block_used = (*sb_state_ptr) & state_used_mask;
219 stats.consumed_superblocks++;
220 stats.consumed_blocks += block_used;
221 stats.consumed_bytes += block_used * block_size;
// "Reserved" = blocks sized but not yet handed out.
222 stats.reserved_blocks += block_count - block_used;
223 stats.reserved_bytes += (block_count - block_used) * block_size;
// Dump the pool's superblock state to `s` via _print_memory_pool_state.
// Host-only; stages the state array through host memory when the device
// array is not host-accessible.  NOTE(review): the trailing lines of this
// method (final argument, host deallocation, close brace) are elided in
// this excerpt.
232 void print_state(std::ostream &s)
const {
235 const size_t alloc_size = m_hint_offset *
sizeof(uint32_t);
// Use the device array directly if host-accessible, else a staging copy.
237 uint32_t *
const sb_state_array =
238 accessible ? m_sb_state_array : (uint32_t *)host.
allocate(alloc_size);
241 Kokkos::Impl::DeepCopy<Kokkos::HostSpace, base_memory_space>(
242 sb_state_array, m_sb_state_array, alloc_size);
245 Impl::_print_memory_pool_state(s, sb_state_array, m_sb_count, m_sb_size_lg2,
246 m_sb_state_size, state_shift,
256 KOKKOS_DEFAULTED_FUNCTION MemoryPool(MemoryPool &&) =
default;
257 KOKKOS_DEFAULTED_FUNCTION MemoryPool(
const MemoryPool &) =
default;
258 KOKKOS_DEFAULTED_FUNCTION MemoryPool &operator=(MemoryPool &&) =
default;
259 KOKKOS_DEFAULTED_FUNCTION MemoryPool &operator=(
const MemoryPool &) =
default;
// Default constructor: an empty, unusable pool (null state array, zero
// geometry).  NOTE(review): several mem-initializers between the visible
// ones are elided in this excerpt (and the init-list colon itself);
// confirm against the full header.
261 KOKKOS_INLINE_FUNCTION MemoryPool()
263 m_sb_state_array(nullptr),
266 m_max_block_size_lg2(0),
267 m_min_block_size_lg2(0),
271 m_unused_padding(0) {}
// Construct a pool of at least `min_total_alloc_size` bytes in `memspace`.
// Zero-valued size arguments are replaced by defaults derived from the
// other arguments.  NOTE(review): many original lines are elided in this
// excerpt (other mem-initializers, m_sb_size_lg2 assignment and clamping,
// m_sb_state_size assignment, loop closes, host-buffer declaration and
// deallocation); text below is kept byte-identical.
287 MemoryPool(
const base_memory_space &memspace,
288 const size_t min_total_alloc_size,
size_t min_block_alloc_size = 0,
289 size_t max_block_alloc_size = 0,
size_t min_superblock_size = 0)
291 m_sb_state_array(nullptr),
294 m_max_block_size_lg2(0),
295 m_min_block_size_lg2(0),
299 m_unused_padding(0) {
// All state-array offsets are aligned to 8 uint32_t entries.
300 const uint32_t int_align_lg2 = 3;
301 const uint32_t int_align_mask = (1u << int_align_lg2) - 1;
// Defaults: 64-byte min block, 4 KiB max block, 1 MiB superblock.
302 const uint32_t default_min_block_size = 1u << 6;
303 const uint32_t default_max_block_size = 1u << 12;
304 const uint32_t default_min_superblock_size = 1u << 20;
// Derive unspecified sizes, each clamped by the total pool size.
309 if (0 == min_block_alloc_size) {
312 min_superblock_size =
313 std::min(
size_t(default_min_superblock_size), min_total_alloc_size);
315 min_block_alloc_size =
316 std::min(
size_t(default_min_block_size), min_superblock_size);
318 max_block_alloc_size =
319 std::min(
size_t(default_max_block_size), min_superblock_size);
320 }
else if (0 == min_superblock_size) {
// A superblock can hold at most max_block_per_superblock min-size blocks.
326 const size_t max_superblock =
327 min_block_alloc_size * max_block_per_superblock;
329 min_superblock_size =
330 std::min(max_superblock,
331 std::min(
size_t(max_superblock_size), min_total_alloc_size));
334 if (0 == max_block_alloc_size) {
335 max_block_alloc_size = min_superblock_size;
// Abort (host-side) if the derived bounds are inconsistent.
349 Kokkos::Impl::memory_pool_bounds_verification(
350 min_block_alloc_size, max_block_alloc_size, min_superblock_size,
351 max_superblock_size, max_block_per_superblock, min_total_alloc_size);
// Round each size up to a power of two, kept as a log2 value.
357 m_min_block_size_lg2 =
358 Kokkos::Impl::integral_power_of_two_that_contains(min_block_alloc_size);
360 m_max_block_size_lg2 =
361 Kokkos::Impl::integral_power_of_two_that_contains(max_block_alloc_size);
// NOTE(review): assignment target of this expression (m_sb_size_lg2) is on
// an elided line.
364 Kokkos::Impl::integral_power_of_two_that_contains(min_superblock_size);
// Number of superblocks needed to cover the requested total size.
370 const uint64_t sb_size_mask = (1LU << m_sb_size_lg2) - 1;
372 m_sb_count = (min_total_alloc_size + sb_size_mask) >> m_sb_size_lg2;
// Per-superblock state size from the bitset's buffer bound, 8-int aligned.
379 const uint32_t max_block_count_lg2 = m_sb_size_lg2 - m_min_block_size_lg2;
382 (CB::buffer_bound_lg2(max_block_count_lg2) + int_align_mask) &
388 const size_t all_sb_state_size =
389 (m_sb_count * m_sb_state_size + int_align_mask) & ~int_align_mask;
// One hint pair per supported block size.
393 const int32_t number_block_sizes =
394 1 + m_max_block_size_lg2 - m_min_block_size_lg2;
399 const int32_t block_size_array_size =
400 (number_block_sizes + int_align_mask) & ~int_align_mask;
// Layout: [superblock states][hint table][data region].
402 m_hint_offset = all_sb_state_size;
403 m_data_offset = m_hint_offset + block_size_array_size * HINT_PER_BLOCK_SIZE;
// One allocation holds the header (state + hints) plus all superblocks.
407 const size_t header_size = m_data_offset *
sizeof(uint32_t);
408 const size_t alloc_size =
409 header_size + (size_t(m_sb_count) << m_sb_size_lg2);
411 Record *rec = Record::allocate(memspace,
"Kokkos::MemoryPool", alloc_size);
413 m_tracker.assign_allocated_record_to_uninitialized(rec);
415 m_sb_state_array = (uint32_t *)rec->data();
// Initialize the header on the host (staged if not host-accessible).
419 uint32_t *
const sb_state_array =
420 accessible ? m_sb_state_array : (uint32_t *)host.
allocate(header_size);
422 for (int32_t i = 0; i < m_data_offset; ++i) sb_state_array[i] = 0;
// Pre-assign each block size a contiguous range of superblocks and seed
// its hint entries with the first superblock of that range.
426 for (int32_t i = 0; i < number_block_sizes; ++i) {
427 const uint32_t block_size_lg2 = i + m_min_block_size_lg2;
428 const uint32_t block_count_lg2 = m_sb_size_lg2 - block_size_lg2;
429 const uint32_t block_state = block_count_lg2 << state_shift;
430 const uint32_t hint_begin = m_hint_offset + i * HINT_PER_BLOCK_SIZE;
436 const int32_t jbeg = (i * m_sb_count) / number_block_sizes;
437 const int32_t jend = ((i + 1) * m_sb_count) / number_block_sizes;
439 sb_state_array[hint_begin] = uint32_t(jbeg);
440 sb_state_array[hint_begin + 1] = uint32_t(jbeg);
442 for (int32_t j = jbeg; j < jend; ++j) {
443 sb_state_array[j * m_sb_state_size] = block_state;
// Publish the initialized header to the device and fence.
450 Kokkos::Impl::DeepCopy<base_memory_space, Kokkos::HostSpace>(
451 m_sb_state_array, sb_state_array, header_size);
455 Kokkos::memory_fence();
465 KOKKOS_FORCEINLINE_FUNCTION
466 uint32_t get_block_size_lg2(uint32_t n)
const noexcept {
467 const unsigned i = Kokkos::Impl::integral_power_of_two_that_contains(n);
469 return i < m_min_block_size_lg2 ? m_min_block_size_lg2 : i;
// Block size (bytes) that a request of `alloc_size` would consume.
// NOTE(review): the false branch of the conditional (value returned when
// the request exceeds the maximum block size) and the closing brace are
// elided in this excerpt.
474 KOKKOS_INLINE_FUNCTION
475 uint32_t allocate_block_size(uint64_t alloc_size)
const noexcept {
476 return alloc_size <= (1UL << m_max_block_size_lg2)
477 ? (1UL << get_block_size_lg2(uint32_t(alloc_size)))
// Allocate `alloc_size` bytes, retrying up to `attempt_limit` times.
// Lock-free: claims a bit in a superblock's concurrent bitset, searching
// and CAS-claiming superblocks when the hinted one is full.  NOTE(review):
// many original lines are elided in this excerpt (abort call body, hint
// pointer arithmetic, bitset acquire call, several closes and the final
// return); text below is kept byte-identical — do not rewrite.
492 void *allocate(
size_t alloc_size, int32_t attempt_limit = 1) const noexcept {
// Reject requests larger than the configured maximum block size.
493 if (
size_t(1LU << m_max_block_size_lg2) < alloc_size) {
495 "Kokkos MemoryPool allocation request exceeded specified maximum " 499 if (0 == alloc_size)
return nullptr;
// Geometry of the block size class serving this request.
503 const uint32_t block_size_lg2 = get_block_size_lg2(alloc_size);
508 const uint32_t block_count_lg2 = m_sb_size_lg2 - block_size_lg2;
509 const uint32_t block_state = block_count_lg2 << state_shift;
510 const uint32_t block_count = 1u << block_count_lg2;
// Hint entry pair for this size class: [0] = active superblock id,
// [1] = start of this class's superblock range.
516 volatile uint32_t *
const hint_sb_id_ptr =
519 + HINT_PER_BLOCK_SIZE
520 * (block_size_lg2 - m_min_block_size_lg2);
522 const int32_t sb_id_begin = int32_t(hint_sb_id_ptr[1]);
// Starting bit hint to spread contention between callers.
527 #if defined(KOKKOS_ENABLE_SYCL) && !defined(KOKKOS_ARCH_INTEL_GEN) 528 const uint32_t block_id_hint = alloc_size;
530 const uint32_t block_id_hint =
531 (uint32_t)(Kokkos::Impl::clock_tic()
532 #if defined(KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_CUDA) 535 + (threadIdx.x + blockDim.x * threadIdx.y)
// Expected header for a superblock serving this size class.
541 uint32_t sb_state = block_state;
545 volatile uint32_t *sb_state_array =
nullptr;
// Retry loop: each iteration tries the hinted superblock, then scans.
547 while (attempt_limit) {
548 int32_t hint_sb_id = -1;
553 sb_id = hint_sb_id = int32_t(*hint_sb_id_ptr);
555 sb_state_array = m_sb_state_array + (sb_id * m_sb_state_size);
// Only attempt a claim if the superblock still serves this size class.
562 if (sb_state == (state_header_mask & *sb_state_array)) {
567 const uint32_t count_lg2 = sb_state >> state_shift;
568 const uint32_t mask = (1u << count_lg2) - 1;
// (elided) CB::acquire_bounded_lg2(...) claims a bit near the hint:
571 sb_state_array, count_lg2, block_id_hint & mask, sb_state);
// result.first >= 0 means a block bit was claimed.
578 if (0 <= result.
first) {
580 const uint32_t size_lg2 = m_sb_size_lg2 - count_lg2;
// Pointer = data region + superblock offset + block offset.
584 p = ((
char *)(m_sb_state_array + m_data_offset)) +
585 (uint64_t(sb_id) << m_sb_size_lg2)
586 + (uint64_t(result.
first) << size_lg2);
// (elided) verbose-debug trace of the successful claim.
589 printf(
" MemoryPool(0x%lx) pointer(0x%lx) allocate(%lu) sb_id(%d) sb_state(0x%x) block_size(%d) block_capacity(%d) block_id(%d) block_claimed(%d)\n" 590 , (uintptr_t)m_sb_state_array
// Claim failed or superblock changed class: scan for a replacement.
614 sb_state = block_state;
617 bool update_hint =
false;
618 int32_t sb_id_empty = -1;
619 int32_t sb_id_large = -1;
620 uint32_t sb_state_large = 0;
622 sb_state_array = m_sb_state_array + sb_id_begin * m_sb_state_size;
// Circular scan over all superblocks starting at this class's range.
624 for (int32_t i = 0,
id = sb_id_begin; i < m_sb_count; ++i) {
629 const uint32_t full_state = *sb_state_array;
630 const uint32_t used = full_state & state_used_mask;
631 const uint32_t state = full_state & state_header_mask;
// Preferred: a same-class superblock with free blocks.
633 if (state == block_state) {
636 if (used < block_count) {
643 update_hint = used + 1 < block_count;
// Fallback 1: remember the first completely empty superblock.
647 }
else if (0 == used) {
650 if (-1 == sb_id_empty) {
// Fallback 2: a partially-used superblock of larger block size.
657 }
else if ((-1 == sb_id_empty ) &&
658 (-1 == sb_id_large ) &&
659 (state < block_state ) &&
661 (used < (1u << (state >> state_shift)))) {
667 sb_state_large = state;
// Advance circularly through the superblock array.
672 if (++
id < m_sb_count) {
673 sb_state_array += m_sb_state_size;
676 sb_state_array = m_sb_state_array;
// Try to claim an empty superblock for this size class via CAS.
686 if (0 <= sb_id_empty) {
695 sb_state_array = m_sb_state_array + (sb_id * m_sb_state_size);
700 const uint32_t state_empty = state_header_mask & *sb_state_array;
// (elided) update_hint / attempt accounting depends on CAS success:
704 state_empty == Kokkos::atomic_compare_exchange(
705 sb_state_array, state_empty, block_state);
706 }
else if (0 <= sb_id_large) {
// Settle for the larger-block superblock found during the scan.
710 sb_state = sb_state_large;
712 sb_state_array = m_sb_state_array + (sb_id * m_sb_state_size);
// Publish the newly chosen superblock id in the hint entry.
720 Kokkos::atomic_compare_exchange(hint_sb_id_ptr, uint32_t(hint_sb_id),
// Return block `p` to the pool (second argument — the allocation size —
// is unused).  Validates containment, alignment, and single release;
// aborts on an erroneous pointer.  NOTE(review): several original lines
// are elided in this excerpt (the declaration of `d`, the `bit` index,
// printf arguments, closing braces); text kept byte-identical.
737 KOKKOS_INLINE_FUNCTION
738 void deallocate(
void *p,
size_t )
const noexcept {
// Null pointer deallocation is a no-op.
739 if (
nullptr == p)
return;
// (elided) ptrdiff_t d = byte offset of p within the data region:
743 ((
char *)p) - ((
char *)(m_sb_state_array + m_data_offset));
// Pointer must land inside the pool's data region.
746 const int ok_contains =
747 (0 <= d) && (
size_t(d) < (size_t(m_sb_count) << m_sb_size_lg2));
749 int ok_block_aligned = 0;
750 int ok_dealloc_once = 0;
// Superblock owning the pointer, and its current block geometry.
753 const int sb_id = d >> m_sb_size_lg2;
756 volatile uint32_t *
const sb_state_array =
757 m_sb_state_array + (sb_id * m_sb_state_size);
759 const uint32_t block_state = (*sb_state_array) & state_header_mask;
760 const uint32_t block_size_lg2 =
761 m_sb_size_lg2 - (block_state >> state_shift);
// Pointer must sit on a block boundary for this superblock's block size.
763 ok_block_aligned = 0 == (d & ((1UL << block_size_lg2) - 1));
765 if (ok_block_aligned) {
// (elided) bit index = block offset within the superblock:
770 (d & (ptrdiff_t(1LU << m_sb_size_lg2) - 1)) >> block_size_lg2;
// Release the bit; a negative result means it was not claimed.
772 const int result = CB::release(sb_state_array, bit, block_state);
774 ok_dealloc_once = 0 <= result;
// (elided) verbose-debug trace of the release.
777 printf(
" MemoryPool(0x%lx) pointer(0x%lx) deallocate sb_id(%d) block_size(%d) block_capacity(%d) block_id(%d) block_claimed(%d)\n" 778 , (uintptr_t)m_sb_state_array
781 , (1u << block_size_lg2)
782 , (1u << (m_sb_size_lg2 - block_size_lg2))
// Any failed check is a fatal caller error.
789 if (!ok_contains || !ok_block_aligned || !ok_dealloc_once) {
791 printf(
" MemoryPool(0x%lx) pointer(0x%lx) deallocate ok_contains(%d) ok_block_aligned(%d) ok_dealloc_once(%d)\n" 792 , (uintptr_t)m_sb_state_array
795 ,
int(ok_block_aligned)
796 ,
int(ok_dealloc_once) );
798 Kokkos::abort(
"Kokkos MemoryPool::deallocate given erroneous pointer");
804 KOKKOS_INLINE_FUNCTION
805 int number_of_superblocks() const noexcept {
return m_sb_count; }
// Report superblock `sb_id`'s current block size, capacity, and use count
// through the output references.  NOTE(review): the `if` condition is
// partially elided in this excerpt — it appears to guard on the state
// array being accessible from the active execution memory space, leaving
// the outputs zeroed otherwise; block_size's zeroing line is also elided.
807 KOKKOS_INLINE_FUNCTION
808 void superblock_state(
int sb_id,
int &block_size,
int &block_count_capacity,
809 int &block_count_used)
const noexcept {
// Defaults when the state array cannot be read from this execution space.
811 block_count_capacity = 0;
812 block_count_used = 0;
815 Kokkos::Impl::ActiveExecutionMemorySpace,
816 base_memory_space>::accessible) {
// Volatile read of the superblock's packed state word.
819 const uint32_t state =
820 ((uint32_t
volatile *)m_sb_state_array)[sb_id * m_sb_state_size];
// High bits: log2(block count); low bits: blocks currently claimed.
822 const uint32_t block_count_lg2 = state >> state_shift;
823 const uint32_t block_used = state & state_used_mask;
825 block_size = 1LU << (m_sb_size_lg2 - block_count_lg2);
826 block_count_capacity = 1LU << block_count_lg2;
827 block_count_used = block_used;
void * allocate(const size_t arg_alloc_size) const
Allocate untracked memory in the space.
Replacement for std::pair that works on CUDA devices.
first_type first
The first element of the pair.
Memory management for host memory.
Declaration of parallel operators.
void deallocate(void *const arg_alloc_ptr, const size_t arg_alloc_size) const
Deallocate untracked memory in the space.
second_type second
The second element of the pair.
Access relationship between DstMemorySpace and SrcMemorySpace.