template <typename Key, typename Hash>
class SlabHashmap : public DeviceHashmap {
public:
    SlabHashmap(int64_t init_capacity,
                int64_t dsize_key,
                int64_t dsize_value,
                const Device& device);
    ~SlabHashmap();

    void Rehash(int64_t buckets) override;

    /// Parallel insert contiguous arrays of keys and values.
    void Insert(const void* input_keys,
                const void* input_values,
                addr_t* output_addrs,
                bool* output_masks,
                int64_t count) override;

    void Activate(const void* input_keys,
                  addr_t* output_addrs,
                  bool* output_masks,
                  int64_t count) override;

    /// Parallel find a contiguous array of keys.
    void Find(const void* input_keys,
              addr_t* output_addrs,
              bool* output_masks,
              int64_t count) override;

    /// Parallel erase a contiguous array of keys.
    void Erase(const void* input_keys, bool* output_masks, int64_t count) override;

    /// Parallel collect all iterators in the hash table.
    int64_t GetActiveIndices(addr_t* output_indices) override;

    /// Clear stored map without reallocating memory.
    void Clear() override;

    int64_t Size() const override;
    int64_t GetBucketCount() const override;
    std::vector<int64_t> BucketSizes() const override;
    float LoadFactor() const override;

    SlabHashmapImpl<Key, Hash> GetImpl() { return impl_; }

protected:
    SlabHashmapImpl<Key, Hash> impl_;

    CUDAHashmapBufferAccessor buffer_accessor_;
    std::shared_ptr<SlabNodeManager> node_mgr_;

    void InsertImpl(const void* input_keys,
                    const void* input_values,
                    addr_t* output_addrs,
                    bool* output_masks,
                    int64_t count);

    void Allocate(int64_t bucket_count, int64_t capacity);
    void Free();

    int64_t bucket_count_;
};
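// A hedged usage sketch (not part of the original header) of the host-side
// API above: keys and values live in device Tensors and are handed over as
// raw pointers, together with preallocated output index/mask buffers.
// `DeviceHash` is a hypothetical device-callable hash functor and `n` an
// illustrative batch size; dtypes mirror the ones used in Rehash() below.
//
//   Device device("CUDA:0");
//   SlabHashmap<int, DeviceHash<int>> hashmap(
//           /*init_capacity=*/1000, sizeof(int), sizeof(int), device);
//
//   Tensor keys({n}, Dtype::Int32, device);
//   Tensor values({n}, Dtype::Int32, device);
//   Tensor addrs({n}, Dtype::Int32, device);
//   Tensor masks({n}, Dtype::Bool, device);
//
//   hashmap.Insert(keys.GetDataPtr(), values.GetDataPtr(),
//                  static_cast<addr_t*>(addrs.GetDataPtr()),
//                  masks.GetDataPtr<bool>(), n);
//   hashmap.Find(keys.GetDataPtr(),
//                static_cast<addr_t*>(addrs.GetDataPtr()),
//                masks.GetDataPtr<bool>(), n);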
template <typename Key, typename Hash>
SlabHashmap<Key, Hash>::SlabHashmap(int64_t init_capacity,
                                    int64_t dsize_key,
                                    int64_t dsize_value,
                                    const Device& device)
    : DeviceHashmap(init_capacity, dsize_key, dsize_value, device) {
    // Heuristic: start with twice as many buckets as the initial capacity.
    int64_t init_buckets = init_capacity * 2;
    Allocate(init_buckets, init_capacity);
}
template <typename Key, typename Hash>
SlabHashmap<Key, Hash>::~SlabHashmap() {
    Free();
}
template <typename Key, typename Hash>
void SlabHashmap<Key, Hash>::Rehash(int64_t buckets) {
    int64_t iterator_count = Size();

    Tensor active_keys;
    Tensor active_values;
    if (iterator_count > 0) {
        // Extract all active entries so they can be re-inserted afterwards.
        Tensor active_addrs({iterator_count}, Dtype::Int32, this->device_);
        GetActiveIndices(static_cast<addr_t*>(active_addrs.GetDataPtr()));

        Tensor active_indices = active_addrs.To(Dtype::Int64);
        active_keys = this->buffer_->GetKeyBuffer().IndexGet({active_indices});
        active_values =
                this->buffer_->GetValueBuffer().IndexGet({active_indices});
    }

    // Reallocate, keeping the average capacity per bucket roughly constant.
    float avg_capacity_per_bucket =
            float(this->capacity_) / float(this->bucket_count_);
    Free();
    Allocate(buckets, int64_t(std::ceil(buckets * avg_capacity_per_bucket)));

    if (iterator_count > 0) {
        Tensor output_addrs({iterator_count}, Dtype::Int32, this->device_);
        Tensor output_masks({iterator_count}, Dtype::Bool, this->device_);
        InsertImpl(active_keys.GetDataPtr(), active_values.GetDataPtr(),
                   static_cast<addr_t*>(output_addrs.GetDataPtr()),
                   output_masks.GetDataPtr<bool>(), iterator_count);
    }
}
template <typename Key, typename Hash>
void SlabHashmap<Key, Hash>::Insert(const void* input_keys,
                                    const void* input_values,
                                    addr_t* output_addrs,
                                    bool* output_masks,
                                    int64_t count) {
    int64_t new_size = Size() + count;
    if (new_size > this->capacity_) {
        // Grow the table first so the batch is guaranteed to fit.
        float avg_capacity_per_bucket =
                float(this->capacity_) / float(this->bucket_count_);
        int64_t expected_buckets = std::max(
                bucket_count_ * 2,
                int64_t(std::ceil(new_size / avg_capacity_per_bucket)));
        Rehash(expected_buckets);
    }
    InsertImpl(input_keys, input_values, output_addrs, output_masks, count);
}
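// Illustrative arithmetic for the growth policy above (made-up numbers):
// with capacity_ = 1000 and bucket_count_ = 500, avg_capacity_per_bucket = 2.
// Inserting count = 1500 keys while Size() = 600 gives new_size = 2100 >
// capacity_, so expected_buckets = max(2 * 500, ceil(2100 / 2)) = 1050 and
// Rehash(1050) runs before the actual insertion.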
template <typename Key, typename Hash>
void SlabHashmap<Key, Hash>::Activate(const void* input_keys,
                                      addr_t* output_addrs,
                                      bool* output_masks,
                                      int64_t count) {
    // Activation is an insertion that does not copy any values.
    Insert(input_keys, nullptr, output_addrs, output_masks, count);
}
template <typename Key, typename Hash>
void SlabHashmap<Key, Hash>::Find(const void* input_keys,
                                  addr_t* output_addrs,
                                  bool* output_masks,
                                  int64_t count) {
    if (count == 0) return;

    const int64_t num_blocks =
            (count + kThreadsPerBlock - 1) / kThreadsPerBlock;
    FindKernel<<<num_blocks, kThreadsPerBlock>>>(
            impl_, input_keys, output_addrs, output_masks, count);
    OPEN3D_CUDA_CHECK(cudaDeviceSynchronize());
    OPEN3D_CUDA_CHECK(cudaGetLastError());
}
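// Grid-sizing note for the kernel launches in this file: the expression
// (count + kThreadsPerBlock - 1) / kThreadsPerBlock is a ceiling division,
// so every input element is covered by a thread. For example, assuming
// kThreadsPerBlock were 128, count = 1000 would launch (1000 + 127) / 128 = 8
// blocks, i.e. 1024 threads for 1000 keys.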
template <typename Key, typename Hash>
void SlabHashmap<Key, Hash>::Erase(const void* input_keys,
                                   bool* output_masks,
                                   int64_t count) {
    if (count == 0) return;

    OPEN3D_CUDA_CHECK(cudaMemset(output_masks, 0, sizeof(bool) * count));
    auto iterator_addrs = static_cast<addr_t*>(
            MemoryManager::Malloc(sizeof(addr_t) * count, this->device_));

    // Pass 0 locates the entries to erase; pass 1 releases their buffer slots.
    const int64_t num_blocks =
            (count + kThreadsPerBlock - 1) / kThreadsPerBlock;
    EraseKernelPass0<<<num_blocks, kThreadsPerBlock>>>(
            impl_, input_keys, iterator_addrs, output_masks, count);
    EraseKernelPass1<<<num_blocks, kThreadsPerBlock>>>(impl_, iterator_addrs,
                                                       output_masks, count);
    OPEN3D_CUDA_CHECK(cudaDeviceSynchronize());
    OPEN3D_CUDA_CHECK(cudaGetLastError());

    MemoryManager::Free(iterator_addrs, this->device_);
}
template <typename Key, typename Hash>
int64_t SlabHashmap<Key, Hash>::GetActiveIndices(addr_t* output_addrs) {
    uint32_t* iterator_count = static_cast<uint32_t*>(
            MemoryManager::Malloc(sizeof(uint32_t), this->device_));
    OPEN3D_CUDA_CHECK(cudaMemset(iterator_count, 0, sizeof(uint32_t)));

    // One warp per bucket traverses the slab lists and appends active addrs.
    const int64_t num_blocks =
            (impl_.bucket_count_ * kWarpSize + kThreadsPerBlock - 1) /
            kThreadsPerBlock;
    GetActiveIndicesKernel<<<num_blocks, kThreadsPerBlock>>>(
            impl_, output_addrs, iterator_count);

    uint32_t ret;
    MemoryManager::MemcpyToHost(&ret, iterator_count, this->device_,
                                sizeof(uint32_t));
    MemoryManager::Free(iterator_count, this->device_);
    return static_cast<int64_t>(ret);
}
template <typename Key, typename Hash>
void SlabHashmap<Key, Hash>::Clear() {
    // Reset the bucket linked-list heads without releasing memory.
    OPEN3D_CUDA_CHECK(cudaMemset(impl_.bucket_list_head_, 0xFF,
                                 sizeof(Slab) * this->bucket_count_));

    buffer_accessor_.Reset(this->device_);
    node_mgr_->Reset();
}
template <typename Key, typename Hash>
int64_t SlabHashmap<Key, Hash>::Size() const {
    return buffer_accessor_.HeapCounter(this->device_);
}
template <typename Key, typename Hash>
int64_t SlabHashmap<Key, Hash>::GetBucketCount() const {
    return bucket_count_;
}
template <typename Key, typename Hash>
std::vector<int64_t> SlabHashmap<Key, Hash>::BucketSizes() const {
    thrust::device_vector<int64_t> elems_per_bucket(impl_.bucket_count_);
    thrust::fill(elems_per_bucket.begin(), elems_per_bucket.end(), 0);

    const int64_t num_blocks =
            (impl_.capacity_ + kThreadsPerBlock - 1) / kThreadsPerBlock;
    CountElemsPerBucketKernel<<<num_blocks, kThreadsPerBlock>>>(
            impl_, thrust::raw_pointer_cast(elems_per_bucket.data()));
    OPEN3D_CUDA_CHECK(cudaDeviceSynchronize());
    OPEN3D_CUDA_CHECK(cudaGetLastError());

    std::vector<int64_t> result(impl_.bucket_count_);
    thrust::copy(elems_per_bucket.begin(), elems_per_bucket.end(),
                 result.begin());
    return result;
}
template <typename Key, typename Hash>
float SlabHashmap<Key, Hash>::LoadFactor() const {
    return float(Size()) / float(this->bucket_count_);
}
template <typename Key, typename Hash>
void SlabHashmap<Key, Hash>::InsertImpl(const void* input_keys,
                                        const void* input_values,
                                        addr_t* output_addrs,
                                        bool* output_masks,
                                        int64_t count) {
    if (count == 0) return;

    // Reserve `count` slots from the buffer heap with a single host-side
    // bump of the shared heap counter.
    int prev_heap_counter =
            *thrust::device_ptr<int>(impl_.buffer_accessor_.heap_counter_);
    *thrust::device_ptr<int>(impl_.buffer_accessor_.heap_counter_) =
            prev_heap_counter + count;

    const int64_t num_blocks =
            (count + kThreadsPerBlock - 1) / kThreadsPerBlock;
    InsertKernelPass0<<<num_blocks, kThreadsPerBlock>>>(
            impl_, input_keys, output_addrs, prev_heap_counter, count);
    InsertKernelPass1<<<num_blocks, kThreadsPerBlock>>>(
            impl_, input_keys, output_addrs, output_masks, count);
    InsertKernelPass2<<<num_blocks, kThreadsPerBlock>>>(
            impl_, input_values, output_addrs, output_masks, count);
    OPEN3D_CUDA_CHECK(cudaDeviceSynchronize());
    OPEN3D_CUDA_CHECK(cudaGetLastError());
}
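// Sketch of the batched-insert protocol above, inferred from the kernel names
// and launch arguments (the kernel bodies live in SlabHashmapImpl.h):
//   Pass0 hands each input key one of the `count` heap slots reserved by the
//         host-side counter bump,
//   Pass1 performs the warp-cooperative slab probing and sets
//         output_masks[i] = false for keys that already exist, and
//   Pass2 copies input_values only for the entries whose mask is true.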
template <typename Key, typename Hash>
void SlabHashmap<Key, Hash>::Allocate(int64_t bucket_count, int64_t capacity) {
    this->bucket_count_ = bucket_count;
    this->capacity_ = capacity;

    // Allocate the flat key/value buffer and bind it to the buffer accessor.
    this->buffer_ = std::make_shared<HashmapBuffer>(
            this->capacity_, this->dsize_key_, this->dsize_value_, this->device_);
    buffer_accessor_.HostAllocate(this->device_);
    buffer_accessor_.Setup(this->capacity_, this->dsize_key_, this->dsize_value_,
                           this->buffer_->GetKeyBuffer(),
                           this->buffer_->GetValueBuffer(), this->buffer_->GetHeap());
    buffer_accessor_.Reset(this->device_);

    // Allocate the slab node manager and the bucket linked-list heads.
    node_mgr_ = std::make_shared<SlabNodeManager>(this->device_);
    impl_.bucket_list_head_ = static_cast<Slab*>(MemoryManager::Malloc(
            sizeof(Slab) * this->bucket_count_, this->device_));
    OPEN3D_CUDA_CHECK(cudaMemset(impl_.bucket_list_head_, 0xFF,
                                 sizeof(Slab) * this->bucket_count_));

    impl_.Setup(this->bucket_count_, this->capacity_, this->dsize_key_,
                this->dsize_value_, node_mgr_->impl_, buffer_accessor_);
}
template <typename Key, typename Hash>
void SlabHashmap<Key, Hash>::Free() {
    buffer_accessor_.HostFree(this->device_);
    MemoryManager::Free(impl_.bucket_list_head_, this->device_);
}