#include <tbb/parallel_for.h>
#include <tbb/parallel_sort.h>

template <class T, int NDIM, class OUTPUT_ALLOCATOR>
void VoxelizeCPU(const size_t num_points,
                 const T* const points,
                 const size_t batch_size,
                 const int64_t* const row_splits,
                 const T* const voxel_size,
                 const T* const points_range_min,
                 const T* const points_range_max,
                 const int64_t max_points_per_voxel,
                 const int64_t max_voxels,
                 OUTPUT_ALLOCATOR& output_allocator) {
    typedef MiniVec<T, NDIM> Vec_t;

    const Vec_t inv_voxel_size = T(1) / Vec_t(voxel_size);
    const Vec_t points_range_min_vec(points_range_min);
    const Vec_t points_range_max_vec(points_range_max);

    // number of voxels along each dimension and the corresponding strides
    MiniVec<int32_t, NDIM> extents =
            ceil((points_range_max_vec - points_range_min_vec) *
                 inv_voxel_size)
                    .template cast<int32_t>();
    MiniVec<int64_t, NDIM> strides;
    for (int i = 0; i < NDIM; ++i) {
        strides[i] = 1;
        for (int j = 0; j < i; ++j) {
            strides[i] *= extents[j];
        }
    }
    const int64_t batch_hash = strides[NDIM - 1] * extents[NDIM - 1];
    const int64_t invalid_hash = batch_hash * batch_size;

    // batch index for each point
    std::vector<int64_t> indices_batches(num_points, 0);
    tbb::parallel_for(tbb::blocked_range<int64_t>(0, batch_size),
                      [&](const tbb::blocked_range<int64_t>& r) {
                          for (int64_t i = r.begin(); i != r.end(); ++i) {
                              for (int64_t idx = row_splits[i];
                                   idx < row_splits[i + 1]; ++idx) {
                                  indices_batches[idx] = i;
                              }
                          }
                      });

    // computes the integer voxel coordinates for a point
    auto CoordFn = [&](const Vec_t& point) {
        auto coords = ((point - points_range_min_vec) * inv_voxel_size)
                              .template cast<int64_t>();
        return coords;
    };

    // computes a linear hash from the voxel coordinates and the batch index;
    // points outside the valid range get the invalid_hash
    auto HashFn = [&](const Vec_t& point, const int64_t& idx) {
        if ((point >= points_range_min_vec && point <= points_range_max_vec)
                    .all()) {
            auto coords = CoordFn(point);
            int64_t hash = coords.dot(strides);
            hash += indices_batches[idx] * batch_hash;
            return hash;
        }
        return invalid_hash;
    };
    std::vector<std::pair<int64_t, int64_t>> hashes_indices(num_points);
    std::vector<uint64_t> num_voxels(batch_size, 0);

    // compute the hash for each point
    tbb::parallel_for(tbb::blocked_range<int64_t>(0, num_points),
                      [&](const tbb::blocked_range<int64_t>& r) {
                          for (int64_t i = r.begin(); i != r.end(); ++i) {
                              Vec_t pos(points + NDIM * i);
                              hashes_indices[i].first = HashFn(pos, i);
                              hashes_indices[i].second = i;
                          }
                      });

    // sort so that points in the same voxel are adjacent and points with the
    // invalid_hash end up at the back
    tbb::parallel_sort(hashes_indices);

    // count the number of voxels for each batch item
    tbb::parallel_for(
            tbb::blocked_range<int64_t>(0, hashes_indices.size()),
            [&](const tbb::blocked_range<int64_t>& r) {
                for (int64_t i = r.begin(); i != r.end(); ++i) {
                    int64_t batch_id = hashes_indices[i].first / batch_hash;
                    if (batch_id >= batch_size) break;
                    if (i == 0) {
                        AtomicFetchAddRelaxed(&num_voxels[batch_id], 1);
                        continue;
                    }
                    int64_t batch_id_prev =
                            hashes_indices[i - 1].first / batch_hash;
                    if ((batch_id != batch_id_prev) ||
                        (hashes_indices[i].first !=
                         hashes_indices[i - 1].first)) {
                        AtomicFetchAddRelaxed(&num_voxels[batch_id], 1);
                    }
                }
            });

    // apply the max_voxels limit per batch item
    tbb::parallel_for(tbb::blocked_range<int64_t>(0, batch_size),
                      [&](const tbb::blocked_range<int64_t>& r) {
                          for (int64_t i = r.begin(); i != r.end(); ++i) {
                              num_voxels[i] = std::min(int64_t(num_voxels[i]),
                                                       max_voxels);
                          }
                      });
    // prefix sum of the per-batch voxel counts gives the batch splits
    int64_t* out_batch_splits = nullptr;
    output_allocator.AllocVoxelBatchSplits(&out_batch_splits, batch_size + 1);
    out_batch_splits[0] = 0;
    for (int64_t i = 1; i < batch_size + 1; ++i) {
        out_batch_splits[i] = out_batch_splits[i - 1] + num_voxels[i - 1];
    }

    uint64_t total_voxels = out_batch_splits[batch_size];

    int32_t* out_voxel_coords = nullptr;
    output_allocator.AllocVoxelCoords(&out_voxel_coords, total_voxels, NDIM);
    int64_t* out_voxel_row_splits = nullptr;
    output_allocator.AllocVoxelPointRowSplits(&out_voxel_row_splits,
                                              total_voxels + 1);

    std::vector<int64_t> tmp_point_indices;

    int64_t hash_i = 0;  // index into the sorted hashes_indices vector
    for (int64_t voxel_i = 0; voxel_i < total_voxels; ++voxel_i) {
        // store the voxel coordinates and the start of its point list
        auto coord = CoordFn(
                Vec_t(points + hashes_indices[hash_i].second * NDIM));
        for (int d = 0; d < NDIM; ++d) {
            out_voxel_coords[voxel_i * NDIM + d] = coord[d];
        }
        out_voxel_row_splits[voxel_i] = tmp_point_indices.size();

        // collect up to max_points_per_voxel point indices for this voxel
        int64_t points_per_voxel = 0;
        const int64_t current_hash = hashes_indices[hash_i].first;
        int64_t batch_id = current_hash / batch_hash;
        num_voxels[batch_id]--;
        for (; hash_i < hashes_indices.size(); ++hash_i) {
            if (current_hash != hashes_indices[hash_i].first) {
                // the next voxel starts here
                break;
            }
            if (points_per_voxel < max_points_per_voxel) {
                tmp_point_indices.push_back(hashes_indices[hash_i].second);
                ++points_per_voxel;
            }
        }

        // if this was the last voxel for the batch item, skip the remaining
        // points of that batch item (they would exceed max_voxels)
        if (num_voxels[batch_id] == 0) {
            for (; hash_i < hashes_indices.size(); ++hash_i) {
                if ((int64_t)(hashes_indices[hash_i].first / batch_hash) !=
                    batch_id) {
                    break;
                }
            }
        }
    }
    out_voxel_row_splits[total_voxels] = tmp_point_indices.size();

    // copy the collected point indices to the output
    int64_t* out_point_indices = nullptr;
    output_allocator.AllocVoxelPointIndices(&out_point_indices,
                                            tmp_point_indices.size());
    memcpy(out_point_indices, tmp_point_indices.data(),
           tmp_point_indices.size() * sizeof(int64_t));
}
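
For orientation, the sketch below shows one way the OUTPUT_ALLOCATOR template parameter could be satisfied and the function called. The allocator type and its std::vector-backed storage are illustrative assumptions, not Open3D API; the only requirement taken from the code above is that it provides AllocVoxelBatchSplits, AllocVoxelCoords, AllocVoxelPointRowSplits, and AllocVoxelPointIndices, each returning a buffer with the requested number of elements. It assumes this header and <vector>/<cstdint> are already included.

// Hypothetical allocator that backs the Alloc* calls with std::vector storage.
struct ExampleVectorAllocator {
    void AllocVoxelBatchSplits(int64_t** ptr, size_t num) {
        batch_splits.resize(num);
        *ptr = batch_splits.data();
    }
    void AllocVoxelCoords(int32_t** ptr, size_t num, int ndim) {
        voxel_coords.resize(num * ndim);
        *ptr = voxel_coords.data();
    }
    void AllocVoxelPointRowSplits(int64_t** ptr, size_t num) {
        voxel_row_splits.resize(num);
        *ptr = voxel_row_splits.data();
    }
    void AllocVoxelPointIndices(int64_t** ptr, size_t num) {
        voxel_point_indices.resize(num);
        *ptr = voxel_point_indices.data();
    }
    std::vector<int64_t> batch_splits, voxel_row_splits, voxel_point_indices;
    std::vector<int32_t> voxel_coords;
};

// Two 3D points in a single batch item, 1x1x1 voxels over the range [0,10]^3.
void ExampleUsage() {
    const float points[] = {0.1f, 0.2f, 0.3f, 5.5f, 5.5f, 5.5f};
    const int64_t row_splits[] = {0, 2};
    const float voxel_size[] = {1.f, 1.f, 1.f};
    const float range_min[] = {0.f, 0.f, 0.f};
    const float range_max[] = {10.f, 10.f, 10.f};
    ExampleVectorAllocator alloc;
    VoxelizeCPU<float, 3>(2, points, 1, row_splits, voxel_size, range_min,
                          range_max, /*max_points_per_voxel=*/64,
                          /*max_voxels=*/1024, alloc);
    // alloc.voxel_coords now holds the integer coordinates of the two
    // occupied voxels, and alloc.voxel_point_indices the point indices.
}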