#if defined(__CUDACC__)
void IntegrateCUDA
#else
void IntegrateCPU
#endif
        (const core::Tensor& depth, const core::Tensor& color,
         const core::Tensor& indices, const core::Tensor& block_keys,
         core::Tensor& block_values, const core::Tensor& intrinsics,
         const core::Tensor& extrinsics, int64_t resolution, float voxel_size,
         float sdf_trunc, float depth_scale, float depth_max) {
    int64_t resolution3 = resolution * resolution * resolution;

    // Shape-only indexer: maps a flat voxel index to (xv, yv, zv) in a block.
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});
    // ... (transform_indexer, depth_indexer, block_keys_indexer,
    //      voxel_block_buffer_indexer and the optional color_indexer)

    bool integrate_color = false;
    if (color.NumElements() != 0) {
        // ...
        integrate_color = true;
    }

    // Plain arrays that do not require indexers.
    const int* indices_ptr = indices.GetDataPtr<int>();

    int64_t n = indices.GetLength() * resolution3;
    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
        core::ParallelFor(depth.GetDevice(), n,
                          [=] OPEN3D_DEVICE(int64_t workload_idx) {
            // Natural index (0, N) -> (block_idx, voxel_idx).
            int block_idx = indices_ptr[workload_idx / resolution3];
            int voxel_idx = workload_idx % resolution3;

            // block_idx -> (x_block, y_block, z_block)
            int* block_key_ptr = block_keys_indexer.GetDataPtr<int>(block_idx);
            int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
            int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
            int64_t zb = static_cast<int64_t>(block_key_ptr[2]);

            // voxel_idx -> (x_voxel, y_voxel, z_voxel)
            int64_t xv, yv, zv;
            voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

            // Global grid coordinate (in voxels).
            int64_t x = (xb * resolution + xv);
            int64_t y = (yb * resolution + yv);
            int64_t z = (zb * resolution + zv);

            // Voxel -> camera coordinate (in meters).
            float xc, yc, zc, u, v;
            transform_indexer.RigidTransform(static_cast<float>(x),
                                             static_cast<float>(y),
                                             static_cast<float>(z),
                                             &xc, &yc, &zc);

            // Camera -> image coordinate (in pixels).
            transform_indexer.Project(xc, yc, zc, &u, &v);
            if (!depth_indexer.InBoundary(u, v)) {
                return;
            }

            // Read the observed depth and compute the truncated SDF.
            float depth = *depth_indexer.GetDataPtr<float>(
                                  static_cast<int64_t>(u),
                                  static_cast<int64_t>(v)) /
                          depth_scale;

            float sdf = (depth - zc);
            if (depth <= 0 || depth > depth_max || zc <= 0 ||
                sdf < -sdf_trunc) {
                return;
            }
            sdf = sdf < sdf_trunc ? sdf : sdf_trunc;

            // Fuse the observation into the voxel.
            voxel_t* voxel_ptr = voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
                    xv, yv, zv, block_idx);
            if (integrate_color) {
                float* color_ptr = color_indexer.GetDataPtr<float>(
                        static_cast<int64_t>(u), static_cast<int64_t>(v));
                voxel_ptr->Integrate(sdf, color_ptr[0], color_ptr[1],
                                     color_ptr[2]);
            } else {
                voxel_ptr->Integrate(sdf);
            }
        });
    });
#if defined(__CUDACC__)
    core::cuda::Synchronize();
#endif
}
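For reference, the per-voxel update that `voxel_ptr->Integrate(...)` performs above is a truncated-SDF running average. A minimal standalone sketch, assuming a hypothetical plain-float voxel rather than the packed `voxel_t` variants dispatched by `DISPATCH_BYTESIZE_TO_VOXEL`:

#include <algorithm>

// Hypothetical simplified voxel; the real voxel_t variants may quantize the
// fields and also carry color.
struct SimpleVoxel {
    float tsdf = 0.0f;
    float weight = 0.0f;

    void Integrate(float sdf) {
        // Fold the new observation (weight 1) into the running average.
        tsdf = (tsdf * weight + sdf) / (weight + 1.0f);
        // Cap the weight so old surfaces can still adapt to new observations.
        weight = std::min(weight + 1.0f, 100.0f);
    }
};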
#if defined(__CUDACC__)
void ExtractSurfacePointsCUDA
#else
void ExtractSurfacePointsCPU
#endif
        (const core::Tensor& indices, const core::Tensor& nb_indices,
         const core::Tensor& nb_masks, const core::Tensor& block_keys,
         const core::Tensor& block_values, core::Tensor& points,
         utility::optional<std::reference_wrapper<core::Tensor>> normals,
         utility::optional<std::reference_wrapper<core::Tensor>> colors,
         int64_t resolution, float voxel_size, float weight_threshold,
         int& valid_size) {
    int64_t resolution3 = resolution * resolution * resolution;

    // Shape-only indexer for voxel coordinates inside a block.
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});
    // ... (block_keys_indexer, nb_block_masks_indexer,
    //      nb_block_indices_indexer, voxel_block_buffer_indexer)

    const int64_t* indices_ptr = indices.GetDataPtr<int64_t>();

    int64_t n_blocks = indices.GetLength();
    int64_t n = n_blocks * resolution3;

    // Surface-point counter, accumulated atomically on both backends.
#if defined(__CUDACC__)
    core::Tensor count(std::vector<int>{0}, {1}, core::Int32,
                       block_values.GetDevice());
    int* count_ptr = count.GetDataPtr<int>();
#else
    std::atomic<int> count_atomic(0);
    std::atomic<int>* count_ptr = &count_atomic;
#endif

    if (valid_size < 0) {
        utility::LogWarning(
                "No estimated max point cloud size provided, using a 2-pass "
                "estimation. Surface extraction could be slow.");
        // Pass 1: count zero crossings to estimate the output size.
        DISPATCH_BYTESIZE_TO_VOXEL(
                voxel_block_buffer_indexer.ElementByteSize(), [&]() {
            auto GetVoxelAt = [&] OPEN3D_DEVICE(
                                      int xo, int yo, int zo,
                                      int curr_block_idx) -> voxel_t* {
                return DeviceGetVoxelAt<voxel_t>(
                        xo, yo, zo, curr_block_idx,
                        static_cast<int>(resolution), nb_block_masks_indexer,
                        nb_block_indices_indexer, voxel_block_buffer_indexer);
            };

            core::ParallelFor(block_values.GetDevice(), n,
                              [=] OPEN3D_DEVICE(int64_t workload_idx) {
                int64_t workload_block_idx = workload_idx / resolution3;
                int64_t block_idx = indices_ptr[workload_block_idx];
                int64_t voxel_idx = workload_idx % resolution3;

                int64_t xv, yv, zv;
                voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

                voxel_t* voxel_ptr =
                        voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
                                xv, yv, zv, block_idx);
                float tsdf_o = voxel_ptr->GetTSDF();
                float weight_o = voxel_ptr->GetWeight();
                if (weight_o <= weight_threshold) return;

                // Check the +x / +y / +z neighbors for a sign change.
                for (int i = 0; i < 3; ++i) {
                    voxel_t* ptr = GetVoxelAt(
                            static_cast<int>(xv) + (i == 0),
                            static_cast<int>(yv) + (i == 1),
                            static_cast<int>(zv) + (i == 2),
                            static_cast<int>(workload_block_idx));
                    if (ptr == nullptr) continue;

                    float tsdf_i = ptr->GetTSDF();
                    float weight_i = ptr->GetWeight();
                    if (weight_i > weight_threshold && tsdf_i * tsdf_o < 0) {
                        OPEN3D_ATOMIC_ADD(count_ptr, 1);
                    }
                }
            });
        });

#if defined(__CUDACC__)
        valid_size = count[0].Item<int>();
#else
        valid_size = (*count_ptr).load();
#endif
        // ... (reset the counter before the extraction pass)
    }
    int max_count = valid_size;
    if (points.GetLength() == 0) {
        points = core::Tensor({max_count, 3}, core::Float32,
                              block_values.GetDevice());
    }
    NDArrayIndexer point_indexer(points, 1);

    // Normals are optional.
    bool extract_normal = false;
    NDArrayIndexer normal_indexer;
    if (normals.has_value()) {
        extract_normal = true;
        if (normals.value().get().GetLength() == 0) {
            normals.value().get() = core::Tensor(
                    {max_count, 3}, core::Float32, block_values.GetDevice());
        }
        normal_indexer = NDArrayIndexer(normals.value().get(), 1);
    }

    // Pass 2: extract points (and optionally normals / colors).
    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
        // Colors are only extracted when the voxel type stores them.
        bool extract_color = false;
        NDArrayIndexer color_indexer;
        if (voxel_t::HasColor() && colors.has_value()) {
            extract_color = true;
            if (colors.value().get().GetLength() == 0) {
                colors.value().get() =
                        core::Tensor({max_count, 3}, core::Float32,
                                     block_values.GetDevice());
            }
            color_indexer = NDArrayIndexer(colors.value().get(), 1);
        }

        auto GetVoxelAt = [&] OPEN3D_DEVICE(int xo, int yo, int zo,
                                            int curr_block_idx) -> voxel_t* {
            return DeviceGetVoxelAt<voxel_t>(
                    xo, yo, zo, curr_block_idx, static_cast<int>(resolution),
                    nb_block_masks_indexer, nb_block_indices_indexer,
                    voxel_block_buffer_indexer);
        };
        auto GetNormalAt = [&] OPEN3D_DEVICE(int xo, int yo, int zo,
                                             int curr_block_idx, float* n) {
            return DeviceGetNormalAt<voxel_t>(
                    xo, yo, zo, curr_block_idx, n,
                    static_cast<int>(resolution), voxel_size,
                    nb_block_masks_indexer, nb_block_indices_indexer,
                    voxel_block_buffer_indexer);
        };

        core::ParallelFor(block_values.GetDevice(), n,
                          [=] OPEN3D_DEVICE(int64_t workload_idx) {
            int64_t workload_block_idx = workload_idx / resolution3;
            int64_t block_idx = indices_ptr[workload_block_idx];
            int64_t voxel_idx = workload_idx % resolution3;

            // block_idx -> (x_block, y_block, z_block)
            int* block_key_ptr = block_keys_indexer.GetDataPtr<int>(block_idx);
            int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
            int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
            int64_t zb = static_cast<int64_t>(block_key_ptr[2]);

            // voxel_idx -> (x_voxel, y_voxel, z_voxel)
            int64_t xv, yv, zv;
            voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

            voxel_t* voxel_ptr = voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
                    xv, yv, zv, block_idx);
            float tsdf_o = voxel_ptr->GetTSDF();
            float weight_o = voxel_ptr->GetWeight();

            if (weight_o <= weight_threshold) return;

            int64_t x = xb * resolution + xv;
            int64_t y = yb * resolution + yv;
            int64_t z = zb * resolution + zv;

            float no[3] = {0}, ni[3] = {0};
            if (extract_normal) {
                GetNormalAt(static_cast<int>(xv), static_cast<int>(yv),
                            static_cast<int>(zv),
                            static_cast<int>(workload_block_idx), no);
            }

            // Place one point per zero crossing towards +x / +y / +z.
            for (int i = 0; i < 3; ++i) {
                voxel_t* ptr = GetVoxelAt(
                        static_cast<int>(xv) + (i == 0),
                        static_cast<int>(yv) + (i == 1),
                        static_cast<int>(zv) + (i == 2),
                        static_cast<int>(workload_block_idx));
                if (ptr == nullptr) continue;

                float tsdf_i = ptr->GetTSDF();
                float weight_i = ptr->GetWeight();

                if (weight_i > weight_threshold && tsdf_i * tsdf_o < 0) {
                    float ratio = (0 - tsdf_o) / (tsdf_i - tsdf_o);

                    int idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
                    if (idx >= valid_size) {
                        printf("Point cloud size larger than "
                               "estimated, please increase the "
                               "estimation!\n");
                        return;
                    }

                    float* point_ptr = point_indexer.GetDataPtr<float>(idx);
                    point_ptr[0] = voxel_size * (x + ratio * int(i == 0));
                    point_ptr[1] = voxel_size * (y + ratio * int(i == 1));
                    point_ptr[2] = voxel_size * (z + ratio * int(i == 2));

                    if (extract_color) {
                        float* color_ptr = color_indexer.GetDataPtr<float>(idx);

                        float r_o = voxel_ptr->GetR();
                        float g_o = voxel_ptr->GetG();
                        float b_o = voxel_ptr->GetB();

                        float r_i = ptr->GetR();
                        float g_i = ptr->GetG();
                        float b_i = ptr->GetB();

                        color_ptr[0] = ((1 - ratio) * r_o + ratio * r_i) / 255.0f;
                        color_ptr[1] = ((1 - ratio) * g_o + ratio * g_i) / 255.0f;
                        color_ptr[2] = ((1 - ratio) * b_o + ratio * b_i) / 255.0f;
                    }

                    if (extract_normal) {
                        GetNormalAt(static_cast<int>(xv) + (i == 0),
                                    static_cast<int>(yv) + (i == 1),
                                    static_cast<int>(zv) + (i == 2),
                                    static_cast<int>(workload_block_idx), ni);

                        float* normal_ptr = normal_indexer.GetDataPtr<float>(idx);
                        float nx = (1 - ratio) * no[0] + ratio * ni[0];
                        float ny = (1 - ratio) * no[1] + ratio * ni[1];
                        float nz = (1 - ratio) * no[2] + ratio * ni[2];
                        float norm = static_cast<float>(
                                sqrt(nx * nx + ny * ny + nz * nz) + 1e-5);
                        normal_ptr[0] = nx / norm;
                        normal_ptr[1] = ny / norm;
                        normal_ptr[2] = nz / norm;
                    }
                }
            }
        });
    });

#if defined(__CUDACC__)
    int total_count = count.Item<int>();
#else
    int total_count = (*count_ptr).load();
#endif
    valid_size = total_count;

#if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__)
    core::cuda::Synchronize();
#endif
}
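The point placement above boils down to linear interpolation of the zero crossing between a voxel and its +x/+y/+z neighbor. A small self-contained sketch with toy values (not Open3D API):

#include <cstdio>

int main() {
    float voxel_size = 0.01f;
    float tsdf_o = 0.4f, tsdf_i = -0.2f;  // sign change -> surface crossing
    int x = 25;                           // voxel coordinate along x
    float ratio = (0.0f - tsdf_o) / (tsdf_i - tsdf_o);
    float px = voxel_size * (x + ratio);  // metric surface point on the x edge
    std::printf("ratio = %.3f, px = %.5f m\n", ratio, px);
    return 0;
}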
#if defined(__CUDACC__)
void ExtractSurfaceMeshCUDA
#else
void ExtractSurfaceMeshCPU
#endif
        (const core::Tensor& indices, const core::Tensor& inv_indices,
         const core::Tensor& nb_indices, const core::Tensor& nb_masks,
         const core::Tensor& block_keys, const core::Tensor& block_values,
         core::Tensor& vertices, core::Tensor& triangles,
         utility::optional<std::reference_wrapper<core::Tensor>> normals,
         utility::optional<std::reference_wrapper<core::Tensor>> colors,
         int64_t resolution, float voxel_size, float weight_threshold,
         int& vertex_count) {
    int64_t resolution3 = resolution * resolution * resolution;

    // Shape-only indexer for voxel coordinates inside a block.
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});
    int n_blocks = static_cast<int>(indices.GetLength());

    // Auxiliary Marching Cubes structure: per voxel, 3 vertex slots (one per
    // owned edge) plus the cube configuration index.
    core::Tensor mesh_structure;
    try {
        mesh_structure = core::Tensor::Zeros(
                {n_blocks, resolution, resolution, resolution, 4}, core::Int32,
                block_keys.GetDevice());
    } catch (const std::runtime_error&) {
        utility::LogError(
                "[MeshExtractionKernel] Unable to allocate assistance mesh "
                "structure for Marching "
                "Cubes with {} active voxel blocks. Please consider using a "
                "larger voxel size (currently {}) for TSDF "
                "integration, or using tsdf_volume.cpu() to perform mesh "
                "extraction on CPU.",
                n_blocks, voxel_size);
    }
    NDArrayIndexer mesh_structure_indexer(mesh_structure, 4);
    // ... (block_keys_indexer, nb_block_masks_indexer,
    //      nb_block_indices_indexer, voxel_block_buffer_indexer)

    const int64_t* indices_ptr = indices.GetDataPtr<int64_t>();
    const int64_t* inv_indices_ptr = inv_indices.GetDataPtr<int64_t>();
    int64_t n = n_blocks * resolution3;

    // Pass 0: compute the per-voxel Marching Cubes configuration and mark the
    // edges that will carry vertices.
    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
        auto GetVoxelAt = [&] OPEN3D_DEVICE(int xo, int yo, int zo,
                                            int curr_block_idx) -> voxel_t* {
            return DeviceGetVoxelAt<voxel_t>(
                    xo, yo, zo, curr_block_idx, static_cast<int>(resolution),
                    nb_block_masks_indexer, nb_block_indices_indexer,
                    voxel_block_buffer_indexer);
        };

        core::ParallelFor(block_values.GetDevice(), n,
                          [=] OPEN3D_DEVICE(int64_t widx) {
            int64_t workload_block_idx = widx / resolution3;
            int64_t voxel_idx = widx % resolution3;

            // voxel_idx -> (x_voxel, y_voxel, z_voxel)
            int64_t xv, yv, zv;
            voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

            // Build the 8-bit cube configuration from the 8 corner TSDFs.
            int table_idx = 0;
            for (int i = 0; i < 8; ++i) {
                voxel_t* voxel_ptr_i = GetVoxelAt(
                        static_cast<int>(xv) + vtx_shifts[i][0],
                        static_cast<int>(yv) + vtx_shifts[i][1],
                        static_cast<int>(zv) + vtx_shifts[i][2],
                        static_cast<int>(workload_block_idx));
                if (voxel_ptr_i == nullptr) return;

                float tsdf_i = voxel_ptr_i->GetTSDF();
                float weight_i = voxel_ptr_i->GetWeight();
                if (weight_i <= weight_threshold) return;

                table_idx |= ((tsdf_i < 0) ? (1 << i) : 0);
            }

            int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<int>(
                    xv, yv, zv, workload_block_idx);
            mesh_struct_ptr[3] = table_idx;

            if (table_idx == 0 || table_idx == 255) return;

            // Mark the edges that intersect the surface with -1.
            int edges_with_vertices = edge_table[table_idx];
            for (int i = 0; i < 12; ++i) {
                if (edges_with_vertices & (1 << i)) {
                    int64_t xv_i = xv + edge_shifts[i][0];
                    int64_t yv_i = yv + edge_shifts[i][1];
                    int64_t zv_i = zv + edge_shifts[i][2];
                    int edge_i = edge_shifts[i][3];

                    int dxb = static_cast<int>(xv_i / resolution);
                    int dyb = static_cast<int>(yv_i / resolution);
                    int dzb = static_cast<int>(zv_i / resolution);

                    int nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9;

                    int64_t block_idx_i =
                            *nb_block_indices_indexer.GetDataPtr<int64_t>(
                                    workload_block_idx, nb_idx);
                    int* mesh_ptr_i = mesh_structure_indexer.GetDataPtr<int>(
                            xv_i - dxb * resolution, yv_i - dyb * resolution,
                            zv_i - dzb * resolution,
                            inv_indices_ptr[block_idx_i]);
                    mesh_ptr_i[edge_i] = -1;
                }
            }
        });
    });

    // Vertex counter, accumulated atomically on both backends.
#if defined(__CUDACC__)
    core::Tensor count(std::vector<int>{0}, {1}, core::Int32,
                       block_values.GetDevice());
    int* count_ptr = count.GetDataPtr<int>();
#else
    std::atomic<int> count_atomic(0);
    std::atomic<int>* count_ptr = &count_atomic;
#endif

    // Pass 1: count the vertices to allocate.
    if (vertex_count < 0) {
        core::ParallelFor(block_values.GetDevice(), n,
                          [=] OPEN3D_DEVICE(int64_t widx) {
            int64_t workload_block_idx = widx / resolution3;
            int64_t voxel_idx = widx % resolution3;

            int64_t xv, yv, zv;
            voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

            int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<int>(
                    xv, yv, zv, workload_block_idx);

            // Skip voxels that own no surface edges.
            if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 &&
                mesh_struct_ptr[2] != -1) {
                return;
            }

            for (int e = 0; e < 3; ++e) {
                int vertex_idx = mesh_struct_ptr[e];
                if (vertex_idx != -1) continue;
                OPEN3D_ATOMIC_ADD(count_ptr, 1);
            }
        });

#if defined(__CUDACC__)
        vertex_count = count.Item<int>();
#else
        vertex_count = (*count_ptr).load();
#endif
    }
    // Pass 2: allocate and compute vertices (and optionally normals / colors).
    vertices = core::Tensor({vertex_count, 3}, core::Float32,
                            block_values.GetDevice());
    NDArrayIndexer vertex_indexer(vertices, 1);

    bool extract_normal = false;
    NDArrayIndexer normal_indexer;
    if (normals.has_value()) {
        extract_normal = true;
        normals.value().get() = core::Tensor({vertex_count, 3}, core::Float32,
                                             block_values.GetDevice());
        normal_indexer = NDArrayIndexer(normals.value().get(), 1);
    }

    // Reset the counter; it now hands out vertex indices.
#if defined(__CUDACC__)
    count = core::Tensor(std::vector<int>{0}, {1}, core::Int32,
                         block_values.GetDevice());
    count_ptr = count.GetDataPtr<int>();
#else
    (*count_ptr) = 0;
#endif

    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
        // Colors are only extracted when the voxel type stores them.
        bool extract_color = false;
        NDArrayIndexer color_indexer;
        if (voxel_t::HasColor() && colors.has_value()) {
            extract_color = true;
            colors.value().get() = core::Tensor(
                    {vertex_count, 3}, core::Float32, block_values.GetDevice());
            color_indexer = NDArrayIndexer(colors.value().get(), 1);
        }

        auto GetVoxelAt = [&] OPEN3D_DEVICE(int xo, int yo, int zo,
                                            int curr_block_idx) -> voxel_t* {
            return DeviceGetVoxelAt<voxel_t>(
                    xo, yo, zo, curr_block_idx, static_cast<int>(resolution),
                    nb_block_masks_indexer, nb_block_indices_indexer,
                    voxel_block_buffer_indexer);
        };
        auto GetNormalAt = [&] OPEN3D_DEVICE(int xo, int yo, int zo,
                                             int curr_block_idx, float* n) {
            return DeviceGetNormalAt<voxel_t>(
                    xo, yo, zo, curr_block_idx, n,
                    static_cast<int>(resolution), voxel_size,
                    nb_block_masks_indexer, nb_block_indices_indexer,
                    voxel_block_buffer_indexer);
        };

        core::ParallelFor(block_values.GetDevice(), n,
                          [=] OPEN3D_DEVICE(int64_t widx) {
            int64_t workload_block_idx = widx / resolution3;
            int64_t block_idx = indices_ptr[workload_block_idx];
            int64_t voxel_idx = widx % resolution3;

            // block_idx -> (x_block, y_block, z_block)
            int* block_key_ptr = block_keys_indexer.GetDataPtr<int>(block_idx);
            int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
            int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
            int64_t zb = static_cast<int64_t>(block_key_ptr[2]);

            // voxel_idx -> (x_voxel, y_voxel, z_voxel)
            int64_t xv, yv, zv;
            voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

            // Global grid coordinate (in voxels).
            int64_t x = xb * resolution + xv;
            int64_t y = yb * resolution + yv;
            int64_t z = zb * resolution + zv;

            int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<int>(
                    xv, yv, zv, workload_block_idx);

            // Skip voxels that own no surface edges.
            if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 &&
                mesh_struct_ptr[2] != -1) {
                return;
            }

            voxel_t* voxel_ptr = voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
                    xv, yv, zv, block_idx);
            float tsdf_o = voxel_ptr->GetTSDF();
            float no[3] = {0}, ne[3] = {0};

            if (extract_normal) {
                GetNormalAt(static_cast<int>(xv), static_cast<int>(yv),
                            static_cast<int>(zv),
                            static_cast<int>(workload_block_idx), no);
            }

            // One vertex per owned edge (+x / +y / +z) with a zero crossing.
            for (int e = 0; e < 3; ++e) {
                int vertex_idx = mesh_struct_ptr[e];
                if (vertex_idx != -1) continue;

                voxel_t* voxel_ptr_e = GetVoxelAt(
                        static_cast<int>(xv) + (e == 0),
                        static_cast<int>(yv) + (e == 1),
                        static_cast<int>(zv) + (e == 2),
                        static_cast<int>(workload_block_idx));
                OPEN3D_ASSERT(voxel_ptr_e != nullptr &&
                              "Internal error: GetVoxelAt returns nullptr.");
                float tsdf_e = voxel_ptr_e->GetTSDF();
                float ratio = (0 - tsdf_o) / (tsdf_e - tsdf_o);

                int idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
                mesh_struct_ptr[e] = idx;

                float ratio_x = ratio * int(e == 0);
                float ratio_y = ratio * int(e == 1);
                float ratio_z = ratio * int(e == 2);

                float* vertex_ptr = vertex_indexer.GetDataPtr<float>(idx);
                vertex_ptr[0] = voxel_size * (x + ratio_x);
                vertex_ptr[1] = voxel_size * (y + ratio_y);
                vertex_ptr[2] = voxel_size * (z + ratio_z);

                if (extract_normal) {
                    float* normal_ptr = normal_indexer.GetDataPtr<float>(idx);
                    GetNormalAt(static_cast<int>(xv) + (e == 0),
                                static_cast<int>(yv) + (e == 1),
                                static_cast<int>(zv) + (e == 2),
                                static_cast<int>(workload_block_idx), ne);
                    float nx = (1 - ratio) * no[0] + ratio * ne[0];
                    float ny = (1 - ratio) * no[1] + ratio * ne[1];
                    float nz = (1 - ratio) * no[2] + ratio * ne[2];
                    float norm = static_cast<float>(
                            sqrt(nx * nx + ny * ny + nz * nz) + 1e-5);
                    normal_ptr[0] = nx / norm;
                    normal_ptr[1] = ny / norm;
                    normal_ptr[2] = nz / norm;
                }

                if (extract_color) {
                    float* color_ptr = color_indexer.GetDataPtr<float>(idx);
                    float r_o = voxel_ptr->GetR();
                    float g_o = voxel_ptr->GetG();
                    float b_o = voxel_ptr->GetB();

                    float r_e = voxel_ptr_e->GetR();
                    float g_e = voxel_ptr_e->GetG();
                    float b_e = voxel_ptr_e->GetB();

                    color_ptr[0] = ((1 - ratio) * r_o + ratio * r_e) / 255.0f;
                    color_ptr[1] = ((1 - ratio) * g_o + ratio * g_e) / 255.0f;
                    color_ptr[2] = ((1 - ratio) * b_o + ratio * b_e) / 255.0f;
                }
            }
        });
    });
    // Pass 3: connect vertices into triangles using the Marching Cubes table.
    int triangle_count = vertex_count * 3;
    triangles = core::Tensor({triangle_count, 3}, core::Int64,
                             block_values.GetDevice());
    NDArrayIndexer triangle_indexer(triangles, 1);

    // Reset the counter; it now hands out triangle indices.
#if defined(__CUDACC__)
    count = core::Tensor(std::vector<int>{0}, {1}, core::Int32,
                         block_values.GetDevice());
    count_ptr = count.GetDataPtr<int>();
#else
    (*count_ptr) = 0;
#endif

    core::ParallelFor(block_values.GetDevice(), n,
                      [=] OPEN3D_DEVICE(int64_t widx) {
        int64_t workload_block_idx = widx / resolution3;
        int64_t voxel_idx = widx % resolution3;

        // voxel_idx -> (x_voxel, y_voxel, z_voxel)
        int64_t xv, yv, zv;
        voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

        int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<int>(
                xv, yv, zv, workload_block_idx);

        int table_idx = mesh_struct_ptr[3];
        if (tri_count[table_idx] == 0) return;

        for (size_t tri = 0; tri < 16; tri += 3) {
            if (tri_table[table_idx][tri] == -1) return;

            int64_t tri_idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);

            for (size_t vertex = 0; vertex < 3; ++vertex) {
                int edge = tri_table[table_idx][tri + vertex];

                int64_t xv_i = xv + edge_shifts[edge][0];
                int64_t yv_i = yv + edge_shifts[edge][1];
                int64_t zv_i = zv + edge_shifts[edge][2];
                int64_t edge_i = edge_shifts[edge][3];

                int dxb = static_cast<int>(xv_i / resolution);
                int dyb = static_cast<int>(yv_i / resolution);
                int dzb = static_cast<int>(zv_i / resolution);

                int nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9;

                int64_t block_idx_i =
                        *nb_block_indices_indexer.GetDataPtr<int64_t>(
                                workload_block_idx, nb_idx);
                int* mesh_struct_ptr_i = mesh_structure_indexer.GetDataPtr<int>(
                        xv_i - dxb * resolution, yv_i - dyb * resolution,
                        zv_i - dzb * resolution, inv_indices_ptr[block_idx_i]);

                // Note the reversed vertex order (2 - vertex) within the triangle.
                int64_t* triangle_ptr =
                        triangle_indexer.GetDataPtr<int64_t>(tri_idx);
                triangle_ptr[2 - vertex] = mesh_struct_ptr_i[edge_i];
            }
        }
    });

#if defined(__CUDACC__)
    triangle_count = count.Item<int>();
#else
    triangle_count = (*count_ptr).load();
#endif
    triangles = triangles.Slice(0, 0, triangle_count);
}
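The first pass above builds the standard Marching Cubes configuration index: one bit per cube corner, set when the corner TSDF is negative. A toy, self-contained illustration of that bit packing (the corner values are made up):

#include <cstdio>

int main() {
    float corner_tsdf[8] = {0.3f, 0.1f, -0.2f, -0.4f, 0.2f, 0.05f, -0.1f, -0.3f};
    int table_idx = 0;
    for (int i = 0; i < 8; ++i) {
        table_idx |= (corner_tsdf[i] < 0) ? (1 << i) : 0;
    }
    // 0 and 255 mean the cube is entirely outside/inside: no triangles.
    std::printf("table_idx = %d\n", table_idx);
    return 0;
}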
#if defined(__CUDACC__)
void EstimateRangeCUDA
#else
void EstimateRangeCPU
#endif
        (const core::Tensor& block_keys, core::Tensor& range_minmax_map,
         const core::Tensor& intrinsics, const core::Tensor& extrinsics, int h,
         int w, int down_factor, int64_t block_resolution, float voxel_size,
         float depth_min, float depth_max) {
    int h_down = h / down_factor;
    int w_down = w / down_factor;
    // ... (range_minmax_map allocation and range_map_indexer)

    // Every block is projected to a screen-space bounding box, which is then
    // split into fragments of at most 16x16 pixels.
    const int fragment_size = 16;
    const int frag_buffer_size = 65535;
    // ... (frag_buffer and frag_buffer_indexer, block_keys_indexer,
    //      w2c_transform_indexer)

#if defined(__CUDACC__)
    core::Tensor count(std::vector<int>{0}, {1}, core::Int32,
                       block_keys.GetDevice());
    int* count_ptr = count.GetDataPtr<int>();
#else
    std::atomic<int> count_atomic(0);
    std::atomic<int>* count_ptr = &count_atomic;
#endif

    // Pass 1: per block, project the 8 corners and emit fragments.
    core::ParallelFor(block_keys.GetDevice(), block_keys.GetLength(),
                      [=] OPEN3D_DEVICE(int64_t workload_idx) {
        int* key = block_keys_indexer.GetDataPtr<int>(workload_idx);

        int u_min = w_down - 1, v_min = h_down - 1, u_max = 0, v_max = 0;
        float z_min = depth_max, z_max = depth_min;

        float xc, yc, zc, u, v;

        // Project the 8 block corners into the down-sampled image.
        for (int i = 0; i < 8; ++i) {
            float xw = (key[0] + ((i & 1) > 0)) * block_resolution * voxel_size;
            float yw = (key[1] + ((i & 2) > 0)) * block_resolution * voxel_size;
            float zw = (key[2] + ((i & 4) > 0)) * block_resolution * voxel_size;

            w2c_transform_indexer.RigidTransform(xw, yw, zw, &xc, &yc, &zc);
            if (zc <= 0) continue;

            w2c_transform_indexer.Project(xc, yc, zc, &u, &v);
            u /= down_factor;
            v /= down_factor;

            v_min = min(static_cast<int>(floorf(v)), v_min);
            v_max = max(static_cast<int>(ceilf(v)), v_max);

            u_min = min(static_cast<int>(floorf(u)), u_min);
            u_max = max(static_cast<int>(ceilf(u)), u_max);

            z_min = min(z_min, zc);
            z_max = max(z_max, zc);
        }

        v_min = max(0, v_min);
        v_max = min(h_down - 1, v_max);

        u_min = max(0, u_min);
        u_max = min(w_down - 1, u_max);

        if (v_min >= v_max || u_min >= u_max || z_min >= z_max) return;

        // Split the bounding box into at most 16x16 fragments.
        int frag_v_count = static_cast<int>(
                ceilf(float(v_max - v_min + 1) / float(fragment_size)));
        int frag_u_count = static_cast<int>(
                ceilf(float(u_max - u_min + 1) / float(fragment_size)));

        int frag_count = frag_v_count * frag_u_count;
        int frag_count_start = OPEN3D_ATOMIC_ADD(count_ptr, frag_count);
        int frag_count_end = frag_count_start + frag_count;
        if (frag_count_end >= frag_buffer_size) {
            printf("Fragment count exceeding buffer size, abort!\n");
            return;
        }

        int offset = 0;
        for (int frag_v = 0; frag_v < frag_v_count; ++frag_v) {
            for (int frag_u = 0; frag_u < frag_u_count; ++frag_u, ++offset) {
                float* frag_ptr = frag_buffer_indexer.GetDataPtr<float>(
                        frag_count_start + offset);
                // zmin, zmax
                frag_ptr[0] = z_min;
                frag_ptr[1] = z_max;
                // vmin, umin
                frag_ptr[2] = v_min + frag_v * fragment_size;
                frag_ptr[3] = u_min + frag_u * fragment_size;
                // vmax, umax
                frag_ptr[4] = min(frag_ptr[2] + fragment_size - 1,
                                  static_cast<float>(v_max));
                frag_ptr[5] = min(frag_ptr[3] + fragment_size - 1,
                                  static_cast<float>(u_max));
            }
        }
    });

#if defined(__CUDACC__)
    int frag_count = count[0].Item<int>();
#else
    int frag_count = (*count_ptr).load();
#endif

    // Pass 2a: initialize the range map.
    core::ParallelFor(block_keys.GetDevice(),
                      static_cast<int64_t>(h_down) * w_down,
                      [=] OPEN3D_DEVICE(int64_t workload_idx) {
        int v = workload_idx / w_down;
        int u = workload_idx % w_down;
        float* range_ptr = range_map_indexer.GetDataPtr<float>(u, v);
        range_ptr[0] = depth_max;
        range_ptr[1] = depth_min;
    });

    // Pass 2b: rasterize the fragments into the range map.
    core::ParallelFor(block_keys.GetDevice(),
                      frag_count * fragment_size * fragment_size,
                      [=] OPEN3D_DEVICE(int64_t workload_idx) {
        int frag_idx = workload_idx / (fragment_size * fragment_size);
        int local_idx = workload_idx % (fragment_size * fragment_size);
        int dv = local_idx / fragment_size;
        int du = local_idx % fragment_size;

        float* frag_ptr = frag_buffer_indexer.GetDataPtr<float>(frag_idx);
        int v_min = static_cast<int>(frag_ptr[2]);
        int u_min = static_cast<int>(frag_ptr[3]);
        int v_max = static_cast<int>(frag_ptr[4]);
        int u_max = static_cast<int>(frag_ptr[5]);

        int v = v_min + dv;
        int u = u_min + du;
        if (v > v_max || u > u_max) return;

        float z_min = frag_ptr[0];
        float z_max = frag_ptr[1];
        float* range_ptr = range_map_indexer.GetDataPtr<float>(u, v);
#if defined(__CUDACC__)
        atomicMinf(&(range_ptr[0]), z_min);
        atomicMaxf(&(range_ptr[1]), z_max);
#else
#pragma omp critical(EstimateRangeCPU)
        {
            range_ptr[0] = min(z_min, range_ptr[0]);
            range_ptr[1] = max(z_max, range_ptr[1]);
        }
#endif
    });

#if defined(__CUDACC__)
    core::cuda::Synchronize();
#endif
}
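The fragment tiling above splits each projected block bounding box into at most 16x16-pixel tiles before rasterizing them into the range map. A standalone sketch of the tile-count arithmetic with made-up bounds:

#include <cmath>
#include <cstdio>

int main() {
    const int fragment_size = 16;
    int u_min = 37, u_max = 95, v_min = 12, v_max = 40;  // toy bounding box
    int frag_u_count = static_cast<int>(
            std::ceil(float(u_max - u_min + 1) / float(fragment_size)));
    int frag_v_count = static_cast<int>(
            std::ceil(float(v_max - v_min + 1) / float(fragment_size)));
    std::printf("%d x %d = %d fragments\n", frag_u_count, frag_v_count,
                frag_u_count * frag_v_count);
    return 0;
}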
// Cache of the most recently visited block, used by ray casting to avoid
// repeated hash map lookups along a ray.
struct BlockCache {
    int x;
    int y;
    int z;
    int block_idx;

    int OPEN3D_DEVICE Check(int xin, int yin, int zin) {
        return (xin == x && yin == y && zin == z) ? block_idx : -1;
    }

    void OPEN3D_DEVICE Update(int xin, int yin, int zin, int block_idx_in) {
        x = xin;
        y = yin;
        z = zin;
        block_idx = block_idx_in;
    }
};
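Usage of the cache above follows a check-then-update pattern around the hash map lookup. A self-contained sketch with `std::unordered_map` standing in for the device hash backend (the toy key, hash, and cache types here are hypothetical):

#include <cstddef>
#include <functional>
#include <unordered_map>

struct ToyKey {
    int x, y, z;
    bool operator==(const ToyKey& o) const {
        return x == o.x && y == o.y && z == o.z;
    }
};
struct ToyHash {
    std::size_t operator()(const ToyKey& k) const {
        return std::hash<int>()(k.x) ^ (std::hash<int>()(k.y) << 1) ^
               (std::hash<int>()(k.z) << 2);
    }
};
struct ToyCache {
    int x = 0, y = 0, z = 0, block_idx = -1;
    int Check(int xin, int yin, int zin) const {
        return (xin == x && yin == y && zin == z) ? block_idx : -1;
    }
    void Update(int xin, int yin, int zin, int idx) {
        x = xin; y = yin; z = zin; block_idx = idx;
    }
};

int LookupBlock(const std::unordered_map<ToyKey, int, ToyHash>& blocks,
                ToyCache& cache, int xb, int yb, int zb) {
    int block_addr = cache.Check(xb, yb, zb);  // cache hit: skip the map lookup
    if (block_addr < 0) {
        auto iter = blocks.find({xb, yb, zb});
        if (iter == blocks.end()) return -1;   // block not allocated
        block_addr = iter->second;
        cache.Update(xb, yb, zb, block_addr);  // remember for the next query
    }
    return block_addr;
}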
#if defined(__CUDACC__)
void RayCastCUDA
#else
void RayCastCPU
#endif
        (std::shared_ptr<core::DeviceHashBackend>& hashmap,
         const core::Tensor& block_values, const core::Tensor& range_map,
         core::Tensor& vertex_map, core::Tensor& depth_map,
         core::Tensor& color_map, core::Tensor& normal_map,
         const core::Tensor& intrinsics, const core::Tensor& extrinsics, int h,
         int w, int64_t block_resolution, float voxel_size, float sdf_trunc,
         float depth_scale, float depth_min, float depth_max,
         float weight_threshold) {
    // Obtain the backend-specific hash map implementation.
#if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__)
    // ... (dynamic cast of hashmap to the STDGPU backend -> cuda_hashmap)
    if (cuda_hashmap == nullptr) {
        utility::LogError(
                "Unsupported backend: CUDA raycasting only supports STDGPU.");
    }
    auto hashmap_impl = cuda_hashmap->GetImpl();
#else
    // ... (dynamic cast of hashmap to the TBB backend -> cpu_hashmap)
    auto hashmap_impl = *cpu_hashmap->GetImpl();
#endif

    // Output maps are optional; only fill what the caller allocated.
    bool enable_vertex = (vertex_map.GetLength() != 0);
    bool enable_depth = (depth_map.GetLength() != 0);
    bool enable_color = (color_map.GetLength() != 0);
    bool enable_normal = (normal_map.GetLength() != 0);
    if (!enable_vertex && !enable_depth && !enable_color && !enable_normal) {
        return;
    }

    NDArrayIndexer vertex_map_indexer, depth_map_indexer, color_map_indexer,
            normal_map_indexer;
    if (enable_vertex) {
        vertex_map_indexer = NDArrayIndexer(vertex_map, 2);
    }
    // ... (depth_map_indexer, color_map_indexer)
    if (enable_normal) {
        normal_map_indexer = NDArrayIndexer(normal_map, 2);
    }
    // ... (range_map_indexer, voxel_block_buffer_indexer,
    //      c2w_transform_indexer, w2c_transform_indexer)

    int64_t rows = h;
    int64_t cols = w;
    float block_size = voxel_size * block_resolution;

    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
        core::ParallelFor(hashmap->GetDevice(), rows * cols,
                          [=] OPEN3D_DEVICE(int64_t workload_idx) {
            // Look up a voxel by block coordinate plus a possibly
            // out-of-range voxel coordinate, resolving neighbor blocks via
            // the hash map.
            auto GetVoxelAtP = [&] OPEN3D_DEVICE(
                                       int x_b, int y_b, int z_b, int x_v,
                                       int y_v, int z_v, int block_addr,
                                       BlockCache& cache) -> voxel_t* {
                int x_vn = (x_v + block_resolution) % block_resolution;
                int y_vn = (y_v + block_resolution) % block_resolution;
                int z_vn = (z_v + block_resolution) % block_resolution;

                int dx_b = Sign(x_v - x_vn);
                int dy_b = Sign(y_v - y_vn);
                int dz_b = Sign(z_v - z_vn);

                if (dx_b == 0 && dy_b == 0 && dz_b == 0) {
                    return voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
                            x_v, y_v, z_v, block_addr);
                } else {
                    Key key(x_b + dx_b, y_b + dy_b, z_b + dz_b);

                    block_addr = cache.Check(key[0], key[1], key[2]);
                    if (block_addr < 0) {
                        auto iter = hashmap_impl.find(key);
                        if (iter == hashmap_impl.end()) return nullptr;
                        block_addr = iter->second;
                        cache.Update(key[0], key[1], key[2], block_addr);
                    }

                    return voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
                            x_vn, y_vn, z_vn, block_addr);
                }
            };

            // Look up the voxel hit by the ray at parameter t.
            auto GetVoxelAtT = [&] OPEN3D_DEVICE(
                                       float x_o, float y_o, float z_o,
                                       float x_d, float y_d, float z_d,
                                       float t, BlockCache& cache) -> voxel_t* {
                float x_g = x_o + t * x_d;
                float y_g = y_o + t * y_d;
                float z_g = z_o + t * z_d;

                // Block coordinate and look-up.
                int x_b = static_cast<int>(floorf(x_g / block_size));
                int y_b = static_cast<int>(floorf(y_g / block_size));
                int z_b = static_cast<int>(floorf(z_g / block_size));

                Key key(x_b, y_b, z_b);
                int block_addr = cache.Check(x_b, y_b, z_b);
                if (block_addr < 0) {
                    auto iter = hashmap_impl.find(key);
                    if (iter == hashmap_impl.end()) return nullptr;
                    block_addr = iter->second;
                    cache.Update(x_b, y_b, z_b, block_addr);
                }

                // Voxel coordinate within the block.
                int x_v = int((x_g - x_b * block_size) / voxel_size);
                int y_v = int((y_g - y_b * block_size) / voxel_size);
                int z_v = int((z_g - z_b * block_size) / voxel_size);
                return voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
                        x_v, y_v, z_v, block_addr);
            };

            int64_t y = workload_idx / cols;
            int64_t x = workload_idx % cols;

            float *depth_ptr = nullptr, *vertex_ptr = nullptr,
                  *normal_ptr = nullptr, *color_ptr = nullptr;
            if (enable_vertex) {
                vertex_ptr = vertex_map_indexer.GetDataPtr<float>(x, y);
            }
            // ... (depth_ptr, color_ptr, and zero-initialization of outputs)
            if (enable_normal) {
                normal_ptr = normal_map_indexer.GetDataPtr<float>(x, y);
            }

            // Ray march within the per-pixel [t_min, t_max] interval from the
            // (8x downsampled) range map produced by EstimateRange.
            const float* range =
                    range_map_indexer.GetDataPtr<float>(x / 8, y / 8);
            float t = range[0];
            const float t_max = range[1];
            if (t >= t_max) return;

            // Camera origin and ray direction in world coordinates.
            float x_c = 0, y_c = 0, z_c = 0;
            float x_g = 0, y_g = 0, z_g = 0;
            float x_o = 0, y_o = 0, z_o = 0;

            float tsdf_prev = -1.0f;
            float tsdf = 1.0f;
            float w = 0.0f;
            float t_prev = t;

            // ... (camera origin x_o, y_o, z_o from the extrinsics)
            c2w_transform_indexer.Unproject(static_cast<float>(x),
                                            static_cast<float>(y), 1.0f, &x_c,
                                            &y_c, &z_c);
            c2w_transform_indexer.RigidTransform(x_c, y_c, z_c, &x_g, &y_g,
                                                 &z_g);
            float x_d = (x_g - x_o);
            float y_d = (y_g - y_o);
            float z_d = (z_g - z_o);

            // March until the TSDF changes sign from positive to negative.
            BlockCache cache{0, 0, 0, -1};
            bool surface_found = false;
            while (t < t_max) {
                voxel_t* voxel_ptr = GetVoxelAtT(x_o, y_o, z_o, x_d, y_d, z_d,
                                                 t, cache);
                if (voxel_ptr == nullptr) {
                    // ... (skip ahead through unallocated space)
                } else {
                    tsdf = voxel_ptr->GetTSDF();
                    w = voxel_ptr->GetWeight();
                    if (tsdf_prev > 0 && w >= weight_threshold && tsdf <= 0) {
                        surface_found = true;
                        break;
                    }
                    t_prev = t;
                    tsdf_prev = tsdf;
                    float delta = tsdf * sdf_trunc;
                    t += delta < voxel_size ? voxel_size : delta;
                }
            }

            if (surface_found) {
                // Refine the hit by linear interpolation of the TSDF.
                float t_intersect = (t * tsdf_prev - t_prev * tsdf) /
                                    (tsdf_prev - tsdf);
                x_g = x_o + t_intersect * x_d;
                y_g = y_o + t_intersect * y_d;
                z_g = z_o + t_intersect * z_d;

                if (enable_depth) {
                    *depth_ptr = t_intersect * depth_scale;
                }
                if (enable_vertex) {
                    w2c_transform_indexer.RigidTransform(
                            x_g, y_g, z_g, vertex_ptr + 0, vertex_ptr + 1,
                            vertex_ptr + 2);
                }

                // Color and normal need trilinear interpolation around the
                // intersection point.
                if (enable_color || enable_normal) {
                    int x_b = static_cast<int>(floorf(x_g / block_size));
                    int y_b = static_cast<int>(floorf(y_g / block_size));
                    int z_b = static_cast<int>(floorf(z_g / block_size));
                    float x_v = (x_g - float(x_b) * block_size) / voxel_size;
                    float y_v = (y_g - float(y_b) * block_size) / voxel_size;
                    float z_v = (z_g - float(z_b) * block_size) / voxel_size;

                    Key key(x_b, y_b, z_b);

                    int block_addr = cache.Check(x_b, y_b, z_b);
                    if (block_addr < 0) {
                        auto iter = hashmap_impl.find(key);
                        if (iter == hashmap_impl.end()) return;
                        block_addr = iter->second;
                        cache.Update(x_b, y_b, z_b, block_addr);
                    }

                    int x_v_floor = static_cast<int>(floorf(x_v));
                    int y_v_floor = static_cast<int>(floorf(y_v));
                    int z_v_floor = static_cast<int>(floorf(z_v));

                    float ratio_x = x_v - float(x_v_floor);
                    float ratio_y = y_v - float(y_v_floor);
                    float ratio_z = z_v - float(z_v_floor);

                    float sum_weight_color = 0.0;
                    float sum_weight_normal = 0.0;
                    for (int k = 0; k < 8; ++k) {
                        int dx_v = (k & 1) > 0 ? 1 : 0;
                        int dy_v = (k & 2) > 0 ? 1 : 0;
                        int dz_v = (k & 4) > 0 ? 1 : 0;
                        float ratio = (dx_v * (ratio_x) +
                                       (1 - dx_v) * (1 - ratio_x)) *
                                      (dy_v * (ratio_y) +
                                       (1 - dy_v) * (1 - ratio_y)) *
                                      (dz_v * (ratio_z) +
                                       (1 - dz_v) * (1 - ratio_z));

                        voxel_t* voxel_ptr_k = GetVoxelAtP(
                                x_b, y_b, z_b, x_v_floor + dx_v,
                                y_v_floor + dy_v, z_v_floor + dz_v, block_addr,
                                cache);

                        if (enable_color && voxel_ptr_k &&
                            voxel_ptr_k->GetWeight() > 0) {
                            sum_weight_color += ratio;
                            color_ptr[0] += ratio * voxel_ptr_k->GetR();
                            color_ptr[1] += ratio * voxel_ptr_k->GetG();
                            color_ptr[2] += ratio * voxel_ptr_k->GetB();
                        }

                        if (enable_normal) {
                            // Central differences of the TSDF approximate the
                            // gradient, i.e. the surface normal.
                            for (int dim = 0; dim < 3; ++dim) {
                                voxel_t* voxel_ptr_k_plus = GetVoxelAtP(
                                        x_b, y_b, z_b,
                                        x_v_floor + dx_v + (dim == 0),
                                        y_v_floor + dy_v + (dim == 1),
                                        z_v_floor + dz_v + (dim == 2),
                                        block_addr, cache);
                                voxel_t* voxel_ptr_k_minus = GetVoxelAtP(
                                        x_b, y_b, z_b,
                                        x_v_floor + dx_v - (dim == 0),
                                        y_v_floor + dy_v - (dim == 1),
                                        z_v_floor + dz_v - (dim == 2),
                                        block_addr, cache);

                                bool valid = false;
                                if (voxel_ptr_k_plus &&
                                    voxel_ptr_k_plus->GetWeight() > 0) {
                                    normal_ptr[dim] +=
                                            ratio * voxel_ptr_k_plus->GetTSDF();
                                    valid = true;
                                }
                                if (voxel_ptr_k_minus &&
                                    voxel_ptr_k_minus->GetWeight() > 0) {
                                    normal_ptr[dim] -=
                                            ratio * voxel_ptr_k_minus->GetTSDF();
                                    valid = true;
                                }
                                sum_weight_normal += valid ? ratio : 0;
                            }
                        }
                    }

                    if (enable_color && sum_weight_color > 0) {
                        sum_weight_color *= 255.0;
                        color_ptr[0] /= sum_weight_color;
                        color_ptr[1] /= sum_weight_color;
                        color_ptr[2] /= sum_weight_color;
                    }
                    if (enable_normal && sum_weight_normal > 0) {
                        normal_ptr[0] /= sum_weight_normal;
                        normal_ptr[1] /= sum_weight_normal;
                        normal_ptr[2] /= sum_weight_normal;
                        float norm = sqrt(normal_ptr[0] * normal_ptr[0] +
                                          normal_ptr[1] * normal_ptr[1] +
                                          normal_ptr[2] * normal_ptr[2]);
                        // Rotate the normalized normal into the camera frame.
                        w2c_transform_indexer.Rotate(
                                normal_ptr[0] / norm, normal_ptr[1] / norm,
                                normal_ptr[2] / norm, normal_ptr + 0,
                                normal_ptr + 1, normal_ptr + 2);
                    }
                }
            }
        });
    });
#if defined(__CUDACC__)
    core::cuda::Synchronize();
#endif
}
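The ray marching above steps by at least one voxel, or by a distance proportional to the current TSDF, and refines the hit by linearly interpolating between the last positive and first non-positive samples. A self-contained 1-D sketch with a made-up scene (a wall at z = 1.0 m, ignoring weights):

#include <cstdio>

int main() {
    const float voxel_size = 0.01f, sdf_trunc = 0.05f, surface = 1.0f;
    auto tsdf_at = [&](float t) {
        float sdf = surface - t;
        sdf = sdf > sdf_trunc ? sdf_trunc : (sdf < -sdf_trunc ? -sdf_trunc : sdf);
        return sdf / sdf_trunc;  // normalized TSDF in [-1, 1]
    };

    float t = 0.5f, t_prev = t, tsdf_prev = tsdf_at(t);
    while (t < 2.0f) {
        float tsdf = tsdf_at(t);
        if (tsdf_prev > 0 && tsdf <= 0) {  // sign change: surface crossed
            float t_hit = (t * tsdf_prev - t_prev * tsdf) / (tsdf_prev - tsdf);
            std::printf("surface at t = %.4f m\n", t_hit);
            break;
        }
        t_prev = t;
        tsdf_prev = tsdf;
        float delta = tsdf * sdf_trunc;       // jump proportionally to the TSDF,
        t += delta < voxel_size ? voxel_size : delta;  // but at least one voxel
    }
    return 0;
}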