Open3D (C++ API)  0.16.0
VoxelBlockGridImpl.h
1 // ----------------------------------------------------------------------------
2 // - Open3D: www.open3d.org -
3 // ----------------------------------------------------------------------------
4 // The MIT License (MIT)
5 //
6 // Copyright (c) 2018-2021 www.open3d.org
7 //
8 // Permission is hereby granted, free of charge, to any person obtaining a copy
9 // of this software and associated documentation files (the "Software"), to deal
10 // in the Software without restriction, including without limitation the rights
11 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 // copies of the Software, and to permit persons to whom the Software is
13 // furnished to do so, subject to the following conditions:
14 //
15 // The above copyright notice and this permission notice shall be included in
16 // all copies or substantial portions of the Software.
17 //
18 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 // IN THE SOFTWARE.
25 // ----------------------------------------------------------------------------
26 
27 #include <atomic>
28 #include <cmath>
29 
30 #include "open3d/core/Dispatch.h"
31 #include "open3d/core/Dtype.h"
33 #include "open3d/core/SizeVector.h"
34 #include "open3d/core/Tensor.h"
40 #include "open3d/utility/Logging.h"
41 #include "open3d/utility/Timer.h"
42 
43 namespace open3d {
44 namespace t {
45 namespace geometry {
46 namespace kernel {
47 namespace voxel_grid {
48 
49 using index_t = int;
50 using ArrayIndexer = TArrayIndexer<index_t>;
51 
52 #if defined(__CUDACC__)
53 void GetVoxelCoordinatesAndFlattenedIndicesCUDA
54 #else
55 void GetVoxelCoordinatesAndFlattenedIndicesCPU
56 #endif
57  (const core::Tensor& buf_indices,
58  const core::Tensor& block_keys,
59  core::Tensor& voxel_coords,
60  core::Tensor& flattened_indices,
61  index_t resolution,
62  float voxel_size) {
63  core::Device device = buf_indices.GetDevice();
64 
65  const index_t* buf_indices_ptr = buf_indices.GetDataPtr<index_t>();
66  const index_t* block_key_ptr = block_keys.GetDataPtr<index_t>();
67 
68  float* voxel_coords_ptr = voxel_coords.GetDataPtr<float>();
69  int64_t* flattened_indices_ptr = flattened_indices.GetDataPtr<int64_t>();
70 
71  index_t n = flattened_indices.GetLength();
72  ArrayIndexer voxel_indexer({resolution, resolution, resolution});
73  index_t resolution3 = resolution * resolution * resolution;
74 
75  core::ParallelFor(device, n, [=] OPEN3D_DEVICE(index_t workload_idx) {
76  index_t block_idx = buf_indices_ptr[workload_idx / resolution3];
77  index_t voxel_idx = workload_idx % resolution3;
78 
79  index_t block_key_offset = block_idx * 3;
80  index_t xb = block_key_ptr[block_key_offset + 0];
81  index_t yb = block_key_ptr[block_key_offset + 1];
82  index_t zb = block_key_ptr[block_key_offset + 2];
83 
84  index_t xv, yv, zv;
85  voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
86 
87  float x = (xb * resolution + xv) * voxel_size;
88  float y = (yb * resolution + yv) * voxel_size;
89  float z = (zb * resolution + zv) * voxel_size;
90 
91  flattened_indices_ptr[workload_idx] =
92  block_idx * resolution3 + voxel_idx;
93 
94  index_t voxel_coords_offset = workload_idx * 3;
95  voxel_coords_ptr[voxel_coords_offset + 0] = x;
96  voxel_coords_ptr[voxel_coords_offset + 1] = y;
97  voxel_coords_ptr[voxel_coords_offset + 2] = z;
98  });
99 }
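
Each workload index in the kernel above splits into a block index and an intra-block voxel index, and the voxel is then placed in metric space using its block key. A self-contained sketch of that index arithmetic (illustration only, not part of the header; the block key, voxel index, and voxel size are made-up values, and the x-fastest layout matches the linear indexing used throughout this file):

#include <cstdio>

int main() {
    const int resolution = 8;
    const float voxel_size = 0.01f;  // 1 cm voxels
    const int block_key[3] = {1, 2, 3};

    // voxel_idx = xv + yv * resolution + zv * resolution^2 (x fastest)
    int voxel_idx = 100;
    int xv = voxel_idx % resolution;
    int yv = (voxel_idx / resolution) % resolution;
    int zv = voxel_idx / (resolution * resolution);

    // World coordinate of the voxel, in meters
    float x = (block_key[0] * resolution + xv) * voxel_size;
    float y = (block_key[1] * resolution + yv) * voxel_size;
    float z = (block_key[2] * resolution + zv) * voxel_size;
    std::printf("voxel (%d, %d, %d) -> (%.2f, %.2f, %.2f) m\n", xv, yv, zv, x, y, z);
    return 0;
}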
100 
101 inline OPEN3D_DEVICE index_t
102 DeviceGetLinearIdx(index_t xo,
103  index_t yo,
104  index_t zo,
105  index_t curr_block_idx,
106  index_t resolution,
107  const ArrayIndexer& nb_block_masks_indexer,
108  const ArrayIndexer& nb_block_indices_indexer) {
109  index_t xn = (xo + resolution) % resolution;
110  index_t yn = (yo + resolution) % resolution;
111  index_t zn = (zo + resolution) % resolution;
112 
113  index_t dxb = Sign(xo - xn);
114  index_t dyb = Sign(yo - yn);
115  index_t dzb = Sign(zo - zn);
116 
117  index_t nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9;
118 
119  bool block_mask_i =
120  *nb_block_masks_indexer.GetDataPtr<bool>(curr_block_idx, nb_idx);
121  if (!block_mask_i) return -1;
122 
123  index_t block_idx_i = *nb_block_indices_indexer.GetDataPtr<index_t>(
124  curr_block_idx, nb_idx);
125 
126  return (((block_idx_i * resolution) + zn) * resolution + yn) * resolution +
127  xn;
128 }
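
DeviceGetLinearIdx lets a voxel offset step just outside the current block: the coordinate is wrapped back into [0, resolution) and the sign of the shift picks one of the 27 cached neighbor blocks through (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9. A standalone sketch of that neighbor-index arithmetic (illustration only; Sign here is a local stand-in for the helper from GeometryMacros.h):

#include <cstdio>

inline int Sign(int x) { return (x > 0) - (x < 0); }

int main() {
    const int resolution = 8;
    int xo = -1;                              // one voxel to the left of the block
    int xn = (xo + resolution) % resolution;  // wraps to 7
    int dxb = Sign(xo - xn);                  // -1: the neighbor block in -x
    // Flatten the 3x3x3 neighborhood offset into [0, 27); 13 is the block itself
    int nb_idx = (dxb + 1) + (0 + 1) * 3 + (0 + 1) * 9;
    std::printf("xo=%d -> xn=%d, dxb=%d, nb_idx=%d\n", xo, xn, dxb, nb_idx);
    return 0;
}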
129 
130 template <typename tsdf_t>
131 inline OPEN3D_DEVICE void DeviceGetNormal(
132  const tsdf_t* tsdf_base_ptr,
133  index_t xo,
134  index_t yo,
135  index_t zo,
136  index_t curr_block_idx,
137  float* n,
138  index_t resolution,
139  const ArrayIndexer& nb_block_masks_indexer,
140  const ArrayIndexer& nb_block_indices_indexer) {
141  auto GetLinearIdx = [&] OPEN3D_DEVICE(index_t xo, index_t yo,
142  index_t zo) -> index_t {
143  return DeviceGetLinearIdx(xo, yo, zo, curr_block_idx, resolution,
144  nb_block_masks_indexer,
145  nb_block_indices_indexer);
146  };
147  index_t vxp = GetLinearIdx(xo + 1, yo, zo);
148  index_t vxn = GetLinearIdx(xo - 1, yo, zo);
149  index_t vyp = GetLinearIdx(xo, yo + 1, zo);
150  index_t vyn = GetLinearIdx(xo, yo - 1, zo);
151  index_t vzp = GetLinearIdx(xo, yo, zo + 1);
152  index_t vzn = GetLinearIdx(xo, yo, zo - 1);
153  if (vxp >= 0 && vxn >= 0) n[0] = tsdf_base_ptr[vxp] - tsdf_base_ptr[vxn];
154  if (vyp >= 0 && vyn >= 0) n[1] = tsdf_base_ptr[vyp] - tsdf_base_ptr[vyn];
155  if (vzp >= 0 && vzn >= 0) n[2] = tsdf_base_ptr[vzp] - tsdf_base_ptr[vzn];
156 };
157 
158 template <typename input_depth_t,
159  typename input_color_t,
160  typename tsdf_t,
161  typename weight_t,
162  typename color_t>
163 #if defined(__CUDACC__)
164 void IntegrateCUDA
165 #else
166 void IntegrateCPU
167 #endif
168  (const core::Tensor& depth,
169  const core::Tensor& color,
170  const core::Tensor& indices,
171  const core::Tensor& block_keys,
172  TensorMap& block_value_map,
173  const core::Tensor& depth_intrinsic,
174  const core::Tensor& color_intrinsic,
175  const core::Tensor& extrinsics,
176  index_t resolution,
177  float voxel_size,
178  float sdf_trunc,
179  float depth_scale,
180  float depth_max) {
181  // Parameters
182  index_t resolution2 = resolution * resolution;
183  index_t resolution3 = resolution2 * resolution;
184 
185  TransformIndexer transform_indexer(depth_intrinsic, extrinsics, voxel_size);
186  TransformIndexer colormap_indexer(
187  color_intrinsic,
188  core::Tensor::Eye(4, core::Float64, core::Device("CPU:0")));
189 
190  ArrayIndexer voxel_indexer({resolution, resolution, resolution});
191 
192  ArrayIndexer block_keys_indexer(block_keys, 1);
193  ArrayIndexer depth_indexer(depth, 2);
194  core::Device device = block_keys.GetDevice();
195 
196  const index_t* indices_ptr = indices.GetDataPtr<index_t>();
197 
198  if (!block_value_map.Contains("tsdf") ||
199  !block_value_map.Contains("weight")) {
201  "TSDF and/or weight not allocated in blocks, please implement "
202  "customized integration.");
203  }
204  tsdf_t* tsdf_base_ptr = block_value_map.at("tsdf").GetDataPtr<tsdf_t>();
205  weight_t* weight_base_ptr =
206  block_value_map.at("weight").GetDataPtr<weight_t>();
207 
208  bool integrate_color =
209  block_value_map.Contains("color") && color.NumElements() > 0;
210  color_t* color_base_ptr = nullptr;
211  ArrayIndexer color_indexer;
212 
213  float color_multiplier = 1.0;
214  if (integrate_color) {
215  color_base_ptr = block_value_map.at("color").GetDataPtr<color_t>();
216  color_indexer = ArrayIndexer(color, 2);
217 
218  // Float32: [0, 1] -> [0, 255]
219  if (color.GetDtype() == core::Float32) {
220  color_multiplier = 255.0;
221  }
222  }
223 
224  index_t n = indices.GetLength() * resolution3;
225  core::ParallelFor(device, n, [=] OPEN3D_DEVICE(index_t workload_idx) {
226  // Natural index (0, N) -> (block_idx, voxel_idx)
227  index_t block_idx = indices_ptr[workload_idx / resolution3];
228  index_t voxel_idx = workload_idx % resolution3;
229 
231  // block_idx -> (x_block, y_block, z_block)
232  index_t* block_key_ptr =
233  block_keys_indexer.GetDataPtr<index_t>(block_idx);
234  index_t xb = block_key_ptr[0];
235  index_t yb = block_key_ptr[1];
236  index_t zb = block_key_ptr[2];
237 
238  // voxel_idx -> (x_voxel, y_voxel, z_voxel)
239  index_t xv, yv, zv;
240  voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
241 
242  // coordinate in world (in voxel)
243  index_t x = xb * resolution + xv;
244  index_t y = yb * resolution + yv;
245  index_t z = zb * resolution + zv;
246 
247  // coordinate in camera (in voxel -> in meter)
248  float xc, yc, zc, u, v;
249  transform_indexer.RigidTransform(static_cast<float>(x),
250  static_cast<float>(y),
251  static_cast<float>(z), &xc, &yc, &zc);
252 
253  // coordinate in image (in pixel)
254  transform_indexer.Project(xc, yc, zc, &u, &v);
255  if (!depth_indexer.InBoundary(u, v)) {
256  return;
257  }
258 
259  index_t ui = static_cast<index_t>(u);
260  index_t vi = static_cast<index_t>(v);
261 
262  // Associate image workload and compute SDF and
263  // TSDF.
264  float depth =
265  *depth_indexer.GetDataPtr<input_depth_t>(ui, vi) / depth_scale;
266 
267  float sdf = depth - zc;
268  if (depth <= 0 || depth > depth_max || zc <= 0 || sdf < -sdf_trunc) {
269  return;
270  }
271  sdf = sdf < sdf_trunc ? sdf : sdf_trunc;
272  sdf /= sdf_trunc;
273 
274  index_t linear_idx = block_idx * resolution3 + voxel_idx;
275 
276  tsdf_t* tsdf_ptr = tsdf_base_ptr + linear_idx;
277  weight_t* weight_ptr = weight_base_ptr + linear_idx;
278 
279  float inv_wsum = 1.0f / (*weight_ptr + 1);
280  float weight = *weight_ptr;
281  *tsdf_ptr = (weight * (*tsdf_ptr) + sdf) * inv_wsum;
282 
283  if (integrate_color) {
284  color_t* color_ptr = color_base_ptr + 3 * linear_idx;
285 
286  // Unproject ui, vi with depth_intrinsic, then project back with
287  // color_intrinsic
288  float x, y, z;
289  transform_indexer.Unproject(ui, vi, 1.0, &x, &y, &z);
290 
291  float uf, vf;
292  colormap_indexer.Project(x, y, z, &uf, &vf);
293  if (color_indexer.InBoundary(uf, vf)) {
294  ui = round(uf);
295  vi = round(vf);
296 
297  input_color_t* input_color_ptr =
298  color_indexer.GetDataPtr<input_color_t>(ui, vi);
299 
300  for (index_t i = 0; i < 3; ++i) {
301  color_ptr[i] = (weight * color_ptr[i] +
302  input_color_ptr[i] * color_multiplier) *
303  inv_wsum;
304  }
305  }
306  }
307  *weight_ptr = weight + 1;
308  });
309 
310 #if defined(__CUDACC__)
311  core::cuda::Synchronize();
312 #endif
313 }
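
The integration kernel clamps the signed distance to the truncation band, normalizes it to [-1, 1], and folds it into the voxel with a running weighted average before incrementing the weight. A self-contained sketch of just that per-voxel update rule (illustration only; the observations and truncation distance are made-up values):

#include <cstdio>

// Running weighted average: tsdf <- (weight * tsdf + sdf) / (weight + 1)
void UpdateVoxel(float sdf, float sdf_trunc, float& tsdf, float& weight) {
    sdf = sdf < sdf_trunc ? sdf : sdf_trunc;  // clamp to +sdf_trunc
    sdf /= sdf_trunc;                         // normalize to [-1, 1]
    float inv_wsum = 1.0f / (weight + 1);
    tsdf = (weight * tsdf + sdf) * inv_wsum;
    weight += 1;
}

int main() {
    float tsdf = 0.0f, weight = 0.0f;
    const float observations[3] = {0.02f, 0.015f, 0.025f};  // SDF samples in meters
    for (float sdf : observations) {
        UpdateVoxel(sdf, /*sdf_trunc=*/0.04f, tsdf, weight);
    }
    std::printf("tsdf=%.3f, weight=%.0f\n", tsdf, weight);  // 0.500, 3
    return 0;
}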
314 
315 #if defined(__CUDACC__)
316 void EstimateRangeCUDA
317 #else
318 void EstimateRangeCPU
319 #endif
320  (const core::Tensor& block_keys,
321  core::Tensor& range_minmax_map,
322  const core::Tensor& intrinsics,
323  const core::Tensor& extrinsics,
324  int h,
325  int w,
326  int down_factor,
327  int64_t block_resolution,
328  float voxel_size,
329  float depth_min,
330  float depth_max,
331  core::Tensor& fragment_buffer) {
332 
333  // TODO(wei): reserve it in a reusable buffer
334 
335  // Every 2 channels: (min, max)
336  int h_down = h / down_factor;
337  int w_down = w / down_factor;
338  range_minmax_map = core::Tensor({h_down, w_down, 2}, core::Float32,
339  block_keys.GetDevice());
340  NDArrayIndexer range_map_indexer(range_minmax_map, 2);
341 
342  // Every 6 channels: (z_min, z_max, v_min, u_min, v_max, u_max)
343  const int fragment_size = 16;
344 
345  if (fragment_buffer.GetDataPtr() == 0 ||
346  fragment_buffer.NumElements() == 0) {
347  // Rough heuristic; should tend to overallocate
348  const int reserve_frag_buffer_size =
349  h_down * w_down / (fragment_size * fragment_size) / voxel_size;
350  fragment_buffer = core::Tensor({reserve_frag_buffer_size, 6},
351  core::Float32, block_keys.GetDevice());
352  }
353 
354  const int frag_buffer_size = fragment_buffer.NumElements() / 6;
355 
356  NDArrayIndexer frag_buffer_indexer(fragment_buffer, 1);
357  NDArrayIndexer block_keys_indexer(block_keys, 1);
358  TransformIndexer w2c_transform_indexer(intrinsics, extrinsics);
359 #if defined(__CUDACC__)
360  core::Tensor count(std::vector<int>{0}, {1}, core::Int32,
361  block_keys.GetDevice());
362  int* count_ptr = count.GetDataPtr<int>();
363 #else
364  std::atomic<int> count_atomic(0);
365  std::atomic<int>* count_ptr = &count_atomic;
366 #endif
367 
368 #ifndef __CUDACC__
369  using std::max;
370  using std::min;
371 #endif
372 
373  // Pass 0: iterate over blocks, fill in a rendering fragment array
374  core::ParallelFor(
375  block_keys.GetDevice(), block_keys.GetLength(),
376  [=] OPEN3D_DEVICE(int64_t workload_idx) {
377  int* key = block_keys_indexer.GetDataPtr<int>(workload_idx);
378 
379  int u_min = w_down - 1, v_min = h_down - 1, u_max = 0,
380  v_max = 0;
381  float z_min = depth_max, z_max = depth_min;
382 
383  float xc, yc, zc, u, v;
384 
385  // Project 8 corners to low-res image and form a rectangle
386  for (int i = 0; i < 8; ++i) {
387  float xw = (key[0] + ((i & 1) > 0)) * block_resolution *
388  voxel_size;
389  float yw = (key[1] + ((i & 2) > 0)) * block_resolution *
390  voxel_size;
391  float zw = (key[2] + ((i & 4) > 0)) * block_resolution *
392  voxel_size;
393 
394  w2c_transform_indexer.RigidTransform(xw, yw, zw, &xc, &yc,
395  &zc);
396  if (zc <= 0) continue;
397 
398  // Project to the down sampled image buffer
399  w2c_transform_indexer.Project(xc, yc, zc, &u, &v);
400  u /= down_factor;
401  v /= down_factor;
402 
403  v_min = min(static_cast<int>(floorf(v)), v_min);
404  v_max = max(static_cast<int>(ceilf(v)), v_max);
405 
406  u_min = min(static_cast<int>(floorf(u)), u_min);
407  u_max = max(static_cast<int>(ceilf(u)), u_max);
408 
409  z_min = min(z_min, zc);
410  z_max = max(z_max, zc);
411  }
412 
413  v_min = max(0, v_min);
414  v_max = min(h_down - 1, v_max);
415 
416  u_min = max(0, u_min);
417  u_max = min(w_down - 1, u_max);
418 
419  if (v_min >= v_max || u_min >= u_max || z_min >= z_max) return;
420 
421  // Divide the rectangle into small 16x16 fragments
422  int frag_v_count =
423  ceilf(float(v_max - v_min + 1) / float(fragment_size));
424  int frag_u_count =
425  ceilf(float(u_max - u_min + 1) / float(fragment_size));
426 
427  int frag_count = frag_v_count * frag_u_count;
428  int frag_count_start = OPEN3D_ATOMIC_ADD(count_ptr, frag_count);
429  int frag_count_end = frag_count_start + frag_count;
430  if (frag_count_end >= frag_buffer_size) {
431  return;
432  }
433 
434  int offset = 0;
435  for (int frag_v = 0; frag_v < frag_v_count; ++frag_v) {
436  for (int frag_u = 0; frag_u < frag_u_count;
437  ++frag_u, ++offset) {
438  float* frag_ptr = frag_buffer_indexer.GetDataPtr<float>(
439  frag_count_start + offset);
440  // zmin, zmax
441  frag_ptr[0] = z_min;
442  frag_ptr[1] = z_max;
443 
444  // vmin, umin
445  frag_ptr[2] = v_min + frag_v * fragment_size;
446  frag_ptr[3] = u_min + frag_u * fragment_size;
447 
448  // vmax, umax
449  frag_ptr[4] = min(frag_ptr[2] + fragment_size - 1,
450  static_cast<float>(v_max));
451  frag_ptr[5] = min(frag_ptr[3] + fragment_size - 1,
452  static_cast<float>(u_max));
453  }
454  }
455  });
456 #if defined(__CUDACC__)
457  int needed_frag_count = count[0].Item<int>();
458 #else
459  int needed_frag_count = (*count_ptr).load();
460 #endif
461 
462  int frag_count = needed_frag_count;
463  if (frag_count >= frag_buffer_size) {
465  "Could not generate full range map; allocated {} fragments but "
466  "needed {}",
467  frag_buffer_size, frag_count);
468  frag_count = frag_buffer_size - 1;
469  } else {
470  utility::LogDebug("EstimateRange Allocated {} fragments and needed {}",
471  frag_buffer_size, frag_count);
472  }
473 
474  // Pass 0.5: Fill in range map to prepare for atomic min/max
475  core::ParallelFor(block_keys.GetDevice(), h_down * w_down,
476  [=] OPEN3D_DEVICE(int64_t workload_idx) {
477  int v = workload_idx / w_down;
478  int u = workload_idx % w_down;
479  float* range_ptr =
480  range_map_indexer.GetDataPtr<float>(u, v);
481  range_ptr[0] = depth_max;
482  range_ptr[1] = depth_min;
483  });
484 
485  // Pass 1: iterate over the rendering fragment array, fill in the range map
486  core::ParallelFor(
487  block_keys.GetDevice(), frag_count * fragment_size * fragment_size,
488  [=] OPEN3D_DEVICE(int64_t workload_idx) {
489  int frag_idx = workload_idx / (fragment_size * fragment_size);
490  int local_idx = workload_idx % (fragment_size * fragment_size);
491  int dv = local_idx / fragment_size;
492  int du = local_idx % fragment_size;
493 
494  float* frag_ptr =
495  frag_buffer_indexer.GetDataPtr<float>(frag_idx);
496  int v_min = static_cast<int>(frag_ptr[2]);
497  int u_min = static_cast<int>(frag_ptr[3]);
498  int v_max = static_cast<int>(frag_ptr[4]);
499  int u_max = static_cast<int>(frag_ptr[5]);
500 
501  int v = v_min + dv;
502  int u = u_min + du;
503  if (v > v_max || u > u_max) return;
504 
505  float z_min = frag_ptr[0];
506  float z_max = frag_ptr[1];
507  float* range_ptr = range_map_indexer.GetDataPtr<float>(u, v);
508 #ifdef __CUDACC__
509  atomicMinf(&(range_ptr[0]), z_min);
510  atomicMaxf(&(range_ptr[1]), z_max);
511 #else
512 #pragma omp critical(EstimateRangeCPU)
513  {
514  range_ptr[0] = min(z_min, range_ptr[0]);
515  range_ptr[1] = max(z_max, range_ptr[1]);
516  }
517 #endif
518  });
519 
520 #if defined(__CUDACC__)
521  core::cuda::Synchronize();
522 #endif
523 
524  if (needed_frag_count != frag_count) {
525  utility::LogInfo("Reallocating {} fragments for EstimateRange (was {})",
526  needed_frag_count, frag_count);
527 
528  fragment_buffer = core::Tensor({needed_frag_count, 6}, core::Float32,
529  block_keys.GetDevice());
530  }
531 }
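
EstimateRange projects the eight corners of each block into the downsampled image, clamps the resulting rectangle, and tiles it into 16x16 fragments that Pass 1 later rasterizes into the range map with atomic min/max. A standalone sketch of the tiling arithmetic from Pass 0 (illustration only; the rectangle bounds are made-up values):

#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
    const int fragment_size = 16;
    // Projected, clamped bounding rectangle of one block (made-up values)
    int u_min = 37, u_max = 95, v_min = 10, v_max = 40;

    int frag_v_count = static_cast<int>(std::ceil(float(v_max - v_min + 1) / fragment_size));
    int frag_u_count = static_cast<int>(std::ceil(float(u_max - u_min + 1) / fragment_size));
    std::printf("%d x %d = %d fragments\n", frag_v_count, frag_u_count,
                frag_v_count * frag_u_count);

    // Each fragment records (z_min, z_max, v_min, u_min, v_max, u_max); the last
    // row/column is clamped to the rectangle bounds.
    for (int fv = 0; fv < frag_v_count; ++fv) {
        for (int fu = 0; fu < frag_u_count; ++fu) {
            int fv_min = v_min + fv * fragment_size;
            int fu_min = u_min + fu * fragment_size;
            int fv_max = std::min(fv_min + fragment_size - 1, v_max);
            int fu_max = std::min(fu_min + fragment_size - 1, u_max);
            std::printf("fragment v:[%d, %d] u:[%d, %d]\n", fv_min, fv_max, fu_min, fu_max);
        }
    }
    return 0;
}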
532 
533 struct MiniVecCache {
534  index_t x;
535  index_t y;
536  index_t z;
537  index_t block_idx;
538 
539  inline index_t OPEN3D_DEVICE Check(index_t xin, index_t yin, index_t zin) {
540  return (xin == x && yin == y && zin == z) ? block_idx : -1;
541  }
542 
543  inline void OPEN3D_DEVICE Update(index_t xin,
544  index_t yin,
545  index_t zin,
546  index_t block_idx_in) {
547  x = xin;
548  y = yin;
549  z = zin;
550  block_idx = block_idx_in;
551  }
552 };
553 
554 template <typename tsdf_t, typename weight_t, typename color_t>
555 #if defined(__CUDACC__)
556 void RayCastCUDA
557 #else
558 void RayCastCPU
559 #endif
560  (std::shared_ptr<core::HashMap>& hashmap,
561  const TensorMap& block_value_map,
562  const core::Tensor& range,
563  TensorMap& renderings_map,
564  const core::Tensor& intrinsic,
565  const core::Tensor& extrinsics,
566  index_t h,
567  index_t w,
568  index_t block_resolution,
569  float voxel_size,
570  float depth_scale,
571  float depth_min,
572  float depth_max,
573  float weight_threshold,
574  float trunc_voxel_multiplier,
575  int range_map_down_factor) {
576  using Key = utility::MiniVec<index_t, 3>;
579 
580  auto device_hashmap = hashmap->GetDeviceHashBackend();
581 #if defined(__CUDACC__)
582  auto cuda_hashmap =
583  std::dynamic_pointer_cast<core::StdGPUHashBackend<Key, Hash, Eq>>(
584  device_hashmap);
585  if (cuda_hashmap == nullptr) {
587  "Unsupported backend: CUDA raycasting only supports STDGPU.");
588  }
589  auto hashmap_impl = cuda_hashmap->GetImpl();
590 #else
591  auto cpu_hashmap =
592  std::dynamic_pointer_cast<core::TBBHashBackend<Key, Hash, Eq>>(
593  device_hashmap);
594  if (cpu_hashmap == nullptr) {
596  "Unsupported backend: CPU raycasting only supports TBB.");
597  }
598  auto hashmap_impl = *cpu_hashmap->GetImpl();
599 #endif
600 
601  core::Device device = hashmap->GetDevice();
602 
603  ArrayIndexer range_indexer(range, 2);
604 
605  // Geometry
606  ArrayIndexer depth_indexer;
607  ArrayIndexer vertex_indexer;
608  ArrayIndexer normal_indexer;
609 
610  // Diff rendering
611  ArrayIndexer index_indexer;
612  ArrayIndexer mask_indexer;
613  ArrayIndexer interp_ratio_indexer;
614  ArrayIndexer interp_ratio_dx_indexer;
615  ArrayIndexer interp_ratio_dy_indexer;
616  ArrayIndexer interp_ratio_dz_indexer;
617 
618  // Color
619  ArrayIndexer color_indexer;
620 
621  if (!block_value_map.Contains("tsdf") ||
622  !block_value_map.Contains("weight")) {
624  "TSDF and/or weight not allocated in blocks, please implement "
625  "customized integration.");
626  }
627  const tsdf_t* tsdf_base_ptr =
628  block_value_map.at("tsdf").GetDataPtr<tsdf_t>();
629  const weight_t* weight_base_ptr =
630  block_value_map.at("weight").GetDataPtr<weight_t>();
631 
632  // Geometry
633  if (renderings_map.Contains("depth")) {
634  depth_indexer = ArrayIndexer(renderings_map.at("depth"), 2);
635  }
636  if (renderings_map.Contains("vertex")) {
637  vertex_indexer = ArrayIndexer(renderings_map.at("vertex"), 2);
638  }
639  if (renderings_map.Contains("normal")) {
640  normal_indexer = ArrayIndexer(renderings_map.at("normal"), 2);
641  }
642 
643  // Diff rendering
644  if (renderings_map.Contains("index")) {
645  index_indexer = ArrayIndexer(renderings_map.at("index"), 2);
646  }
647  if (renderings_map.Contains("mask")) {
648  mask_indexer = ArrayIndexer(renderings_map.at("mask"), 2);
649  }
650  if (renderings_map.Contains("interp_ratio")) {
651  interp_ratio_indexer =
652  ArrayIndexer(renderings_map.at("interp_ratio"), 2);
653  }
654  if (renderings_map.Contains("interp_ratio_dx")) {
655  interp_ratio_dx_indexer =
656  ArrayIndexer(renderings_map.at("interp_ratio_dx"), 2);
657  }
658  if (renderings_map.Contains("interp_ratio_dy")) {
659  interp_ratio_dy_indexer =
660  ArrayIndexer(renderings_map.at("interp_ratio_dy"), 2);
661  }
662  if (renderings_map.Contains("interp_ratio_dz")) {
663  interp_ratio_dz_indexer =
664  ArrayIndexer(renderings_map.at("interp_ratio_dz"), 2);
665  }
666 
667  // Color
668  bool render_color = false;
669  if (block_value_map.Contains("color") && renderings_map.Contains("color")) {
670  render_color = true;
671  color_indexer = ArrayIndexer(renderings_map.at("color"), 2);
672  }
673  const color_t* color_base_ptr =
674  render_color ? block_value_map.at("color").GetDataPtr<color_t>()
675  : nullptr;
676 
677  bool visit_neighbors = render_color || normal_indexer.GetDataPtr() ||
678  mask_indexer.GetDataPtr() ||
679  index_indexer.GetDataPtr() ||
680  interp_ratio_indexer.GetDataPtr() ||
681  interp_ratio_dx_indexer.GetDataPtr() ||
682  interp_ratio_dy_indexer.GetDataPtr() ||
683  interp_ratio_dz_indexer.GetDataPtr();
684 
685  TransformIndexer c2w_transform_indexer(
686  intrinsic, t::geometry::InverseTransformation(extrinsics));
687  TransformIndexer w2c_transform_indexer(intrinsic, extrinsics);
688 
689  index_t rows = h;
690  index_t cols = w;
691  index_t n = rows * cols;
692 
693  float block_size = voxel_size * block_resolution;
694  index_t resolution2 = block_resolution * block_resolution;
695  index_t resolution3 = resolution2 * block_resolution;
696 
697 #ifndef __CUDACC__
698  using std::max;
699  using std::sqrt;
700 #endif
701 
702  core::ParallelFor(device, n, [=] OPEN3D_DEVICE(index_t workload_idx) {
703  auto GetLinearIdxAtP = [&] OPEN3D_DEVICE(
704  index_t x_b, index_t y_b, index_t z_b,
705  index_t x_v, index_t y_v, index_t z_v,
706  core::buf_index_t block_buf_idx,
707  MiniVecCache & cache) -> index_t {
708  index_t x_vn = (x_v + block_resolution) % block_resolution;
709  index_t y_vn = (y_v + block_resolution) % block_resolution;
710  index_t z_vn = (z_v + block_resolution) % block_resolution;
711 
712  index_t dx_b = Sign(x_v - x_vn);
713  index_t dy_b = Sign(y_v - y_vn);
714  index_t dz_b = Sign(z_v - z_vn);
715 
716  if (dx_b == 0 && dy_b == 0 && dz_b == 0) {
717  return block_buf_idx * resolution3 + z_v * resolution2 +
718  y_v * block_resolution + x_v;
719  } else {
720  Key key(x_b + dx_b, y_b + dy_b, z_b + dz_b);
721 
722  index_t block_buf_idx = cache.Check(key[0], key[1], key[2]);
723  if (block_buf_idx < 0) {
724  auto iter = hashmap_impl.find(key);
725  if (iter == hashmap_impl.end()) return -1;
726  block_buf_idx = iter->second;
727  cache.Update(key[0], key[1], key[2], block_buf_idx);
728  }
729 
730  return block_buf_idx * resolution3 + z_vn * resolution2 +
731  y_vn * block_resolution + x_vn;
732  }
733  };
734 
735  auto GetLinearIdxAtT = [&] OPEN3D_DEVICE(
736  float x_o, float y_o, float z_o,
737  float x_d, float y_d, float z_d, float t,
738  MiniVecCache& cache) -> index_t {
739  float x_g = x_o + t * x_d;
740  float y_g = y_o + t * y_d;
741  float z_g = z_o + t * z_d;
742 
743  // MiniVec coordinate and look up
744  index_t x_b = static_cast<index_t>(floorf(x_g / block_size));
745  index_t y_b = static_cast<index_t>(floorf(y_g / block_size));
746  index_t z_b = static_cast<index_t>(floorf(z_g / block_size));
747 
748  Key key(x_b, y_b, z_b);
749  index_t block_buf_idx = cache.Check(x_b, y_b, z_b);
750  if (block_buf_idx < 0) {
751  auto iter = hashmap_impl.find(key);
752  if (iter == hashmap_impl.end()) return -1;
753  block_buf_idx = iter->second;
754  cache.Update(x_b, y_b, z_b, block_buf_idx);
755  }
756 
757  // Voxel coordinate and look up
758  index_t x_v = index_t((x_g - x_b * block_size) / voxel_size);
759  index_t y_v = index_t((y_g - y_b * block_size) / voxel_size);
760  index_t z_v = index_t((z_g - z_b * block_size) / voxel_size);
761 
762  return block_buf_idx * resolution3 + z_v * resolution2 +
763  y_v * block_resolution + x_v;
764  };
765 
766  index_t y = workload_idx / cols;
767  index_t x = workload_idx % cols;
768 
769  const float* range = range_indexer.GetDataPtr<float>(
770  x / range_map_down_factor, y / range_map_down_factor);
771 
772  float* depth_ptr = nullptr;
773  float* vertex_ptr = nullptr;
774  float* color_ptr = nullptr;
775  float* normal_ptr = nullptr;
776 
777  int64_t* index_ptr = nullptr;
778  bool* mask_ptr = nullptr;
779  float* interp_ratio_ptr = nullptr;
780  float* interp_ratio_dx_ptr = nullptr;
781  float* interp_ratio_dy_ptr = nullptr;
782  float* interp_ratio_dz_ptr = nullptr;
783 
784  if (vertex_indexer.GetDataPtr()) {
785  vertex_ptr = vertex_indexer.GetDataPtr<float>(x, y);
786  vertex_ptr[0] = 0;
787  vertex_ptr[1] = 0;
788  vertex_ptr[2] = 0;
789  }
790  if (depth_indexer.GetDataPtr()) {
791  depth_ptr = depth_indexer.GetDataPtr<float>(x, y);
792  depth_ptr[0] = 0;
793  }
794  if (normal_indexer.GetDataPtr()) {
795  normal_ptr = normal_indexer.GetDataPtr<float>(x, y);
796  normal_ptr[0] = 0;
797  normal_ptr[1] = 0;
798  normal_ptr[2] = 0;
799  }
800 
801  if (mask_indexer.GetDataPtr()) {
802  mask_ptr = mask_indexer.GetDataPtr<bool>(x, y);
803 #ifdef __CUDACC__
804 #pragma unroll
805 #endif
806  for (int i = 0; i < 8; ++i) {
807  mask_ptr[i] = false;
808  }
809  }
810  if (index_indexer.GetDataPtr()) {
811  index_ptr = index_indexer.GetDataPtr<int64_t>(x, y);
812 #ifdef __CUDACC__
813 #pragma unroll
814 #endif
815  for (int i = 0; i < 8; ++i) {
816  index_ptr[i] = 0;
817  }
818  }
819  if (interp_ratio_indexer.GetDataPtr()) {
820  interp_ratio_ptr = interp_ratio_indexer.GetDataPtr<float>(x, y);
821 #ifdef __CUDACC__
822 #pragma unroll
823 #endif
824  for (int i = 0; i < 8; ++i) {
825  interp_ratio_ptr[i] = 0;
826  }
827  }
828  if (interp_ratio_dx_indexer.GetDataPtr()) {
829  interp_ratio_dx_ptr =
830  interp_ratio_dx_indexer.GetDataPtr<float>(x, y);
831 #ifdef __CUDACC__
832 #pragma unroll
833 #endif
834  for (int i = 0; i < 8; ++i) {
835  interp_ratio_dx_ptr[i] = 0;
836  }
837  }
838  if (interp_ratio_dy_indexer.GetDataPtr()) {
839  interp_ratio_dy_ptr =
840  interp_ratio_dy_indexer.GetDataPtr<float>(x, y);
841 #ifdef __CUDACC__
842 #pragma unroll
843 #endif
844  for (int i = 0; i < 8; ++i) {
845  interp_ratio_dy_ptr[i] = 0;
846  }
847  }
848  if (interp_ratio_dz_indexer.GetDataPtr()) {
849  interp_ratio_dz_ptr =
850  interp_ratio_dz_indexer.GetDataPtr<float>(x, y);
851 #ifdef __CUDACC__
852 #pragma unroll
853 #endif
854  for (int i = 0; i < 8; ++i) {
855  interp_ratio_dz_ptr[i] = 0;
856  }
857  }
858 
859  if (color_indexer.GetDataPtr()) {
860  color_ptr = color_indexer.GetDataPtr<float>(x, y);
861  color_ptr[0] = 0;
862  color_ptr[1] = 0;
863  color_ptr[2] = 0;
864  }
865 
866  float t = range[0];
867  const float t_max = range[1];
868  if (t >= t_max) return;
869 
870  // Coordinates in camera and global
871  float x_c = 0, y_c = 0, z_c = 0;
872  float x_g = 0, y_g = 0, z_g = 0;
873  float x_o = 0, y_o = 0, z_o = 0;
874 
875  // Iterative ray intersection check
876  float t_prev = t;
877 
878  float tsdf_prev = -1.0f;
879  float tsdf = 1.0;
880  float sdf_trunc = voxel_size * trunc_voxel_multiplier;
881  float w = 0.0;
882 
883  // Camera origin
884  c2w_transform_indexer.RigidTransform(0, 0, 0, &x_o, &y_o, &z_o);
885 
886  // Direction
887  c2w_transform_indexer.Unproject(static_cast<float>(x),
888  static_cast<float>(y), 1.0f, &x_c, &y_c,
889  &z_c);
890  c2w_transform_indexer.RigidTransform(x_c, y_c, z_c, &x_g, &y_g, &z_g);
891  float x_d = (x_g - x_o);
892  float y_d = (y_g - y_o);
893  float z_d = (z_g - z_o);
894 
895  MiniVecCache cache{0, 0, 0, -1};
896  bool surface_found = false;
897  while (t < t_max) {
898  index_t linear_idx =
899  GetLinearIdxAtT(x_o, y_o, z_o, x_d, y_d, z_d, t, cache);
900 
901  if (linear_idx < 0) {
902  t_prev = t;
903  t += block_size;
904  } else {
905  tsdf_prev = tsdf;
906  tsdf = tsdf_base_ptr[linear_idx];
907  w = weight_base_ptr[linear_idx];
908  if (tsdf_prev > 0 && w >= weight_threshold && tsdf <= 0) {
909  surface_found = true;
910  break;
911  }
912  t_prev = t;
913  float delta = tsdf * sdf_trunc;
914  t += delta < voxel_size ? voxel_size : delta;
915  }
916  }
917 
918  if (surface_found) {
919  float t_intersect =
920  (t * tsdf_prev - t_prev * tsdf) / (tsdf_prev - tsdf);
921  x_g = x_o + t_intersect * x_d;
922  y_g = y_o + t_intersect * y_d;
923  z_g = z_o + t_intersect * z_d;
924 
925  // Trivial vertex assignment
926  if (depth_ptr) {
927  *depth_ptr = t_intersect * depth_scale;
928  }
929  if (vertex_ptr) {
930  w2c_transform_indexer.RigidTransform(
931  x_g, y_g, z_g, vertex_ptr + 0, vertex_ptr + 1,
932  vertex_ptr + 2);
933  }
934  if (!visit_neighbors) return;
935 
936  // Trilinear interpolation
937  // TODO(wei): simplify the flow by splitting the
938  // functions given what is enabled
939  index_t x_b = static_cast<index_t>(floorf(x_g / block_size));
940  index_t y_b = static_cast<index_t>(floorf(y_g / block_size));
941  index_t z_b = static_cast<index_t>(floorf(z_g / block_size));
942  float x_v = (x_g - float(x_b) * block_size) / voxel_size;
943  float y_v = (y_g - float(y_b) * block_size) / voxel_size;
944  float z_v = (z_g - float(z_b) * block_size) / voxel_size;
945 
946  Key key(x_b, y_b, z_b);
947 
948  index_t block_buf_idx = cache.Check(x_b, y_b, z_b);
949  if (block_buf_idx < 0) {
950  auto iter = hashmap_impl.find(key);
951  if (iter == hashmap_impl.end()) return;
952  block_buf_idx = iter->second;
953  cache.Update(x_b, y_b, z_b, block_buf_idx);
954  }
955 
956  index_t x_v_floor = static_cast<index_t>(floorf(x_v));
957  index_t y_v_floor = static_cast<index_t>(floorf(y_v));
958  index_t z_v_floor = static_cast<index_t>(floorf(z_v));
959 
960  float ratio_x = x_v - float(x_v_floor);
961  float ratio_y = y_v - float(y_v_floor);
962  float ratio_z = z_v - float(z_v_floor);
963 
964  float sum_r = 0.0;
965  for (index_t k = 0; k < 8; ++k) {
966  index_t dx_v = (k & 1) > 0 ? 1 : 0;
967  index_t dy_v = (k & 2) > 0 ? 1 : 0;
968  index_t dz_v = (k & 4) > 0 ? 1 : 0;
969 
970  index_t linear_idx_k = GetLinearIdxAtP(
971  x_b, y_b, z_b, x_v_floor + dx_v, y_v_floor + dy_v,
972  z_v_floor + dz_v, block_buf_idx, cache);
973 
974  if (linear_idx_k >= 0 && weight_base_ptr[linear_idx_k] > 0) {
975  float rx = dx_v * (ratio_x) + (1 - dx_v) * (1 - ratio_x);
976  float ry = dy_v * (ratio_y) + (1 - dy_v) * (1 - ratio_y);
977  float rz = dz_v * (ratio_z) + (1 - dz_v) * (1 - ratio_z);
978  float r = rx * ry * rz;
979 
980  if (interp_ratio_ptr) {
981  interp_ratio_ptr[k] = r;
982  }
983  if (mask_ptr) {
984  mask_ptr[k] = true;
985  }
986  if (index_ptr) {
987  index_ptr[k] = linear_idx_k;
988  }
989 
990  float tsdf_k = tsdf_base_ptr[linear_idx_k];
991  float interp_ratio_dx = ry * rz * (2 * dx_v - 1);
992  float interp_ratio_dy = rx * rz * (2 * dy_v - 1);
993  float interp_ratio_dz = rx * ry * (2 * dz_v - 1);
994 
995  if (interp_ratio_dx_ptr) {
996  interp_ratio_dx_ptr[k] = interp_ratio_dx;
997  }
998  if (interp_ratio_dy_ptr) {
999  interp_ratio_dy_ptr[k] = interp_ratio_dy;
1000  }
1001  if (interp_ratio_dz_ptr) {
1002  interp_ratio_dz_ptr[k] = interp_ratio_dz;
1003  }
1004 
1005  if (normal_ptr) {
1006  normal_ptr[0] += interp_ratio_dx * tsdf_k;
1007  normal_ptr[1] += interp_ratio_dy * tsdf_k;
1008  normal_ptr[2] += interp_ratio_dz * tsdf_k;
1009  }
1010 
1011  if (color_ptr) {
1012  index_t color_linear_idx = linear_idx_k * 3;
1013  color_ptr[0] +=
1014  r * color_base_ptr[color_linear_idx + 0];
1015  color_ptr[1] +=
1016  r * color_base_ptr[color_linear_idx + 1];
1017  color_ptr[2] +=
1018  r * color_base_ptr[color_linear_idx + 2];
1019  }
1020 
1021  sum_r += r;
1022  }
1023  } // loop over 8 neighbors
1024 
1025  if (sum_r > 0) {
1026  sum_r *= 255.0;
1027  if (color_ptr) {
1028  color_ptr[0] /= sum_r;
1029  color_ptr[1] /= sum_r;
1030  color_ptr[2] /= sum_r;
1031  }
1032 
1033  if (normal_ptr) {
1034  constexpr float EPSILON = 1e-5f;
1035  float norm = sqrt(normal_ptr[0] * normal_ptr[0] +
1036  normal_ptr[1] * normal_ptr[1] +
1037  normal_ptr[2] * normal_ptr[2]);
1038  norm = std::max(norm, EPSILON);
1039  w2c_transform_indexer.Rotate(
1040  -normal_ptr[0] / norm, -normal_ptr[1] / norm,
1041  -normal_ptr[2] / norm, normal_ptr + 0,
1042  normal_ptr + 1, normal_ptr + 2);
1043  }
1044  }
1045  } // surface-found
1046  });
1047 
1048 #if defined(__CUDACC__)
1049  core::cuda::Synchronize();
1050 #endif
1051 }
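
When the marching loop exits with a sign change from tsdf_prev > 0 to tsdf <= 0 at sufficient weight, the surface depth along the ray is recovered by linearly interpolating the zero crossing between the two samples. A minimal sketch of that interpolation (illustration only; the sample values are made up):

#include <cstdio>

int main() {
    float t_prev = 1.20f, tsdf_prev = 0.25f;  // sample in front of the surface
    float t = 1.26f, tsdf = -0.15f;           // sample just behind the surface
    // Solve tsdf(t*) = 0 on the line through the two samples
    float t_intersect = (t * tsdf_prev - t_prev * tsdf) / (tsdf_prev - tsdf);
    std::printf("surface at t = %.4f (between %.2f and %.2f)\n", t_intersect, t_prev, t);
    return 0;
}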
1052 
1053 template <typename tsdf_t, typename weight_t, typename color_t>
1054 #if defined(__CUDACC__)
1055 void ExtractPointCloudCUDA
1056 #else
1057 void ExtractPointCloudCPU
1058 #endif
1059  (const core::Tensor& indices,
1060  const core::Tensor& nb_indices,
1061  const core::Tensor& nb_masks,
1062  const core::Tensor& block_keys,
1063  const TensorMap& block_value_map,
1064  core::Tensor& points,
1065  core::Tensor& normals,
1066  core::Tensor& colors,
1067  index_t resolution,
1068  float voxel_size,
1069  float weight_threshold,
1070  int& valid_size) {
1071  core::Device device = block_keys.GetDevice();
1072 
1073  // Parameters
1074  index_t resolution2 = resolution * resolution;
1075  index_t resolution3 = resolution2 * resolution;
1076 
1077  // Shape / transform indexers, no data involved
1078  ArrayIndexer voxel_indexer({resolution, resolution, resolution});
1079 
1080  // Real data indexer
1081  ArrayIndexer block_keys_indexer(block_keys, 1);
1082  ArrayIndexer nb_block_masks_indexer(nb_masks, 2);
1083  ArrayIndexer nb_block_indices_indexer(nb_indices, 2);
1084 
1085  // Plain arrays that do not require indexers
1086  const index_t* indices_ptr = indices.GetDataPtr<index_t>();
1087 
1088  if (!block_value_map.Contains("tsdf") ||
1089  !block_value_map.Contains("weight")) {
1091  "TSDF and/or weight not allocated in blocks, please implement "
1092  "customized integration.");
1093  }
1094  const tsdf_t* tsdf_base_ptr =
1095  block_value_map.at("tsdf").GetDataPtr<tsdf_t>();
1096  const weight_t* weight_base_ptr =
1097  block_value_map.at("weight").GetDataPtr<weight_t>();
1098  const color_t* color_base_ptr = nullptr;
1099  if (block_value_map.Contains("color")) {
1100  color_base_ptr = block_value_map.at("color").GetDataPtr<color_t>();
1101  }
1102 
1103  index_t n_blocks = indices.GetLength();
1104  index_t n = n_blocks * resolution3;
1105 
1106  // Output
1107 #if defined(__CUDACC__)
1108  core::Tensor count(std::vector<index_t>{0}, {1}, core::Int32,
1109  block_keys.GetDevice());
1110  index_t* count_ptr = count.GetDataPtr<index_t>();
1111 #else
1112  std::atomic<index_t> count_atomic(0);
1113  std::atomic<index_t>* count_ptr = &count_atomic;
1114 #endif
1115 
1116  if (valid_size < 0) {
1118  "No estimated max point cloud size provided, using a 2-pass "
1119  "estimation. Surface extraction could be slow.");
1120  // This pass determines valid number of points.
1121 
1122  core::ParallelFor(device, n, [=] OPEN3D_DEVICE(index_t workload_idx) {
1123  auto GetLinearIdx = [&] OPEN3D_DEVICE(
1124  index_t xo, index_t yo, index_t zo,
1125  index_t curr_block_idx) -> index_t {
1126  return DeviceGetLinearIdx(xo, yo, zo, curr_block_idx,
1127  resolution, nb_block_masks_indexer,
1128  nb_block_indices_indexer);
1129  };
1130 
1131  // Natural index (0, N) -> (block_idx,
1132  // voxel_idx)
1133  index_t workload_block_idx = workload_idx / resolution3;
1134  index_t block_idx = indices_ptr[workload_block_idx];
1135  index_t voxel_idx = workload_idx % resolution3;
1136 
1137  // voxel_idx -> (x_voxel, y_voxel, z_voxel)
1138  index_t xv, yv, zv;
1139  voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
1140 
1141  index_t linear_idx = block_idx * resolution3 + voxel_idx;
1142  float tsdf_o = tsdf_base_ptr[linear_idx];
1143  float weight_o = weight_base_ptr[linear_idx];
1144  if (weight_o <= weight_threshold) return;
1145 
1146  // Enumerate x-y-z directions
1147  for (index_t i = 0; i < 3; ++i) {
1148  index_t linear_idx_i =
1149  GetLinearIdx(xv + (i == 0), yv + (i == 1),
1150  zv + (i == 2), workload_block_idx);
1151  if (linear_idx_i < 0) continue;
1152 
1153  float tsdf_i = tsdf_base_ptr[linear_idx_i];
1154  float weight_i = weight_base_ptr[linear_idx_i];
1155  if (weight_i > weight_threshold && tsdf_i * tsdf_o < 0) {
1156  OPEN3D_ATOMIC_ADD(count_ptr, 1);
1157  }
1158  }
1159  });
1160 
1161 #if defined(__CUDACC__)
1162  valid_size = count[0].Item<index_t>();
1163  count[0] = 0;
1164 #else
1165  valid_size = (*count_ptr).load();
1166  (*count_ptr) = 0;
1167 #endif
1168  }
1169 
1170  if (points.GetLength() == 0) {
1171  points = core::Tensor({valid_size, 3}, core::Float32, device);
1172  }
1173  ArrayIndexer point_indexer(points, 1);
1174 
1175  // Normals
1176  ArrayIndexer normal_indexer;
1177  normals = core::Tensor({valid_size, 3}, core::Float32, device);
1178  normal_indexer = ArrayIndexer(normals, 1);
1179 
1180  // This pass extracts exact surface points.
1181 
1182  // Colors
1183  ArrayIndexer color_indexer;
1184  if (color_base_ptr) {
1185  colors = core::Tensor({valid_size, 3}, core::Float32, device);
1186  color_indexer = ArrayIndexer(colors, 1);
1187  }
1188 
1189  core::ParallelFor(device, n, [=] OPEN3D_DEVICE(index_t workload_idx) {
1190  auto GetLinearIdx = [&] OPEN3D_DEVICE(
1191  index_t xo, index_t yo, index_t zo,
1192  index_t curr_block_idx) -> index_t {
1193  return DeviceGetLinearIdx(xo, yo, zo, curr_block_idx, resolution,
1194  nb_block_masks_indexer,
1195  nb_block_indices_indexer);
1196  };
1197 
1198  auto GetNormal = [&] OPEN3D_DEVICE(index_t xo, index_t yo, index_t zo,
1199  index_t curr_block_idx, float* n) {
1200  return DeviceGetNormal<tsdf_t>(
1201  tsdf_base_ptr, xo, yo, zo, curr_block_idx, n, resolution,
1202  nb_block_masks_indexer, nb_block_indices_indexer);
1203  };
1204 
1205  // Natural index (0, N) -> (block_idx, voxel_idx)
1206  index_t workload_block_idx = workload_idx / resolution3;
1207  index_t block_idx = indices_ptr[workload_block_idx];
1208  index_t voxel_idx = workload_idx % resolution3;
1209 
1211  // block_idx -> (x_block, y_block, z_block)
1212  index_t* block_key_ptr =
1213  block_keys_indexer.GetDataPtr<index_t>(block_idx);
1214  index_t xb = block_key_ptr[0];
1215  index_t yb = block_key_ptr[1];
1216  index_t zb = block_key_ptr[2];
1217 
1218  // voxel_idx -> (x_voxel, y_voxel, z_voxel)
1219  index_t xv, yv, zv;
1220  voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
1221 
1222  index_t linear_idx = block_idx * resolution3 + voxel_idx;
1223  float tsdf_o = tsdf_base_ptr[linear_idx];
1224  float weight_o = weight_base_ptr[linear_idx];
1225  if (weight_o <= weight_threshold) return;
1226 
1227  float no[3] = {0}, ne[3] = {0};
1228 
1229  // Get normal at origin
1230  GetNormal(xv, yv, zv, workload_block_idx, no);
1231 
1232  index_t x = xb * resolution + xv;
1233  index_t y = yb * resolution + yv;
1234  index_t z = zb * resolution + zv;
1235 
1236  // Enumerate x-y-z axis
1237  for (index_t i = 0; i < 3; ++i) {
1238  index_t linear_idx_i =
1239  GetLinearIdx(xv + (i == 0), yv + (i == 1), zv + (i == 2),
1240  workload_block_idx);
1241  if (linear_idx_i < 0) continue;
1242 
1243  float tsdf_i = tsdf_base_ptr[linear_idx_i];
1244  float weight_i = weight_base_ptr[linear_idx_i];
1245  if (weight_i > weight_threshold && tsdf_i * tsdf_o < 0) {
1246  float ratio = (0 - tsdf_o) / (tsdf_i - tsdf_o);
1247 
1248  index_t idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
1249  if (idx >= valid_size) {
1250  printf("Point cloud size larger than "
1251  "estimated, please increase the "
1252  "estimation!\n");
1253  return;
1254  }
1255 
1256  float* point_ptr = point_indexer.GetDataPtr<float>(idx);
1257  point_ptr[0] = voxel_size * (x + ratio * int(i == 0));
1258  point_ptr[1] = voxel_size * (y + ratio * int(i == 1));
1259  point_ptr[2] = voxel_size * (z + ratio * int(i == 2));
1260 
1261  // Get normal at edge and interpolate
1262  float* normal_ptr = normal_indexer.GetDataPtr<float>(idx);
1263  GetNormal(xv + (i == 0), yv + (i == 1), zv + (i == 2),
1264  workload_block_idx, ne);
1265  float nx = (1 - ratio) * no[0] + ratio * ne[0];
1266  float ny = (1 - ratio) * no[1] + ratio * ne[1];
1267  float nz = (1 - ratio) * no[2] + ratio * ne[2];
1268  float norm = static_cast<float>(
1269  sqrt(nx * nx + ny * ny + nz * nz) + 1e-5);
1270  normal_ptr[0] = nx / norm;
1271  normal_ptr[1] = ny / norm;
1272  normal_ptr[2] = nz / norm;
1273 
1274  if (color_base_ptr) {
1275  float* color_ptr = color_indexer.GetDataPtr<float>(idx);
1276  const color_t* color_o_ptr =
1277  color_base_ptr + 3 * linear_idx;
1278  float r_o = color_o_ptr[0];
1279  float g_o = color_o_ptr[1];
1280  float b_o = color_o_ptr[2];
1281 
1282  const color_t* color_i_ptr =
1283  color_base_ptr + 3 * linear_idx_i;
1284  float r_i = color_i_ptr[0];
1285  float g_i = color_i_ptr[1];
1286  float b_i = color_i_ptr[2];
1287 
1288  color_ptr[0] = ((1 - ratio) * r_o + ratio * r_i) / 255.0f;
1289  color_ptr[1] = ((1 - ratio) * g_o + ratio * g_i) / 255.0f;
1290  color_ptr[2] = ((1 - ratio) * b_o + ratio * b_i) / 255.0f;
1291  }
1292  }
1293  }
1294  });
1295 
1296 #if defined(__CUDACC__)
1297  index_t total_count = count.Item<index_t>();
1298 #else
1299  index_t total_count = (*count_ptr).load();
1300 #endif
1301 
1302  utility::LogDebug("{} vertices extracted", total_count);
1303  valid_size = total_count;
1304 
1305 #if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__)
1306  core::cuda::Synchronize();
1307 #endif
1308 }
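
ExtractPointCloud emits one point wherever a voxel and its +x, +y, or +z neighbor have opposite TSDF signs, placing it at the interpolated zero crossing between the two voxel centers. A minimal sketch of that placement along a single axis (illustration only; the values are made up):

#include <cstdio>

int main() {
    const float voxel_size = 0.01f;
    int x = 42;                           // global voxel index along the chosen axis
    float tsdf_o = 0.3f, tsdf_i = -0.1f;  // opposite signs: the surface lies between
    float ratio = (0 - tsdf_o) / (tsdf_i - tsdf_o);  // 0.75, closer to the neighbor
    float point = voxel_size * (x + ratio);
    std::printf("ratio=%.2f, surface point at %.4f m\n", ratio, point);
    return 0;
}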
1309 
1310 template <typename tsdf_t, typename weight_t, typename color_t>
1311 #if defined(__CUDACC__)
1312 void ExtractTriangleMeshCUDA
1313 #else
1314 void ExtractTriangleMeshCPU
1315 #endif
1316  (const core::Tensor& block_indices,
1317  const core::Tensor& inv_block_indices,
1318  const core::Tensor& nb_block_indices,
1319  const core::Tensor& nb_block_masks,
1320  const core::Tensor& block_keys,
1321  const TensorMap& block_value_map,
1322  core::Tensor& vertices,
1323  core::Tensor& triangles,
1324  core::Tensor& vertex_normals,
1325  core::Tensor& vertex_colors,
1326  index_t block_resolution,
1327  float voxel_size,
1328  float weight_threshold,
1329  index_t& vertex_count) {
1330  core::Device device = block_indices.GetDevice();
1331 
1332  index_t resolution = block_resolution;
1333  index_t resolution3 = resolution * resolution * resolution;
1334 
1335  // Shape / transform indexers, no data involved
1336  ArrayIndexer voxel_indexer({resolution, resolution, resolution});
1337  index_t n_blocks = static_cast<index_t>(block_indices.GetLength());
1338 
1339  // TODO(wei): profile performance by replacing the table with a hashmap.
1340  // Voxel-wise mesh info. 4 channels correspond to:
1341  // 3 edges' corresponding vertex index + 1 table index.
1342  core::Tensor mesh_structure;
1343  try {
1344  mesh_structure = core::Tensor::Zeros(
1345  {n_blocks, resolution, resolution, resolution, 4}, core::Int32,
1346  device);
1347  } catch (const std::runtime_error&) {
1349  "Unable to allocate assistance mesh structure for Marching "
1350  "Cubes with {} active voxel blocks. Please consider using a "
1351  "larger voxel size (currently {}) for TSDF integration, or "
1352  "using tsdf_volume.cpu() to perform mesh extraction on CPU.",
1353  n_blocks, voxel_size);
1354  }
1355 
1356  // Real data indexer
1357  ArrayIndexer mesh_structure_indexer(mesh_structure, 4);
1358  ArrayIndexer nb_block_masks_indexer(nb_block_masks, 2);
1359  ArrayIndexer nb_block_indices_indexer(nb_block_indices, 2);
1360 
1361  // Plain arrays that do not require indexers
1362  const index_t* indices_ptr = block_indices.GetDataPtr<index_t>();
1363  const index_t* inv_indices_ptr = inv_block_indices.GetDataPtr<index_t>();
1364 
1365  if (!block_value_map.Contains("tsdf") ||
1366  !block_value_map.Contains("weight")) {
1368  "TSDF and/or weight not allocated in blocks, please implement "
1369  "customized integration.");
1370  }
1371  const tsdf_t* tsdf_base_ptr =
1372  block_value_map.at("tsdf").GetDataPtr<tsdf_t>();
1373  const weight_t* weight_base_ptr =
1374  block_value_map.at("weight").GetDataPtr<weight_t>();
1375  const color_t* color_base_ptr = nullptr;
1376  if (block_value_map.Contains("color")) {
1377  color_base_ptr = block_value_map.at("color").GetDataPtr<color_t>();
1378  }
1379 
1380  index_t n = n_blocks * resolution3;
1381  // Pass 0: analyze mesh structure, set up one-on-one correspondences
1382  // from edges to vertices.
1383 
1384  core::ParallelFor(device, n, [=] OPEN3D_DEVICE(index_t widx) {
1385  auto GetLinearIdx = [&] OPEN3D_DEVICE(
1386  index_t xo, index_t yo, index_t zo,
1387  index_t curr_block_idx) -> index_t {
1388  return DeviceGetLinearIdx(xo, yo, zo, curr_block_idx,
1389  static_cast<index_t>(resolution),
1390  nb_block_masks_indexer,
1391  nb_block_indices_indexer);
1392  };
1393 
1394  // Natural index (0, N) -> (block_idx, voxel_idx)
1395  index_t workload_block_idx = widx / resolution3;
1396  index_t voxel_idx = widx % resolution3;
1397 
1398  // voxel_idx -> (x_voxel, y_voxel, z_voxel)
1399  index_t xv, yv, zv;
1400  voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
1401 
1402  // Check per-vertex sign in the cube to determine cube
1403  // type
1404  index_t table_idx = 0;
1405  for (index_t i = 0; i < 8; ++i) {
1406  index_t linear_idx_i =
1407  GetLinearIdx(xv + vtx_shifts[i][0], yv + vtx_shifts[i][1],
1408  zv + vtx_shifts[i][2], workload_block_idx);
1409  if (linear_idx_i < 0) return;
1410 
1411  float tsdf_i = tsdf_base_ptr[linear_idx_i];
1412  float weight_i = weight_base_ptr[linear_idx_i];
1413  if (weight_i <= weight_threshold) return;
1414 
1415  table_idx |= ((tsdf_i < 0) ? (1 << i) : 0);
1416  }
1417 
1418  index_t* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<index_t>(
1419  xv, yv, zv, workload_block_idx);
1420  mesh_struct_ptr[3] = table_idx;
1421 
1422  if (table_idx == 0 || table_idx == 255) return;
1423 
1424  // Check per-edge signs to determine which edges carry vertices
1425  index_t edges_with_vertices = edge_table[table_idx];
1426  for (index_t i = 0; i < 12; ++i) {
1427  if (edges_with_vertices & (1 << i)) {
1428  index_t xv_i = xv + edge_shifts[i][0];
1429  index_t yv_i = yv + edge_shifts[i][1];
1430  index_t zv_i = zv + edge_shifts[i][2];
1431  index_t edge_i = edge_shifts[i][3];
1432 
1433  index_t dxb = xv_i / resolution;
1434  index_t dyb = yv_i / resolution;
1435  index_t dzb = zv_i / resolution;
1436 
1437  index_t nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9;
1438 
1439  index_t block_idx_i =
1440  *nb_block_indices_indexer.GetDataPtr<index_t>(
1441  workload_block_idx, nb_idx);
1442  index_t* mesh_ptr_i =
1443  mesh_structure_indexer.GetDataPtr<index_t>(
1444  xv_i - dxb * resolution,
1445  yv_i - dyb * resolution,
1446  zv_i - dzb * resolution,
1447  inv_indices_ptr[block_idx_i]);
1448 
1449  // Non-atomic write, but we are safe
1450  mesh_ptr_i[edge_i] = -1;
1451  }
1452  }
1453  });
1454 
1455  // Pass 1: determine valid number of vertices (if not preset)
1456 #if defined(__CUDACC__)
1457  core::Tensor count(std::vector<index_t>{0}, {}, core::Int32, device);
1458 
1459  index_t* count_ptr = count.GetDataPtr<index_t>();
1460 #else
1461  std::atomic<index_t> count_atomic(0);
1462  std::atomic<index_t>* count_ptr = &count_atomic;
1463 #endif
1464 
1465  if (vertex_count < 0) {
1466  core::ParallelFor(device, n, [=] OPEN3D_DEVICE(index_t widx) {
1467  // Natural index (0, N) -> (block_idx, voxel_idx)
1468  index_t workload_block_idx = widx / resolution3;
1469  index_t voxel_idx = widx % resolution3;
1470 
1471  // voxel_idx -> (x_voxel, y_voxel, z_voxel)
1472  index_t xv, yv, zv;
1473  voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
1474 
1475  // Obtain voxel's mesh struct ptr
1476  index_t* mesh_struct_ptr =
1477  mesh_structure_indexer.GetDataPtr<index_t>(
1478  xv, yv, zv, workload_block_idx);
1479 
1480  // Early quit -- no allocated vertex to compute
1481  if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 &&
1482  mesh_struct_ptr[2] != -1) {
1483  return;
1484  }
1485 
1486  // Enumerate 3 edges in the voxel
1487  for (index_t e = 0; e < 3; ++e) {
1488  index_t vertex_idx = mesh_struct_ptr[e];
1489  if (vertex_idx != -1) continue;
1490 
1491  OPEN3D_ATOMIC_ADD(count_ptr, 1);
1492  }
1493  });
1494 
1495 #if defined(__CUDACC__)
1496  vertex_count = count.Item<index_t>();
1497 #else
1498  vertex_count = (*count_ptr).load();
1499 #endif
1500  }
1501 
1502  utility::LogDebug("Total vertex count = {}", vertex_count);
1503  vertices = core::Tensor({vertex_count, 3}, core::Float32, device);
1504 
1505  vertex_normals = core::Tensor({vertex_count, 3}, core::Float32, device);
1506  ArrayIndexer normal_indexer = ArrayIndexer(vertex_normals, 1);
1507 
1508  ArrayIndexer color_indexer;
1509  if (color_base_ptr) {
1510  vertex_colors = core::Tensor({vertex_count, 3}, core::Float32, device);
1511  color_indexer = ArrayIndexer(vertex_colors, 1);
1512  }
1513 
1514  ArrayIndexer block_keys_indexer(block_keys, 1);
1515  ArrayIndexer vertex_indexer(vertices, 1);
1516 
1517 #if defined(__CUDACC__)
1518  count = core::Tensor(std::vector<index_t>{0}, {}, core::Int32, device);
1519  count_ptr = count.GetDataPtr<index_t>();
1520 #else
1521  (*count_ptr) = 0;
1522 #endif
1523 
1524  // Pass 2: extract vertices.
1525 
1526  core::ParallelFor(device, n, [=] OPEN3D_DEVICE(index_t widx) {
1527  auto GetLinearIdx = [&] OPEN3D_DEVICE(
1528  index_t xo, index_t yo, index_t zo,
1529  index_t curr_block_idx) -> index_t {
1530  return DeviceGetLinearIdx(xo, yo, zo, curr_block_idx, resolution,
1531  nb_block_masks_indexer,
1532  nb_block_indices_indexer);
1533  };
1534 
1535  auto GetNormal = [&] OPEN3D_DEVICE(index_t xo, index_t yo, index_t zo,
1536  index_t curr_block_idx, float* n) {
1537  return DeviceGetNormal<tsdf_t>(
1538  tsdf_base_ptr, xo, yo, zo, curr_block_idx, n, resolution,
1539  nb_block_masks_indexer, nb_block_indices_indexer);
1540  };
1541 
1542  // Natural index (0, N) -> (block_idx, voxel_idx)
1543  index_t workload_block_idx = widx / resolution3;
1544  index_t block_idx = indices_ptr[workload_block_idx];
1545  index_t voxel_idx = widx % resolution3;
1546 
1547  // block_idx -> (x_block, y_block, z_block)
1548  index_t* block_key_ptr =
1549  block_keys_indexer.GetDataPtr<index_t>(block_idx);
1550  index_t xb = block_key_ptr[0];
1551  index_t yb = block_key_ptr[1];
1552  index_t zb = block_key_ptr[2];
1553 
1554  // voxel_idx -> (x_voxel, y_voxel, z_voxel)
1555  index_t xv, yv, zv;
1556  voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
1557 
1558  // global coordinate (in voxels)
1559  index_t x = xb * resolution + xv;
1560  index_t y = yb * resolution + yv;
1561  index_t z = zb * resolution + zv;
1562 
1563  // Obtain voxel's mesh struct ptr
1564  index_t* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<index_t>(
1565  xv, yv, zv, workload_block_idx);
1566 
1567  // Early quit -- no allocated vertex to compute
1568  if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 &&
1569  mesh_struct_ptr[2] != -1) {
1570  return;
1571  }
1572 
1573  // Obtain voxel ptr
1574  index_t linear_idx = resolution3 * block_idx + voxel_idx;
1575  float tsdf_o = tsdf_base_ptr[linear_idx];
1576 
1577  float no[3] = {0}, ne[3] = {0};
1578 
1579  // Get normal at origin
1580  GetNormal(xv, yv, zv, workload_block_idx, no);
1581 
1582  // Enumerate 3 edges in the voxel
1583  for (index_t e = 0; e < 3; ++e) {
1584  index_t vertex_idx = mesh_struct_ptr[e];
1585  if (vertex_idx != -1) continue;
1586 
1587  index_t linear_idx_e =
1588  GetLinearIdx(xv + (e == 0), yv + (e == 1), zv + (e == 2),
1589  workload_block_idx);
1590  OPEN3D_ASSERT(linear_idx_e > 0 &&
1591  "Internal error: GetVoxelAt returns nullptr.");
1592  float tsdf_e = tsdf_base_ptr[linear_idx_e];
1593  float ratio = (0 - tsdf_o) / (tsdf_e - tsdf_o);
1594 
1595  index_t idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
1596  mesh_struct_ptr[e] = idx;
1597 
1598  float ratio_x = ratio * index_t(e == 0);
1599  float ratio_y = ratio * index_t(e == 1);
1600  float ratio_z = ratio * index_t(e == 2);
1601 
1602  float* vertex_ptr = vertex_indexer.GetDataPtr<float>(idx);
1603  vertex_ptr[0] = voxel_size * (x + ratio_x);
1604  vertex_ptr[1] = voxel_size * (y + ratio_y);
1605  vertex_ptr[2] = voxel_size * (z + ratio_z);
1606 
1607  // Get normal at edge and interpolate
1608  float* normal_ptr = normal_indexer.GetDataPtr<float>(idx);
1609  GetNormal(xv + (e == 0), yv + (e == 1), zv + (e == 2),
1610  workload_block_idx, ne);
1611  float nx = (1 - ratio) * no[0] + ratio * ne[0];
1612  float ny = (1 - ratio) * no[1] + ratio * ne[1];
1613  float nz = (1 - ratio) * no[2] + ratio * ne[2];
1614  float norm = static_cast<float>(sqrt(nx * nx + ny * ny + nz * nz) +
1615  1e-5);
1616  normal_ptr[0] = nx / norm;
1617  normal_ptr[1] = ny / norm;
1618  normal_ptr[2] = nz / norm;
1619 
1620  if (color_base_ptr) {
1621  float* color_ptr = color_indexer.GetDataPtr<float>(idx);
1622  float r_o = color_base_ptr[linear_idx * 3 + 0];
1623  float g_o = color_base_ptr[linear_idx * 3 + 1];
1624  float b_o = color_base_ptr[linear_idx * 3 + 2];
1625 
1626  float r_e = color_base_ptr[linear_idx_e * 3 + 0];
1627  float g_e = color_base_ptr[linear_idx_e * 3 + 1];
1628  float b_e = color_base_ptr[linear_idx_e * 3 + 2];
1629 
1630  color_ptr[0] = ((1 - ratio) * r_o + ratio * r_e) / 255.0f;
1631  color_ptr[1] = ((1 - ratio) * g_o + ratio * g_e) / 255.0f;
1632  color_ptr[2] = ((1 - ratio) * b_o + ratio * b_e) / 255.0f;
1633  }
1634  }
1635  });
1636 
1637  // Pass 3: connect vertices and form triangles.
1638  index_t triangle_count = vertex_count * 3;
1639  triangles = core::Tensor({triangle_count, 3}, core::Int32, device);
1640  ArrayIndexer triangle_indexer(triangles, 1);
1641 
1642 #if defined(__CUDACC__)
1643  count = core::Tensor(std::vector<index_t>{0}, {}, core::Int32, device);
1644  count_ptr = count.GetDataPtr<index_t>();
1645 #else
1646  (*count_ptr) = 0;
1647 #endif
1648  core::ParallelFor(device, n, [=] OPEN3D_DEVICE(index_t widx) {
1649  // Natural index (0, N) -> (block_idx, voxel_idx)
1650  index_t workload_block_idx = widx / resolution3;
1651  index_t voxel_idx = widx % resolution3;
1652 
1653  // voxel_idx -> (x_voxel, y_voxel, z_voxel)
1654  index_t xv, yv, zv;
1655  voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
1656 
1657  // Obtain voxel's mesh struct ptr
1658  index_t* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<index_t>(
1659  xv, yv, zv, workload_block_idx);
1660 
1661  index_t table_idx = mesh_struct_ptr[3];
1662  if (tri_count[table_idx] == 0) return;
1663 
1664  for (index_t tri = 0; tri < 16; tri += 3) {
1665  if (tri_table[table_idx][tri] == -1) return;
1666 
1667  index_t tri_idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
1668 
1669  for (index_t vertex = 0; vertex < 3; ++vertex) {
1670  index_t edge = tri_table[table_idx][tri + vertex];
1671 
1672  index_t xv_i = xv + edge_shifts[edge][0];
1673  index_t yv_i = yv + edge_shifts[edge][1];
1674  index_t zv_i = zv + edge_shifts[edge][2];
1675  index_t edge_i = edge_shifts[edge][3];
1676 
1677  index_t dxb = xv_i / resolution;
1678  index_t dyb = yv_i / resolution;
1679  index_t dzb = zv_i / resolution;
1680 
1681  index_t nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9;
1682 
1683  index_t block_idx_i =
1684  *nb_block_indices_indexer.GetDataPtr<index_t>(
1685  workload_block_idx, nb_idx);
1686  index_t* mesh_struct_ptr_i =
1687  mesh_structure_indexer.GetDataPtr<index_t>(
1688  xv_i - dxb * resolution,
1689  yv_i - dyb * resolution,
1690  zv_i - dzb * resolution,
1691  inv_indices_ptr[block_idx_i]);
1692 
1693  index_t* triangle_ptr =
1694  triangle_indexer.GetDataPtr<index_t>(tri_idx);
1695  triangle_ptr[2 - vertex] = mesh_struct_ptr_i[edge_i];
1696  }
1697  }
1698  });
1699 
1700 #if defined(__CUDACC__)
1701  triangle_count = count.Item<index_t>();
1702 #else
1703  triangle_count = (*count_ptr).load();
1704 #endif
1705  utility::LogDebug("Total triangle count = {}", triangle_count);
1706  triangles = triangles.Slice(0, 0, triangle_count);
1707 }
1708 
1709 } // namespace voxel_grid
1710 } // namespace kernel
1711 } // namespace geometry
1712 } // namespace t
1713 } // namespace open3d
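The triangle-connection pass above resolves each triangle vertex by finding the block that owns the corresponding edge vertex: dividing the shifted voxel coordinate by the block resolution yields a per-axis block shift (0 or 1 here, since edge_shifts only ever steps forward), that shift is flattened into an index into the 3x3x3 neighbor-block table with the current block at center slot 13, and the remainder is the voxel coordinate inside that neighbor block. The standalone sketch below restates this arithmetic outside of Open3D; ResolveNeighbor, NeighborLookup, and the argument names are hypothetical and used only for illustration.

// Illustrative sketch (not part of VoxelBlockGridImpl.h): the neighbor-block
// lookup arithmetic used in the triangle-connection pass, restated in isolation.
#include <cstdio>

struct NeighborLookup {
    int nb_idx;      // index into the 3x3x3 (27-entry) neighbor-block table
    int lx, ly, lz;  // voxel coordinate inside the neighbor block
};

// resolution is the per-block voxel resolution (e.g. 16, Open3D's default).
// (xv_i, yv_i, zv_i) may exceed resolution - 1 by at most one voxel, because
// edge_shifts only ever adds 0 or 1 per axis.
NeighborLookup ResolveNeighbor(int xv_i, int yv_i, int zv_i, int resolution) {
    int dxb = xv_i / resolution;  // 0: voxel stays in this block, 1: it spills over
    int dyb = yv_i / resolution;
    int dzb = zv_i / resolution;

    NeighborLookup out;
    // Flatten the (dxb, dyb, dzb) shift into the 3x3x3 neighborhood, whose
    // center (the current block) sits at offset (1, 1, 1), i.e. index 13.
    out.nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9;
    // Wrap the voxel coordinate back into the neighbor block's local frame.
    out.lx = xv_i - dxb * resolution;
    out.ly = yv_i - dyb * resolution;
    out.lz = zv_i - dzb * resolution;
    return out;
}

int main() {
    // A voxel on the +x face of a 16^3 block: xv_i == 16 spills into the
    // +x neighbor (nb_idx 14) at local x == 0.
    NeighborLookup n = ResolveNeighbor(16, 7, 3, 16);
    std::printf("nb_idx=%d local=(%d, %d, %d)\n", n.nb_idx, n.lx, n.ly, n.lz);
    return 0;
}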
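Both geometry-emitting passes share the same compaction pattern: the output tensor is over-allocated (here to vertex_count * 3 triangles), every thread claims a unique slot with OPEN3D_ATOMIC_ADD, the final count is read back (count.Item<index_t>() on CUDA, (*count_ptr).load() on the CPU), and the tensor is sliced down to the slots actually used. A minimal CPU-only sketch of that pattern, assuming plain std::atomic and std::thread instead of Open3D's ParallelFor, follows; all names in it are illustrative.

// Minimal sketch of the atomic-append compaction pattern used above,
// assuming std::atomic + std::thread rather than Open3D's ParallelFor.
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
    const int n = 1000;
    std::vector<int> input(n);
    for (int i = 0; i < n; ++i) input[i] = i;

    // Over-allocate the output (worst case: every input element is kept).
    std::vector<int> output(n);
    std::atomic<int> cursor{0};

    auto worker = [&](int begin, int end) {
        for (int i = begin; i < end; ++i) {
            if (input[i] % 7 == 0) {             // keep "valid" elements only
                int slot = cursor.fetch_add(1);  // claim a unique output slot
                output[slot] = input[i];
            }
        }
    };

    std::thread t0(worker, 0, n / 2);
    std::thread t1(worker, n / 2, n);
    t0.join();
    t1.join();

    // Equivalent of triangles.Slice(0, 0, triangle_count): only the first
    // `cursor` entries of the over-allocated buffer are meaningful.
    std::printf("kept %d of %d elements\n", cursor.load(), n);
    return 0;
}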