Open3D (C++ API) 0.16.0
PointCloudImpl.h
1 // ----------------------------------------------------------------------------
2 // - Open3D: www.open3d.org -
3 // ----------------------------------------------------------------------------
4 // The MIT License (MIT)
5 //
6 // Copyright (c) 2018-2021 www.open3d.org
7 //
8 // Permission is hereby granted, free of charge, to any person obtaining a copy
9 // of this software and associated documentation files (the "Software"), to deal
10 // in the Software without restriction, including without limitation the rights
11 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 // copies of the Software, and to permit persons to whom the Software is
13 // furnished to do so, subject to the following conditions:
14 //
15 // The above copyright notice and this permission notice shall be included in
16 // all copies or substantial portions of the Software.
17 //
18 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 // IN THE SOFTWARE.
25 // ----------------------------------------------------------------------------
26 
27 #include <atomic>
28 #include <vector>
29 
30 #include "open3d/core/CUDAUtils.h"
31 #include "open3d/core/Dispatch.h"
32 #include "open3d/core/Dtype.h"
33 #include "open3d/core/MemoryManager.h"
34 #include "open3d/core/ParallelFor.h"
35 #include "open3d/core/SizeVector.h"
36 #include "open3d/core/Tensor.h"
37 #include "open3d/core/linalg/kernel/Matrix.h"
38 #include "open3d/core/linalg/kernel/SVD3x3.h"
39 #include "open3d/core/nns/NearestNeighborSearch.h"
40 #include "open3d/t/geometry/Utility.h"
41 #include "open3d/t/geometry/kernel/GeometryIndexer.h"
42 #include "open3d/t/geometry/kernel/GeometryMacros.h"
43 #include "open3d/utility/Logging.h"
44 
45 namespace open3d {
46 namespace t {
47 namespace geometry {
48 namespace kernel {
49 namespace pointcloud {
50 
51 #ifndef __CUDACC__
52 using std::abs;
53 using std::max;
54 using std::min;
55 using std::sqrt;
56 #endif
57 
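// Note: this header is meant to be included from both a C++ and a CUDA
// translation unit. The __CUDACC__ guards select the function name
// (XxxCPU vs. XxxCUDA) and the atomic/synchronization primitives for each
// backend, while the kernel bodies are shared. On the CPU path the std::
// math functions pulled in above are used; under nvcc the device overloads
// apply instead.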
58 #if defined(__CUDACC__)
59 void UnprojectCUDA
60 #else
61 void UnprojectCPU
62 #endif
63  (const core::Tensor& depth,
64  utility::optional<std::reference_wrapper<const core::Tensor>>
65  image_colors,
66  core::Tensor& points,
67  utility::optional<std::reference_wrapper<core::Tensor>> colors,
68  const core::Tensor& intrinsics,
69  const core::Tensor& extrinsics,
70  float depth_scale,
71  float depth_max,
72  int64_t stride) {
73 
74  const bool have_colors = image_colors.has_value();
75  NDArrayIndexer depth_indexer(depth, 2);
76  NDArrayIndexer image_colors_indexer;
77 
78  core::Tensor pose = t::geometry::InverseTransformation(extrinsics);
79  TransformIndexer ti(intrinsics, pose, 1.0f);
80 
81  // Output
82  int64_t rows_strided = depth_indexer.GetShape(0) / stride;
83  int64_t cols_strided = depth_indexer.GetShape(1) / stride;
84 
85  points = core::Tensor({rows_strided * cols_strided, 3}, core::Float32,
86  depth.GetDevice());
87  NDArrayIndexer point_indexer(points, 1);
88  NDArrayIndexer colors_indexer;
89  if (have_colors) {
90  const auto& imcol = image_colors.value().get();
91  image_colors_indexer = NDArrayIndexer{imcol, 2};
92  colors.value().get() = core::Tensor({rows_strided * cols_strided, 3},
93  core::Float32, imcol.GetDevice());
94  colors_indexer = NDArrayIndexer(colors.value().get(), 1);
95  }
96 
97  // Counter
98 #if defined(__CUDACC__)
99  core::Tensor count(std::vector<int>{0}, {}, core::Int32, depth.GetDevice());
100  int* count_ptr = count.GetDataPtr<int>();
101 #else
102  std::atomic<int> count_atomic(0);
103  std::atomic<int>* count_ptr = &count_atomic;
104 #endif
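// Valid points are compacted into the output: each thread that passes the
// depth test atomically increments the counter to claim a destination slot,
// and the output tensors are sliced down to the final count afterwards.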
105 
106  int64_t n = rows_strided * cols_strided;
107 
108  DISPATCH_DTYPE_TO_TEMPLATE(depth.GetDtype(), [&]() {
109  core::ParallelFor(
110  depth.GetDevice(), n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
111  int64_t y = (workload_idx / cols_strided) * stride;
112  int64_t x = (workload_idx % cols_strided) * stride;
113 
114  float d = *depth_indexer.GetDataPtr<scalar_t>(x, y) /
115  depth_scale;
116  if (d > 0 && d < depth_max) {
117  int idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
118 
119  float x_c = 0, y_c = 0, z_c = 0;
120  ti.Unproject(static_cast<float>(x),
121  static_cast<float>(y), d, &x_c, &y_c,
122  &z_c);
123 
124  float* vertex = point_indexer.GetDataPtr<float>(idx);
125  ti.RigidTransform(x_c, y_c, z_c, vertex + 0, vertex + 1,
126  vertex + 2);
127  if (have_colors) {
128  float* pcd_pixel =
129  colors_indexer.GetDataPtr<float>(idx);
130  float* image_pixel =
131  image_colors_indexer.GetDataPtr<float>(x,
132  y);
133  *pcd_pixel = *image_pixel;
134  *(pcd_pixel + 1) = *(image_pixel + 1);
135  *(pcd_pixel + 2) = *(image_pixel + 2);
136  }
137  }
138  });
139  });
140 #if defined(__CUDACC__)
141  int total_pts_count = count.Item<int>();
142 #else
143  int total_pts_count = (*count_ptr).load();
144 #endif
145 
146 #ifdef __CUDACC__
147  core::cuda::Synchronize();
148 #endif
149  points = points.Slice(0, 0, total_pts_count);
150  if (have_colors) {
151  colors.value().get() =
152  colors.value().get().Slice(0, 0, total_pts_count);
153  }
154 }
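// Minimal illustrative call (not part of the upstream file; the tensor names
// below are placeholders):
//   core::Tensor points_out;
//   UnprojectCPU(depth_im, utility::nullopt, points_out, utility::nullopt,
//                intrinsics, extrinsics, /*depth_scale=*/1000.0f,
//                /*depth_max=*/3.0f, /*stride=*/1);
// This yields an (N, 3) Float32 tensor of unprojected, pose-transformed
// points for all strided pixels with 0 < depth / depth_scale < depth_max.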
155 
156 #if defined(__CUDACC__)
157 void GetPointMaskWithinAABBCUDA
158 #else
159 void GetPointMaskWithinAABBCPU
160 #endif
161  (const core::Tensor& points,
162  const core::Tensor& min_bound,
163  const core::Tensor& max_bound,
164  core::Tensor& mask) {
165 
166  DISPATCH_DTYPE_TO_TEMPLATE(points.GetDtype(), [&]() {
167  const scalar_t* points_ptr = points.GetDataPtr<scalar_t>();
168  const int64_t n = points.GetLength();
169  const scalar_t* min_bound_ptr = min_bound.GetDataPtr<scalar_t>();
170  const scalar_t* max_bound_ptr = max_bound.GetDataPtr<scalar_t>();
171  bool* mask_ptr = mask.GetDataPtr<bool>();
172 
173  core::ParallelFor(
174  points.GetDevice(), n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
175  const scalar_t x = points_ptr[3 * workload_idx + 0];
176  const scalar_t y = points_ptr[3 * workload_idx + 1];
177  const scalar_t z = points_ptr[3 * workload_idx + 2];
178 
179  if (x >= min_bound_ptr[0] && x <= max_bound_ptr[0] &&
180  y >= min_bound_ptr[1] && y <= max_bound_ptr[1] &&
181  z >= min_bound_ptr[2] && z <= max_bound_ptr[2]) {
182  mask_ptr[workload_idx] = true;
183  } else {
184  mask_ptr[workload_idx] = false;
185  }
186  });
187  });
188 }
189 
190 template <typename scalar_t>
191 OPEN3D_HOST_DEVICE void GetCoordinateSystemOnPlane(const scalar_t* query,
192  scalar_t* u,
193  scalar_t* v) {
194  // Unless the x and y coords are both close to zero, we can simply take (
195  // -y, x, 0 ) and normalize it.
196  // If both x and y are close to zero, then the vector is close to the
197  // z-axis, so it's far from colinear to the x-axis for instance. So we
198  // take the crossed product with (1,0,0) and normalize it.
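 // Concretely: in the first branch v = (-q_y, q_x, 0) / sqrt(q_x^2 + q_y^2);
 // in the second branch v is the normalized cross product of (1, 0, 0) with
 // the query vector q. In both cases u = q x v, so {u, v} spans the plane
 // orthogonal to q.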
199  if (!(abs(query[0] - query[2]) < 1e-6) ||
200  !(abs(query[1] - query[2]) < 1e-6)) {
201  const scalar_t norm2_inv =
202  1.0 / sqrt(query[0] * query[0] + query[1] * query[1]);
203  v[0] = -1 * query[1] * norm2_inv;
204  v[1] = query[0] * norm2_inv;
205  v[2] = 0;
206  } else {
207  const scalar_t norm2_inv =
208  1.0 / sqrt(query[1] * query[1] + query[2] * query[2]);
209  v[0] = 0;
210  v[1] = -1 * query[2] * norm2_inv;
211  v[2] = query[1] * norm2_inv;
212  }
213 
214  core::linalg::kernel::cross_3x1(query, v, u);
215 }
216 
217 template <typename scalar_t>
218 inline OPEN3D_HOST_DEVICE void Swap(scalar_t* x, scalar_t* y) {
219  scalar_t tmp = *x;
220  *x = *y;
221  *y = tmp;
222 }
223 
224 template <typename scalar_t>
225 inline OPEN3D_HOST_DEVICE void Heapify(scalar_t* arr, int n, int root) {
226  int largest = root;
227  int l = 2 * root + 1;
228  int r = 2 * root + 2;
229 
230  if (l < n && arr[l] > arr[largest]) {
231  largest = l;
232  }
233  if (r < n && arr[r] > arr[largest]) {
234  largest = r;
235  }
236  if (largest != root) {
237  Swap<scalar_t>(&arr[root], &arr[largest]);
238  Heapify<scalar_t>(arr, n, largest);
239  }
240 }
241 
242 template <typename scalar_t>
243 OPEN3D_HOST_DEVICE void HeapSort(scalar_t* arr, int n) {
244  for (int i = n / 2 - 1; i >= 0; i--) Heapify(arr, n, i);
245 
246  for (int i = n - 1; i > 0; i--) {
247  Swap<scalar_t>(&arr[0], &arr[i]);
248  Heapify<scalar_t>(arr, i, 0);
249  }
250 }
251 
252 template <typename scalar_t>
253 OPEN3D_HOST_DEVICE bool IsBoundaryPoints(const scalar_t* angles,
254  int counts,
255  double angle_threshold) {
256  scalar_t diff;
257  scalar_t max_diff = 0;
258  // Compute the maximal angle difference between two consecutive angles.
259  for (int i = 0; i < counts - 1; i++) {
260  diff = angles[i + 1] - angles[i];
261  max_diff = max(max_diff, diff);
262  }
263 
264  // Get the angle difference between the last and the first.
265  diff = 2 * M_PI - angles[counts - 1] + angles[0];
266  max_diff = max(max_diff, diff);
267 
268  return max_diff > angle_threshold * M_PI / 180.0 ? true : false;
269 }
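// Example: with angle_threshold = 90 (degrees), a point is flagged as a
// boundary point when its projected neighbours leave an angular gap larger
// than pi/2 somewhere around it in the tangent plane, i.e. roughly a quarter
// of the disc around the point is empty.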
270 
271 #if defined(__CUDACC__)
272 void ComputeBoundaryPointsCUDA
273 #else
274 void ComputeBoundaryPointsCPU
275 #endif
276  (const core::Tensor& points,
277  const core::Tensor& normals,
278  const core::Tensor& indices,
279  const core::Tensor& counts,
280  core::Tensor& mask,
281  double angle_threshold) {
282 
283  const int nn_size = indices.GetShape()[1];
284 
285  DISPATCH_FLOAT_DTYPE_TO_TEMPLATE(points.GetDtype(), [&]() {
286  const scalar_t* points_ptr = points.GetDataPtr<scalar_t>();
287  const scalar_t* normals_ptr = normals.GetDataPtr<scalar_t>();
288  const int64_t n = points.GetLength();
289  const int32_t* indices_ptr = indices.GetDataPtr<int32_t>();
290  const int32_t* counts_ptr = counts.GetDataPtr<int32_t>();
291  bool* mask_ptr = mask.GetDataPtr<bool>();
292 
293  core::Tensor angles = core::Tensor::Full(
294  indices.GetShape(), -10, points.GetDtype(), points.GetDevice());
295  scalar_t* angles_ptr = angles.GetDataPtr<scalar_t>();
296 
297  core::ParallelFor(
298  points.GetDevice(), n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
299  scalar_t u[3], v[3];
300  GetCoordinateSystemOnPlane(normals_ptr + 3 * workload_idx,
301  u, v);
302 
303  // Ignore the point itself.
304  int indices_size = counts_ptr[workload_idx] - 1;
305  if (indices_size > 0) {
306  const scalar_t* query = points_ptr + 3 * workload_idx;
307  for (int i = 1; i < indices_size + 1; i++) {
308  const int idx = workload_idx * nn_size + i;
309 
310  const scalar_t* point_ref =
311  points_ptr + 3 * indices_ptr[idx];
312  const scalar_t delta[3] = {point_ref[0] - query[0],
313  point_ref[1] - query[1],
314  point_ref[2] - query[2]};
315  const scalar_t angle = atan2(
316  core::linalg::kernel::dot_3x1(v, delta),
317  core::linalg::kernel::dot_3x1(u, delta));
318 
319  angles_ptr[idx] = angle;
320  }
321 
322  // Sort the angles in ascending order.
323  HeapSort<scalar_t>(
324  angles_ptr + workload_idx * nn_size + 1,
325  indices_size);
326 
327  mask_ptr[workload_idx] = IsBoundaryPoints<scalar_t>(
328  angles_ptr + workload_idx * nn_size + 1,
329  indices_size, angle_threshold);
330  }
331  });
332  });
333 }
334 
335 // This is a `two-pass` estimate method for covariance which is numerically more
336 // robust than the `textbook` method generally used for covariance computation.
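// Pass 1 computes the neighbourhood centroid; pass 2 accumulates products of
// the centred coordinates, e.g. cov(x, y) = sum_i (x_i - mean_x)(y_i - mean_y) / (n - 1).
// The "textbook" single-pass form E[xy] - E[x]E[y] can suffer catastrophic
// cancellation when the coordinates are large relative to their spread.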
337 template <typename scalar_t>
338 OPEN3D_HOST_DEVICE void EstimatePointWiseRobustNormalizedCovarianceKernel(
339  const scalar_t* points_ptr,
340  const int32_t* indices_ptr,
341  const int32_t& indices_count,
342  scalar_t* covariance_ptr) {
343  if (indices_count < 3) {
344  covariance_ptr[0] = 1.0;
345  covariance_ptr[1] = 0.0;
346  covariance_ptr[2] = 0.0;
347  covariance_ptr[3] = 0.0;
348  covariance_ptr[4] = 1.0;
349  covariance_ptr[5] = 0.0;
350  covariance_ptr[6] = 0.0;
351  covariance_ptr[7] = 0.0;
352  covariance_ptr[8] = 1.0;
353  return;
354  }
355 
356  double centroid[3] = {0};
357  for (int32_t i = 0; i < indices_count; ++i) {
358  int32_t idx = 3 * indices_ptr[i];
359  centroid[0] += points_ptr[idx];
360  centroid[1] += points_ptr[idx + 1];
361  centroid[2] += points_ptr[idx + 2];
362  }
363 
364  centroid[0] /= indices_count;
365  centroid[1] /= indices_count;
366  centroid[2] /= indices_count;
367 
368  // cumulants must always be Float64 to ensure precision.
369  double cumulants[6] = {0};
370  for (int32_t i = 0; i < indices_count; ++i) {
371  int32_t idx = 3 * indices_ptr[i];
372  const double x = static_cast<double>(points_ptr[idx]) - centroid[0];
373  const double y = static_cast<double>(points_ptr[idx + 1]) - centroid[1];
374  const double z = static_cast<double>(points_ptr[idx + 2]) - centroid[2];
375 
376  cumulants[0] += x * x;
377  cumulants[1] += y * y;
378  cumulants[2] += z * z;
379 
380  cumulants[3] += x * y;
381  cumulants[4] += x * z;
382  cumulants[5] += y * z;
383  }
384 
385  // Using Bessel's correction (dividing by (n - 1) instead of n).
386  // Refer:
387  // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
388  const double normalization_factor = static_cast<double>(indices_count - 1);
389  for (int i = 0; i < 6; ++i) {
390  cumulants[i] /= normalization_factor;
391  }
392 
393  // Covariances(0, 0)
394  covariance_ptr[0] = static_cast<scalar_t>(cumulants[0]);
395  // Covariances(1, 1)
396  covariance_ptr[4] = static_cast<scalar_t>(cumulants[1]);
397  // Covariances(2, 2)
398  covariance_ptr[8] = static_cast<scalar_t>(cumulants[2]);
399 
400  // Covariances(0, 1) = Covariances(1, 0)
401  covariance_ptr[1] = static_cast<scalar_t>(cumulants[3]);
402  covariance_ptr[3] = covariance_ptr[1];
403 
404  // Covariances(0, 2) = Covariances(2, 0)
405  covariance_ptr[2] = static_cast<scalar_t>(cumulants[4]);
406  covariance_ptr[6] = covariance_ptr[2];
407 
408  // Covariances(1, 2) = Covariances(2, 1)
409  covariance_ptr[5] = static_cast<scalar_t>(cumulants[5]);
410  covariance_ptr[7] = covariance_ptr[5];
411 }
412 
413 #if defined(__CUDACC__)
414 void EstimateCovariancesUsingHybridSearchCUDA
415 #else
416 void EstimateCovariancesUsingHybridSearchCPU
417 #endif
418  (const core::Tensor& points,
419  core::Tensor& covariances,
420  const double& radius,
421  const int64_t& max_nn) {
422  core::Dtype dtype = points.GetDtype();
423  int64_t n = points.GetLength();
424 
425  core::nns::NearestNeighborSearch tree(points, core::Int32);
426  bool check = tree.HybridIndex(radius);
427  if (!check) {
428  utility::LogError("Building FixedRadiusIndex failed.");
429  }
430 
431  core::Tensor indices, distance, counts;
432  std::tie(indices, distance, counts) =
433  tree.HybridSearch(points, radius, max_nn);
434 
435  DISPATCH_FLOAT_DTYPE_TO_TEMPLATE(dtype, [&]() {
436  const scalar_t* points_ptr = points.GetDataPtr<scalar_t>();
437  int32_t* neighbour_indices_ptr = indices.GetDataPtr<int32_t>();
438  int32_t* neighbour_counts_ptr = counts.GetDataPtr<int32_t>();
439  scalar_t* covariances_ptr = covariances.GetDataPtr<scalar_t>();
440 
441  core::ParallelFor(
442  points.GetDevice(), n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
443  // NNS [Hybrid Search].
444  const int32_t neighbour_offset = max_nn * workload_idx;
445  // Count of valid correspondences per point.
446  const int32_t neighbour_count =
447  neighbour_counts_ptr[workload_idx];
448  // Covariance is of shape {3, 3}, so it has an
449  // offset factor of 9 x workload_idx.
450  const int32_t covariances_offset = 9 * workload_idx;
451 
452  EstimatePointWiseRobustNormalizedCovarianceKernel<scalar_t>(
453  points_ptr,
454  neighbour_indices_ptr + neighbour_offset,
455  neighbour_count,
456  covariances_ptr + covariances_offset);
457  });
458  });
459 
460  core::cuda::Synchronize(points.GetDevice());
461 }
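// Illustrative call (names are placeholders, not part of this file): the
// caller passes a pre-allocated (N, 3, 3) covariance tensor of the same
// dtype and device as the points, e.g.
//   core::Tensor covs = core::Tensor::Empty(
//           {pcd_points.GetLength(), 3, 3}, pcd_points.GetDtype(),
//           pcd_points.GetDevice());
//   EstimateCovariancesUsingHybridSearchCPU(pcd_points, covs,
//                                           /*radius=*/0.1, /*max_nn=*/30);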
462 
463 #if defined(__CUDACC__)
464 void EstimateCovariancesUsingRadiusSearchCUDA
465 #else
466 void EstimateCovariancesUsingRadiusSearchCPU
467 #endif
468  (const core::Tensor& points,
469  core::Tensor& covariances,
470  const double& radius) {
471  core::Dtype dtype = points.GetDtype();
472  int64_t n = points.GetLength();
473 
474  core::nns::NearestNeighborSearch tree(points, core::Int32);
475  bool check = tree.FixedRadiusIndex(radius);
476  if (!check) {
477  utility::LogError("Building Radius-Index failed.");
478  }
479 
480  core::Tensor indices, distance, counts;
481  std::tie(indices, distance, counts) =
482  tree.FixedRadiusSearch(points, radius);
483 
484  DISPATCH_FLOAT_DTYPE_TO_TEMPLATE(dtype, [&]() {
485  const scalar_t* points_ptr = points.GetDataPtr<scalar_t>();
486  const int32_t* neighbour_indices_ptr = indices.GetDataPtr<int32_t>();
487  const int32_t* neighbour_counts_ptr = counts.GetDataPtr<int32_t>();
488  scalar_t* covariances_ptr = covariances.GetDataPtr<scalar_t>();
489 
490  core::ParallelFor(
491  points.GetDevice(), n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
492  const int32_t neighbour_offset =
493  neighbour_counts_ptr[workload_idx];
494  const int32_t neighbour_count =
495  (neighbour_counts_ptr[workload_idx + 1] -
496  neighbour_counts_ptr[workload_idx]);
497  // Covariance is of shape {3, 3}, so it has an offset
498  // factor of 9 x workload_idx.
499  const int32_t covariances_offset = 9 * workload_idx;
500 
501  EstimatePointWiseRobustNormalizedCovarianceKernel<scalar_t>(
502  points_ptr,
503  neighbour_indices_ptr + neighbour_offset,
504  neighbour_count,
505  covariances_ptr + covariances_offset);
506  });
507  });
508 
509  core::cuda::Synchronize(points.GetDevice());
510 }
511 
512 #if defined(__CUDACC__)
513 void EstimateCovariancesUsingKNNSearchCUDA
514 #else
515 void EstimateCovariancesUsingKNNSearchCPU
516 #endif
517  (const core::Tensor& points,
518  core::Tensor& covariances,
519  const int64_t& max_nn) {
520  core::Dtype dtype = points.GetDtype();
521  int64_t n = points.GetLength();
522 
523  core::nns::NearestNeighborSearch tree(points, core::Int32);
524  bool check = tree.KnnIndex();
525  if (!check) {
526  utility::LogError("Building KNN-Index failed.");
527  }
528 
529  core::Tensor indices, distance;
530  std::tie(indices, distance) = tree.KnnSearch(points, max_nn);
531 
532  indices = indices.Contiguous();
533  int32_t nn_count = static_cast<int32_t>(indices.GetShape()[1]);
534 
535  if (nn_count < 3) {
536  utility::LogError(
537  "Not enough neighbors to compute Covariances / Normals. "
538  "Try "
539  "increasing the max_nn parameter.");
540  }
541 
542  DISPATCH_FLOAT_DTYPE_TO_TEMPLATE(dtype, [&]() {
543  auto points_ptr = points.GetDataPtr<scalar_t>();
544  auto neighbour_indices_ptr = indices.GetDataPtr<int32_t>();
545  auto covariances_ptr = covariances.GetDataPtr<scalar_t>();
546 
547  core::ParallelFor(
548  points.GetDevice(), n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
549  // NNS [KNN Search].
550  const int32_t neighbour_offset = nn_count * workload_idx;
551  // Covariance is of shape {3, 3}, so it has an offset
552  // factor of 9 x workload_idx.
553  const int32_t covariances_offset = 9 * workload_idx;
554 
555  EstimatePointWiseRobustNormalizedCovarianceKernel<scalar_t>(
556  points_ptr,
557  neighbour_indices_ptr + neighbour_offset, nn_count,
558  covariances_ptr + covariances_offset);
559  });
560  });
561 
562  core::cuda::Synchronize(points.GetDevice());
563 }
564 
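// Computes an eigenvector of the symmetric 3x3 matrix A for a known
// eigenvalue eval0: the rows of (A - eval0 * I) span a plane whose normal is
// the sought eigenvector, so the cross product of the pair of rows with the
// largest cross-product norm (the most linearly independent pair) is taken
// and normalized.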
565 template <typename scalar_t>
566 OPEN3D_HOST_DEVICE void ComputeEigenvector0(const scalar_t* A,
567  const scalar_t eval0,
568  scalar_t* eigen_vector0) {
569  scalar_t row0[3] = {A[0] - eval0, A[1], A[2]};
570  scalar_t row1[3] = {A[1], A[4] - eval0, A[5]};
571  scalar_t row2[3] = {A[2], A[5], A[8] - eval0};
572 
573  scalar_t r0xr1[3], r0xr2[3], r1xr2[3];
574 
575  core::linalg::kernel::cross_3x1(row0, row1, r0xr1);
576  core::linalg::kernel::cross_3x1(row0, row2, r0xr2);
577  core::linalg::kernel::cross_3x1(row1, row2, r1xr2);
578 
579  scalar_t d0 = core::linalg::kernel::dot_3x1(r0xr1, r0xr1);
580  scalar_t d1 = core::linalg::kernel::dot_3x1(r0xr2, r0xr2);
581  scalar_t d2 = core::linalg::kernel::dot_3x1(r1xr2, r1xr2);
582 
583  scalar_t dmax = d0;
584  int imax = 0;
585  if (d1 > dmax) {
586  dmax = d1;
587  imax = 1;
588  }
589  if (d2 > dmax) {
590  imax = 2;
591  }
592 
593  if (imax == 0) {
594  scalar_t sqrt_d = sqrt(d0);
595  eigen_vector0[0] = r0xr1[0] / sqrt_d;
596  eigen_vector0[1] = r0xr1[1] / sqrt_d;
597  eigen_vector0[2] = r0xr1[2] / sqrt_d;
598  return;
599  } else if (imax == 1) {
600  scalar_t sqrt_d = sqrt(d1);
601  eigen_vector0[0] = r0xr2[0] / sqrt_d;
602  eigen_vector0[1] = r0xr2[1] / sqrt_d;
603  eigen_vector0[2] = r0xr2[2] / sqrt_d;
604  return;
605  } else {
606  scalar_t sqrt_d = sqrt(d2);
607  eigen_vector0[0] = r1xr2[0] / sqrt_d;
608  eigen_vector0[1] = r1xr2[1] / sqrt_d;
609  eigen_vector0[2] = r1xr2[2] / sqrt_d;
610  return;
611  }
612 }
613 
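// Computes a second eigenvector of A given a unit eigenvector evec0 and the
// eigenvalue eval1: an orthonormal basis {U, V} of the plane orthogonal to
// evec0 is built, the problem is reduced to the 2x2 system with entries
// U.(A - eval1*I)U, U.(A - eval1*I)V and V.(A - eval1*I)V, and a null vector
// of its numerically dominant row gives the in-plane coefficients of the
// eigenvector.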
614 template <typename scalar_t>
615 OPEN3D_HOST_DEVICE void ComputeEigenvector1(const scalar_t* A,
616  const scalar_t* evec0,
617  const scalar_t eval1,
618  scalar_t* eigen_vector1) {
619  scalar_t U[3];
620  if (abs(evec0[0]) > abs(evec0[1])) {
621  scalar_t inv_length =
622  1.0 / sqrt(evec0[0] * evec0[0] + evec0[2] * evec0[2]);
623  U[0] = -evec0[2] * inv_length;
624  U[1] = 0.0;
625  U[2] = evec0[0] * inv_length;
626  } else {
627  scalar_t inv_length =
628  1.0 / sqrt(evec0[1] * evec0[1] + evec0[2] * evec0[2]);
629  U[0] = 0.0;
630  U[1] = evec0[2] * inv_length;
631  U[2] = -evec0[1] * inv_length;
632  }
633  scalar_t V[3], AU[3], AV[3];
634  core::linalg::kernel::cross_3x1(evec0, U, V);
635  core::linalg::kernel::matmul3x3_3x1(A, U, AU);
636  core::linalg::kernel::matmul3x3_3x1(A, V, AV);
637 
638  scalar_t m00 = core::linalg::kernel::dot_3x1(U, AU) - eval1;
639  scalar_t m01 = core::linalg::kernel::dot_3x1(U, AV);
640  scalar_t m11 = core::linalg::kernel::dot_3x1(V, AV) - eval1;
641 
642  scalar_t absM00 = abs(m00);
643  scalar_t absM01 = abs(m01);
644  scalar_t absM11 = abs(m11);
645  scalar_t max_abs_comp;
646 
647  if (absM00 >= absM11) {
648  max_abs_comp = max(absM00, absM01);
649  if (max_abs_comp > 0) {
650  if (absM00 >= absM01) {
651  m01 /= m00;
652  m00 = 1 / sqrt(1 + m01 * m01);
653  m01 *= m00;
654  } else {
655  m00 /= m01;
656  m01 = 1 / sqrt(1 + m00 * m00);
657  m00 *= m01;
658  }
659  eigen_vector1[0] = m01 * U[0] - m00 * V[0];
660  eigen_vector1[1] = m01 * U[1] - m00 * V[1];
661  eigen_vector1[2] = m01 * U[2] - m00 * V[2];
662  return;
663  } else {
664  eigen_vector1[0] = U[0];
665  eigen_vector1[1] = U[1];
666  eigen_vector1[2] = U[2];
667  return;
668  }
669  } else {
670  max_abs_comp = max(absM11, absM01);
671  if (max_abs_comp > 0) {
672  if (absM11 >= absM01) {
673  m01 /= m11;
674  m11 = 1 / sqrt(1 + m01 * m01);
675  m01 *= m11;
676  } else {
677  m11 /= m01;
678  m01 = 1 / sqrt(1 + m11 * m11);
679  m11 *= m01;
680  }
681  eigen_vector1[0] = m11 * U[0] - m01 * V[0];
682  eigen_vector1[1] = m11 * U[1] - m01 * V[1];
683  eigen_vector1[2] = m11 * U[2] - m01 * V[2];
684  return;
685  } else {
686  eigen_vector1[0] = U[0];
687  eigen_vector1[1] = U[1];
688  eigen_vector1[2] = U[2];
689  return;
690  }
691  }
692 }
693 
694 template <typename scalar_t>
695 OPEN3D_HOST_DEVICE void EstimatePointWiseNormalsWithFastEigen3x3(
696  const scalar_t* covariance_ptr, scalar_t* normals_ptr) {
697  // Based on:
698  // https://www.geometrictools.com/Documentation/RobustEigenSymmetric3x3.pdf
699  // which handles edge cases like points on a plane.
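 // The eigenvalues of the scaled matrix are obtained in closed form: with
 // q = tr(A)/3 and p = sqrt(||A - q*I||_F^2 / 6), the matrix B = (A - q*I)/p
 // has a determinant whose half-value fixes an angle theta = acos(det/2)/3,
 // and the three eigenvalues are q + p * beta_k with
 // beta_k = 2*cos(theta + 2k*pi/3). The eigenvector of the smallest
 // eigenvalue is returned as the normal.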
700  scalar_t max_coeff = covariance_ptr[0];
701 
702  for (int i = 1; i < 9; ++i) {
703  if (max_coeff < covariance_ptr[i]) {
704  max_coeff = covariance_ptr[i];
705  }
706  }
707 
708  if (max_coeff == 0) {
709  normals_ptr[0] = 0.0;
710  normals_ptr[1] = 0.0;
711  normals_ptr[2] = 0.0;
712  return;
713  }
714 
715  scalar_t A[9] = {0};
716 
717  for (int i = 0; i < 9; ++i) {
718  A[i] = covariance_ptr[i] / max_coeff;
719  }
720 
721  scalar_t norm = A[1] * A[1] + A[2] * A[2] + A[5] * A[5];
722 
723  if (norm > 0) {
724  scalar_t eval[3];
725  scalar_t evec0[3];
726  scalar_t evec1[3];
727  scalar_t evec2[3];
728 
729  scalar_t q = (A[0] + A[4] + A[8]) / 3.0;
730 
731  scalar_t b00 = A[0] - q;
732  scalar_t b11 = A[4] - q;
733  scalar_t b22 = A[8] - q;
734 
735  scalar_t p =
736  sqrt((b00 * b00 + b11 * b11 + b22 * b22 + norm * 2.0) / 6.0);
737 
738  scalar_t c00 = b11 * b22 - A[5] * A[5];
739  scalar_t c01 = A[1] * b22 - A[5] * A[2];
740  scalar_t c02 = A[1] * A[5] - b11 * A[2];
741  scalar_t det = (b00 * c00 - A[1] * c01 + A[2] * c02) / (p * p * p);
742 
743  scalar_t half_det = det * 0.5;
744  half_det = min(max(half_det, static_cast<scalar_t>(-1.0)),
745  static_cast<scalar_t>(1.0));
746 
747  scalar_t angle = acos(half_det) / 3.0;
748  const scalar_t two_thirds_pi = 2.09439510239319549;
749 
750  scalar_t beta2 = cos(angle) * 2.0;
751  scalar_t beta0 = cos(angle + two_thirds_pi) * 2.0;
752  scalar_t beta1 = -(beta0 + beta2);
753 
754  eval[0] = q + p * beta0;
755  eval[1] = q + p * beta1;
756  eval[2] = q + p * beta2;
757 
758  if (half_det >= 0) {
759  ComputeEigenvector0<scalar_t>(A, eval[2], evec2);
760 
761  if (eval[2] < eval[0] && eval[2] < eval[1]) {
762  normals_ptr[0] = evec2[0];
763  normals_ptr[1] = evec2[1];
764  normals_ptr[2] = evec2[2];
765 
766  return;
767  }
768 
769  ComputeEigenvector1<scalar_t>(A, evec2, eval[1], evec1);
770 
771  if (eval[1] < eval[0] && eval[1] < eval[2]) {
772  normals_ptr[0] = evec1[0];
773  normals_ptr[1] = evec1[1];
774  normals_ptr[2] = evec1[2];
775 
776  return;
777  }
778 
779  normals_ptr[0] = evec1[1] * evec2[2] - evec1[2] * evec2[1];
780  normals_ptr[1] = evec1[2] * evec2[0] - evec1[0] * evec2[2];
781  normals_ptr[2] = evec1[0] * evec2[1] - evec1[1] * evec2[0];
782 
783  return;
784  } else {
785  ComputeEigenvector0<scalar_t>(A, eval[0], evec0);
786 
787  if (eval[0] < eval[1] && eval[0] < eval[2]) {
788  normals_ptr[0] = evec0[0];
789  normals_ptr[1] = evec0[1];
790  normals_ptr[2] = evec0[2];
791  return;
792  }
793 
794  ComputeEigenvector1<scalar_t>(A, evec0, eval[1], evec1);
795 
796  if (eval[1] < eval[0] && eval[1] < eval[2]) {
797  normals_ptr[0] = evec1[0];
798  normals_ptr[1] = evec1[1];
799  normals_ptr[2] = evec1[2];
800  return;
801  }
802 
803  normals_ptr[0] = evec0[1] * evec1[2] - evec0[2] * evec1[1];
804  normals_ptr[1] = evec0[2] * evec1[0] - evec0[0] * evec1[2];
805  normals_ptr[2] = evec0[0] * evec1[1] - evec0[1] * evec1[0];
806  return;
807  }
808  } else {
809  if (covariance_ptr[0] < covariance_ptr[4] &&
810  covariance_ptr[0] < covariance_ptr[8]) {
811  normals_ptr[0] = 1.0;
812  normals_ptr[1] = 0.0;
813  normals_ptr[2] = 0.0;
814  return;
815  } else if (covariance_ptr[4] < covariance_ptr[0] &&
816  covariance_ptr[4] < covariance_ptr[8]) {
817  normals_ptr[0] = 0.0;
818  normals_ptr[1] = 1.0;
819  normals_ptr[2] = 0.0;
820  return;
821  } else {
822  normals_ptr[0] = 0.0;
823  normals_ptr[1] = 0.0;
824  normals_ptr[2] = 1.0;
825  return;
826  }
827  }
828 }
829 
830 #if defined(__CUDACC__)
831 void EstimateNormalsFromCovariancesCUDA
832 #else
833 void EstimateNormalsFromCovariancesCPU
834 #endif
835  (const core::Tensor& covariances,
836  core::Tensor& normals,
837  const bool has_normals) {
838  core::Dtype dtype = covariances.GetDtype();
839  int64_t n = covariances.GetLength();
840 
841  DISPATCH_FLOAT_DTYPE_TO_TEMPLATE(dtype, [&]() {
842  const scalar_t* covariances_ptr = covariances.GetDataPtr<scalar_t>();
843  scalar_t* normals_ptr = normals.GetDataPtr<scalar_t>();
844 
845  core::ParallelFor(
846  covariances.GetDevice(), n,
847  [=] OPEN3D_DEVICE(int64_t workload_idx) {
848  int32_t covariances_offset = 9 * workload_idx;
849  int32_t normals_offset = 3 * workload_idx;
850  scalar_t normals_output[3] = {0};
851  EstimatePointWiseNormalsWithFastEigen3x3<scalar_t>(
852  covariances_ptr + covariances_offset,
853  normals_output);
854 
855  if ((normals_output[0] * normals_output[0] +
856  normals_output[1] * normals_output[1] +
857  normals_output[2] * normals_output[2]) == 0.0 &&
858  !has_normals) {
859  normals_output[0] = 0.0;
860  normals_output[1] = 0.0;
861  normals_output[2] = 1.0;
862  }
863  if (has_normals) {
864  if ((normals_ptr[normals_offset] * normals_output[0] +
865  normals_ptr[normals_offset + 1] *
866  normals_output[1] +
867  normals_ptr[normals_offset + 2] *
868  normals_output[2]) < 0.0) {
869  normals_output[0] *= -1;
870  normals_output[1] *= -1;
871  normals_output[2] *= -1;
872  }
873  }
874 
875  normals_ptr[normals_offset] = normals_output[0];
876  normals_ptr[normals_offset + 1] = normals_output[1];
877  normals_ptr[normals_offset + 2] = normals_output[2];
878  });
879  });
880 
881  core::cuda::Synchronize(covariances.GetDevice());
882 }
883 
884 template <typename scalar_t>
885 OPEN3D_HOST_DEVICE void EstimatePointWiseColorGradientKernel(
886  const scalar_t* points_ptr,
887  const scalar_t* normals_ptr,
888  const scalar_t* colors_ptr,
889  const int32_t& idx_offset,
890  const int32_t* indices_ptr,
891  const int32_t& indices_count,
892  scalar_t* color_gradients_ptr) {
893  if (indices_count < 4) {
894  color_gradients_ptr[idx_offset] = 0;
895  color_gradients_ptr[idx_offset + 1] = 0;
896  color_gradients_ptr[idx_offset + 2] = 0;
897  } else {
898  scalar_t vt[3] = {points_ptr[idx_offset], points_ptr[idx_offset + 1],
899  points_ptr[idx_offset + 2]};
900 
901  scalar_t nt[3] = {normals_ptr[idx_offset], normals_ptr[idx_offset + 1],
902  normals_ptr[idx_offset + 2]};
903 
904  scalar_t it = (colors_ptr[idx_offset] + colors_ptr[idx_offset + 1] +
905  colors_ptr[idx_offset + 2]) /
906  3.0;
907 
908  scalar_t AtA[9] = {0};
909  scalar_t Atb[3] = {0};
910 
911  // approximate image gradient of vt's tangential plane
912  // projection (p') of a point p on a plane defined by
913  // normal n, where o is the closest point to p on the
914  // plane, is given by:
915  // p' = p - [(p - o).dot(n)] * n
916  // p' => p - [(p.dot(n) - s)] * n [where s = o.dot(n)]
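 // Each neighbour then contributes one row A_i = p'_i - p and one residual
 // b_i = I_i - I_t (mean intensity difference) to a least-squares system; the
 // 3x3 normal equations AtA * g = Atb, augmented with a row along the normal
 // so the gradient stays tangential, are solved below with solve_svd3x3.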
917 
918  // Computing the scalar s.
919  scalar_t s = vt[0] * nt[0] + vt[1] * nt[1] + vt[2] * nt[2];
920 
921  int i = 1;
922  for (; i < indices_count; i++) {
923  int64_t neighbour_idx_offset = 3 * indices_ptr[i];
924 
925  if (neighbour_idx_offset == -1) {
926  break;
927  }
928 
929  scalar_t vt_adj[3] = {points_ptr[neighbour_idx_offset],
930  points_ptr[neighbour_idx_offset + 1],
931  points_ptr[neighbour_idx_offset + 2]};
932 
933  // p' = p - d * n [where d = p.dot(n) - s]
934  // Computing the scalar d.
935  scalar_t d = vt_adj[0] * nt[0] + vt_adj[1] * nt[1] +
936  vt_adj[2] * nt[2] - s;
937 
938  // Computing the p' (projection of the point).
939  scalar_t vt_proj[3] = {vt_adj[0] - d * nt[0], vt_adj[1] - d * nt[1],
940  vt_adj[2] - d * nt[2]};
941 
942  scalar_t it_adj = (colors_ptr[neighbour_idx_offset + 0] +
943  colors_ptr[neighbour_idx_offset + 1] +
944  colors_ptr[neighbour_idx_offset + 2]) /
945  3.0;
946 
947  scalar_t A[3] = {vt_proj[0] - vt[0], vt_proj[1] - vt[1],
948  vt_proj[2] - vt[2]};
949 
950  AtA[0] += A[0] * A[0];
951  AtA[1] += A[1] * A[0];
952  AtA[2] += A[2] * A[0];
953  AtA[4] += A[1] * A[1];
954  AtA[5] += A[2] * A[1];
955  AtA[8] += A[2] * A[2];
956 
957  scalar_t b = it_adj - it;
958 
959  Atb[0] += A[0] * b;
960  Atb[1] += A[1] * b;
961  Atb[2] += A[2] * b;
962  }
963 
964  // Orthogonal constraint.
965  scalar_t A[3] = {(i - 1) * nt[0], (i - 1) * nt[1], (i - 1) * nt[2]};
966 
967  AtA[0] += A[0] * A[0];
968  AtA[1] += A[0] * A[1];
969  AtA[2] += A[0] * A[2];
970  AtA[4] += A[1] * A[1];
971  AtA[5] += A[1] * A[2];
972  AtA[8] += A[2] * A[2];
973 
974  // Symmetry.
975  AtA[3] = AtA[1];
976  AtA[6] = AtA[2];
977  AtA[7] = AtA[5];
978 
979  core::linalg::kernel::solve_svd3x3(AtA, Atb,
980  color_gradients_ptr + idx_offset);
981  }
982 }
983 
984 #if defined(__CUDACC__)
985 void EstimateColorGradientsUsingHybridSearchCUDA
986 #else
987 void EstimateColorGradientsUsingHybridSearchCPU
988 #endif
989  (const core::Tensor& points,
990  const core::Tensor& normals,
991  const core::Tensor& colors,
992  core::Tensor& color_gradients,
993  const double& radius,
994  const int64_t& max_nn) {
995  core::Dtype dtype = points.GetDtype();
996  int64_t n = points.GetLength();
997 
998  core::nns::NearestNeighborSearch tree(points, core::Int32);
999 
1000  bool check = tree.HybridIndex(radius);
1001  if (!check) {
1002  utility::LogError("NearestNeighborSearch::HybridIndex is not set.");
1003  }
1004 
1005  core::Tensor indices, distance, counts;
1006  std::tie(indices, distance, counts) =
1007  tree.HybridSearch(points, radius, max_nn);
1008 
1009  DISPATCH_FLOAT_DTYPE_TO_TEMPLATE(dtype, [&]() {
1010  auto points_ptr = points.GetDataPtr<scalar_t>();
1011  auto normals_ptr = normals.GetDataPtr<scalar_t>();
1012  auto colors_ptr = colors.GetDataPtr<scalar_t>();
1013  auto neighbour_indices_ptr = indices.GetDataPtr<int32_t>();
1014  auto neighbour_counts_ptr = counts.GetDataPtr<int32_t>();
1015  auto color_gradients_ptr = color_gradients.GetDataPtr<scalar_t>();
1016 
1017  core::ParallelFor(
1018  points.GetDevice(), n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
1019  // NNS [Hybrid Search].
1020  int32_t neighbour_offset = max_nn * workload_idx;
1021  // Count of valid correspondences per point.
1022  int32_t neighbour_count =
1023  neighbour_counts_ptr[workload_idx];
1024  int32_t idx_offset = 3 * workload_idx;
1025 
1026  EstimatePointWiseColorGradientKernel<scalar_t>(
1027  points_ptr, normals_ptr, colors_ptr, idx_offset,
1028  neighbour_indices_ptr + neighbour_offset,
1029  neighbour_count, color_gradients_ptr);
1030  });
1031  });
1032 
1033  core::cuda::Synchronize(points.GetDevice());
1034 }
1035 
1036 #if defined(__CUDACC__)
1037 void EstimateColorGradientsUsingKNNSearchCUDA
1038 #else
1039 void EstimateColorGradientsUsingKNNSearchCPU
1040 #endif
1041  (const core::Tensor& points,
1042  const core::Tensor& normals,
1043  const core::Tensor& colors,
1044  core::Tensor& color_gradients,
1045  const int64_t& max_nn) {
1046  core::Dtype dtype = points.GetDtype();
1047  int64_t n = points.GetLength();
1048 
1049  core::nns::NearestNeighborSearch tree(points, core::Int32);
1050 
1051  bool check = tree.KnnIndex();
1052  if (!check) {
1053  utility::LogError("KnnIndex is not set.");
1054  }
1055 
1056  core::Tensor indices, distance;
1057  std::tie(indices, distance) = tree.KnnSearch(points, max_nn);
1058 
1059  indices = indices.To(core::Int32).Contiguous();
1060  int64_t nn_count = indices.GetShape()[1];
1061 
1062  if (nn_count < 4) {
1063  utility::LogError(
1064  "Not enough neighbors to compute Covariances / Normals. "
1065  "Try "
1066  "changing the search parameter.");
1067  }
1068 
1069  DISPATCH_FLOAT_DTYPE_TO_TEMPLATE(dtype, [&]() {
1070  auto points_ptr = points.GetDataPtr<scalar_t>();
1071  auto normals_ptr = normals.GetDataPtr<scalar_t>();
1072  auto colors_ptr = colors.GetDataPtr<scalar_t>();
1073  auto neighbour_indices_ptr = indices.GetDataPtr<int32_t>();
1074  auto color_gradients_ptr = color_gradients.GetDataPtr<scalar_t>();
1075 
1076  core::ParallelFor(
1077  points.GetDevice(), n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
1078  int32_t neighbour_offset = max_nn * workload_idx;
1079  int32_t idx_offset = 3 * workload_idx;
1080 
1081  EstimatePointWiseColorGradientKernel<scalar_t>(
1082  points_ptr, normals_ptr, colors_ptr, idx_offset,
1083  neighbour_indices_ptr + neighbour_offset, nn_count,
1084  color_gradients_ptr);
1085  });
1086  });
1087 
1088  core::cuda::Synchronize(points.GetDevice());
1089 }
1090 
1091 #if defined(__CUDACC__)
1092 void EstimateColorGradientsUsingRadiusSearchCUDA
1093 #else
1094 void EstimateColorGradientsUsingRadiusSearchCPU
1095 #endif
1096  (const core::Tensor& points,
1097  const core::Tensor& normals,
1098  const core::Tensor& colors,
1099  core::Tensor& color_gradients,
1100  const double& radius) {
1101  core::Dtype dtype = points.GetDtype();
1102  int64_t n = points.GetLength();
1103 
1104  core::nns::NearestNeighborSearch tree(points, core::Int32);
1105 
1106  bool check = tree.FixedRadiusIndex(radius);
1107  if (!check) {
1108  utility::LogError("RadiusIndex is not set.");
1109  }
1110 
1111  core::Tensor indices, distance, counts;
1112  std::tie(indices, distance, counts) =
1113  tree.FixedRadiusSearch(points, radius);
1114 
1115  indices = indices.To(core::Int32).Contiguous();
1116  counts = counts.Contiguous();
1117 
1118  DISPATCH_FLOAT_DTYPE_TO_TEMPLATE(dtype, [&]() {
1119  auto points_ptr = points.GetDataPtr<scalar_t>();
1120  auto normals_ptr = normals.GetDataPtr<scalar_t>();
1121  auto colors_ptr = colors.GetDataPtr<scalar_t>();
1122  auto neighbour_indices_ptr = indices.GetDataPtr<int32_t>();
1123  auto neighbour_counts_ptr = counts.GetDataPtr<int32_t>();
1124  auto color_gradients_ptr = color_gradients.GetDataPtr<scalar_t>();
1125 
1126  core::ParallelFor(
1127  points.GetDevice(), n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
1128  int32_t neighbour_offset =
1129  neighbour_counts_ptr[workload_idx];
1130  // Count of valid correspondences per point.
1131  const int32_t neighbour_count =
1132  (neighbour_counts_ptr[workload_idx + 1] -
1133  neighbour_counts_ptr[workload_idx]);
1134  int32_t idx_offset = 3 * workload_idx;
1135 
1136  EstimatePointWiseColorGradientKernel<scalar_t>(
1137  points_ptr, normals_ptr, colors_ptr, idx_offset,
1138  neighbour_indices_ptr + neighbour_offset,
1139  neighbour_count, color_gradients_ptr);
1140  });
1141  });
1142 
1143  core::cuda::Synchronize(points.GetDevice());
1144 }
1145 
1146 } // namespace pointcloud
1147 } // namespace kernel
1148 } // namespace geometry
1149 } // namespace t
1150 } // namespace open3d