Open3D (C++ API)  0.13.0
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros
RGBDOdometryJacobianImpl.h
Go to the documentation of this file.
1 // ----------------------------------------------------------------------------
2 // - Open3D: www.open3d.org -
3 // ----------------------------------------------------------------------------
4 // The MIT License (MIT)
5 //
6 // Copyright (c) 2018 www.open3d.org
7 //
8 // Permission is hereby granted, free of charge, to any person obtaining a copy
9 // of this software and associated documentation files (the "Software"), to deal
10 // in the Software without restriction, including without limitation the rights
11 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 // copies of the Software, and to permit persons to whom the Software is
13 // furnished to do so, subject to the following conditions:
14 //
15 // The above copyright notice and this permission notice shall be included in
16 // all copies or substantial portions of the Software.
17 //
18 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 // IN THE SOFTWARE.
25 // ----------------------------------------------------------------------------
26 
27 // Private header. Do not include in Open3d.h.
28 
29 #include "open3d/core/Tensor.h"
32 
33 namespace open3d {
34 namespace t {
35 namespace pipelines {
36 namespace kernel {
37 namespace odometry {
38 
39 using t::geometry::kernel::NDArrayIndexer;
40 using t::geometry::kernel::TransformIndexer;
41 
42 #ifndef __CUDACC__
43 using std::abs;
44 using std::max;
45 #endif
46 
// Derivative of the Huber loss w.r.t. the residual r: the identity inside
// the quadratic region (|r| < delta), clamped to +/-delta outside it.
inline OPEN3D_HOST_DEVICE float HuberDeriv(float r, float delta) {
    const float magnitude = abs(r);
    return (magnitude < delta) ? r : delta * Sign(r);
}
51 
// Huber loss of residual r: quadratic (0.5 * r^2) inside |r| < delta,
// linear (delta * |r| - 0.5 * delta^2) outside, so large residuals are
// penalized less aggressively than with a pure squared loss.
// Fix: use 0.5f literals — the previous 0.5 (double) promoted the whole
// expression to double precision inside a float device function, forcing a
// float->double->float round trip on the GPU for no accuracy gain.
inline OPEN3D_HOST_DEVICE float HuberLoss(float r, float delta) {
    float abs_r = abs(r);
    return abs_r < delta ? 0.5f * r * r : delta * abs_r - 0.5f * delta * delta;
}
56 
        // Pixel coordinates (x, y) of the source vertex to process.
        int x,
        int y,
        // Correspondences with |r| above this threshold are rejected.
        const float depth_outlier_trunc,
        // Per-pixel 3D vertex maps of the source and target frames.
        const NDArrayIndexer& source_vertex_indexer,
        const NDArrayIndexer& target_vertex_indexer,
        // Per-pixel normal map of the target frame.
        const NDArrayIndexer& target_normal_indexer,
        // Rigid source-to-target transform plus pinhole projection.
        const TransformIndexer& ti,
        // Output: 6-element Jacobian row of the point-to-plane residual.
        float* J_ij,
        // Output: point-to-plane residual r (valid only when returning true).
        float& r) {
    float* source_v = source_vertex_indexer.GetDataPtr<float>(x, y);
    // Invalid (NaN) source vertex: no correspondence at this pixel.
    if (ISNAN(source_v[0])) {
        return false;
    }

    // Transform source points to the target camera's coordinate space.
    float T_source_to_target_v[3], u, v;
    ti.RigidTransform(source_v[0], source_v[1], source_v[2],
                      &T_source_to_target_v[0], &T_source_to_target_v[1],
                      &T_source_to_target_v[2]);
    ti.Project(T_source_to_target_v[0], T_source_to_target_v[1],
               T_source_to_target_v[2], &u, &v);
    // Round to the nearest target pixel.
    u = roundf(u);
    v = roundf(v);

    // Reject points behind the camera or projecting outside the target image.
    if (T_source_to_target_v[2] < 0 ||
        !target_vertex_indexer.InBoundary(u, v)) {
        return false;
    }

    int ui = static_cast<int>(u);
    int vi = static_cast<int>(v);
    float* target_v = target_vertex_indexer.GetDataPtr<float>(ui, vi);
    float* target_n = target_normal_indexer.GetDataPtr<float>(ui, vi);
    // Reject if the target vertex or normal at the projection is invalid.
    if (ISNAN(target_v[0]) || ISNAN(target_n[0])) {
        return false;
    }

    // Point-to-plane residual: dot(T * v_source - v_target, n_target).
    r = (T_source_to_target_v[0] - target_v[0]) * target_n[0] +
        (T_source_to_target_v[1] - target_v[1]) * target_n[1] +
        (T_source_to_target_v[2] - target_v[2]) * target_n[2];
    // Truncate outliers by residual magnitude.
    if (abs(r) > depth_outlier_trunc) {
        return false;
    }

    // Jacobian w.r.t. the 6-DoF pose increment:
    // first three entries are cross(T * v_source, n_target) (rotation part),
    // last three are n_target itself (translation part).
    J_ij[0] = -T_source_to_target_v[2] * target_n[1] +
              T_source_to_target_v[1] * target_n[2];
    J_ij[1] = T_source_to_target_v[2] * target_n[0] -
              T_source_to_target_v[0] * target_n[2];
    J_ij[2] = -T_source_to_target_v[1] * target_n[0] +
              T_source_to_target_v[0] * target_n[1];
    J_ij[3] = target_n[0];
    J_ij[4] = target_n[1];
    J_ij[5] = target_n[2];

    return true;
}
114 
        // Pixel coordinates (x, y) of the source vertex to process.
        int x,
        int y,
        // Depth differences larger than this are rejected as outliers.
        const float depth_outlier_trunc,
        // Per-pixel depth maps of the source and target frames.
        const NDArrayIndexer& source_depth_indexer,
        const NDArrayIndexer& target_depth_indexer,
        // Per-pixel intensity maps of the source and target frames.
        const NDArrayIndexer& source_intensity_indexer,
        const NDArrayIndexer& target_intensity_indexer,
        // Sobel gradients of the target intensity image (x and y direction).
        const NDArrayIndexer& target_intensity_dx_indexer,
        const NDArrayIndexer& target_intensity_dy_indexer,
        // Per-pixel 3D vertex map of the source frame.
        const NDArrayIndexer& source_vertex_indexer,
        // Rigid source-to-target transform plus pinhole projection.
        const TransformIndexer& ti,
        // Output: 6-element Jacobian row of the photometric residual.
        float* J_I,
        // Output: intensity residual (valid only when returning true).
        float& r_I) {
    // Normalization factor for the 3x3 Sobel kernel (1/8).
    const float sobel_scale = 0.125;

    float* source_v = source_vertex_indexer.GetDataPtr<float>(x, y);
    // Invalid (NaN) source vertex: no correspondence at this pixel.
    if (ISNAN(source_v[0])) {
        return false;
    }

    // Transform source points to the target camera's coordinate space.
    float T_source_to_target_v[3], u_tf, v_tf;
    ti.RigidTransform(source_v[0], source_v[1], source_v[2],
                      &T_source_to_target_v[0], &T_source_to_target_v[1],
                      &T_source_to_target_v[2]);
    ti.Project(T_source_to_target_v[0], T_source_to_target_v[1],
               T_source_to_target_v[2], &u_tf, &v_tf);
    // Round to the nearest target pixel.
    int u_t = int(roundf(u_tf));
    int v_t = int(roundf(v_tf));

    // Reject points behind the camera or projecting outside the target image.
    if (T_source_to_target_v[2] < 0 ||
        !target_depth_indexer.InBoundary(u_t, v_t)) {
        return false;
    }

    float fx, fy;
    ti.GetFocalLength(&fx, &fy);

    // Use the depth difference only as a gate: reject invalid target depth
    // and occlusions/outliers before forming the photometric term.
    float depth_t = *target_depth_indexer.GetDataPtr<float>(u_t, v_t);
    float diff_D = depth_t - T_source_to_target_v[2];
    if (ISNAN(depth_t) || abs(diff_D) > depth_outlier_trunc) {
        return false;
    }

    // Photometric residual and target image gradients at the projection.
    float diff_I = *target_intensity_indexer.GetDataPtr<float>(u_t, v_t) -
                   *source_intensity_indexer.GetDataPtr<float>(x, y);
    float dIdx = sobel_scale *
                 (*target_intensity_dx_indexer.GetDataPtr<float>(u_t, v_t));
    float dIdy = sobel_scale *
                 (*target_intensity_dy_indexer.GetDataPtr<float>(u_t, v_t));

    // Chain the image gradient through the pinhole projection:
    // c0, c1, c2 are d(I)/d(X), d(I)/d(Y), d(I)/d(Z) in camera coordinates.
    float invz = 1 / T_source_to_target_v[2];
    float c0 = dIdx * fx * invz;
    float c1 = dIdy * fy * invz;
    float c2 = -(c0 * T_source_to_target_v[0] + c1 * T_source_to_target_v[1]) *
               invz;

    // Jacobian w.r.t. the 6-DoF pose increment (rotation first, then
    // translation), mirroring the layout of GetJacobianPointToPlane.
    J_I[0] = (-T_source_to_target_v[2] * c1 + T_source_to_target_v[1] * c2);
    J_I[1] = (T_source_to_target_v[2] * c0 - T_source_to_target_v[0] * c2);
    J_I[2] = (-T_source_to_target_v[1] * c0 + T_source_to_target_v[0] * c1);
    J_I[3] = (c0);
    J_I[4] = (c1);
    J_I[5] = (c2);
    r_I = diff_I;

    return true;
}
183 
        // Pixel coordinates (x, y) of the source vertex to process.
        int x,
        int y,
        // Depth differences larger than this are rejected as outliers.
        const float depth_outlier_trunc,
        // Per-pixel depth maps of the source and target frames.
        const NDArrayIndexer& source_depth_indexer,
        const NDArrayIndexer& target_depth_indexer,
        // Per-pixel intensity maps of the source and target frames.
        const NDArrayIndexer& source_intensity_indexer,
        const NDArrayIndexer& target_intensity_indexer,
        // Sobel gradients of the target depth image (x and y direction).
        const NDArrayIndexer& target_depth_dx_indexer,
        const NDArrayIndexer& target_depth_dy_indexer,
        // Sobel gradients of the target intensity image (x and y direction).
        const NDArrayIndexer& target_intensity_dx_indexer,
        const NDArrayIndexer& target_intensity_dy_indexer,
        // Per-pixel 3D vertex map of the source frame.
        const NDArrayIndexer& source_vertex_indexer,
        // Rigid source-to-target transform plus pinhole projection.
        const TransformIndexer& ti,
        // Outputs: 6-element Jacobian rows of the intensity and depth terms.
        float* J_I,
        float* J_D,
        // Outputs: weighted intensity and depth residuals (valid on true).
        float& r_I,
        float& r_D) {
    // sqrt 0.5, according to
    // http://redwood-data.org/indoor_lidar_rgbd/supp.pdf
    // Weights applied in sqrt form so the squared residuals are each
    // weighted by lambda = 0.5.
    const float sqrt_lambda_intensity = 0.707;
    const float sqrt_lambda_depth = 0.707;
    // Normalization factor for the 3x3 Sobel kernel (1/8).
    const float sobel_scale = 0.125;

    float* source_v = source_vertex_indexer.GetDataPtr<float>(x, y);
    // Invalid (NaN) source vertex: no correspondence at this pixel.
    if (ISNAN(source_v[0])) {
        return false;
    }

    // Transform source points to the target camera coordinate space.
    float T_source_to_target_v[3], u_tf, v_tf;
    ti.RigidTransform(source_v[0], source_v[1], source_v[2],
                      &T_source_to_target_v[0], &T_source_to_target_v[1],
                      &T_source_to_target_v[2]);
    ti.Project(T_source_to_target_v[0], T_source_to_target_v[1],
               T_source_to_target_v[2], &u_tf, &v_tf);
    // Round to the nearest target pixel.
    int u_t = int(roundf(u_tf));
    int v_t = int(roundf(v_tf));

    // Reject points behind the camera or projecting outside the target image.
    if (T_source_to_target_v[2] < 0 ||
        !target_depth_indexer.InBoundary(u_t, v_t)) {
        return false;
    }

    float fx, fy;
    ti.GetFocalLength(&fx, &fy);

    // Depth residual; also gates invalid target depth and outliers.
    float depth_t = *target_depth_indexer.GetDataPtr<float>(u_t, v_t);
    float diff_D = depth_t - T_source_to_target_v[2];
    if (ISNAN(depth_t) || abs(diff_D) > depth_outlier_trunc) {
        return false;
    }

    // Depth image gradients at the projection; NaN gradients (e.g. next to
    // missing depth) invalidate the correspondence.
    float dDdx = sobel_scale *
                 (*target_depth_dx_indexer.GetDataPtr<float>(u_t, v_t));
    float dDdy = sobel_scale *
                 (*target_depth_dy_indexer.GetDataPtr<float>(u_t, v_t));
    if (ISNAN(dDdx) || ISNAN(dDdy)) {
        return false;
    }

    // Photometric residual and target intensity gradients at the projection.
    float diff_I = *target_intensity_indexer.GetDataPtr<float>(u_t, v_t) -
                   *source_intensity_indexer.GetDataPtr<float>(x, y);
    float dIdx = sobel_scale *
                 (*target_intensity_dx_indexer.GetDataPtr<float>(u_t, v_t));
    float dIdy = sobel_scale *
                 (*target_intensity_dy_indexer.GetDataPtr<float>(u_t, v_t));

    // Chain the image gradients through the pinhole projection:
    // (c0, c1, c2) for intensity and (d0, d1, d2) for depth are the
    // derivatives w.r.t. the camera-space point (X, Y, Z).
    float invz = 1 / T_source_to_target_v[2];
    float c0 = dIdx * fx * invz;
    float c1 = dIdy * fy * invz;
    float c2 = -(c0 * T_source_to_target_v[0] + c1 * T_source_to_target_v[1]) *
               invz;
    float d0 = dDdx * fx * invz;
    float d1 = dDdy * fy * invz;
    float d2 = -(d0 * T_source_to_target_v[0] + d1 * T_source_to_target_v[1]) *
               invz;

    // Weighted intensity Jacobian (rotation xyz, then translation xyz).
    J_I[0] = sqrt_lambda_intensity *
             (-T_source_to_target_v[2] * c1 + T_source_to_target_v[1] * c2);
    J_I[1] = sqrt_lambda_intensity *
             (T_source_to_target_v[2] * c0 - T_source_to_target_v[0] * c2);
    J_I[2] = sqrt_lambda_intensity *
             (-T_source_to_target_v[1] * c0 + T_source_to_target_v[0] * c1);
    J_I[3] = sqrt_lambda_intensity * (c0);
    J_I[4] = sqrt_lambda_intensity * (c1);
    J_I[5] = sqrt_lambda_intensity * (c2);
    r_I = sqrt_lambda_intensity * diff_I;

    // Weighted depth Jacobian. The extra -T_v[1] / +T_v[0] / -1 terms come
    // from the residual's direct dependence on the transformed z coordinate
    // (diff_D = depth_t - Z), in addition to the warped-lookup dependence.
    J_D[0] = sqrt_lambda_depth *
             ((-T_source_to_target_v[2] * d1 + T_source_to_target_v[1] * d2) -
              T_source_to_target_v[1]);
    J_D[1] = sqrt_lambda_depth *
             ((T_source_to_target_v[2] * d0 - T_source_to_target_v[0] * d2) +
              T_source_to_target_v[0]);
    J_D[2] = sqrt_lambda_depth *
             ((-T_source_to_target_v[1] * d0 + T_source_to_target_v[0] * d1));
    J_D[3] = sqrt_lambda_depth * (d0);
    J_D[4] = sqrt_lambda_depth * (d1);
    J_D[5] = sqrt_lambda_depth * (d2 - 1.0f);

    r_D = sqrt_lambda_depth * diff_D;

    return true;
}
289 
290 } // namespace odometry
291 } // namespace kernel
292 } // namespace pipelines
293 } // namespace t
294 } // namespace open3d
Definition: GeometryIndexer.h:168
#define ISNAN(X)
Definition: GeometryMacros.h:38
OPEN3D_HOST_DEVICE void Project(float x_in, float y_in, float z_in, float *u_out, float *v_out) const
Project a 3D coordinate in camera coordinate to a 2D uv coordinate.
Definition: GeometryIndexer.h:117
OPEN3D_HOST_DEVICE int Sign(int x)
Definition: GeometryMacros.h:62
Helper class for converting coordinates/indices between 3D/3D, 3D/2D, 2D/3D.
Definition: GeometryIndexer.h:42
OPEN3D_HOST_DEVICE bool GetJacobianIntensity(int x, int y, const float depth_outlier_trunc, const NDArrayIndexer &source_depth_indexer, const NDArrayIndexer &target_depth_indexer, const NDArrayIndexer &source_intensity_indexer, const NDArrayIndexer &target_intensity_indexer, const NDArrayIndexer &target_intensity_dx_indexer, const NDArrayIndexer &target_intensity_dy_indexer, const NDArrayIndexer &source_vertex_indexer, const TransformIndexer &ti, float *J_I, float &r_I)
Definition: RGBDOdometryJacobianImpl.h:115
OPEN3D_HOST_DEVICE void RigidTransform(float x_in, float y_in, float z_in, float *x_out, float *y_out, float *z_out) const
Transform a 3D coordinate in camera coordinate to world coordinate.
Definition: GeometryIndexer.h:79
OPEN3D_HOST_DEVICE T * GetDataPtr(int64_t x) const
Definition: GeometryIndexer.h:324
#define OPEN3D_HOST_DEVICE
Definition: CUDAUtils.h:56
OPEN3D_HOST_DEVICE void GetFocalLength(float *fx, float *fy) const
Definition: GeometryIndexer.h:139
OPEN3D_HOST_DEVICE float HuberLoss(float r, float delta)
Definition: RGBDOdometryJacobianImpl.h:52
const char const char value recording_handle imu_sample recording_handle uint8_t size_t data_size k4a_record_configuration_t config target_format k4a_capture_t capture_handle k4a_imu_sample_t imu_sample playback_handle k4a_logging_message_cb_t void min_level device_handle k4a_imu_sample_t timeout_in_ms capture_handle capture_handle capture_handle image_handle temperature_c int
Definition: K4aPlugin.cpp:479
OPEN3D_HOST_DEVICE bool GetJacobianHybrid(int x, int y, const float depth_outlier_trunc, const NDArrayIndexer &source_depth_indexer, const NDArrayIndexer &target_depth_indexer, const NDArrayIndexer &source_intensity_indexer, const NDArrayIndexer &target_intensity_indexer, const NDArrayIndexer &target_depth_dx_indexer, const NDArrayIndexer &target_depth_dy_indexer, const NDArrayIndexer &target_intensity_dx_indexer, const NDArrayIndexer &target_intensity_dy_indexer, const NDArrayIndexer &source_vertex_indexer, const TransformIndexer &ti, float *J_I, float *J_D, float &r_I, float &r_D)
Definition: RGBDOdometryJacobianImpl.h:184
OPEN3D_HOST_DEVICE float HuberDeriv(float r, float delta)
Definition: RGBDOdometryJacobianImpl.h:47
Definition: PinholeCameraIntrinsic.cpp:35
OPEN3D_HOST_DEVICE bool InBoundary(float x, float y) const
Definition: GeometryIndexer.h:302
#define max(x, y)
Definition: SVD3x3CPU.h:38
OPEN3D_HOST_DEVICE bool GetJacobianPointToPlane(int x, int y, const float depth_outlier_trunc, const NDArrayIndexer &source_vertex_indexer, const NDArrayIndexer &target_vertex_indexer, const NDArrayIndexer &target_normal_indexer, const TransformIndexer &ti, float *J_ij, float &r)
Definition: RGBDOdometryJacobianImpl.h:57