Pipelines

colored_icp_registration.py

# ----------------------------------------------------------------------------
# -                        Open3D: www.open3d.org                            -
# ----------------------------------------------------------------------------
# Copyright (c) 2018-2024 www.open3d.org
# SPDX-License-Identifier: MIT
# ----------------------------------------------------------------------------
"""ICP variant that uses both geometry and color for registration"""

import open3d as o3d
import numpy as np
import copy


def draw_registration_result(source, target, transformation):
    source_temp = copy.deepcopy(source)
    source_temp.transform(transformation)
    o3d.visualization.draw([source_temp, target])


if __name__ == "__main__":
    print("Load two point clouds and show initial pose ...")
    ply_data = o3d.data.DemoColoredICPPointClouds()
    source = o3d.io.read_point_cloud(ply_data.paths[0])
    target = o3d.io.read_point_cloud(ply_data.paths[1])

    # Draw initial alignment.
    current_transformation = np.identity(4)
    # draw_registration_result(source, target, current_transformation)
    print(current_transformation)

    # Colored point cloud registration.
    # This is an implementation of the following paper:
    # J. Park, Q.-Y. Zhou, V. Koltun,
    # Colored Point Cloud Registration Revisited, ICCV 2017.
    voxel_radius = [0.04, 0.02, 0.01]
    max_iter = [50, 30, 14]
    current_transformation = np.identity(4)
    print("Colored point cloud registration ...\n")
    for scale in range(3):
        iter = max_iter[scale]
        radius = voxel_radius[scale]
        print([iter, radius, scale])

        print("1. Downsample with a voxel size %.2f" % radius)
        source_down = source.voxel_down_sample(radius)
        target_down = target.voxel_down_sample(radius)

        print("2. Estimate normal")
        source_down.estimate_normals(
            o3d.geometry.KDTreeSearchParamHybrid(radius=radius * 2, max_nn=30))
        target_down.estimate_normals(
            o3d.geometry.KDTreeSearchParamHybrid(radius=radius * 2, max_nn=30))

        print("3. Applying colored point cloud registration")
        result_icp = o3d.pipelines.registration.registration_colored_icp(
            source_down, target_down, radius, current_transformation,
            o3d.pipelines.registration.TransformationEstimationForColoredICP(),
            o3d.pipelines.registration.ICPConvergenceCriteria(
                relative_fitness=1e-6, relative_rmse=1e-6, max_iteration=iter))
        current_transformation = result_icp.transformation
        print(result_icp, "\n")
    # draw_registration_result(source, target, result_icp.transformation)
    print(current_transformation)
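
The estimator above uses the default weighting between the geometric and photometric terms. If the defaults struggle (e.g. on weakly textured scenes), the balance can be tuned through the estimator's lambda_geometric parameter. A minimal single-scale sketch follows; the 0.9 weight and 0.02 voxel size are illustrative choices, not recommendations.

# Values close to 1.0 weight the geometric term more heavily; smaller
# values give the color (photometric) term more influence.
result = o3d.pipelines.registration.registration_colored_icp(
    source_down, target_down, 0.02, np.identity(4),
    o3d.pipelines.registration.TransformationEstimationForColoredICP(
        lambda_geometric=0.9),
    o3d.pipelines.registration.ICPConvergenceCriteria(max_iteration=30))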

doppler_icp_registration.py

# ----------------------------------------------------------------------------
# -                        Open3D: www.open3d.org                            -
# ----------------------------------------------------------------------------
# Copyright (c) 2018-2024 www.open3d.org
# SPDX-License-Identifier: MIT
# ----------------------------------------------------------------------------
"""Example script to run Doppler ICP point cloud registration.

This script runs Doppler ICP and point-to-plane ICP on DemoDopplerICPSequence.

This is the implementation of the following paper:
B. Hexsel, H. Vhavle, Y. Chen,
DICP: Doppler Iterative Closest Point Algorithm, RSS 2022.

Usage:
python doppler_icp_registration.py [-h] \
    --source SOURCE --target TARGET [--device {cpu,cuda}]
"""

import argparse
import json
import os

import numpy as np
import open3d as o3d
import open3d.t.pipelines.registration as o3d_reg
from pyquaternion import Quaternion


def translation_quaternion_to_transform(translation,
                                        quaternion,
                                        inverse=False,
                                        quat_xyzw=False):
    """Converts translation and WXYZ quaternion to a transformation matrix.

    Args:
        translation: (3,) ndarray representing the translation vector.
        quaternion: (4,) ndarray representing the quaternion.
        inverse: If True, returns the inverse transformation.
        quat_xyzw: If True, this indicates that quaternion is in XYZW format.

    Returns:
        (4, 4) ndarray representing the transformation matrix.
    """
    if quat_xyzw:
        quaternion = np.roll(quaternion, 1)
    transform = Quaternion(quaternion).transformation_matrix  # [w, x, y, z]
    transform[:3, -1] = translation  # [x, y, z]
    return np.linalg.inv(transform) if inverse else transform


def load_tum_file(filename):
    """Loads poses in TUM RGBD format: [timestamp, x, y, z, qx, qy, qz, qw].

    Args:
        filename (string): Path to the TUM poses file.

    Returns:
        A tuple containing an array of 4x4 poses and timestamps.
    """
    # Load the TUM text file.
    data = np.loadtxt(filename, delimiter=' ')
    print('Loaded %d poses from %s (%.2f secs)' %
          (len(data), os.path.basename(filename), data[-1][0] - data[0][0]))

    # Parse timestamps and poses.
    timestamps = data[:, 0]
    poses = np.array([
        translation_quaternion_to_transform(tq[:3], tq[3:], quat_xyzw=True)
        for tq in data[:, 1:]
    ])
    return poses, timestamps


def get_calibration(demo_sequence):
    """Returns the vehicle to sensor calibration transformation and the time
    period (in secs) between sequential point cloud scans.

    Args:
        demo_sequence (DemoDopplerICPSequence): Doppler ICP dataset.

    Returns:
        A tuple of 4x4 array representing the transform, and the period.
    """
    with open(demo_sequence.calibration_path) as f:
        data = json.load(f)

    transform_vehicle_to_sensor = np.array(
        data['transform_vehicle_to_sensor']).reshape(4, 4)
    period = data['period']

    return transform_vehicle_to_sensor, period


def get_trajectory(demo_sequence):
    """Returns the ground truth trajectory of the dataset.

    Args:
        demo_sequence (DemoDopplerICPSequence): Doppler ICP dataset.

    Returns:
        An array of 4x4 poses for this sequence.
    """
    return load_tum_file(demo_sequence.trajectory_path)[0]


def get_ground_truth_pose(demo_sequence, source_idx, target_idx):
    """Returns the ground truth poses from the dataset.

    Args:
        demo_sequence (DemoDopplerICPSequence): Doppler ICP dataset.
        source_idx (int): Index of the source point cloud pose.
        target_idx (int): Index of the target point cloud pose.

    Returns:
        4x4 array representing the transformation between target and source.
    """
    poses = get_trajectory(demo_sequence)
    return np.linalg.inv(poses[target_idx]) @ poses[source_idx]


def run_doppler_icp(args):
    """Runs Doppler ICP on a given pair of point clouds.

    Args:
        args: Command line arguments.
    """
    # Setup data type and device.
    dtype = o3d.core.float32
    device = o3d.core.Device('CUDA:0' if args.device == 'cuda' else 'CPU:0')

    # Load the point clouds.
    demo_sequence = o3d.data.DemoDopplerICPSequence()
    source = o3d.t.io.read_point_cloud(demo_sequence.paths[args.source])
    target = o3d.t.io.read_point_cloud(demo_sequence.paths[args.target])

    # Load the calibration parameters.
    transform_vehicle_to_sensor, period = get_calibration(demo_sequence)

    # Downsample the point clouds.
    source_in_S = source.uniform_down_sample(5)
    target_in_S = target.uniform_down_sample(5)

    # Transform the Open3D point cloud from sensor to vehicle frame.
    source_in_V = source_in_S.to(device).transform(transform_vehicle_to_sensor)
    target_in_V = target_in_S.to(device).transform(transform_vehicle_to_sensor)

    # Move tensor to device.
    init_transform = o3d.core.Tensor(np.eye(4), device=device)
    transform_vehicle_to_sensor = o3d.core.Tensor(transform_vehicle_to_sensor,
                                                  device=device)

    # Compute normals for target.
    target_in_V.estimate_normals(radius=10.0, max_nn=30)

    # Compute unit direction vectors of the source points in the sensor frame.
    directions = source_in_S.point.positions.numpy()
    norms = np.tile(np.linalg.norm(directions, axis=1), (3, 1)).T
    directions = directions / norms
    source_in_V.point['directions'] = o3d.core.Tensor(directions, dtype, device)

    # Setup robust kernels.
    kernel = o3d_reg.robust_kernel.RobustKernel(o3d_reg.robust_kernel.TukeyLoss,
                                                scaling_parameter=0.5)

    # Setup convergence criteria.
    criteria = o3d_reg.ICPConvergenceCriteria(relative_fitness=1e-6,
                                              relative_rmse=1e-6,
                                              max_iteration=200)

    # Setup transformation estimator.
    estimator_p2l = o3d_reg.TransformationEstimationPointToPlane(kernel)
    estimator_dicp = o3d_reg.TransformationEstimationForDopplerICP(
        period=period * (args.target - args.source),
        lambda_doppler=0.01,
        reject_dynamic_outliers=False,
        doppler_outlier_threshold=2.0,
        outlier_rejection_min_iteration=2,
        geometric_robust_loss_min_iteration=0,
        doppler_robust_loss_min_iteration=2,
        geometric_kernel=kernel,
        doppler_kernel=kernel,
        transform_vehicle_to_sensor=transform_vehicle_to_sensor)

    # Run Doppler ICP and point-to-plane ICP registration for comparison.
    max_neighbor_distance = 0.3
    results = [
        o3d_reg.icp(source_in_V, target_in_V, max_neighbor_distance,
                    init_transform, estimator, criteria)
        for estimator in [estimator_p2l, estimator_dicp]
    ]

    # Display the poses.
    np.set_printoptions(suppress=True, precision=4)
    print('Estimated pose from Point-to-Plane ICP [%s iterations]:' %
          results[0].num_iterations)
    print(results[0].transformation.numpy())

    print('\nEstimated pose from Doppler ICP [%s iterations]:' %
          results[1].num_iterations)
    print(results[1].transformation.numpy())

    print('\nGround truth pose:')
    print(get_ground_truth_pose(demo_sequence, args.source, args.target))


def parse_args():
    """Parses the command line arguments.

    Returns:
        The parsed command line arguments.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--source',
                        '-s',
                        type=int,
                        required=True,
                        help='Source point cloud index')
    parser.add_argument('--target',
                        '-t',
                        type=int,
                        required=True,
                        help='Target point cloud index')
    parser.add_argument('--device',
                        '-d',
                        default='cpu',
                        help='Device backend for the tensor',
                        choices=['cpu', 'cuda'])

    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()
    run_doppler_icp(args)
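
For a quick experiment without the shell, the entry point can also be driven programmatically with an argparse.Namespace; the indices 0 and 3 below are arbitrary picks from DemoDopplerICPSequence, used purely for illustration.

# Equivalent to: python doppler_icp_registration.py --source 0 --target 3
run_doppler_icp(argparse.Namespace(source=0, target=3, device='cpu'))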

icp_registration.py

# ----------------------------------------------------------------------------
# -                        Open3D: www.open3d.org                            -
# ----------------------------------------------------------------------------
# Copyright (c) 2018-2024 www.open3d.org
# SPDX-License-Identifier: MIT
# ----------------------------------------------------------------------------
"""ICP (Iterative Closest Point) registration algorithm"""

import open3d as o3d
import numpy as np
import copy


def draw_registration_result(source, target, transformation):
    source_temp = copy.deepcopy(source)
    target_temp = copy.deepcopy(target)
    source_temp.paint_uniform_color([1, 0.706, 0])
    target_temp.paint_uniform_color([0, 0.651, 0.929])
    source_temp.transform(transformation)
    o3d.visualization.draw([source_temp, target_temp])


def point_to_point_icp(source, target, threshold, trans_init):
    print("Apply point-to-point ICP")
    reg_p2p = o3d.pipelines.registration.registration_icp(
        source, target, threshold, trans_init,
        o3d.pipelines.registration.TransformationEstimationPointToPoint())
    print(reg_p2p)
    print("Transformation is:")
    print(reg_p2p.transformation, "\n")
    draw_registration_result(source, target, reg_p2p.transformation)


def point_to_plane_icp(source, target, threshold, trans_init):
    print("Apply point-to-plane ICP")
    reg_p2l = o3d.pipelines.registration.registration_icp(
        source, target, threshold, trans_init,
        o3d.pipelines.registration.TransformationEstimationPointToPlane())
    print(reg_p2l)
    print("Transformation is:")
    print(reg_p2l.transformation, "\n")
    draw_registration_result(source, target, reg_p2l.transformation)


if __name__ == "__main__":
    pcd_data = o3d.data.DemoICPPointClouds()
    source = o3d.io.read_point_cloud(pcd_data.paths[0])
    target = o3d.io.read_point_cloud(pcd_data.paths[1])
    threshold = 0.02
    trans_init = np.asarray([[0.862, 0.011, -0.507, 0.5],
                             [-0.139, 0.967, -0.215, 0.7],
                             [0.487, 0.255, 0.835, -1.4], [0.0, 0.0, 0.0, 1.0]])
    draw_registration_result(source, target, trans_init)

    print("Initial alignment")
    evaluation = o3d.pipelines.registration.evaluate_registration(
        source, target, threshold, trans_init)
    print(evaluation, "\n")

    point_to_point_icp(source, target, threshold, trans_init)
    point_to_plane_icp(source, target, threshold, trans_init)
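
By default registration_icp stops after at most 30 iterations. When the initial guess is rough, a larger budget can be granted by passing an explicit convergence criteria object; the value 2000 below is an arbitrary illustrative budget, not a recommendation.

reg_p2l = o3d.pipelines.registration.registration_icp(
    source, target, threshold, trans_init,
    o3d.pipelines.registration.TransformationEstimationPointToPlane(),
    o3d.pipelines.registration.ICPConvergenceCriteria(max_iteration=2000))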

multiway_registration.py

# ----------------------------------------------------------------------------
# -                        Open3D: www.open3d.org                            -
# ----------------------------------------------------------------------------
# Copyright (c) 2018-2024 www.open3d.org
# SPDX-License-Identifier: MIT
# ----------------------------------------------------------------------------
"""Align multiple pieces of geometry in a global space"""

import open3d as o3d
import numpy as np


def load_point_clouds(voxel_size=0.0):
    pcd_data = o3d.data.DemoICPPointClouds()
    pcds = []
    for i in range(3):
        pcd = o3d.io.read_point_cloud(pcd_data.paths[i])
        pcd_down = pcd.voxel_down_sample(voxel_size=voxel_size)
        pcds.append(pcd_down)
    return pcds


def pairwise_registration(source, target, max_correspondence_distance_coarse,
                          max_correspondence_distance_fine):
    print("Apply point-to-plane ICP")
    icp_coarse = o3d.pipelines.registration.registration_icp(
        source, target, max_correspondence_distance_coarse, np.identity(4),
        o3d.pipelines.registration.TransformationEstimationPointToPlane())
    icp_fine = o3d.pipelines.registration.registration_icp(
        source, target, max_correspondence_distance_fine,
        icp_coarse.transformation,
        o3d.pipelines.registration.TransformationEstimationPointToPlane())
    transformation_icp = icp_fine.transformation
    information_icp = o3d.pipelines.registration.get_information_matrix_from_point_clouds(
        source, target, max_correspondence_distance_fine,
        icp_fine.transformation)
    return transformation_icp, information_icp


def full_registration(pcds, max_correspondence_distance_coarse,
                      max_correspondence_distance_fine):
    pose_graph = o3d.pipelines.registration.PoseGraph()
    odometry = np.identity(4)
    pose_graph.nodes.append(o3d.pipelines.registration.PoseGraphNode(odometry))
    n_pcds = len(pcds)
    for source_id in range(n_pcds):
        for target_id in range(source_id + 1, n_pcds):
            transformation_icp, information_icp = pairwise_registration(
                pcds[source_id], pcds[target_id],
                max_correspondence_distance_coarse,
                max_correspondence_distance_fine)
            print("Build o3d.pipelines.registration.PoseGraph")
            if target_id == source_id + 1:  # odometry case
                odometry = np.dot(transformation_icp, odometry)
                pose_graph.nodes.append(
                    o3d.pipelines.registration.PoseGraphNode(
                        np.linalg.inv(odometry)))
                pose_graph.edges.append(
                    o3d.pipelines.registration.PoseGraphEdge(source_id,
                                                             target_id,
                                                             transformation_icp,
                                                             information_icp,
                                                             uncertain=False))
            else:  # loop closure case
                pose_graph.edges.append(
                    o3d.pipelines.registration.PoseGraphEdge(source_id,
                                                             target_id,
                                                             transformation_icp,
                                                             information_icp,
                                                             uncertain=True))
    return pose_graph


if __name__ == "__main__":
    voxel_size = 0.02
    pcds_down = load_point_clouds(voxel_size)
    o3d.visualization.draw(pcds_down)

    print("Full registration ...")
    max_correspondence_distance_coarse = voxel_size * 15
    max_correspondence_distance_fine = voxel_size * 1.5
    with o3d.utility.VerbosityContextManager(
            o3d.utility.VerbosityLevel.Debug) as cm:
        pose_graph = full_registration(pcds_down,
                                       max_correspondence_distance_coarse,
                                       max_correspondence_distance_fine)

    print("Optimizing PoseGraph ...")
    option = o3d.pipelines.registration.GlobalOptimizationOption(
        max_correspondence_distance=max_correspondence_distance_fine,
        edge_prune_threshold=0.25,
        reference_node=0)
    with o3d.utility.VerbosityContextManager(
            o3d.utility.VerbosityLevel.Debug) as cm:
        o3d.pipelines.registration.global_optimization(
            pose_graph,
            o3d.pipelines.registration.GlobalOptimizationLevenbergMarquardt(),
            o3d.pipelines.registration.GlobalOptimizationConvergenceCriteria(),
            option)

    print("Transform points and display")
    for point_id in range(len(pcds_down)):
        print(pose_graph.nodes[point_id].pose)
        pcds_down[point_id].transform(pose_graph.nodes[point_id].pose)
    o3d.visualization.draw(pcds_down)
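
A common follow-up is to merge the aligned fragments into a single cloud. A minimal sketch, which reloads the clouds since pcds_down was transformed in place above; the output filename is arbitrary:

pcds = load_point_clouds(voxel_size)
pcd_combined = o3d.geometry.PointCloud()
for point_id in range(len(pcds)):
    pcds[point_id].transform(pose_graph.nodes[point_id].pose)
    pcd_combined += pcds[point_id]
pcd_combined_down = pcd_combined.voxel_down_sample(voxel_size=voxel_size)
o3d.io.write_point_cloud("multiway_registration.pcd", pcd_combined_down)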

pose_graph_optimization.py

# ----------------------------------------------------------------------------
# -                        Open3D: www.open3d.org                            -
# ----------------------------------------------------------------------------
# Copyright (c) 2018-2024 www.open3d.org
# SPDX-License-Identifier: MIT
# ----------------------------------------------------------------------------

import open3d as o3d
import numpy as np

if __name__ == "__main__":

    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)

    print("")
    print(
        "Parameters for o3d.pipelines.registration.PoseGraph optimization ...")
    method = o3d.pipelines.registration.GlobalOptimizationLevenbergMarquardt()
    criteria = o3d.pipelines.registration.GlobalOptimizationConvergenceCriteria(
    )
    option = o3d.pipelines.registration.GlobalOptimizationOption()
    print("")
    print(method)
    print(criteria)
    print(option)
    print("")

    print(
        "Optimizing Fragment o3d.pipelines.registration.PoseGraph using open3d ..."
    )

    pose_graph_data = o3d.data.DemoPoseGraphOptimization()
    pose_graph_fragment = o3d.io.read_pose_graph(
        pose_graph_data.pose_graph_fragment_path)
    print(pose_graph_fragment)
    o3d.pipelines.registration.global_optimization(pose_graph_fragment, method,
                                                   criteria, option)
    o3d.io.write_pose_graph('pose_graph_example_fragment_optimized.json',
                            pose_graph_fragment)
    print("")

    print(
        "Optimizing Global o3d.pipelines.registration.PoseGraph using open3d ..."
    )
    pose_graph_global = o3d.io.read_pose_graph(
        pose_graph_data.pose_graph_global_path)
    print(pose_graph_global)
    o3d.pipelines.registration.global_optimization(pose_graph_global, method,
                                                   criteria, option)
    o3d.io.write_pose_graph('pose_graph_example_global_optimized.json',
                            pose_graph_global)
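
The default-constructed objects above print their settings, which is a handy way to discover the available knobs. They can also be set explicitly; the values below are placeholders that simply name the parameters, not tuned recommendations.

option = o3d.pipelines.registration.GlobalOptimizationOption(
    max_correspondence_distance=0.03,  # inlier threshold during optimization
    edge_prune_threshold=0.25,  # edges below this confidence are pruned
    preference_loop_closure=1.0,  # >1 weights loop closures over odometry
    reference_node=0)  # node held fixed as the global reference frame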

registration_fgr.py

# ----------------------------------------------------------------------------
# -                        Open3D: www.open3d.org                            -
# ----------------------------------------------------------------------------
# Copyright (c) 2018-2024 www.open3d.org
# SPDX-License-Identifier: MIT
# ----------------------------------------------------------------------------

import open3d as o3d

import argparse


def preprocess_point_cloud(pcd, voxel_size):
    pcd_down = pcd.voxel_down_sample(voxel_size)
    pcd_down.estimate_normals(
        o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 2.0,
                                             max_nn=30))
    pcd_fpfh = o3d.pipelines.registration.compute_fpfh_feature(
        pcd_down,
        o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 5.0,
                                             max_nn=100))
    return (pcd_down, pcd_fpfh)


if __name__ == '__main__':
    pcd_data = o3d.data.DemoICPPointClouds()
    parser = argparse.ArgumentParser(
        description='Global point cloud registration example with '
        'Fast Global Registration (FGR)')
    parser.add_argument('src',
                        type=str,
                        default=pcd_data.paths[0],
                        nargs='?',
                        help='path to src point cloud')
    parser.add_argument('dst',
                        type=str,
                        default=pcd_data.paths[1],
                        nargs='?',
                        help='path to dst point cloud')
    parser.add_argument('--voxel_size',
                        type=float,
                        default=0.05,
                        help='voxel size in meters used to downsample inputs')
    parser.add_argument(
        '--distance_multiplier',
        type=float,
        default=1.5,
        help='multiplier used to compute distance threshold '
        'between correspondences. '
        'Threshold is computed by voxel_size * distance_multiplier.')
    parser.add_argument('--max_iterations',
                        type=int,
                        default=64,
                        help='max number of FGR iterations')
    parser.add_argument(
        '--max_tuples',
        type=int,
        default=1000,
        help='max number of accepted tuples for correspondence filtering')

    args = parser.parse_args()

    voxel_size = args.voxel_size
    distance_threshold = args.distance_multiplier * voxel_size

    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)
    print('Reading inputs')
    src = o3d.io.read_point_cloud(args.src)
    dst = o3d.io.read_point_cloud(args.dst)

    print('Downsampling inputs')
    src_down, src_fpfh = preprocess_point_cloud(src, voxel_size)
    dst_down, dst_fpfh = preprocess_point_cloud(dst, voxel_size)

    print('Running FGR')
    result = o3d.pipelines.registration.registration_fgr_based_on_feature_matching(
        src_down, dst_down, src_fpfh, dst_fpfh,
        o3d.pipelines.registration.FastGlobalRegistrationOption(
            maximum_correspondence_distance=distance_threshold,
            iteration_number=args.max_iterations,
            maximum_tuple_count=args.max_tuples))

    src.paint_uniform_color([1, 0, 0])
    dst.paint_uniform_color([0, 1, 0])
    o3d.visualization.draw([src.transform(result.transformation), dst])
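
FGR produces a coarse global alignment from feature matches; a common follow-up (not part of this script) is a local point-to-plane ICP refinement seeded with the FGR result:

result_refined = o3d.pipelines.registration.registration_icp(
    src_down, dst_down, distance_threshold, result.transformation,
    o3d.pipelines.registration.TransformationEstimationPointToPlane())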

registration_ransac.py

# ----------------------------------------------------------------------------
# -                        Open3D: www.open3d.org                            -
# ----------------------------------------------------------------------------
# Copyright (c) 2018-2024 www.open3d.org
# SPDX-License-Identifier: MIT
# ----------------------------------------------------------------------------

import open3d as o3d

import numpy as np
from copy import deepcopy
import argparse


def visualize_registration(src, dst, transformation=np.eye(4)):
    src_trans = deepcopy(src)
    src_trans.transform(transformation)
    src_trans.paint_uniform_color([1, 0, 0])

    dst_clone = deepcopy(dst)
    dst_clone.paint_uniform_color([0, 1, 0])

    o3d.visualization.draw([src_trans, dst_clone])


def preprocess_point_cloud(pcd, voxel_size):
    pcd_down = pcd.voxel_down_sample(voxel_size)
    pcd_down.estimate_normals(
        o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 2.0,
                                             max_nn=30))
    pcd_fpfh = o3d.pipelines.registration.compute_fpfh_feature(
        pcd_down,
        o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 5.0,
                                             max_nn=100),
    )
    return (pcd_down, pcd_fpfh)


if __name__ == "__main__":
    pcd_data = o3d.data.DemoICPPointClouds()

    # yapf: disable
    parser = argparse.ArgumentParser(
        description="Global point cloud registration example with RANSAC"
    )
    parser.add_argument(
        "src", type=str, default=pcd_data.paths[0], nargs="?",
        help="path to src point cloud",
    )
    parser.add_argument(
        "dst", type=str, default=pcd_data.paths[1], nargs="?",
        help="path to dst point cloud",
    )
    parser.add_argument(
        "--voxel_size", type=float, default=0.05,
        help="voxel size in meters used to downsample inputs",
    )
    parser.add_argument(
        "--distance_multiplier", type=float, default=1.5,
        help="multiplier used to compute distance threshold "
        "between correspondences. "
        "Threshold is computed by voxel_size * distance_multiplier.",
    )
    parser.add_argument(
        "--max_iterations", type=int, default=100000,
        help="max number of RANSAC iterations",
    )
    parser.add_argument(
        "--confidence", type=float, default=0.999, help="RANSAC confidence"
    )
    parser.add_argument(
        "--mutual_filter", action="store_true",
        help="whether to use mutual filter for putative correspondences",
    )
    parser.add_argument(
        "--method", choices=["from_features", "from_correspondences"],
        default="from_correspondences"
    )
    # yapf: enable

    args = parser.parse_args()

    voxel_size = args.voxel_size
    distance_threshold = args.distance_multiplier * voxel_size
    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)

    print("Reading inputs")
    src = o3d.io.read_point_cloud(args.src)
    dst = o3d.io.read_point_cloud(args.dst)

    print("Downsampling inputs")
    src_down, src_fpfh = preprocess_point_cloud(src, voxel_size)
    dst_down, dst_fpfh = preprocess_point_cloud(dst, voxel_size)

    if args.method == "from_features":
        print("Running RANSAC from features")
        result = o3d.pipelines.registration.registration_ransac_based_on_feature_matching(
            src_down,
            dst_down,
            src_fpfh,
            dst_fpfh,
            mutual_filter=args.mutual_filter,
            max_correspondence_distance=distance_threshold,
            estimation_method=o3d.pipelines.registration.
            TransformationEstimationPointToPoint(False),
            ransac_n=3,
            checkers=[
                o3d.pipelines.registration.
                CorrespondenceCheckerBasedOnEdgeLength(0.9),
                o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance(
                    distance_threshold),
            ],
            criteria=o3d.pipelines.registration.RANSACConvergenceCriteria(
                args.max_iterations, args.confidence),
        )
        visualize_registration(src, dst, result.transformation)

    elif args.method == "from_correspondences":
        print("Running RANSAC from correspondences")
        # Mimic importing customized external features (e.g. learned FCGF
        # features) as numpy arrays of shape (feature_dim, num_features).
        src_fpfh_np = np.asarray(src_fpfh.data).copy()
        dst_fpfh_np = np.asarray(dst_fpfh.data).copy()

        src_fpfh_import = o3d.pipelines.registration.Feature()
        src_fpfh_import.data = src_fpfh_np

        dst_fpfh_import = o3d.pipelines.registration.Feature()
        dst_fpfh_import.data = dst_fpfh_np

        corres = o3d.pipelines.registration.correspondences_from_features(
            src_fpfh_import, dst_fpfh_import, args.mutual_filter)
        result = o3d.pipelines.registration.registration_ransac_based_on_correspondence(
            src_down,
            dst_down,
            corres,
            max_correspondence_distance=distance_threshold,
            estimation_method=o3d.pipelines.registration.
            TransformationEstimationPointToPoint(False),
            ransac_n=3,
            checkers=[
                o3d.pipelines.registration.
                CorrespondenceCheckerBasedOnEdgeLength(0.9),
                o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance(
                    distance_threshold),
            ],
            criteria=o3d.pipelines.registration.RANSACConvergenceCriteria(
                args.max_iterations, args.confidence),
        )
        visualize_registration(src, dst, result.transformation)
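
Either branch can be sanity-checked numerically as well as visually; evaluate_registration scores a fixed transformation, where fitness is the inlier ratio and inlier_rmse the RMSE over inlier correspondences:

evaluation = o3d.pipelines.registration.evaluate_registration(
    src_down, dst_down, distance_threshold, result.transformation)
print(evaluation)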

rgbd_integration_uniform.py

# ----------------------------------------------------------------------------
# -                        Open3D: www.open3d.org                            -
# ----------------------------------------------------------------------------
# Copyright (c) 2018-2024 www.open3d.org
# SPDX-License-Identifier: MIT
# ----------------------------------------------------------------------------

import open3d as o3d
import numpy as np

import os, sys

pyexample_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(pyexample_path)

from open3d_example import read_trajectory

if __name__ == "__main__":
    rgbd_data = o3d.data.SampleRedwoodRGBDImages()
    camera_poses = read_trajectory(rgbd_data.odometry_log_path)
    camera_intrinsics = o3d.camera.PinholeCameraIntrinsic(
        o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)
    volume = o3d.pipelines.integration.UniformTSDFVolume(
        length=4.0,
        resolution=512,
        sdf_trunc=0.04,
        color_type=o3d.pipelines.integration.TSDFVolumeColorType.RGB8,
    )

    for i in range(len(camera_poses)):
        print("Integrate {:d}-th image into the volume.".format(i))
        color = o3d.io.read_image(rgbd_data.color_paths[i])
        depth = o3d.io.read_image(rgbd_data.depth_paths[i])

        rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
            color, depth, depth_trunc=4.0, convert_rgb_to_intensity=False)
        volume.integrate(
            rgbd,
            camera_intrinsics,
            np.linalg.inv(camera_poses[i].pose),
        )

    print("Extract triangle mesh")
    mesh = volume.extract_triangle_mesh()
    mesh.compute_vertex_normals()
    o3d.visualization.draw_geometries([mesh])

    print("Extract voxel-aligned debugging point cloud")
    voxel_pcd = volume.extract_voxel_point_cloud()
    o3d.visualization.draw_geometries([voxel_pcd])

    print("Extract voxel-aligned debugging voxel grid")
    voxel_grid = volume.extract_voxel_grid()
    # o3d.visualization.draw_geometries([voxel_grid])

    # print("Extract point cloud")
    # pcd = volume.extract_point_cloud()
    # o3d.visualization.draw_geometries([pcd])
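
The uniform volume above spans a fixed 4 m cube at 512^3 resolution, so memory is allocated for the whole cube regardless of scene content. For larger scenes the scalable TSDF backend allocates voxel blocks on demand; a sketch with settings mirroring the ones above (voxel size 4.0 / 512):

volume = o3d.pipelines.integration.ScalableTSDFVolume(
    voxel_length=4.0 / 512.0,
    sdf_trunc=0.04,
    color_type=o3d.pipelines.integration.TSDFVolumeColorType.RGB8)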

rgbd_odometry.py

# ----------------------------------------------------------------------------
# -                        Open3D: www.open3d.org                            -
# ----------------------------------------------------------------------------
# Copyright (c) 2018-2024 www.open3d.org
# SPDX-License-Identifier: MIT
# ----------------------------------------------------------------------------
"""Find camera movement between two consecutive RGBD images"""

import open3d as o3d
import numpy as np

if __name__ == "__main__":
    pinhole_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(
        o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)
    rgbd_data = o3d.data.SampleRedwoodRGBDImages()
    source_color = o3d.io.read_image(rgbd_data.color_paths[0])
    source_depth = o3d.io.read_image(rgbd_data.depth_paths[0])
    target_color = o3d.io.read_image(rgbd_data.color_paths[1])
    target_depth = o3d.io.read_image(rgbd_data.depth_paths[1])

    source_rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
        source_color, source_depth)
    target_rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
        target_color, target_depth)
    target_pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
        target_rgbd_image, pinhole_camera_intrinsic)

    option = o3d.pipelines.odometry.OdometryOption()
    odo_init = np.identity(4)
    print(option)

    [success_color_term, trans_color_term,
     info] = o3d.pipelines.odometry.compute_rgbd_odometry(
         source_rgbd_image, target_rgbd_image,
         pinhole_camera_intrinsic, odo_init,
         o3d.pipelines.odometry.RGBDOdometryJacobianFromColorTerm(), option)
    [success_hybrid_term, trans_hybrid_term,
     info] = o3d.pipelines.odometry.compute_rgbd_odometry(
         source_rgbd_image, target_rgbd_image,
         pinhole_camera_intrinsic, odo_init,
         o3d.pipelines.odometry.RGBDOdometryJacobianFromHybridTerm(), option)

    if success_color_term:
        print("Using RGB-D Odometry")
        print(trans_color_term)
        source_pcd_color_term = o3d.geometry.PointCloud.create_from_rgbd_image(
            source_rgbd_image, pinhole_camera_intrinsic)
        source_pcd_color_term.transform(trans_color_term)
        o3d.visualization.draw([target_pcd, source_pcd_color_term])

    if success_hybrid_term:
        print("Using Hybrid RGB-D Odometry")
        print(trans_hybrid_term)
        source_pcd_hybrid_term = o3d.geometry.PointCloud.create_from_rgbd_image(
            source_rgbd_image, pinhole_camera_intrinsic)
        source_pcd_hybrid_term.transform(trans_hybrid_term)
        o3d.visualization.draw([target_pcd, source_pcd_hybrid_term])
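
The print(option) call above shows the default settings. The main knobs can also be set explicitly; a sketch follows, where the values are intended to mirror the defaults and are shown only to name the parameters.

option = o3d.pipelines.odometry.OdometryOption(
    iteration_number_per_pyramid_level=o3d.utility.IntVector([20, 10, 5]),
    max_depth_diff=0.03,  # pixels with larger depth change are rejected
    min_depth=0.0,
    max_depth=4.0)  # depth range (in meters) considered valid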

robust_icp.py

# ----------------------------------------------------------------------------
# -                        Open3D: www.open3d.org                            -
# ----------------------------------------------------------------------------
# Copyright (c) 2018-2024 www.open3d.org
# SPDX-License-Identifier: MIT
# ----------------------------------------------------------------------------
"""Outlier rejection using robust kernels for ICP"""

import open3d as o3d
import numpy as np
import copy


def draw_registration_result(source, target, transformation):
    source_temp = copy.deepcopy(source)
    target_temp = copy.deepcopy(target)
    source_temp.paint_uniform_color([1, 0.706, 0])
    target_temp.paint_uniform_color([0, 0.651, 0.929])
    source_temp.transform(transformation)
    o3d.visualization.draw([source_temp, target_temp])


def apply_noise(pcd, mu, sigma):
    noisy_pcd = copy.deepcopy(pcd)
    points = np.asarray(noisy_pcd.points)
    points += np.random.normal(mu, sigma, size=points.shape)
    noisy_pcd.points = o3d.utility.Vector3dVector(points)
    return noisy_pcd


if __name__ == "__main__":
    pcd_data = o3d.data.DemoICPPointClouds()
    source = o3d.io.read_point_cloud(pcd_data.paths[0])
    target = o3d.io.read_point_cloud(pcd_data.paths[1])
    trans_init = np.asarray([[0.862, 0.011, -0.507, 0.5],
                             [-0.139, 0.967, -0.215, 0.7],
                             [0.487, 0.255, 0.835, -1.4], [0.0, 0.0, 0.0, 1.0]])

    # Mean and standard deviation of the Gaussian noise.
    mu, sigma = 0, 0.1
    source_noisy = apply_noise(source, mu, sigma)

    print("Displaying source point cloud + noise:")
    o3d.visualization.draw([source_noisy])

    print(
        "Displaying original source and target point cloud with initial transformation:"
    )
    draw_registration_result(source, target, trans_init)

    threshold = 1.0
    print("Using the noisy source point cloud to perform robust ICP.\n")
    print("Robust point-to-plane ICP, threshold={}:".format(threshold))
    loss = o3d.pipelines.registration.TukeyLoss(k=sigma)
    print("Using robust loss:", loss)
    p2l = o3d.pipelines.registration.TransformationEstimationPointToPlane(loss)
    reg_p2l = o3d.pipelines.registration.registration_icp(
        source_noisy, target, threshold, trans_init, p2l)
    print(reg_p2l)
    print("Transformation is:")
    print(reg_p2l.transformation)
    draw_registration_result(source, target, reg_p2l.transformation)
62    draw_registration_result(source, target, reg_p2l.transformation)