diff --git a/cpp/open3d/t/geometry/TriangleMesh.h b/cpp/open3d/t/geometry/TriangleMesh.h
index 2dbb71ec55d..a6d62ffe0c1 100644
--- a/cpp/open3d/t/geometry/TriangleMesh.h
+++ b/cpp/open3d/t/geometry/TriangleMesh.h
@@ -26,6 +26,9 @@ namespace geometry {
 class LineSet;
 class RaycastingScene;
 
+/// Texture blending method in ProjectImagesToAlbedo() for overlapping images.
+enum class BlendingMethod { MAX, AVERAGE };
+
 /// \class TriangleMesh
 /// \brief A triangle mesh contains vertices and triangles.
 ///
@@ -979,12 +982,38 @@ class TriangleMesh : public Geometry, public DrawableGeometry {
     /// \return The reference to itself.
     TriangleMesh RemoveUnreferencedVertices();
 
+    /// Create an albedo for the triangle mesh using calibrated images. The
+    /// triangle mesh must have texture coordinates ("texture_uvs" triangle
+    /// attribute). This works by back projecting the images onto the texture
+    /// surface. Overlapping images are blended together in the resulting
+    /// albedo. For best results, use images captured with exposure and white
+    /// balance lock to reduce the chance of seams in the output texture.
+    ///
+    /// \param images vector of images.
+    /// \param intrinsic_matrices vector of {3,3} intrinsic matrices describing
+    /// the pinhole camera.
+    /// \param extrinsic_matrices vector of {4,4} extrinsic matrices describing
+    /// the position and orientation of the camera.
+    /// \param tex_size Output albedo texture size. This is a square image, so
+    /// only one side is needed.
+    /// \param update_material Whether to update the material of the triangle
+    /// mesh, possibly overwriting an existing albedo texture.
+    /// \param blending_method BlendingMethod enum specifying the blending
+    /// method for overlapping images:
+    /// - MAX: For each texel, pick the input pixel with the max weight from
+    ///   all overlapping images. This creates sharp textures but may have
+    ///   visible seams.
+    /// - AVERAGE: The output texel value is the weighted sum of input
+    ///   pixels. This creates smooth blending without seams, but the results
+    ///   may be blurry.
+    /// \return Image with albedo texture.
     Image ProjectImagesToAlbedo(
             const std::vector<Image> &images,
             const std::vector<core::Tensor> &intrinsic_matrices,
             const std::vector<core::Tensor> &extrinsic_matrices,
             int tex_size = 1024,
-            bool update_material = true);
+            bool update_material = true,
+            BlendingMethod blending_method = BlendingMethod::MAX);
 
 protected:
     core::Device device_ = core::Device("CPU:0");
diff --git a/cpp/pybind/t/geometry/trianglemesh.cpp b/cpp/pybind/t/geometry/trianglemesh.cpp
index fabc79dc38e..414d0d3418a 100644
--- a/cpp/pybind/t/geometry/trianglemesh.cpp
+++ b/cpp/pybind/t/geometry/trianglemesh.cpp
@@ -858,13 +858,6 @@ This function always uses the CPU device.
     plt.imshow(texture_tensors['albedo'].numpy())
 )");
 
-    triangle_mesh.def(
-            "project_images_to_albedo", &TriangleMesh::ProjectImagesToAlbedo,
-            "images"_a, "intrinsic_matrices"_a, "extrinsic_matrices"_a,
-            "tex_size"_a = 1024, "update_material"_a = true,
-            py::call_guard<py::gil_scoped_release>(),
-            R"(Create an albedo texture from images of an object taken with a calibrated camera.)");
-
     triangle_mesh.def("extrude_rotation", &TriangleMesh::ExtrudeRotation,
                       "angle"_a, "axis"_a, "resolution"_a = 16,
                       "translation"_a = 0.0, "capping"_a = true,
@@ -1000,10 +993,44 @@ or has a negative value, it is ignored.
                      &TriangleMesh::RemoveUnreferencedVertices,
                      "Removes unreferenced vertices from the mesh in-place.");
 
+    py::enum_<BlendingMethod>(m, "BlendingMethod")
+            .value("MAX", BlendingMethod::MAX)
+            .value("AVERAGE", BlendingMethod::AVERAGE);
     triangle_mesh.def("project_images_to_albedo",
                       &TriangleMesh::ProjectImagesToAlbedo, "images"_a,
                       "intrinsic_matrices"_a, "extrinsic_matrices"_a,
-                      "tex_size"_a = 1024, "update_material"_a = true);
+                      "tex_size"_a = 1024, "update_material"_a = true,
+                      "blending_method"_a = BlendingMethod::MAX,
+                      py::call_guard<py::gil_scoped_release>(), R"(
+Create an albedo for the triangle mesh using calibrated images. The triangle
+mesh must have texture coordinates ("texture_uvs" triangle attribute). This
+works by back projecting the images onto the texture surface. Overlapping
+images are blended together in the resulting albedo. For best results, use
+images captured with exposure and white balance lock to reduce the chance of
+seams in the output texture.
+
+Args:
+    images (List[open3d.t.geometry.Image]): List of images.
+    intrinsic_matrices (List[open3d.core.Tensor]): List of (3,3) intrinsic
+        matrices describing the pinhole camera.
+    extrinsic_matrices (List[open3d.core.Tensor]): List of (4,4) extrinsic
+        matrices describing the position and orientation of the camera.
+    tex_size (int): Output albedo texture size. This is a square image, so
+        only one side is needed.
+    update_material (bool): Whether to update the material of the triangle
+        mesh, possibly overwriting an existing albedo texture.
+    blending_method (BlendingMethod): Enum specifying the blending method for
+        overlapping images::
+
+        - `MAX`: For each texel, pick the input pixel with the max weight from
+          all overlapping images. This creates sharp textures but may have
+          visible seams.
+        - `AVERAGE`: The output texel value is the weighted sum of input
+          pixels. This creates smooth blending without seams, but the results
+          may be blurry.
+
+Returns:
+    Image with albedo texture.)");
 }
 
 }  // namespace geometry
diff --git a/cpp/tests/t/geometry/TriangleMesh.cpp b/cpp/tests/t/geometry/TriangleMesh.cpp
index a431804da9e..f2640d67cdf 100644
--- a/cpp/tests/t/geometry/TriangleMesh.cpp
+++ b/cpp/tests/t/geometry/TriangleMesh.cpp
@@ -8,10 +8,12 @@
 #include "open3d/t/geometry/TriangleMesh.h"
 
 #include 
+#include 
 
 #include "core/CoreTest.h"
 #include "open3d/core/Dtype.h"
 #include "open3d/core/EigenConverter.h"
+#include "open3d/core/SizeVector.h"
 #include "open3d/core/Tensor.h"
 #include "open3d/core/TensorCheck.h"
 #include "open3d/geometry/LineSet.h"
@@ -1350,7 +1352,6 @@ TEST_P(TriangleMeshPermuteDevices, RemoveUnreferencedVertices) {
 
 TEST_P(TriangleMeshPermuteDevices, ProjectImagesToAlbedo) {
     using namespace t::geometry;
-    using ls = open3d::geometry::LineSet;
     core::Device device = GetParam();
     TriangleMesh sphere =
             TriangleMesh::FromLegacy(*geometry::TriangleMesh::CreateSphere(
@@ -1379,46 +1380,22 @@ TEST_P(TriangleMeshPermuteDevices, ProjectImagesToAlbedo) {
                          device),
     };
 
-    Eigen::Map e_intrinsic(
-            intrinsic_matrix.GetDataPtr());
-    Eigen::Map e_extrinsic[3] = {
-            Eigen::Map(
-                    extrinsic_matrix[0].GetDataPtr()),
-            Eigen::Map(
-                    extrinsic_matrix[1].GetDataPtr()),
-            Eigen::Map(
-                    extrinsic_matrix[2].GetDataPtr())};
-    std::shared_ptr p_camera[3] = {
-            ls::CreateCameraVisualization(
-                    256, 192, e_intrinsic.transpose().cast(),
-                    e_extrinsic[0].transpose().cast()),
-            ls::CreateCameraVisualization(
-                    256, 192, e_intrinsic.transpose().cast(),
-                    e_extrinsic[1].transpose().cast()),
-            ls::CreateCameraVisualization(
-                    256, 192, e_intrinsic.transpose().cast(),
-                    e_extrinsic[2].transpose().cast())};
-
     Image albedo = sphere.ProjectImagesToAlbedo(
             {Image(view[0]), Image(view[1]), Image(view[2])},
             {intrinsic_matrix, intrinsic_matrix, intrinsic_matrix},
             {extrinsic_matrix[0], extrinsic_matrix[1], extrinsic_matrix[2]},
             256, true);
-    utility::LogInfo("Mesh: {}", sphere.ToString());
-    utility::LogInfo("Texture: {}", albedo.ToString());
-    t::io::WriteImage("albedo.png", albedo);
-    /* t::io::WriteTriangleMesh("sphere-projected.obj", sphere); */
-    t::io::WriteTriangleMesh("sphere-projected.glb", sphere);
-    t::io::WriteTriangleMesh("sphere-projected.npz", sphere);
-
-    visualization::Draw(
-            {visualization::DrawObject("camera_0", p_camera[0], true),
-             visualization::DrawObject("camera_1", p_camera[1], true),
-             visualization::DrawObject("camera_2", p_camera[2], true),
-             visualization::DrawObject{
-                     "mesh", std::make_shared(std::move(sphere)),
-                     true}},
-            "ProjectImagesToAlbedo", 1024, 768);
+
+    EXPECT_TRUE(sphere.HasMaterial());
+    EXPECT_TRUE(sphere.GetMaterial().HasAlbedoMap());
+    EXPECT_TRUE(albedo.AsTensor().GetShape().IsCompatible({256, 256, 3}));
+    EXPECT_TRUE(albedo.GetDtype() == core::UInt8);
+    core::Tensor mean_color_ref =
+            core::Tensor::Init<float>({92.465515, 71.62926, 67.55928});
+    EXPECT_TRUE(albedo.AsTensor()
+                        .To(core::Float32)
+                        .Mean({0, 1})
+                        .AllClose(mean_color_ref));
 }
 
 }  // namespace tests
diff --git a/docs/tutorial/data/index.rst b/docs/tutorial/data/index.rst
index e17c8d4619e..f1fcf2dd21c 100644
--- a/docs/tutorial/data/index.rst
+++ b/docs/tutorial/data/index.rst
@@ -175,13 +175,13 @@ A 3D Mobius knot mesh in PLY format.
     data::KnotMesh dataset;
     auto mesh = io::CreateMeshFromFile(dataset.GetPath());
 
-TriangleModel with PRB texture
+TriangleModel with PBR texture
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 MonkeyModel
 -----------
 
-The monkey model with PRB texture.
+The monkey model with PBR texture.
 
 .. code-block:: python
@@ -197,7 +197,7 @@ The monkey model with PRB texture.
 
 SwordModel
 ----------
 
-The sword model with PRB texture.
+The sword model with PBR texture.
 
 .. code-block:: python
@@ -213,7 +213,7 @@ The sword model with PRB texture.
 
 CrateModel
 ----------
 
-The crate model with PRB texture.
+The crate model with PBR texture.
 
 .. code-block:: python
@@ -229,7 +229,7 @@ The crate model with PRB texture.
 
 FlightHelmetModel
 -----------------
 
-The flight helmet gltf model with PRB texture.
+The flight helmet gltf model with PBR texture.
 
 .. code-block:: python
diff --git a/examples/python/geometry/triangle_mesh_project_to_albedo.py b/examples/python/geometry/triangle_mesh_project_to_albedo.py
index a8ed06e552c..c37c6045127 100644
--- a/examples/python/geometry/triangle_mesh_project_to_albedo.py
+++ b/examples/python/geometry/triangle_mesh_project_to_albedo.py
@@ -1,22 +1,56 @@
+"""This example demonstrates project_images_to_albedo. Use create_dataset mode
+to render images of a 3D mesh or model from different viewpoints.
+albedo_from_images mode then uses the calibrated images to re-create the albedo
+texture for the mesh.
+"""
 import argparse
 from pathlib import Path
 import subprocess as sp
+import time
 
 import numpy as np
 import open3d as o3d
 from open3d.visualization import gui, rendering, O3DVisualizer
 from open3d.core import Tensor
 
 
+def download_smithsonian_baluster_vase():
+    """Download the Smithsonian Baluster Vase 3D model."""
+    vase_url = 'https://3d-api.si.edu/content/document/3d_package:d8c62634-4ebc-11ea-b77f-2e728ce88125/resources/F1980.190%E2%80%93194_baluster_vase-150k-4096.glb'
+    import urllib.request
+
+    def show_progress(block_num, block_size, total_size):
+        total_size = total_size >> 20 if total_size > 0 else "??"  # Convert to MB if known
+        print(
+            "Downloading F1980_baluster_vase.glb... "
+            f"{(block_num * block_size) >> 20}MB / {total_size}MB",
+            end="\r")
+
+    urllib.request.urlretrieve(vase_url,
+                               filename="F1980_baluster_vase.glb",
+                               reporthook=show_progress)
+    print("\nDownload complete.")
+
+
 def create_dataset(meshfile, n_images=10, movie=False):
+    """Render images of a 3D mesh from different viewpoints. These form a
+    synthetic dataset to test the project_images_to_albedo function.
+    """
+    # Adjust these parameters to properly frame your model.
+    # Window system pixel scaling (e.g. 1 for normal, 2 for HiDPI / retina display)
     SCALING = 2
-    width, height = 1024, 1024
+    width, height = 1024, 1024  # image width, height
     focal_length = 512
+    d_camera_obj = 0.3  # distance from camera to object
     K = np.array([[focal_length, 0, width / 2], [0, focal_length, height / 2],
                   [0, 0, 1]])
+    t = np.array([0, 0, d_camera_obj])  # origin / object in camera ref frame
+
     model = o3d.io.read_triangle_model(meshfile)
+    # DefaultLit shader will produce non-uniform images with specular
+    # highlights, etc. These should be avoided to accurately capture the diffuse
+    # albedo
     unlit = rendering.MaterialRecord()
     unlit.shader = "unlit"
-    t = np.array([0, 0, 0.3])  # origin / object in camera ref frame
 
     def rotate_camera_and_shoot(o3dvis):
         Rts = []
@@ -52,6 +86,8 @@ def rotate_camera_and_shoot(o3dvis):
                    check=True)
             print("\nDone.")
 
+    print("If the object is properly framed in the GUI window, click on the "
+          "'Save Images' action in the menu.")
     o3d.visualization.draw([{
         'geometry': model,
         'name': meshfile.name,
@@ -62,12 +98,6 @@ def rotate_camera_and_shoot(o3dvis):
                             height=int(height / SCALING),
                             actions=[("Save Images", rotate_camera_and_shoot)])
 
-    # Linux only :-(
-    # render = rendering.OffscreenRenderer(width, height)
-    # render.scene.add_geometry(model)
-    # img = render.render_to_image()
-    # o3d.io.write_image("render-image.jpg", img)
-
 
 def albedo_from_images(meshfile, calib_data_file):
@@ -79,9 +109,11 @@ def albedo_from_images(meshfile, calib_data_file):
     Rts = list(Tensor(Rt) for Rt in calib["Rts"])
     images = list(o3d.t.io.read_image(imfile) for imfile in calib["images"])
     calib.close()
-    # breakpoint()
+    start = time.time()
     albedo = tmeshes[0].project_images_to_albedo(images, Ks, Rts, 1024)
+    print(f"project_images_to_albedo ran in {time.time()-start:.2f}s")
     o3d.t.io.write_image("albedo.png", albedo)
+    o3d.t.io.write_triangle_mesh(meshfile.stem + "_albedo.glb", tmeshes[0])
 
     cam_vis = list({
         "name": f"camera-{i:02}",
@@ -94,19 +126,24 @@ def albedo_from_images(meshfile, calib_data_file):
         "geometry": tmeshes[0]
     }],
                             show_ui=True)
-    o3d.t.io.write_triangle_mesh(meshfile.stem + "_albedo.glb", tmeshes[0])
 
 
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
+    parser = argparse.ArgumentParser(description=__doc__)
     parser.add_argument("action",
                         choices=('create_dataset', 'albedo_from_images'))
-    parser.add_argument("meshfile", type=Path)
+    parser.add_argument("--meshfile",
+                        type=Path,
+                        default=".",
+                        help="Path to mesh file.")
     parser.add_argument("--n-images",
                         type=int,
                         default=10,
                         help="Number of images to render.")
+    parser.add_argument("--download_sample_model",
+                        help="Download a sample 3D model for this example.",
+                        action="store_true")
     parser.add_argument(
         "--movie",
        action="store_true",
@@ -116,6 +153,12 @@ def albedo_from_images(meshfile, calib_data_file):
     args = parser.parse_args()
 
     if args.action == "create_dataset":
+        if args.download_sample_model:
+            download_smithsonian_baluster_vase()
+            args.meshfile = Path("F1980_baluster_vase.glb")
+        if args.meshfile == Path("."):
+            parser.error("Please provide a path to a mesh file, or use "
+                         "--download_sample_model.")
         create_dataset(args.meshfile, n_images=args.n_images, movie=args.movie)
     else:
         albedo_from_images(args.meshfile, "cameras.npz")
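
Usage sketch: the example script above calls project_images_to_albedo with the default blending. A minimal way to select AVERAGE blending from Python is shown below; it assumes a mesh file carrying "texture_uvs" (the filename my_scan.glb is a placeholder) and a cameras.npz written by the example's create_dataset mode (the "Rts" and "images" keys appear in the diff above, while the "Ks" key for the intrinsic matrices is assumed).

    # Sketch of the new blending_method keyword; the mesh filename and the "Ks"
    # npz key are placeholders / assumptions, not part of the patch.
    import numpy as np
    import open3d as o3d
    from open3d.core import Tensor

    mesh = o3d.t.io.read_triangle_mesh("my_scan.glb")  # must have "texture_uvs"
    calib = np.load("cameras.npz")  # written by create_dataset in the example
    Ks = [Tensor(K) for K in calib["Ks"]]  # (3,3) intrinsics (assumed key)
    Rts = [Tensor(Rt) for Rt in calib["Rts"]]  # (4,4) extrinsics
    images = [o3d.t.io.read_image(f) for f in calib["images"]]

    # AVERAGE blends overlapping views smoothly (no seams, possibly blurry);
    # the default MAX keeps the sharpest contributing view for each texel.
    albedo = mesh.project_images_to_albedo(
        images, Ks, Rts, tex_size=1024, update_material=True,
        blending_method=o3d.t.geometry.BlendingMethod.AVERAGE)
    o3d.t.io.write_image("albedo_average.png", albedo)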