diff --git a/.github/workflows/ubuntu-wheel.yml b/.github/workflows/ubuntu-wheel.yml index bc0dcaf6c0f..0c42b06e67b 100644 --- a/.github/workflows/ubuntu-wheel.yml +++ b/.github/workflows/ubuntu-wheel.yml @@ -109,7 +109,7 @@ jobs: test-wheel-cpu: name: Test wheel CPU - runs-on: ubuntu-18.04 + runs-on: ubuntu-20.04 needs: [build-wheel] strategy: fail-fast: false diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index d41d3a2a1ce..66fc9aacb98 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -79,6 +79,13 @@ jobs: name: open3d-devel-linux-x86_64 path: open3d-devel-*.tar.xz if-no-files-found: error + - name: Upload viewer to GitHub artifacts + if: ${{ env.BUILD_SHARED_LIBS == 'OFF' }} + uses: actions/upload-artifact@v3 + with: + name: open3d-viewer-Linux + path: open3d-viewer-*-Linux.deb + if-no-files-found: error - name: GCloud CLI auth if: ${{ github.ref == 'refs/heads/master' }} uses: 'google-github-actions/auth@v1' diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index c414bb7136d..3e13d7854c1 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -420,7 +420,9 @@ jobs: # no need to run on Windows runs-on: ubuntu-latest if: ${{ github.ref == 'refs/heads/master' }} - needs: [build-wheel, windows] + # temp workaround for Windows CUDA Debug CI out of space. Still update docs. 
+ # needs: [build-wheel, windows] + needs: [build-wheel] steps: - name: GCloud CLI auth uses: google-github-actions/auth@v1 diff --git a/3rdparty/cmake/FindTBB.cmake b/3rdparty/cmake/FindTBB.cmake new file mode 100644 index 00000000000..da885c5fcd4 --- /dev/null +++ b/3rdparty/cmake/FindTBB.cmake @@ -0,0 +1,21 @@ +# Try to use pre-installed config +find_package(TBB CONFIG) +if(TARGET TBB::tbb) + set(TBB_FOUND TRUE) +else() + message(STATUS "Target TBB::tbb not defined, falling back to manual detection") + find_path(TBB_INCLUDE_DIR tbb/tbb.h) + find_library(TBB_LIBRARY tbb) + if(TBB_INCLUDE_DIR AND TBB_LIBRARY) + message(STATUS "TBB found: ${TBB_LIBRARY}") + add_library(TBB::tbb UNKNOWN IMPORTED) + set_target_properties(TBB::tbb PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${TBB_INCLUDE_DIR}" + IMPORTED_LOCATION "${TBB_LIBRARY}" + ) + set(TBB_FOUND TRUE) + else() + set(TBB_FOUND FALSE) + endif() +endif() + diff --git a/3rdparty/find_dependencies.cmake b/3rdparty/find_dependencies.cmake index fcde6ff7267..169bfbe1a54 100644 --- a/3rdparty/find_dependencies.cmake +++ b/3rdparty/find_dependencies.cmake @@ -269,7 +269,7 @@ endfunction() # If also defines targets, use them instead and pass them via TARGETS option. 
# function(open3d_find_package_3rdparty_library name) - cmake_parse_arguments(arg "PUBLIC;HEADER;REQUIRED;QUIET" "PACKAGE;PACKAGE_VERSION_VAR" "TARGETS;INCLUDE_DIRS;LIBRARIES" ${ARGN}) + cmake_parse_arguments(arg "PUBLIC;HEADER;REQUIRED;QUIET" "PACKAGE;VERSION;PACKAGE_VERSION_VAR" "TARGETS;INCLUDE_DIRS;LIBRARIES" ${ARGN}) if(arg_UNPARSED_ARGUMENTS) message(STATUS "Unparsed: ${arg_UNPARSED_ARGUMENTS}") message(FATAL_ERROR "Invalid syntax: open3d_find_package_3rdparty_library(${name} ${ARGN})") @@ -281,6 +281,9 @@ function(open3d_find_package_3rdparty_library name) set(arg_PACKAGE_VERSION_VAR "${arg_PACKAGE}_VERSION") endif() set(find_package_args "") + if(arg_VERSION) + list(APPEND find_package_args "${arg_VERSION}") + endif() if(arg_REQUIRED) list(APPEND find_package_args "REQUIRED") endif() @@ -539,11 +542,26 @@ endif() # cutlass if(BUILD_CUDA_MODULE) - include(${Open3D_3RDPARTY_DIR}/cutlass/cutlass.cmake) - open3d_import_3rdparty_library(3rdparty_cutlass - INCLUDE_DIRS ${CUTLASS_INCLUDE_DIRS} - DEPENDS ext_cutlass - ) + if(USE_SYSTEM_CUTLASS) + find_path(3rdparty_cutlass_INCLUDE_DIR NAMES cutlass/cutlass.h) + if(3rdparty_cutlass_INCLUDE_DIR) + add_library(3rdparty_cutlass INTERFACE) + target_include_directories(3rdparty_cutlass INTERFACE ${3rdparty_cutlass_INCLUDE_DIR}) + add_library(Open3D::3rdparty_cutlass ALIAS 3rdparty_cutlass) + if(NOT BUILD_SHARED_LIBS) + install(TARGETS 3rdparty_cutlass EXPORT ${PROJECT_NAME}Targets) + endif() + else() + set(USE_SYSTEM_CUTLASS OFF) + endif() + endif() + if(NOT USE_SYSTEM_CUTLASS) + include(${Open3D_3RDPARTY_DIR}/cutlass/cutlass.cmake) + open3d_import_3rdparty_library(3rdparty_cutlass + INCLUDE_DIRS ${CUTLASS_INCLUDE_DIRS} + DEPENDS ext_cutlass + ) + endif() list(APPEND Open3D_3RDPARTY_PRIVATE_TARGETS_FROM_CUSTOM Open3D::3rdparty_cutlass) endif() @@ -581,6 +599,7 @@ endif() if(USE_SYSTEM_NANOFLANN) open3d_find_package_3rdparty_library(3rdparty_nanoflann PACKAGE nanoflann + VERSION 1.5.0 TARGETS nanoflann::nanoflann ) if(NOT 
3rdparty_nanoflann_FOUND) @@ -1109,6 +1128,14 @@ open3d_import_3rdparty_library(3rdparty_poisson ) list(APPEND Open3D_3RDPARTY_PRIVATE_TARGETS_FROM_CUSTOM Open3D::3rdparty_poisson) +# Minizip +if(WITH_MINIZIP) + open3d_pkg_config_3rdparty_library(3rdparty_minizip + SEARCH_ARGS minizip + ) + list(APPEND Open3D_3RDPARTY_PRIVATE_TARGETS_FROM_SYSTEM Open3D::3rdparty_minizip) +endif() + # Googletest if (BUILD_UNIT_TESTS) if(USE_SYSTEM_GOOGLETEST) @@ -1429,9 +1456,15 @@ endif() # msgpack if(USE_SYSTEM_MSGPACK) open3d_find_package_3rdparty_library(3rdparty_msgpack - PACKAGE msgpack - TARGETS msgpackc + PACKAGE msgpack-cxx + TARGETS msgpack-cxx ) + if(NOT 3rdparty_msgpack_FOUND) + open3d_find_package_3rdparty_library(3rdparty_msgpack + PACKAGE msgpack + TARGETS msgpackc + ) + endif() if(NOT 3rdparty_msgpack_FOUND) open3d_pkg_config_3rdparty_library(3rdparty_msgpack SEARCH_ARGS msgpack @@ -1826,25 +1859,47 @@ endif () # Stdgpu if (BUILD_CUDA_MODULE) - include(${Open3D_3RDPARTY_DIR}/stdgpu/stdgpu.cmake) - open3d_import_3rdparty_library(3rdparty_stdgpu - INCLUDE_DIRS ${STDGPU_INCLUDE_DIRS} - LIB_DIR ${STDGPU_LIB_DIR} - LIBRARIES ${STDGPU_LIBRARIES} - DEPENDS ext_stdgpu - ) + if(USE_SYSTEM_STDGPU) + open3d_find_package_3rdparty_library(3rdparty_stdgpu + PACKAGE stdgpu + TARGETS stdgpu::stdgpu + ) + if(NOT 3rdparty_stdgpu_FOUND) + set(USE_SYSTEM_STDGPU OFF) + endif() + endif() + if(NOT USE_SYSTEM_STDGPU) + include(${Open3D_3RDPARTY_DIR}/stdgpu/stdgpu.cmake) + open3d_import_3rdparty_library(3rdparty_stdgpu + INCLUDE_DIRS ${STDGPU_INCLUDE_DIRS} + LIB_DIR ${STDGPU_LIB_DIR} + LIBRARIES ${STDGPU_LIBRARIES} + DEPENDS ext_stdgpu + ) + endif() list(APPEND Open3D_3RDPARTY_PRIVATE_TARGETS_FROM_CUSTOM Open3D::3rdparty_stdgpu) endif () # embree -include(${Open3D_3RDPARTY_DIR}/embree/embree.cmake) -open3d_import_3rdparty_library(3rdparty_embree - HIDDEN - INCLUDE_DIRS ${EMBREE_INCLUDE_DIRS} - LIB_DIR ${EMBREE_LIB_DIR} - LIBRARIES ${EMBREE_LIBRARIES} - DEPENDS ext_embree -) 
+if(USE_SYSTEM_EMBREE) + open3d_find_package_3rdparty_library(3rdparty_embree + PACKAGE embree + TARGETS embree + ) + if(NOT 3rdparty_embree_FOUND) + set(USE_SYSTEM_EMBREE OFF) + endif() +endif() +if(NOT USE_SYSTEM_EMBREE) + include(${Open3D_3RDPARTY_DIR}/embree/embree.cmake) + open3d_import_3rdparty_library(3rdparty_embree + HIDDEN + INCLUDE_DIRS ${EMBREE_INCLUDE_DIRS} + LIB_DIR ${EMBREE_LIB_DIR} + LIBRARIES ${EMBREE_LIBRARIES} + DEPENDS ext_embree + ) +endif() list(APPEND Open3D_3RDPARTY_PRIVATE_TARGETS_FROM_CUSTOM Open3D::3rdparty_embree) # WebRTC diff --git a/3rdparty/fmt/fmt.cmake b/3rdparty/fmt/fmt.cmake index f53e2f49214..88cd8e2fcef 100644 --- a/3rdparty/fmt/fmt.cmake +++ b/3rdparty/fmt/fmt.cmake @@ -2,9 +2,8 @@ include(ExternalProject) set(FMT_LIB_NAME fmt) -if (MSVC AND MSVC_VERSION VERSION_LESS 1930 OR - CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM") - # MSVC 17.x required for building fmt >6 +if (MSVC OR CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM") + # MSVC has errors when building fmt >6, up till 9.1 # SYCL / DPC++ needs fmt ver <=6 or >= 9.2: https://github.com/fmtlib/fmt/issues/3005 set(FMT_VER "6.0.0") set(FMT_SHA256 diff --git a/3rdparty/nanoflann/nanoflann.cmake b/3rdparty/nanoflann/nanoflann.cmake index 91437f9b77c..5afa4f22d03 100644 --- a/3rdparty/nanoflann/nanoflann.cmake +++ b/3rdparty/nanoflann/nanoflann.cmake @@ -3,8 +3,8 @@ include(ExternalProject) ExternalProject_Add( ext_nanoflann PREFIX nanoflann - URL https://github.com/jlblancoc/nanoflann/archive/refs/tags/v1.3.2.tar.gz - URL_HASH SHA256=e100b5fc8d72e9426a80312d852a62c05ddefd23f17cbb22ccd8b458b11d0bea + URL https://github.com/jlblancoc/nanoflann/archive/refs/tags/v1.5.0.tar.gz + URL_HASH SHA256=89aecfef1a956ccba7e40f24561846d064f309bc547cc184af7f4426e42f8e65 DOWNLOAD_DIR "${OPEN3D_THIRD_PARTY_DOWNLOAD_DIR}/nanoflann" UPDATE_COMMAND "" CONFIGURE_COMMAND "" diff --git a/3rdparty/possionrecon/possionrecon.cmake b/3rdparty/possionrecon/possionrecon.cmake index 0328804d0ce..ef5f87f940c 100644 
--- a/3rdparty/possionrecon/possionrecon.cmake +++ b/3rdparty/possionrecon/possionrecon.cmake @@ -3,8 +3,8 @@ include(ExternalProject) ExternalProject_Add( ext_poisson PREFIX poisson - URL https://github.com/isl-org/Open3D-PoissonRecon/archive/fd273ea8c77a36973d6565a495c9969ccfb12d3b.tar.gz - URL_HASH SHA256=917d98e037982d57a159fa166b259ff3dc90ffffe09c6a562a71b400f6869ddf + URL https://github.com/isl-org/Open3D-PoissonRecon/archive/90f3f064e275b275cff445881ecee5a7c495c9e0.tar.gz + URL_HASH SHA256=1310df0c80ff0616b8fcf9b2fb568aa9b2190d0e071b0ead47dba339c146b1d3 DOWNLOAD_DIR "${OPEN3D_THIRD_PARTY_DOWNLOAD_DIR}/poisson" SOURCE_DIR "poisson/src/ext_poisson/PoissonRecon" # Add extra directory level for POISSON_INCLUDE_DIRS. UPDATE_COMMAND "" diff --git a/CHANGELOG.md b/CHANGELOG.md index 72b7367b42a..8965478f135 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,12 @@ ## Master +* Fix tensor based TSDF integration example. +* Use GLIBCXX_USE_CXX11_ABI=ON by default * Python 3.9 support. Tensorflow bump 2.4.1 -> 2.5.0. PyTorch bump 1.7.1 -> 1.8.1 (LTS) * Fix undefined names: docstr and VisibleDeprecationWarning (PR #3844) * Corrected documentation for Tensor based PointClound, LineSet, TriangleMesh (PR #4685) * Corrected documentation for KDTree (typo in Notebook) (PR #4744) +* Corrected documentation for visualisation tutorial * Remove `setuptools` and `wheel` from requirements for end users (PR #5020) * Fix various typos (PR #5070) * Exposed more functionality in SLAM and odometry pipelines @@ -13,6 +16,8 @@ * Fix Python bindings for CUDA device synchronization, voxel grid saving (PR #5425) * Support msgpack versions without cmake * Changed TriangleMesh to store materials in a list so they can be accessed by the material index (PR #5938) +* Support multi-threading in the RayCastingScene function to commit scene (PR #6051). 
+* Fix some bad triangle generation in TriangleMesh::SimplifyQuadricDecimation ## 0.13 diff --git a/CMakeLists.txt b/CMakeLists.txt index 2c37ce5ff15..e8c97dbf848 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -65,11 +65,7 @@ else() option(STATIC_WINDOWS_RUNTIME "Use static (MT/MTd) Windows runtime" ON ) endif() option(BUILD_SYCL_MODULE "Build SYCL module with Intel oneAPI" OFF) -if(BUILD_SYCL_MODULE) - option(GLIBCXX_USE_CXX11_ABI "Set -D_GLIBCXX_USE_CXX11_ABI=1" ON ) -else() - option(GLIBCXX_USE_CXX11_ABI "Set -D_GLIBCXX_USE_CXX11_ABI=1" OFF) -endif() +option(GLIBCXX_USE_CXX11_ABI "Set -D_GLIBCXX_USE_CXX11_ABI=1" ON ) option(ENABLE_SYCL_UNIFIED_SHARED_MEMORY "Enable SYCL unified shared memory" OFF) if(BUILD_GUI AND (WIN32 OR UNIX AND NOT LINUX_AARCH64 AND NOT APPLE_AARCH64)) option(BUILD_WEBRTC "Build WebRTC visualizer" ON ) @@ -91,7 +87,9 @@ else() endif() option(USE_SYSTEM_ASSIMP "Use system pre-installed assimp" OFF) option(USE_SYSTEM_CURL "Use system pre-installed curl" OFF) +option(USE_SYSTEM_CUTLASS "Use system pre-installed cutlass" OFF) option(USE_SYSTEM_EIGEN3 "Use system pre-installed eigen3" OFF) +option(USE_SYSTEM_EMBREE "Use system pre-installed Embree" OFF) option(USE_SYSTEM_FILAMENT "Use system pre-installed filament" OFF) option(USE_SYSTEM_FMT "Use system pre-installed fmt" OFF) option(USE_SYSTEM_GLEW "Use system pre-installed glew" OFF) @@ -107,6 +105,7 @@ option(USE_SYSTEM_OPENSSL "Use system pre-installed OpenSSL" OFF option(USE_SYSTEM_PNG "Use system pre-installed png" OFF) option(USE_SYSTEM_PYBIND11 "Use system pre-installed pybind11" OFF) option(USE_SYSTEM_QHULLCPP "Use system pre-installed qhullcpp" OFF) +option(USE_SYSTEM_STDGPU "Use system pre-installed stdgpu" OFF) option(USE_SYSTEM_TBB "Use system pre-installed TBB" OFF) option(USE_SYSTEM_TINYGLTF "Use system pre-installed tinygltf" OFF) option(USE_SYSTEM_TINYOBJLOADER "Use system pre-installed tinyobjloader" OFF) @@ -121,6 +120,7 @@ else() endif() option(PREFER_OSX_HOMEBREW 
"Prefer Homebrew libs over frameworks" ON ) +option(WITH_MINIZIP "Enable MiniZIP" OFF) # Sensor options option(BUILD_LIBREALSENSE "Build support for Intel RealSense camera" OFF) diff --git a/README.md b/README.md index ced7235af9f..d3c583ee94f 100644 --- a/README.md +++ b/README.md @@ -45,7 +45,7 @@ For more, please visit the [Open3D documentation](http://www.open3d.org/docs). ## Python quick start Pre-built pip packages support Ubuntu 18.04+, macOS 10.15+ and Windows 10+ -(64-bit) with Python 3.6-3.10. +(64-bit) with Python 3.7-3.10. ```bash # Install @@ -87,8 +87,8 @@ To use Open3D in your C++ project, checkout the following examples -Open3D-Viewer is a standalone 3D viewer app available on Ubuntu and macOS. -Please stay tuned for Windows. Download Open3D Viewer from the +Open3D-Viewer is a standalone 3D viewer app available on Debian (Ubuntu), macOS +and Windows. Download Open3D Viewer from the [release page](https://github.com/isl-org/Open3D/releases). ## Open3D-ML diff --git a/cmake/Open3DPackaging.cmake b/cmake/Open3DPackaging.cmake index 1178cdd5376..e6ce4a3c15c 100644 --- a/cmake/Open3DPackaging.cmake +++ b/cmake/Open3DPackaging.cmake @@ -1,3 +1,6 @@ +# This is packaging for the Open3D library. 
See +# cpp/apps/Open3DViewer/Debian/CMakeLists.txt for packaging the Debian Open3D +# viewer set(CPACK_GENERATOR TXZ) if(WIN32) set(CPACK_GENERATOR ZIP) diff --git a/cpp/apps/CMakeLists.txt b/cpp/apps/CMakeLists.txt index 6380f272b2d..ac927bd147a 100644 --- a/cpp/apps/CMakeLists.txt +++ b/cpp/apps/CMakeLists.txt @@ -72,8 +72,17 @@ macro(open3d_add_app_gui SRC_DIR APP_NAME TARGET_NAME) RENAME "${APP_NAME}.xml") # Various caches need to be updated for the app to become visible install(CODE "execute_process(COMMAND ${SOURCE_DIR}/postinstall-linux.sh)") + configure_file("${SOURCE_DIR}/Debian/CMakeLists.in.txt" + "${CMAKE_BINARY_DIR}/package-${TARGET_NAME}-deb/CMakeLists.txt" @ONLY) + add_custom_target(package-${TARGET_NAME}-deb + COMMAND cp -a "${CMAKE_BINARY_DIR}/${APP_NAME}" . + COMMAND cp "${SOURCE_DIR}/icon.svg" "${APP_NAME}/${APP_NAME}.svg" + COMMAND cp "${SOURCE_DIR}/${TARGET_NAME}.xml" "${APP_NAME}/" + COMMAND "${CMAKE_COMMAND}" -S . + COMMAND "${CMAKE_COMMAND}" --build . -t package + WORKING_DIRECTORY "${CMAKE_BINARY_DIR}/package-${TARGET_NAME}-deb/" + DEPENDS ${TARGET_NAME}) elseif (WIN32) - # Don't create a command window on launch target_sources(${TARGET_NAME} PRIVATE "${SOURCE_DIR}/icon.rc") # add icon # MSVC puts the binary in bin/Open3D/Release/Open3D.exe diff --git a/cpp/apps/OfflineReconstruction/LegacyReconstructionUtil.h b/cpp/apps/OfflineReconstruction/LegacyReconstructionUtil.h index 9b8f495b680..26d30bfe182 100644 --- a/cpp/apps/OfflineReconstruction/LegacyReconstructionUtil.h +++ b/cpp/apps/OfflineReconstruction/LegacyReconstructionUtil.h @@ -1025,7 +1025,7 @@ class ReconstructionPipeline { Eigen::Matrix6d info; const size_t num_scale = voxel_size.size(); for (size_t i = 0; i < num_scale; i++) { - const double max_dis = config_["voxel_szie"].asDouble() * 1.4; + const double max_dis = config_["voxel_size"].asDouble() * 1.4; const auto src_down = src.VoxelDownSample(voxel_size[i]); const auto dst_down = dst.VoxelDownSample(voxel_size[i]); const 
pipelines::registration::ICPConvergenceCriteria criteria( diff --git a/cpp/apps/Open3DViewer/Debian/CMakeLists.in.txt b/cpp/apps/Open3DViewer/Debian/CMakeLists.in.txt new file mode 100644 index 00000000000..c318c212612 --- /dev/null +++ b/cpp/apps/Open3DViewer/Debian/CMakeLists.in.txt @@ -0,0 +1,36 @@ +# Create Debian package +cmake_minimum_required(VERSION 3.8.0) +project("Open3D-Debian") + +message(STATUS "Building package for Debian") + +# Install assets +install(DIRECTORY "Open3D" + DESTINATION share + USE_SOURCE_PERMISSIONS + PATTERN "Open3D/Open3D.svg" EXCLUDE + PATTERN "Open3D/Open3D.desktop" EXCLUDE + PATTERN "Open3D/Open3DViewer.xml" EXCLUDE + PATTERN "Open3D/Open3D" EXCLUDE + PATTERN "Open3D/CMakeLists.txt" EXCLUDE +) +install(FILES "Open3D/Open3D.desktop" DESTINATION /usr/share/applications) +install(FILES "Open3D/Open3DViewer.xml" DESTINATION /usr/share/mime/packages) +install(FILES "Open3D/Open3D.svg" DESTINATION /usr/share/icons/hicolor/scalable/apps) +install(PROGRAMS "Open3D/Open3D" DESTINATION bin) + +# CPACK parameter +set(CPACK_GENERATOR "DEB") +set(CPACK_PACKAGE_NAME "open3d-viewer") +set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Open3D Viewer for 3D files") +set(CPACK_PACKAGE_CONTACT "Open3D team <@PROJECT_EMAIL@>") +set(CPACK_DEBIAN_PACKAGE_SECTION "Graphics") +set(CPACK_PACKAGE_VERSION "@OPEN3D_VERSION@") +set(CPACK_DEBIAN_PACKAGE_DEPENDS "libc++1, libgomp1, libpng16-16, libglfw3") +set(CPACK_PACKAGE_HOMEPAGE_URL "@PROJECT_HOMEPAGE_URL@") + +# How to set cpack prefix: https://stackoverflow.com/a/7363073/1255535 +set(CPACK_SET_DESTDIR true) +set(CPACK_INSTALL_PREFIX /usr/local) + +include(CPack) diff --git a/cpp/apps/Open3DViewer/Open3DViewer.desktop.in b/cpp/apps/Open3DViewer/Open3DViewer.desktop.in index 196060fe9f3..3c2e73c1e43 100644 --- a/cpp/apps/Open3DViewer/Open3DViewer.desktop.in +++ b/cpp/apps/Open3DViewer/Open3DViewer.desktop.in @@ -3,7 +3,7 @@ Type=Application Name=Open3D Icon=Open3D Comment=Viewer for triangle meshes and point clouds 
-Exec=${CMAKE_INSTALL_PREFIX}/bin/Open3D/Open3D %f +Exec=${CMAKE_INSTALL_PREFIX}/bin/Open3D %f Terminal=false Categories=Graphics MimeType=model/stl;model/obj;model/fbx;model/gltf-binary;model/gltf+json;model/x.stl-ascii;model/x.stl-binary;model/x-ply;application/x-off;application/x-xyz;application/x-xyzn;application/x-xyzrgb;application/x-pcd;application/x-pts diff --git a/cpp/apps/Open3DViewer/postinstall-linux.sh b/cpp/apps/Open3DViewer/postinstall-linux.sh index 701af849adf..1310d306809 100755 --- a/cpp/apps/Open3DViewer/postinstall-linux.sh +++ b/cpp/apps/Open3DViewer/postinstall-linux.sh @@ -1,4 +1,4 @@ -#/bin/bash +#!/bin/sh if [ $(id -u) = 0 ]; then update-mime-database /usr/share/mime # add new MIME types diff --git a/cpp/benchmarks/t/geometry/PointCloud.cpp b/cpp/benchmarks/t/geometry/PointCloud.cpp index 39cc5f3368a..814587320a6 100644 --- a/cpp/benchmarks/t/geometry/PointCloud.cpp +++ b/cpp/benchmarks/t/geometry/PointCloud.cpp @@ -69,7 +69,7 @@ void LegacyVoxelDownSample(benchmark::State& state, float voxel_size) { void VoxelDownSample(benchmark::State& state, const core::Device& device, float voxel_size, - const core::HashBackendType& backend) { + const std::string& reduction) { t::geometry::PointCloud pcd; // t::io::CreatePointCloudFromFile lacks support of remove_inf_points and // remove_nan_points @@ -77,10 +77,10 @@ void VoxelDownSample(benchmark::State& state, pcd = pcd.To(device); // Warm up. 
- pcd.VoxelDownSample(voxel_size, backend); + pcd.VoxelDownSample(voxel_size, reduction); for (auto _ : state) { - pcd.VoxelDownSample(voxel_size, backend); + pcd.VoxelDownSample(voxel_size, reduction); core::cuda::Synchronize(device); } } @@ -387,28 +387,34 @@ BENCHMARK_CAPTURE(ToLegacyPointCloud, CUDA, core::Device("CUDA:0")) ->Unit(benchmark::kMillisecond); #endif -#define ENUM_VOXELSIZE(DEVICE, BACKEND) \ - BENCHMARK_CAPTURE(VoxelDownSample, BACKEND##_0_01, DEVICE, 0.01, BACKEND) \ - ->Unit(benchmark::kMillisecond); \ - BENCHMARK_CAPTURE(VoxelDownSample, BACKEND##_0_02, DEVICE, 0.08, BACKEND) \ - ->Unit(benchmark::kMillisecond); \ - BENCHMARK_CAPTURE(VoxelDownSample, BACKEND##_0_04, DEVICE, 0.04, BACKEND) \ - ->Unit(benchmark::kMillisecond); \ - BENCHMARK_CAPTURE(VoxelDownSample, BACKEND##_0_08, DEVICE, 0.08, BACKEND) \ - ->Unit(benchmark::kMillisecond); \ - BENCHMARK_CAPTURE(VoxelDownSample, BACKEND##_0_16, DEVICE, 0.16, BACKEND) \ - ->Unit(benchmark::kMillisecond); \ - BENCHMARK_CAPTURE(VoxelDownSample, BACKEND##_0_32, DEVICE, 0.32, BACKEND) \ +#define ENUM_VOXELSIZE(DEVICE, REDUCTION) \ + BENCHMARK_CAPTURE(VoxelDownSample, REDUCTION##_0_01, DEVICE, 0.01, \ + REDUCTION) \ + ->Unit(benchmark::kMillisecond); \ + BENCHMARK_CAPTURE(VoxelDownSample, REDUCTION##_0_02, DEVICE, 0.08, \ + REDUCTION) \ + ->Unit(benchmark::kMillisecond); \ + BENCHMARK_CAPTURE(VoxelDownSample, REDUCTION##_0_04, DEVICE, 0.04, \ + REDUCTION) \ + ->Unit(benchmark::kMillisecond); \ + BENCHMARK_CAPTURE(VoxelDownSample, REDUCTION##_0_08, DEVICE, 0.08, \ + REDUCTION) \ + ->Unit(benchmark::kMillisecond); \ + BENCHMARK_CAPTURE(VoxelDownSample, REDUCTION##_0_16, DEVICE, 0.16, \ + REDUCTION) \ + ->Unit(benchmark::kMillisecond); \ + BENCHMARK_CAPTURE(VoxelDownSample, REDUCTION##_0_32, DEVICE, 0.32, \ + REDUCTION) \ ->Unit(benchmark::kMillisecond); +const std::string kReductionMean = "mean"; #ifdef BUILD_CUDA_MODULE -#define ENUM_VOXELDOWNSAMPLE_BACKEND() \ - ENUM_VOXELSIZE(core::Device("CPU:0"), 
core::HashBackendType::TBB) \ - ENUM_VOXELSIZE(core::Device("CUDA:0"), core::HashBackendType::Slab) \ - ENUM_VOXELSIZE(core::Device("CUDA:0"), core::HashBackendType::StdGPU) +#define ENUM_VOXELDOWNSAMPLE_REDUCTION() \ + ENUM_VOXELSIZE(core::Device("CPU:0"), kReductionMean) \ + ENUM_VOXELSIZE(core::Device("CUDA:0"), kReductionMean) #else -#define ENUM_VOXELDOWNSAMPLE_BACKEND() \ - ENUM_VOXELSIZE(core::Device("CPU:0"), core::HashBackendType::TBB) +#define ENUM_VOXELDOWNSAMPLE_REDUCTION() \ + ENUM_VOXELSIZE(core::Device("CPU:0"), kReductionMean) #endif BENCHMARK_CAPTURE(LegacyVoxelDownSample, Legacy_0_01, 0.01) @@ -423,7 +429,7 @@ BENCHMARK_CAPTURE(LegacyVoxelDownSample, Legacy_0_16, 0.16) ->Unit(benchmark::kMillisecond); BENCHMARK_CAPTURE(LegacyVoxelDownSample, Legacy_0_32, 0.32) ->Unit(benchmark::kMillisecond); -ENUM_VOXELDOWNSAMPLE_BACKEND() +ENUM_VOXELDOWNSAMPLE_REDUCTION() BENCHMARK_CAPTURE(LegacyUniformDownSample, Legacy_2, 2) ->Unit(benchmark::kMillisecond); diff --git a/cpp/open3d/core/AdvancedIndexing.cpp b/cpp/open3d/core/AdvancedIndexing.cpp index f2bbf306c3f..86ce53148ba 100644 --- a/cpp/open3d/core/AdvancedIndexing.cpp +++ b/cpp/open3d/core/AdvancedIndexing.cpp @@ -206,7 +206,7 @@ void AdvancedIndexPreprocessor::RunPreprocess() { // If the indexed_shape_ contains a dimension of size 0 but the // replacement shape does not, the index is out of bounds. This is because // there is no valid number to index an empty tensor. - // Normally, out of bounds is detected in the advanded indexing kernel. We + // Normally, out of bounds is detected in the advanced indexing kernel. We // detected here for more helpful error message. 
auto contains_zero = [](const SizeVector& vals) -> bool { return std::any_of(vals.begin(), vals.end(), diff --git a/cpp/open3d/core/AdvancedIndexing.h b/cpp/open3d/core/AdvancedIndexing.h index b01d922a61d..17a8253085a 100644 --- a/cpp/open3d/core/AdvancedIndexing.h +++ b/cpp/open3d/core/AdvancedIndexing.h @@ -49,7 +49,7 @@ class AdvancedIndexPreprocessor { const Tensor& tensor, const std::vector& index_tensors); /// Expand all tensors to the broadcasted shape, 0-dim tensors are ignored. - /// Thorws exception if the common broadcasted shape does not exist. + /// Throws exception if the common broadcasted shape does not exist. static std::pair, SizeVector> ExpandToCommonShapeExceptZeroDim(const std::vector& index_tensors); @@ -127,7 +127,7 @@ class AdvancedIndexer { if (indexed_shape.size() != indexed_strides.size()) { utility::LogError( "Internal error: indexed_shape's ndim {} does not equal to " - "indexd_strides' ndim {}", + "indexed_strides' ndim {}", indexed_shape.size(), indexed_strides.size()); } num_indices_ = indexed_shape.size(); diff --git a/cpp/open3d/core/CMakeLists.txt b/cpp/open3d/core/CMakeLists.txt index 43f0802f4d0..d42e645da39 100644 --- a/cpp/open3d/core/CMakeLists.txt +++ b/cpp/open3d/core/CMakeLists.txt @@ -47,6 +47,8 @@ target_sources(core PRIVATE kernel/BinaryEWCPU.cpp kernel/IndexGetSet.cpp kernel/IndexGetSetCPU.cpp + kernel/IndexReduction.cpp + kernel/IndexReductionCPU.cpp kernel/Kernel.cpp kernel/NonZero.cpp kernel/NonZeroCPU.cpp @@ -90,6 +92,7 @@ if (BUILD_CUDA_MODULE) kernel/ArangeCUDA.cu kernel/BinaryEWCUDA.cu kernel/IndexGetSetCUDA.cu + kernel/IndexReductionCUDA.cu kernel/NonZeroCUDA.cu kernel/ReductionCUDA.cu kernel/UnaryEWCUDA.cu diff --git a/cpp/open3d/core/Tensor.cpp b/cpp/open3d/core/Tensor.cpp index 69e04acfb94..9ee485bd63a 100644 --- a/cpp/open3d/core/Tensor.cpp +++ b/cpp/open3d/core/Tensor.cpp @@ -22,6 +22,7 @@ #include "open3d/core/TensorFunction.h" #include "open3d/core/TensorKey.h" #include "open3d/core/kernel/Arange.h" 
+#include "open3d/core/kernel/IndexReduction.h" #include "open3d/core/kernel/Kernel.h" #include "open3d/core/linalg/Det.h" #include "open3d/core/linalg/Inverse.h" @@ -955,6 +956,43 @@ void Tensor::IndexSet(const std::vector& index_tensors, aip.GetIndexedShape(), aip.GetIndexedStrides()); } +void Tensor::IndexAdd_(int64_t dim, const Tensor& index, const Tensor& src) { + if (index.NumDims() != 1) { + utility::LogError("IndexAdd_ only supports 1D index tensors."); + } + + // Dim check. + if (dim < 0) { + utility::LogError("IndexAdd_ only supports sum at non-negative dim."); + } + if (NumDims() <= dim) { + utility::LogError("Sum dim {} exceeds tensor dim {}.", dim, NumDims()); + } + + // shape check + if (src.NumDims() != NumDims()) { + utility::LogError( + "IndexAdd_ only supports src tensor with same dimension as " + "this tensor."); + } + for (int64_t d = 0; d < NumDims(); ++d) { + if (d != dim && src.GetShape(d) != GetShape(d)) { + utility::LogError( + "IndexAdd_ only supports src tensor with same shape as " + "this " + "tensor except dim {}.", + dim); + } + } + + // Type check. + AssertTensorDtype(index, core::Int64); + AssertTensorDtype(*this, src.GetDtype()); + + // Apply kernel. + kernel::IndexAdd_(dim, index, src, *this); +} + Tensor Tensor::Permute(const SizeVector& dims) const { // Check dimension size if (static_cast(dims.size()) != NumDims()) { diff --git a/cpp/open3d/core/Tensor.h b/cpp/open3d/core/Tensor.h index 8b4d0280077..38422055a30 100644 --- a/cpp/open3d/core/Tensor.h +++ b/cpp/open3d/core/Tensor.h @@ -575,6 +575,16 @@ class Tensor : public IsDevice { void IndexSet(const std::vector& index_tensors, const Tensor& src_tensor); + /// \brief Advanced in-place reduction by index. + /// + /// See + /// https://pytorch.org/docs/stable/generated/torch.Tensor.index_add_.html + /// + /// self[index[i]] = operator(self[index[i]], src[i]). + /// + /// Note: Only support 1D index and src tensors now. 
+ void IndexAdd_(int64_t dim, const Tensor& index, const Tensor& src); + /// \brief Permute (dimension shuffle) the Tensor, returns a view. /// /// \param dims The desired ordering of dimensions. diff --git a/cpp/open3d/core/kernel/IndexReduction.cpp b/cpp/open3d/core/kernel/IndexReduction.cpp new file mode 100644 index 00000000000..128da56e370 --- /dev/null +++ b/cpp/open3d/core/kernel/IndexReduction.cpp @@ -0,0 +1,49 @@ +// ---------------------------------------------------------------------------- +// - Open3D: www.open3d.org - +// ---------------------------------------------------------------------------- +// Copyright (c) 2018-2023 www.open3d.org +// SPDX-License-Identifier: MIT +// ---------------------------------------------------------------------------- + +#include "open3d/core/kernel/IndexReduction.h" + +#include "open3d/utility/Logging.h" + +namespace open3d { +namespace core { +namespace kernel { + +void IndexAdd_(int64_t dim, + const Tensor& index, + const Tensor& src, + Tensor& dst) { + // Permute the reduction dimension to the first. 
+ SizeVector permute = {}; + for (int64_t d = 0; d <= dim; ++d) { + if (d == 0) { + permute.push_back(dim); + } else { + permute.push_back(d - 1); + } + } + for (int64_t d = dim + 1; d < src.NumDims(); ++d) { + permute.push_back(d); + } + + auto src_permute = src.Permute(permute); + auto dst_permute = dst.Permute(permute); + + if (dst.IsCPU()) { + IndexAddCPU_(dim, index, src_permute, dst_permute); + } else if (dst.IsCUDA()) { +#ifdef BUILD_CUDA_MODULE + IndexAddCUDA_(dim, index, src_permute, dst_permute); +#endif + } else { + utility::LogError("IndexAdd_: Unimplemented device"); + } +} + +} // namespace kernel +} // namespace core +} // namespace open3d diff --git a/cpp/open3d/core/kernel/IndexReduction.h b/cpp/open3d/core/kernel/IndexReduction.h new file mode 100644 index 00000000000..7035b53210b --- /dev/null +++ b/cpp/open3d/core/kernel/IndexReduction.h @@ -0,0 +1,36 @@ +// ---------------------------------------------------------------------------- +// - Open3D: www.open3d.org - +// ---------------------------------------------------------------------------- +// Copyright (c) 2018-2023 www.open3d.org +// SPDX-License-Identifier: MIT +// ---------------------------------------------------------------------------- + +#pragma once + +#include "open3d/core/Tensor.h" +#include "open3d/utility/Logging.h" + +namespace open3d { +namespace core { +namespace kernel { + +void IndexAdd_(int64_t dim, + const Tensor& index, + const Tensor& src, + Tensor& dst); + +void IndexAddCPU_(int64_t dim, + const Tensor& index, + const Tensor& src, + Tensor& dst); + +#ifdef BUILD_CUDA_MODULE +void IndexAddCUDA_(int64_t dim, + const Tensor& index, + const Tensor& src, + Tensor& dst); +#endif + +} // namespace kernel +} // namespace core +} // namespace open3d diff --git a/cpp/open3d/core/kernel/IndexReductionCPU.cpp b/cpp/open3d/core/kernel/IndexReductionCPU.cpp new file mode 100644 index 00000000000..4458832ec96 --- /dev/null +++ b/cpp/open3d/core/kernel/IndexReductionCPU.cpp @@ -0,0 
+1,79 @@ +// ---------------------------------------------------------------------------- +// - Open3D: www.open3d.org - +// ---------------------------------------------------------------------------- +// Copyright (c) 2018-2023 www.open3d.org +// SPDX-License-Identifier: MIT +// ---------------------------------------------------------------------------- + +#include "open3d/core/Dispatch.h" +#include "open3d/core/Indexer.h" +#include "open3d/core/Tensor.h" +#include "open3d/utility/Logging.h" + +namespace open3d { +namespace core { +namespace kernel { + +template +void LaunchIndexReductionKernel(int64_t dim, + const Device& device, + const Tensor& index, + const Tensor& src, + Tensor& dst, + const func_t& element_kernel) { + // index: [N,], src: [N, D], dst: [M, D] + // In Indexer, output shape defines the actual master strides. + // However, in IndexAdd_, input dominates the iterations. + // So put dst (output) at indexer's input, and src (input) at output. + Indexer indexer({dst}, src, DtypePolicy::NONE); + + // Index is simply a 1D contiguous tensor, with a different stride + // behavior to src. So use raw pointer for simplicity. + auto index_ptr = index.GetDataPtr(); + + int64_t broadcasting_elems = 1; + for (int64_t d = 1; d < src.NumDims(); ++d) { + broadcasting_elems *= src.GetShape(d); + } + auto element_func = [=](int64_t workload_idx) { + int reduction_idx = workload_idx / broadcasting_elems; + int broadcasting_idx = workload_idx % broadcasting_elems; + + const int64_t idx = index_ptr[reduction_idx]; + int64_t dst_idx = idx * broadcasting_elems + broadcasting_idx; + + void* src_ptr = indexer.GetOutputPtr(0, workload_idx); + void* dst_ptr = indexer.GetInputPtr(0, dst_idx); + // Note input and output is switched here to adapt to the indexer + element_kernel(src_ptr, dst_ptr); + }; + + // TODO: check in detail + // No OpenMP could be faster, otherwise there would be thousands of atomics. 
+ for (int64_t d = 0; d < indexer.NumWorkloads(); ++d) { + element_func(d); + } +} + +template +static OPEN3D_HOST_DEVICE void CPUSumKernel(const void* src, void* dst) { + scalar_t* dst_s_ptr = static_cast(dst); + const scalar_t* src_s_ptr = static_cast(src); + *dst_s_ptr += *src_s_ptr; +} + +void IndexAddCPU_(int64_t dim, + const Tensor& index, + const Tensor& src, + Tensor& dst) { + DISPATCH_FLOAT_DTYPE_TO_TEMPLATE(src.GetDtype(), [&]() { + LaunchIndexReductionKernel(dim, src.GetDevice(), index, src, dst, + [](const void* src, void* dst) { + CPUSumKernel(src, dst); + }); + }); +} + +} // namespace kernel +} // namespace core +} // namespace open3d diff --git a/cpp/open3d/core/kernel/IndexReductionCUDA.cu b/cpp/open3d/core/kernel/IndexReductionCUDA.cu new file mode 100644 index 00000000000..24c913a46af --- /dev/null +++ b/cpp/open3d/core/kernel/IndexReductionCUDA.cu @@ -0,0 +1,82 @@ +// ---------------------------------------------------------------------------- +// - Open3D: www.open3d.org - +// ---------------------------------------------------------------------------- +// Copyright (c) 2018-2023 www.open3d.org +// SPDX-License-Identifier: MIT +// ---------------------------------------------------------------------------- +#include + +#include "open3d/core/CUDAUtils.h" +#include "open3d/core/Dispatch.h" +#include "open3d/core/Indexer.h" +#include "open3d/core/ParallelFor.h" +#include "open3d/core/Tensor.h" +#include "open3d/t/geometry/kernel/GeometryMacros.h" + +namespace open3d { +namespace core { +namespace kernel { + +template +void LaunchIndexReductionKernel(int64_t dim, + const Device& device, + const Tensor& index, + const Tensor& src, + Tensor& dst, + const func_t& element_kernel) { + OPEN3D_ASSERT_HOST_DEVICE_LAMBDA(func_t); + + // index: [N,], src: [N, D], dst: [M, D] + // In Indexer, output shape defines the actual master strides. + // However, in IndexAdd_, input dominates the iterations. 
+ // So put dst (output) at indexer's input, and src (input) at output. + Indexer indexer({dst}, src, DtypePolicy::NONE); + + // Index is simply a 1D contiguous tensor, with a different stride + // behavior to src. So use raw pointer for simplicity. + auto index_ptr = index.GetDataPtr(); + + int64_t broadcasting_elems = 1; + for (int64_t d = 1; d < src.NumDims(); ++d) { + broadcasting_elems *= src.GetShape(d); + } + auto element_func = [=] OPEN3D_HOST_DEVICE(int64_t workload_idx) { + int reduction_idx = workload_idx / broadcasting_elems; + int broadcasting_idx = workload_idx % broadcasting_elems; + + const int64_t idx = index_ptr[reduction_idx]; + int64_t dst_idx = idx * broadcasting_elems + broadcasting_idx; + + void* src_ptr = indexer.GetOutputPtr(0, workload_idx); + void* dst_ptr = indexer.GetInputPtr(0, dst_idx); + // Note input and output is switched here to adapt to the indexer. + element_kernel(src_ptr, dst_ptr); + }; + + ParallelFor(device, indexer.NumWorkloads(), element_func); + OPEN3D_GET_LAST_CUDA_ERROR("LaunchIndexReductionKernel failed."); +} + +template +static OPEN3D_HOST_DEVICE void CUDASumKernel(const void* src, void* dst) { + scalar_t* dst_s_ptr = static_cast(dst); + const scalar_t* src_s_ptr = static_cast(src); + atomicAdd(dst_s_ptr, *src_s_ptr); +} + +void IndexAddCUDA_(int64_t dim, + const Tensor& index, + const Tensor& src, + Tensor& dst) { + DISPATCH_FLOAT_DTYPE_TO_TEMPLATE(src.GetDtype(), [&]() { + LaunchIndexReductionKernel( + dim, src.GetDevice(), index, src, dst, + [] OPEN3D_HOST_DEVICE(const void* src, void* dst) { + CUDASumKernel(src, dst); + }); + }); +} + +} // namespace kernel +} // namespace core +} // namespace open3d diff --git a/cpp/open3d/core/nns/NanoFlannImpl.h b/cpp/open3d/core/nns/NanoFlannImpl.h index f090611818e..027d6d0c4ee 100644 --- a/cpp/open3d/core/nns/NanoFlannImpl.h +++ b/cpp/open3d/core/nns/NanoFlannImpl.h @@ -233,7 +233,7 @@ void _RadiusSearchCPU(NanoFlannIndexHolderBase *holder, std::vector> 
neighbors_distances(num_queries); std::vector neighbors_count(num_queries, 0); - nanoflann::SearchParams params; + nanoflann::SearchParameters params; params.sorted = sort; auto holder_ = @@ -241,7 +241,7 @@ void _RadiusSearchCPU(NanoFlannIndexHolderBase *holder, tbb::parallel_for( tbb::blocked_range(0, num_queries), [&](const tbb::blocked_range &r) { - std::vector> search_result; + std::vector> search_result; for (size_t i = r.begin(); i != r.end(); ++i) { T radius = radii[i]; if (METRIC == L2) { @@ -346,7 +346,7 @@ void _HybridSearchCPU(NanoFlannIndexHolderBase *holder, output_allocator.AllocDistances(&distances_ptr, num_indices); output_allocator.AllocCounts(&counts_ptr, num_queries); - nanoflann::SearchParams params; + nanoflann::SearchParameters params; params.sorted = true; auto holder_ = @@ -354,7 +354,7 @@ void _HybridSearchCPU(NanoFlannIndexHolderBase *holder, tbb::parallel_for( tbb::blocked_range(0, num_queries), [&](const tbb::blocked_range &r) { - std::vector> ret_matches; + std::vector> ret_matches; for (size_t i = r.begin(); i != r.end(); ++i) { size_t num_results = holder_->index_->radiusSearch( &queries[i * dimension], radius_squared, diff --git a/cpp/open3d/data/Dataset.h b/cpp/open3d/data/Dataset.h index 97b7ea9fd8f..417e3515dce 100644 --- a/cpp/open3d/data/Dataset.h +++ b/cpp/open3d/data/Dataset.h @@ -723,26 +723,26 @@ class PaintedPlasterTexture : public DownloadDataset { /// cloud, rgb sequence, clean depth sequence, noisy depth sequence, oni /// sequence, and ground-truth camera trajectory. /// -/// RedwoodIndoorLivingRoom1 -/// ├── colors -/// │ ├── 00000.jpg -/// │ ├── 00001.jpg -/// │ ├── ... -/// │ └── 02869.jpg -/// ├── depth -/// │ ├── 00000.png -/// │ ├── 00001.png -/// │ ├── ... -/// │ └── 02869.png -/// ├── depth_noisy -/// │ ├── 00000.png -/// │ ├── 00001.png -/// │ ├── ... 
-/// │ └── 02869.png -/// ├── dist-model.txt -/// ├── livingroom1.oni -/// ├── livingroom1-traj.txt -/// └── livingroom.ply +/// RedwoodIndoorLivingRoom1 +/// ├── colors +/// │ ├── 00000.jpg +/// │ ├── 00001.jpg +/// │ ├── ... +/// │ └── 02869.jpg +/// ├── depth +/// │ ├── 00000.png +/// │ ├── 00001.png +/// │ ├── ... +/// │ └── 02869.png +/// ├── depth_noisy +/// │ ├── 00000.png +/// │ ├── 00001.png +/// │ ├── ... +/// │ └── 02869.png +/// ├── dist-model.txt +/// ├── livingroom1.oni +/// ├── livingroom1-traj.txt +/// └── livingroom.ply class RedwoodIndoorLivingRoom1 : public DownloadDataset { public: RedwoodIndoorLivingRoom1(const std::string& data_root = ""); @@ -775,30 +775,30 @@ class RedwoodIndoorLivingRoom1 : public DownloadDataset { }; /// \class RedwoodIndoorLivingRoom2 (Augmented ICL-NUIM Dataset) -/// \brief Data class for `RedwoodIndoorLivingRoom1`, containing dense point +/// \brief Data class for `RedwoodIndoorLivingRoom2`, containing dense point /// cloud, rgb sequence, clean depth sequence, noisy depth sequence, oni /// sequence, and ground-truth camera trajectory. /// -/// RedwoodIndoorLivingRoom2 -/// ├── colors -/// │ ├── 00000.jpg -/// │ ├── 00001.jpg -/// │ ├── ... -/// │ └── 02349.jpg -/// ├── depth -/// │ ├── 00000.png -/// │ ├── 00001.png -/// │ ├── ... -/// │ └── 02349.png -/// ├── depth_noisy -/// │ ├── 00000.png -/// │ ├── 00001.png -/// │ ├── ... -/// │ └── 02349.png -/// ├── dist-model.txt -/// ├── livingroom2.oni -/// ├── livingroom2-traj.txt -/// └── livingroom.ply +/// RedwoodIndoorLivingRoom2 +/// ├── colors +/// │ ├── 00000.jpg +/// │ ├── 00001.jpg +/// │ ├── ... +/// │ └── 02349.jpg +/// ├── depth +/// │ ├── 00000.png +/// │ ├── 00001.png +/// │ ├── ... +/// │ └── 02349.png +/// ├── depth_noisy +/// │ ├── 00000.png +/// │ ├── 00001.png +/// │ ├── ... 
+/// │ └── 02349.png +/// ├── dist-model.txt +/// ├── livingroom2.oni +/// ├── livingroom2-traj.txt +/// └── livingroom.ply class RedwoodIndoorLivingRoom2 : public DownloadDataset { public: RedwoodIndoorLivingRoom2(const std::string& data_root = ""); @@ -831,30 +831,30 @@ class RedwoodIndoorLivingRoom2 : public DownloadDataset { }; /// \class RedwoodIndoorOffice1 (Augmented ICL-NUIM Dataset) -/// \brief Data class for `RedwoodIndoorLivingRoom1`, containing dense point +/// \brief Data class for `RedwoodIndoorOffice1`, containing dense point /// cloud, rgb sequence, clean depth sequence, noisy depth sequence, oni /// sequence, and ground-truth camera trajectory. /// -/// RedwoodIndoorOffice1 -/// ├── colors -/// │ ├── 00000.jpg -/// │ ├── 00001.jpg -/// │ ├── ... -/// │ └── 02689.jpg -/// ├── depth -/// │ ├── 00000.png -/// │ ├── 00001.png -/// │ ├── ... -/// │ └── 02689.png -/// ├── depth_noisy -/// │ ├── 00000.png -/// │ ├── 00001.png -/// │ ├── ... -/// │ └── 02689.png -/// ├── dist-model.txt -/// ├── office1.oni -/// ├── office1-traj.txt -/// └── office.ply +/// RedwoodIndoorOffice1 +/// ├── colors +/// │ ├── 00000.jpg +/// │ ├── 00001.jpg +/// │ ├── ... +/// │ └── 02689.jpg +/// ├── depth +/// │ ├── 00000.png +/// │ ├── 00001.png +/// │ ├── ... +/// │ └── 02689.png +/// ├── depth_noisy +/// │ ├── 00000.png +/// │ ├── 00001.png +/// │ ├── ... 
+/// │ └── 02689.png +/// ├── dist-model.txt +/// ├── office1.oni +/// ├── office1-traj.txt +/// └── office.ply class RedwoodIndoorOffice1 : public DownloadDataset { public: RedwoodIndoorOffice1(const std::string& data_root = ""); @@ -887,30 +887,30 @@ class RedwoodIndoorOffice1 : public DownloadDataset { }; /// \class RedwoodIndoorOffice2 (Augmented ICL-NUIM Dataset) -/// \brief Data class for `RedwoodIndoorLivingRoom1`, containing dense point +/// \brief Data class for `RedwoodIndoorOffice2`, containing dense point /// cloud, rgb sequence, clean depth sequence, noisy depth sequence, oni /// sequence, and ground-truth camera trajectory. /// -/// RedwoodIndoorOffice2 -/// ├── colors -/// │ ├── 00000.jpg -/// │ ├── 00001.jpg -/// │ ├── ... -/// │ └── 02537.jpg -/// ├── depth -/// │ ├── 00000.png -/// │ ├── 00001.png -/// │ ├── ... -/// │ └── 02537.png -/// ├── depth_noisy -/// │ ├── 00000.png -/// │ ├── 00001.png -/// │ ├── ... -/// │ └── 02537.png -/// ├── dist-model.txt -/// ├── office2.oni -/// ├── office2-traj.txt -/// └── office.ply +/// RedwoodIndoorOffice2 +/// ├── colors +/// │ ├── 00000.jpg +/// │ ├── 00001.jpg +/// │ ├── ... +/// │ └── 02537.jpg +/// ├── depth +/// │ ├── 00000.png +/// │ ├── 00001.png +/// │ ├── ... +/// │ └── 02537.png +/// ├── depth_noisy +/// │ ├── 00000.png +/// │ ├── 00001.png +/// │ ├── ... +/// │ └── 02537.png +/// ├── dist-model.txt +/// ├── office2.oni +/// ├── office2-traj.txt +/// └── office.ply class RedwoodIndoorOffice2 : public DownloadDataset { public: RedwoodIndoorOffice2(const std::string& data_root = ""); diff --git a/cpp/open3d/geometry/BoundingVolume.h b/cpp/open3d/geometry/BoundingVolume.h index 11586a2132d..e808b232612 100644 --- a/cpp/open3d/geometry/BoundingVolume.h +++ b/cpp/open3d/geometry/BoundingVolume.h @@ -21,7 +21,7 @@ class AxisAlignedBoundingBox; /// \brief A bounding box oriented along an arbitrary frame of reference. 
/// /// The oriented bounding box is defined by its center position, rotation -/// maxtrix and extent. +/// matrix and extent. class OrientedBoundingBox : public Geometry3D { public: /// \brief Default constructor. diff --git a/cpp/open3d/geometry/EstimateNormals.cpp b/cpp/open3d/geometry/EstimateNormals.cpp index ac084234c25..5bc9abc0ba3 100644 --- a/cpp/open3d/geometry/EstimateNormals.cpp +++ b/cpp/open3d/geometry/EstimateNormals.cpp @@ -359,7 +359,10 @@ void PointCloud::OrientNormalsTowardsCameraLocation( } } -void PointCloud::OrientNormalsConsistentTangentPlane(size_t k) { +void PointCloud::OrientNormalsConsistentTangentPlane( + size_t k, + const double lambda /* = 0.0*/, + const double cos_alpha_tol /* = 1.0*/) { if (!HasNormals()) { utility::LogError( "No normals in the PointCloud. Call EstimateNormals() first."); @@ -380,8 +383,20 @@ void PointCloud::OrientNormalsConsistentTangentPlane(size_t k) { v1 = pt_map[v1]; size_t edge = EdgeIndex(v0, v1); if (graph_edges.count(edge) == 0) { - double dist = (points_[v0] - points_[v1]).squaredNorm(); - delaunay_graph.push_back(WeightedEdge(v0, v1, dist)); + const auto diff = points_[v0] - points_[v1]; + // penalization on normal-plane distance + double dist = diff.squaredNorm(); + double penalization = lambda * std::abs(diff.dot(normals_[v0])); + + // if cos_alpha_tol < 1 some edges will be excluded. In particular + // the ones connecting points that form an angle below a certain + // threshold (defined by the cosine) + double cos_alpha = + std::abs(diff.dot(normals_[v0])) / std::sqrt(dist); + if (cos_alpha > cos_alpha_tol) + dist = std::numeric_limits::infinity(); + + delaunay_graph.push_back(WeightedEdge(v0, v1, dist + penalization)); graph_edges.insert(edge); } }; @@ -403,12 +418,50 @@ void PointCloud::OrientNormalsConsistentTangentPlane(size_t k) { edge.weight_ = NormalWeight(edge.v0_, edge.v1_); } + // The function below takes v0 and its neighbors as inputs. 
+ // The function returns the quartiles of the distances between the neighbors + // and a plane defined by the normal vector of v0 and the point v0. + auto compute_q1q3 = + [&](size_t v0, + std::vector neighbors) -> std::array { + std::vector dist_plane; + + for (size_t vidx1 = 0; vidx1 < neighbors.size(); ++vidx1) { + size_t v1 = size_t(neighbors[vidx1]); + const auto diff = points_[v0] - points_[v1]; + double dist = std::abs(diff.dot(normals_[v0])); + dist_plane.push_back(dist); + } + std::vector dist_plane_ord = dist_plane; + std::sort(dist_plane_ord.begin(), dist_plane_ord.end()); + // calculate quartiles + int q1_idx = static_cast(dist_plane_ord.size() * 0.25); + double q1 = dist_plane_ord[q1_idx]; + + int q3_idx = static_cast(dist_plane_ord.size() * 0.75); + double q3 = dist_plane_ord[q3_idx]; + + std::array q1q3; + q1q3[0] = q1; + q1q3[1] = q3; + + return q1q3; + }; + // Add k nearest neighbors to Riemannian graph KDTreeFlann kdtree(*this); for (size_t v0 = 0; v0 < points_.size(); ++v0) { std::vector neighbors; std::vector dists2; + kdtree.SearchKNN(points_[v0], int(k), neighbors, dists2); + + const double DEFAULT_VALUE = std::numeric_limits::quiet_NaN(); + std::array q1q3 = + lambda == 0 + ? std::array{DEFAULT_VALUE, DEFAULT_VALUE} + : compute_q1q3(v0, neighbors); + for (size_t vidx1 = 0; vidx1 < neighbors.size(); ++vidx1) { size_t v1 = size_t(neighbors[vidx1]); if (v0 == v1) { @@ -416,6 +469,25 @@ void PointCloud::OrientNormalsConsistentTangentPlane(size_t k) { } size_t edge = EdgeIndex(v0, v1); if (graph_edges.count(edge) == 0) { + const auto diff = points_[v0] - points_[v1]; + double normal_dist = std::abs(diff.dot(normals_[v0])); + + double dist = diff.squaredNorm(); + + // if cos_alpha_tol < 1 some edges will be excluded. 
In + // particular the ones connecting points that form an angle + // below a certain threshold (defined by the cosine) + double cos_alpha = + std::abs(diff.dot(normals_[v0])) / std::sqrt(dist); + if (cos_alpha > cos_alpha_tol) continue; + + // if we are in a penalizing framework do not consider outliers + // in terms of distance from the plane (if any) + if (lambda != 0) { + double iqr = q1q3[1] - q1q3[0]; + if (normal_dist > q1q3[1] + 1.5 * iqr) continue; + } + double weight = NormalWeight(v0, v1); mst.push_back(WeightedEdge(v0, v1, weight)); graph_edges.insert(edge); @@ -436,13 +508,14 @@ void PointCloud::OrientNormalsConsistentTangentPlane(size_t k) { } // find start node for tree traversal - // init with node that maximizes z - double max_z = std::numeric_limits::lowest(); + // init with node that minimizes z + + double min_z = std::numeric_limits::max(); size_t v0 = 0; for (size_t vidx = 0; vidx < points_.size(); ++vidx) { const Eigen::Vector3d &v = points_[vidx]; - if (v(2) > max_z) { - max_z = v(2); + if (v(2) < min_z) { + min_z = v(2); v0 = vidx; } } @@ -457,7 +530,7 @@ void PointCloud::OrientNormalsConsistentTangentPlane(size_t k) { n1 *= -1; } }; - TestAndOrientNormal(Eigen::Vector3d(0, 0, 1), normals_[v0]); + TestAndOrientNormal(Eigen::Vector3d(0, 0, -1), normals_[v0]); while (!traversal_queue.empty()) { v0 = traversal_queue.front(); traversal_queue.pop(); diff --git a/cpp/open3d/geometry/ISSKeypoints.cpp b/cpp/open3d/geometry/ISSKeypoints.cpp index 39f8602a737..f47abae1ad2 100644 --- a/cpp/open3d/geometry/ISSKeypoints.cpp +++ b/cpp/open3d/geometry/ISSKeypoints.cpp @@ -11,8 +11,10 @@ #include #include +#include #include #include +#include #include #include @@ -29,27 +31,26 @@ namespace { bool IsLocalMaxima(int query_idx, const std::vector& indices, const std::vector& third_eigen_values) { - for (const auto& idx : indices) { - if (third_eigen_values[query_idx] < third_eigen_values[idx]) { - return false; - } - } - return true; + return std::none_of( + 
indices.begin(), indices.end(), + [&third_eigen_values, value = third_eigen_values[query_idx]]( + const int idx) { return value < third_eigen_values[idx]; }); } double ComputeModelResolution(const std::vector& points, const geometry::KDTreeFlann& kdtree) { std::vector indices(2); std::vector distances(2); - double resolution = 0.0; - - for (const auto& point : points) { - if (kdtree.SearchKNN(point, 2, indices, distances) != 0) { - resolution += std::sqrt(distances[1]); - } - } - resolution /= points.size(); - return resolution; + const double resolution = std::accumulate( + points.begin(), points.end(), 0., + [&](double state, const Eigen::Vector3d& point) { + if (kdtree.SearchKNN(point, 2, indices, distances) >= 2) { + state += std::sqrt(distances[1]); + } + return state; + }); + + return resolution / static_cast(points.size()); } } // namespace @@ -118,6 +119,7 @@ std::shared_ptr ComputeISSKeypoints( if (nb_neighbors >= min_neighbors && IsLocalMaxima(i, nn_indices, third_eigen_values)) { +#pragma omp critical kp_indices.emplace_back(i); } } diff --git a/cpp/open3d/geometry/KDTreeFlann.cpp b/cpp/open3d/geometry/KDTreeFlann.cpp index 4324e9cbe36..d93175a8d43 100644 --- a/cpp/open3d/geometry/KDTreeFlann.cpp +++ b/cpp/open3d/geometry/KDTreeFlann.cpp @@ -104,7 +104,7 @@ int KDTreeFlann::SearchKNN(const T &query, indices.resize(knn); distance2.resize(knn); std::vector indices_eigen(knn); - int k = nanoflann_index_->index->knnSearch( + int k = nanoflann_index_->index_->knnSearch( query.data(), knn, indices_eigen.data(), distance2.data()); indices.resize(k); distance2.resize(k); @@ -125,10 +125,10 @@ int KDTreeFlann::SearchRadius(const T &query, size_t(query.rows()) != dimension_) { return -1; } - std::vector> indices_dists; - int k = nanoflann_index_->index->radiusSearch( + std::vector> indices_dists; + int k = nanoflann_index_->index_->radiusSearch( query.data(), radius * radius, indices_dists, - nanoflann::SearchParams(-1, 0.0)); + nanoflann::SearchParameters(0.0)); 
indices.resize(k); distance2.resize(k); for (int i = 0; i < k; ++i) { @@ -154,7 +154,7 @@ int KDTreeFlann::SearchHybrid(const T &query, } distance2.resize(max_nn); std::vector indices_eigen(max_nn); - int k = nanoflann_index_->index->knnSearch( + int k = nanoflann_index_->index_->knnSearch( query.data(), max_nn, indices_eigen.data(), distance2.data()); k = std::distance(distance2.begin(), std::lower_bound(distance2.begin(), distance2.begin() + k, @@ -178,7 +178,7 @@ bool KDTreeFlann::SetRawData(const Eigen::Map &data) { data_interface_.reset(new Eigen::Map(data)); nanoflann_index_.reset( new KDTree_t(dimension_, std::cref(*data_interface_), 15)); - nanoflann_index_->index->buildIndex(); + nanoflann_index_->index_->buildIndex(); return true; } diff --git a/cpp/open3d/geometry/PointCloud.cpp b/cpp/open3d/geometry/PointCloud.cpp index 3e55bc3acd6..b0b6e0130ae 100644 --- a/cpp/open3d/geometry/PointCloud.cpp +++ b/cpp/open3d/geometry/PointCloud.cpp @@ -541,23 +541,25 @@ std::shared_ptr PointCloud::FarthestPointDownSample( return SelectByIndex(selected_indices); } -std::shared_ptr PointCloud::Crop( - const AxisAlignedBoundingBox &bbox) const { +std::shared_ptr PointCloud::Crop(const AxisAlignedBoundingBox &bbox, + bool invert) const { if (bbox.IsEmpty()) { utility::LogError( "AxisAlignedBoundingBox either has zeros size, or has wrong " "bounds."); } - return SelectByIndex(bbox.GetPointIndicesWithinBoundingBox(points_)); + return SelectByIndex(bbox.GetPointIndicesWithinBoundingBox(points_), + invert); } -std::shared_ptr PointCloud::Crop( - const OrientedBoundingBox &bbox) const { +std::shared_ptr PointCloud::Crop(const OrientedBoundingBox &bbox, + bool invert) const { if (bbox.IsEmpty()) { utility::LogError( "AxisAlignedBoundingBox either has zeros size, or has wrong " "bounds."); } - return SelectByIndex(bbox.GetPointIndicesWithinBoundingBox(points_)); + return SelectByIndex(bbox.GetPointIndicesWithinBoundingBox(points_), + invert); } std::tuple, std::vector> diff --git 
a/cpp/open3d/geometry/PointCloud.h b/cpp/open3d/geometry/PointCloud.h index b55840106b6..fec0d3033d1 100644 --- a/cpp/open3d/geometry/PointCloud.h +++ b/cpp/open3d/geometry/PointCloud.h @@ -185,7 +185,9 @@ class PointCloud : public Geometry3D { /// clipped. /// /// \param bbox AxisAlignedBoundingBox to crop points. - std::shared_ptr Crop(const AxisAlignedBoundingBox &bbox) const; + /// \param invert Optional boolean to invert cropping. + std::shared_ptr Crop(const AxisAlignedBoundingBox &bbox, + bool invert = false) const; /// \brief Function to crop pointcloud into output pointcloud /// @@ -193,7 +195,9 @@ class PointCloud : public Geometry3D { /// clipped. /// /// \param bbox OrientedBoundingBox to crop points. - std::shared_ptr Crop(const OrientedBoundingBox &bbox) const; + /// \param invert Optional boolean to invert cropping. + std::shared_ptr Crop(const OrientedBoundingBox &bbox, + bool invert = false) const; /// \brief Function to remove points that have less than \p nb_points in a /// sphere of a given radius. @@ -248,10 +252,18 @@ class PointCloud : public Geometry3D { /// \brief Function to consistently orient estimated normals based on /// consistent tangent planes as described in Hoppe et al., "Surface /// Reconstruction from Unorganized Points", 1992. + /// Further details on parameters are described in + /// Piazza, Valentini, Varetti, "Mesh Reconstruction from Point Cloud", + /// 2023. /// /// \param k k nearest neighbour for graph reconstruction for normal /// propagation. - void OrientNormalsConsistentTangentPlane(size_t k); + /// \param lambda penalty constant on the distance of a point from the + /// tangent plane \param cos_alpha_tol threshold that defines the amplitude + /// of the cone spanned by the reference normal + void OrientNormalsConsistentTangentPlane(size_t k, + const double lambda = 0.0, + const double cos_alpha_tol = 1.0); /// \brief Function to compute the point to point distances between point /// clouds. 
diff --git a/cpp/open3d/geometry/PointCloudSegmentation.cpp b/cpp/open3d/geometry/PointCloudSegmentation.cpp index 8e4f65f626b..2d571507934 100644 --- a/cpp/open3d/geometry/PointCloudSegmentation.cpp +++ b/cpp/open3d/geometry/PointCloudSegmentation.cpp @@ -29,6 +29,7 @@ class RandomSampler { explicit RandomSampler(const size_t total_size) : total_size_(total_size) {} std::vector operator()(size_t sample_size) { + std::lock_guard lock(mutex_); std::vector samples; samples.reserve(sample_size); @@ -48,6 +49,7 @@ class RandomSampler { private: size_t total_size_; + std::mutex mutex_; }; /// \class RANSACResult @@ -64,23 +66,24 @@ class RANSACResult { }; // Calculates the number of inliers given a list of points and a plane model, -// and the total distance between the inliers and the plane. These numbers are -// then used to evaluate how well the plane model fits the given points. +// and the total squared point-to-plane distance. +// These numbers are then used to evaluate how well the plane model fits the +// given points. 
RANSACResult EvaluateRANSACBasedOnDistance( const std::vector &points, const Eigen::Vector4d plane_model, std::vector &inliers, - double distance_threshold, - double error) { + double distance_threshold) { RANSACResult result; + double error = 0; for (size_t idx = 0; idx < points.size(); ++idx) { Eigen::Vector4d point(points[idx](0), points[idx](1), points[idx](2), 1); double distance = std::abs(plane_model.dot(point)); if (distance < distance_threshold) { - error += distance; + error += distance * distance; inliers.emplace_back(idx); } } @@ -91,7 +94,7 @@ RANSACResult EvaluateRANSACBasedOnDistance( result.inlier_rmse_ = 0; } else { result.fitness_ = (double)inlier_num / (double)points.size(); - result.inlier_rmse_ = error / std::sqrt((double)inlier_num); + result.inlier_rmse_ = std::sqrt(error / (double)inlier_num); } return result; } @@ -202,10 +205,9 @@ std::tuple> PointCloud::SegmentPlane( continue; } - double error = 0; inliers.clear(); auto this_result = EvaluateRANSACBasedOnDistance( - points_, plane_model, inliers, distance_threshold, error); + points_, plane_model, inliers, distance_threshold); #pragma omp critical { if (this_result.fitness_ > result.fitness_ || diff --git a/cpp/open3d/geometry/RGBDImageFactory.cpp b/cpp/open3d/geometry/RGBDImageFactory.cpp index 1a2b5f0ff09..1f2819ebe3a 100644 --- a/cpp/open3d/geometry/RGBDImageFactory.cpp +++ b/cpp/open3d/geometry/RGBDImageFactory.cpp @@ -18,7 +18,9 @@ std::shared_ptr RGBDImage::CreateFromColorAndDepth( bool convert_rgb_to_intensity /* = true*/) { std::shared_ptr rgbd_image = std::make_shared(); if (color.height_ != depth.height_ || color.width_ != depth.width_) { - utility::LogError("Unsupported image format."); + utility::LogError( + "RGB image size ({} {}) and depth image size ({} {}) mismatch.", + color.height_, color.width_, depth.height_, depth.width_); } rgbd_image->depth_ = *depth.ConvertDepthToFloatImage(depth_scale, depth_trunc); @@ -55,7 +57,9 @@ std::shared_ptr RGBDImage::CreateFromSUNFormat( 
bool convert_rgb_to_intensity /* = true*/) { std::shared_ptr rgbd_image = std::make_shared(); if (color.height_ != depth.height_ || color.width_ != depth.width_) { - utility::LogError("Unsupported image format."); + utility::LogError( + "RGB image size ({} {}) and depth image size ({} {}) mismatch.", + color.height_, color.width_, depth.height_, depth.width_); } for (int v = 0; v < depth.height_; v++) { for (int u = 0; u < depth.width_; u++) { @@ -75,7 +79,9 @@ std::shared_ptr RGBDImage::CreateFromNYUFormat( bool convert_rgb_to_intensity /* = true*/) { std::shared_ptr rgbd_image = std::make_shared(); if (color.height_ != depth.height_ || color.width_ != depth.width_) { - utility::LogError("Unsupported image format."); + utility::LogError( + "RGB image size ({} {}) and depth image size ({} {}) mismatch.", + color.height_, color.width_, depth.height_, depth.width_); } for (int v = 0; v < depth.height_; v++) { for (int u = 0; u < depth.width_; u++) { diff --git a/cpp/open3d/geometry/TriangleMesh.cpp b/cpp/open3d/geometry/TriangleMesh.cpp index 75651d159a0..48faa3b6a19 100644 --- a/cpp/open3d/geometry/TriangleMesh.cpp +++ b/cpp/open3d/geometry/TriangleMesh.cpp @@ -1598,13 +1598,10 @@ void TriangleMesh::RemoveVerticesByMask(const std::vector &vertex_mask) { std::shared_ptr TriangleMesh::SelectByIndex( const std::vector &indices, bool cleanup) const { - if (HasTriangleUvs()) { - utility::LogWarning( - "[SelectByIndex] This mesh contains triangle uvs that are " - "not handled in this function"); - } auto output = std::make_shared(); + bool has_triangle_material_ids = HasTriangleMaterialIds(); bool has_triangle_normals = HasTriangleNormals(); + bool has_triangle_uvs = HasTriangleUvs(); bool has_vertex_normals = HasVertexNormals(); bool has_vertex_colors = HasVertexColors(); @@ -1636,9 +1633,18 @@ std::shared_ptr TriangleMesh::SelectByIndex( if (nvidx0 >= 0 && nvidx1 >= 0 && nvidx2 >= 0) { output->triangles_.push_back( Eigen::Vector3i(nvidx0, nvidx1, nvidx2)); + if 
(has_triangle_material_ids) { + output->triangle_material_ids_.push_back( + triangle_material_ids_[tidx]); + } if (has_triangle_normals) { output->triangle_normals_.push_back(triangle_normals_[tidx]); } + if (has_triangle_uvs) { + output->triangle_uvs_.push_back(triangle_uvs_[tidx * 3 + 0]); + output->triangle_uvs_.push_back(triangle_uvs_[tidx * 3 + 1]); + output->triangle_uvs_.push_back(triangle_uvs_[tidx * 3 + 2]); + } } } diff --git a/cpp/open3d/geometry/TriangleMeshSimplification.cpp b/cpp/open3d/geometry/TriangleMeshSimplification.cpp index 6603fd20c5e..90a2a166ce7 100644 --- a/cpp/open3d/geometry/TriangleMeshSimplification.cpp +++ b/cpp/open3d/geometry/TriangleMeshSimplification.cpp @@ -311,51 +311,55 @@ std::shared_ptr TriangleMesh::SimplifyQuadricDecimation( AddPerpPlaneQuadric(tria(2), tria(0), tria(1), area); } + // Clear the unused vectors to save some memory + triangle_areas.clear(); + triangle_planes.clear(); + edge_triangle_count.clear(); + // Get valid edges and compute cost - // Note: We could also select all vertex pairs as edges with dist < eps - std::unordered_map> - vbars; - std::unordered_map> - costs; auto CostEdgeComp = [](const CostEdge& a, const CostEdge& b) { return std::get<0>(a) > std::get<0>(b); }; std::priority_queue, decltype(CostEdgeComp)> queue(CostEdgeComp); - auto AddEdge = [&](int vidx0, int vidx1, bool update) { - int min = std::min(vidx0, vidx1); - int max = std::max(vidx0, vidx1); - Eigen::Vector2i edge(min, max); - if (update || vbars.count(edge) == 0) { - const Quadric& Q0 = Qs[min]; - const Quadric& Q1 = Qs[max]; - Quadric Qbar = Q0 + Q1; - double cost; - Eigen::Vector3d vbar; - if (Qbar.IsInvertible()) { - vbar = Qbar.Minimum(); - cost = Qbar.Eval(vbar); + auto compute_cost_vbar = [&](Eigen::Vector2i e) { + const Quadric& Q0 = Qs[e(0)]; + const Quadric& Q1 = Qs[e(1)]; + const Quadric Qbar = Q0 + Q1; + double cost; + Eigen::Vector3d vbar; + if (Qbar.IsInvertible()) { + vbar = Qbar.Minimum(); + cost = Qbar.Eval(vbar); + } 
else { + const Eigen::Vector3d& v0 = mesh->vertices_[e(0)]; + const Eigen::Vector3d& v1 = mesh->vertices_[e(1)]; + const Eigen::Vector3d vmid = (v0 + v1) / 2; + const double cost0 = Qbar.Eval(v0); + const double cost1 = Qbar.Eval(v1); + const double costmid = Qbar.Eval(vmid); + cost = std::min(cost0, std::min(cost1, costmid)); + if (cost == costmid) { + vbar = vmid; + } else if (cost == cost0) { + vbar = v0; } else { - const Eigen::Vector3d& v0 = mesh->vertices_[vidx0]; - const Eigen::Vector3d& v1 = mesh->vertices_[vidx1]; - Eigen::Vector3d vmid = (v0 + v1) / 2; - double cost0 = Qbar.Eval(v0); - double cost1 = Qbar.Eval(v1); - double costmid = Qbar.Eval(vmid); - cost = std::min(cost0, std::min(cost1, costmid)); - if (cost == costmid) { - vbar = vmid; - } else if (cost == cost0) { - vbar = v0; - } else { - vbar = v1; - } + vbar = v1; } - vbars[edge] = vbar; - costs[edge] = cost; + } + return std::make_pair(cost, vbar); + }; + + std::unordered_set> + added_edges; + auto AddEdge = [&](int vidx0, int vidx1, bool update) { + const int min = std::min(vidx0, vidx1); + const int max = std::max(vidx0, vidx1); + const Eigen::Vector2i edge(min, max); + if (update || added_edges.count(edge) == 0) { + const auto cost = compute_cost_vbar(edge).first; + added_edges.insert(edge); queue.push(CostEdge(cost, min, max)); } }; @@ -366,6 +370,7 @@ std::shared_ptr TriangleMesh::SimplifyQuadricDecimation( AddEdge(triangle(1), triangle(2), false); AddEdge(triangle(2), triangle(0), false); } + added_edges.clear(); // perform incremental edge collapse bool has_vert_normal = HasVertexNormals(); @@ -384,50 +389,71 @@ std::shared_ptr TriangleMesh::SimplifyQuadricDecimation( // test if the edge has been updated (reinserted into queue) Eigen::Vector2i edge(vidx0, vidx1); + const auto cost_vbar = compute_cost_vbar(edge); + const Eigen::Vector3d vbar = cost_vbar.second; bool valid = !vertices_deleted[vidx0] && !vertices_deleted[vidx1] && - cost == costs[edge]; + cost == cost_vbar.first; if (!valid) 
{ continue; } - // avoid flip of triangle normal - bool flipped = false; - for (int tidx : vert_to_triangles[vidx1]) { - if (triangles_deleted[tidx]) { - continue; - } - - const Eigen::Vector3i& tria = mesh->triangles_[tidx]; - bool has_vidx0 = - vidx0 == tria(0) || vidx0 == tria(1) || vidx0 == tria(2); - bool has_vidx1 = - vidx1 == tria(0) || vidx1 == tria(1) || vidx1 == tria(2); - if (has_vidx0 && has_vidx1) { - continue; - } + // avoid flip of triangle normal and creation of degenerate triangles + bool creates_invalid_triangle = false; + const double degenerate_ratio_threshold = 0.001; + std::unordered_map edges{}; + for (int vidx : {vidx1, vidx0}) { + for (int tidx : vert_to_triangles[vidx]) { + if (triangles_deleted[tidx]) { + continue; + } - Eigen::Vector3d vert0 = mesh->vertices_[tria(0)]; - Eigen::Vector3d vert1 = mesh->vertices_[tria(1)]; - Eigen::Vector3d vert2 = mesh->vertices_[tria(2)]; - Eigen::Vector3d norm_before = (vert1 - vert0).cross(vert2 - vert0); - norm_before /= norm_before.norm(); + const Eigen::Vector3i& tria = mesh->triangles_[tidx]; + const bool has_vidx0 = vidx0 == tria(0) || vidx0 == tria(1) || + vidx0 == tria(2); + const bool has_vidx1 = vidx1 == tria(0) || vidx1 == tria(1) || + vidx1 == tria(2); + if (has_vidx0 && has_vidx1) { + continue; + } - if (vidx1 == tria(0)) { - vert0 = vbars[edge]; - } else if (vidx1 == tria(1)) { - vert1 = vbars[edge]; - } else if (vidx1 == tria(2)) { - vert2 = vbars[edge]; - } + Eigen::Vector3d verts[3] = {mesh->vertices_[tria(0)], + mesh->vertices_[tria(1)], + mesh->vertices_[tria(2)]}; + Eigen::Vector3d norm_before = + (verts[1] - verts[0]).cross(verts[2] - verts[0]); + const double area_before = 0.5 * norm_before.norm(); + norm_before /= norm_before.norm(); + + for (auto i = 0; i < 3; ++i) { + if (tria(i) == vidx) { + verts[i] = vbar; + continue; + } + auto& vert_count = edges[tria(i)]; + creates_invalid_triangle |= vert_count >= 2; + vert_count += 1; + } - Eigen::Vector3d norm_after = (vert1 - 
vert0).cross(vert2 - vert0); - norm_after /= norm_after.norm(); - if (norm_before.dot(norm_after) < 0) { - flipped = true; - break; + Eigen::Vector3d norm_after = + (verts[1] - verts[0]).cross(verts[2] - verts[0]); + const double area_after = 0.5 * norm_after.norm(); + norm_after /= norm_after.norm(); + // Disallow flipping the triangle normal + creates_invalid_triangle |= norm_before.dot(norm_after) < 0; + // Disallow creating very small triangles (possibly degenerate) + creates_invalid_triangle |= + area_after < degenerate_ratio_threshold * area_before; + + if (creates_invalid_triangle) { + // Goto is the only way to jump out of two loops without + // multiple redundant if()'s. Yes, it can lead to spaghetti + // code if abused but we're doing a very short jump here + goto end_flip_loop; + } } } - if (flipped) { + end_flip_loop: + if (creates_invalid_triangle) { continue; } @@ -460,7 +486,7 @@ std::shared_ptr TriangleMesh::SimplifyQuadricDecimation( } // update vertex vidx0 to vbar - mesh->vertices_[vidx0] = vbars[edge]; + mesh->vertices_[vidx0] = vbar; Qs[vidx0] += Qs[vidx1]; if (has_vert_normal) { mesh->vertex_normals_[vidx0] = 0.5 * (mesh->vertex_normals_[vidx0] + diff --git a/cpp/open3d/geometry/VoxelGrid.cpp b/cpp/open3d/geometry/VoxelGrid.cpp index 4a6dac2d1a7..dafe6b52c01 100644 --- a/cpp/open3d/geometry/VoxelGrid.cpp +++ b/cpp/open3d/geometry/VoxelGrid.cpp @@ -197,6 +197,8 @@ void VoxelGrid::AddVoxel(const Voxel &voxel) { voxels_[voxel.grid_index_] = voxel; } +void VoxelGrid::RemoveVoxel(const Eigen::Vector3i &idx) { voxels_.erase(idx); } + std::vector VoxelGrid::CheckIfIncluded( const std::vector &queries) { std::vector output; diff --git a/cpp/open3d/geometry/VoxelGrid.h b/cpp/open3d/geometry/VoxelGrid.h index d12d664c5f9..30a228206e0 100644 --- a/cpp/open3d/geometry/VoxelGrid.h +++ b/cpp/open3d/geometry/VoxelGrid.h @@ -123,6 +123,9 @@ class VoxelGrid : public Geometry3D { /// Add a voxel with specified grid index and color. 
void AddVoxel(const Voxel &voxel); + /// Remove a voxel with specified grid index. + void RemoveVoxel(const Eigen::Vector3i &idx); + /// Return a vector of 3D coordinates that define the indexed voxel cube. std::vector GetVoxelBoundingPoints( const Eigen::Vector3i &index) const; @@ -242,7 +245,7 @@ class VoxelGrid : public Geometry3D { public: /// Size of the voxel. double voxel_size_ = 0.0; - /// Coorindate of the origin point. + /// Coordinate of the origin point. Eigen::Vector3d origin_ = Eigen::Vector3d::Zero(); /// Voxels contained in voxel grid std::unordered_map VoxelGrid::CreateFromTriangleMeshWithinBounds( box_center, box_half_size, v0, v1, v2)) { Eigen::Vector3i grid_index(widx, hidx, didx); output->AddVoxel(geometry::Voxel(grid_index)); - break; + // Don't `break` here, since a triangle can span + // across multiple voxels. } } } diff --git a/cpp/open3d/ml/impl/continuous_conv/ContinuousConvBackpropFilter.cuh b/cpp/open3d/ml/impl/continuous_conv/ContinuousConvBackpropFilter.cuh index dc9b905f4d4..c40ae43874e 100644 --- a/cpp/open3d/ml/impl/continuous_conv/ContinuousConvBackpropFilter.cuh +++ b/cpp/open3d/ml/impl/continuous_conv/ContinuousConvBackpropFilter.cuh @@ -45,7 +45,7 @@ namespace impl { /// \param texture_alignment The texture alignment in bytes. This is used /// for allocating segments within the temporary memory. /// -/// \param filter_backrop Output array for the computed filter gradient +/// \param filter_backprop Output array for the computed filter gradient /// with shape [depth,height,width, inp channels, out channels] /// /// \param filter_dims The sizes of the filter dimensions. 
The size of diff --git a/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvBackpropFilterOps.cpp b/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvBackpropFilterOps.cpp index a5c9bd1ffc5..3fa594026e2 100644 --- a/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvBackpropFilterOps.cpp +++ b/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvBackpropFilterOps.cpp @@ -147,7 +147,7 @@ REGISTER_OP("Open3DContinuousConvBackpropFilter") } c->set_output(0, filters_shape); - return Status::OK(); + return Status(); }) .Doc(R"doc( Computes the backprop for the filter of the ContinuousConv diff --git a/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvOps.cpp b/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvOps.cpp index 549bcbc2e5e..154ed9e0813 100644 --- a/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvOps.cpp +++ b/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvOps.cpp @@ -139,7 +139,7 @@ REGISTER_OP("Open3DContinuousConv") c->MakeShape({output_first_dim, output_channel_dim}); c->set_output(0, output_shape); - return Status::OK(); + return Status(); }) .Doc(R"doc( Continuous convolution of two pointclouds. 
diff --git a/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvTransposeBackpropFilterOps.cpp b/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvTransposeBackpropFilterOps.cpp index fe7dd41ca88..ae13c7fd12c 100644 --- a/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvTransposeBackpropFilterOps.cpp +++ b/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvTransposeBackpropFilterOps.cpp @@ -166,7 +166,7 @@ REGISTER_OP("Open3DContinuousConvTransposeBackpropFilter") } c->set_output(0, filters_shape); - return Status::OK(); + return Status(); }) .Doc(R"doc( Computes the backrop for the filter of the ContinuousConvTranspose diff --git a/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvTransposeOps.cpp b/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvTransposeOps.cpp index f91f2d86acf..19a41bb40b3 100644 --- a/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvTransposeOps.cpp +++ b/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvTransposeOps.cpp @@ -168,7 +168,7 @@ REGISTER_OP("Open3DContinuousConvTranspose") c->MakeShape({output_first_dim, output_channel_dim}); c->set_output(0, output_shape); - return Status::OK(); + return Status(); }) .Doc(R"doc( Continuous tranpose convolution of two pointclouds. 
diff --git a/cpp/open3d/ml/tensorflow/misc/BuildSpatialHashTableOps.cpp b/cpp/open3d/ml/tensorflow/misc/BuildSpatialHashTableOps.cpp index 4d271d50830..ff5289e95ac 100644 --- a/cpp/open3d/ml/tensorflow/misc/BuildSpatialHashTableOps.cpp +++ b/cpp/open3d/ml/tensorflow/misc/BuildSpatialHashTableOps.cpp @@ -52,7 +52,7 @@ REGISTER_OP("Open3DBuildSpatialHashTable") hash_table_splits_shape = MakeShapeHandle(c, batch_size + 1); c->set_output(2, hash_table_splits_shape); - return Status::OK(); + return Status(); }) .Doc(R"doc( Creates a spatial hash table meant as input for fixed_radius_search @@ -78,7 +78,7 @@ The following example shows how **build_spatial_hash_table** and radius = 1.0 - # build the spatial hash table for fixex_radius_search + # build the spatial hash table for fixed_radius_search table = ml3d.ops.build_spatial_hash_table(points, radius, points_row_splits=torch.LongTensor([0,5]), diff --git a/cpp/open3d/ml/tensorflow/misc/FixedRadiusSearchOps.cpp b/cpp/open3d/ml/tensorflow/misc/FixedRadiusSearchOps.cpp index a74ab8c752b..eeb2f02cffb 100644 --- a/cpp/open3d/ml/tensorflow/misc/FixedRadiusSearchOps.cpp +++ b/cpp/open3d/ml/tensorflow/misc/FixedRadiusSearchOps.cpp @@ -79,7 +79,7 @@ REGISTER_OP("Open3DFixedRadiusSearch") neighbors_distance_shape = c->MakeShape({0}); c->set_output(2, neighbors_distance_shape); - return Status::OK(); + return Status(); }) .Doc(R"doc( Computes the indices of all neighbors within a radius. 
diff --git a/cpp/open3d/ml/tensorflow/misc/InvertNeighborsListOps.cpp b/cpp/open3d/ml/tensorflow/misc/InvertNeighborsListOps.cpp index 5fb0a7f6457..cc2c935bbb8 100644 --- a/cpp/open3d/ml/tensorflow/misc/InvertNeighborsListOps.cpp +++ b/cpp/open3d/ml/tensorflow/misc/InvertNeighborsListOps.cpp @@ -58,7 +58,7 @@ REGISTER_OP("Open3DInvertNeighborsList") // the attributes will have the same shape c->set_output(2, inp_neighbors_attributes); - return Status::OK(); + return Status(); }) .Doc(R"doc( Inverts a neighbors list made of neighbors_index and neighbors_row_splits. diff --git a/cpp/open3d/ml/tensorflow/misc/KnnSearchOps.cpp b/cpp/open3d/ml/tensorflow/misc/KnnSearchOps.cpp index e68bcd53c50..112d022e28c 100644 --- a/cpp/open3d/ml/tensorflow/misc/KnnSearchOps.cpp +++ b/cpp/open3d/ml/tensorflow/misc/KnnSearchOps.cpp @@ -67,7 +67,7 @@ REGISTER_OP("Open3DKnnSearch") neighbors_distance_shape = c->MakeShape({0}); c->set_output(2, neighbors_distance_shape); - return Status::OK(); + return Status(); }) .Doc(R"doc( Computes the indices of k nearest neighbors. diff --git a/cpp/open3d/ml/tensorflow/misc/NmsOps.cpp b/cpp/open3d/ml/tensorflow/misc/NmsOps.cpp index 0b2ded2325f..b8341190ea1 100644 --- a/cpp/open3d/ml/tensorflow/misc/NmsOps.cpp +++ b/cpp/open3d/ml/tensorflow/misc/NmsOps.cpp @@ -34,7 +34,7 @@ REGISTER_OP("Open3DNms") keep_indices = c->MakeShape({c->UnknownDim()}); c->set_output(0, keep_indices); - return Status::OK(); + return Status(); }) .Doc(R"doc( Performs non-maximum suppression of bounding boxes. 
diff --git a/cpp/open3d/ml/tensorflow/misc/RadiusSearchOps.cpp b/cpp/open3d/ml/tensorflow/misc/RadiusSearchOps.cpp index 94d2e0d0097..3dd99b9e460 100644 --- a/cpp/open3d/ml/tensorflow/misc/RadiusSearchOps.cpp +++ b/cpp/open3d/ml/tensorflow/misc/RadiusSearchOps.cpp @@ -69,7 +69,7 @@ REGISTER_OP("Open3DRadiusSearch") neighbors_distance_shape = c->MakeShape({0}); c->set_output(2, neighbors_distance_shape); - return Status::OK(); + return Status(); }) .Doc(R"doc( Computes the indices and distances of all neighbours within a radius. diff --git a/cpp/open3d/ml/tensorflow/misc/ReduceSubarraysSumOps.cpp b/cpp/open3d/ml/tensorflow/misc/ReduceSubarraysSumOps.cpp index 14be37412c0..5226079e783 100644 --- a/cpp/open3d/ml/tensorflow/misc/ReduceSubarraysSumOps.cpp +++ b/cpp/open3d/ml/tensorflow/misc/ReduceSubarraysSumOps.cpp @@ -33,7 +33,7 @@ REGISTER_OP("Open3DReduceSubarraysSum") sums_shape = c->MakeShape({sums_size}); c->set_output(0, sums_shape); - return Status::OK(); + return Status(); }) .Doc(R"doc( Computes the sum for each subarray in a flat vector of arrays. diff --git a/cpp/open3d/ml/tensorflow/misc/VoxelPoolingOps.cpp b/cpp/open3d/ml/tensorflow/misc/VoxelPoolingOps.cpp index 64e88539db0..ec163ea415b 100644 --- a/cpp/open3d/ml/tensorflow/misc/VoxelPoolingOps.cpp +++ b/cpp/open3d/ml/tensorflow/misc/VoxelPoolingOps.cpp @@ -53,7 +53,7 @@ REGISTER_OP("Open3DVoxelPooling") c->WithValue(c->Dim(positions_shape, -1), 3, &d)); } - return Status::OK(); + return Status(); }) .Doc(R"doc( Spatial pooling for point clouds by combining points that fall into the same voxel bin. @@ -202,7 +202,7 @@ REGISTER_OP("Open3DVoxelPoolingGrad") c->Dim(pooled_positions_shape, -1), 3, &d)); } - return Status::OK(); + return Status(); }) .Doc(R"doc( Gradient for features in VoxelPooling. For internal use only. 
diff --git a/cpp/open3d/ml/tensorflow/misc/VoxelizeOps.cpp b/cpp/open3d/ml/tensorflow/misc/VoxelizeOps.cpp index e9eb3c19f63..7c345edfd3f 100644 --- a/cpp/open3d/ml/tensorflow/misc/VoxelizeOps.cpp +++ b/cpp/open3d/ml/tensorflow/misc/VoxelizeOps.cpp @@ -58,7 +58,7 @@ REGISTER_OP("Open3DVoxelize") voxel_batch_splits = c->MakeShape({c->UnknownDim()}); c->set_output(3, voxel_batch_splits); - return Status::OK(); + return Status(); }) .Doc(R"doc( Voxelization for point clouds. diff --git a/cpp/open3d/ml/tensorflow/pointnet/BallQueryOps.cpp b/cpp/open3d/ml/tensorflow/pointnet/BallQueryOps.cpp index 2b0c863e279..07d7a03d34f 100644 --- a/cpp/open3d/ml/tensorflow/pointnet/BallQueryOps.cpp +++ b/cpp/open3d/ml/tensorflow/pointnet/BallQueryOps.cpp @@ -32,6 +32,6 @@ REGISTER_OP("Open3DBallQuery") ::tensorflow::shape_inference::ShapeHandle output = c->MakeShape({c->Dim(dims1, 0), c->Dim(dims1, 1), nsample}); c->set_output(0, output); - return Status::OK(); + return Status(); }) .Doc(R"doc( TODO )doc"); diff --git a/cpp/open3d/ml/tensorflow/pointnet/InterpolateOps.cpp b/cpp/open3d/ml/tensorflow/pointnet/InterpolateOps.cpp index 18363a9e265..f16e9020609 100644 --- a/cpp/open3d/ml/tensorflow/pointnet/InterpolateOps.cpp +++ b/cpp/open3d/ml/tensorflow/pointnet/InterpolateOps.cpp @@ -30,7 +30,7 @@ REGISTER_OP("Open3DThreeNN") c->MakeShape({c->Dim(dims1, 0), c->Dim(dims1, 1), 3}); c->set_output(0, output); c->set_output(1, output); - return Status::OK(); + return Status(); }) .Doc(R"doc( TODO )doc"); @@ -50,7 +50,7 @@ REGISTER_OP("Open3DThreeInterpolate") ::tensorflow::shape_inference::ShapeHandle output = c->MakeShape( {c->Dim(dims1, 0), c->Dim(dims1, 1), c->Dim(dims2, 1)}); c->set_output(0, output); - return Status::OK(); + return Status(); }) .Doc(R"doc( TODO )doc"); @@ -70,6 +70,6 @@ REGISTER_OP("Open3DThreeInterpolateGrad") c->MakeShape({c->Dim(dims1, 0), c->Dim(dims1, 1), M}); c->set_output(0, output); c->set_output(1, output); - return Status::OK(); + return Status(); }) 
.Doc(R"doc( TODO )doc"); diff --git a/cpp/open3d/ml/tensorflow/pointnet/RoiPoolOps.cpp b/cpp/open3d/ml/tensorflow/pointnet/RoiPoolOps.cpp index 10b72a5b4b2..8a72b3b0027 100644 --- a/cpp/open3d/ml/tensorflow/pointnet/RoiPoolOps.cpp +++ b/cpp/open3d/ml/tensorflow/pointnet/RoiPoolOps.cpp @@ -42,6 +42,6 @@ REGISTER_OP("Open3DRoiPool") ::tensorflow::shape_inference::ShapeHandle output2 = c->MakeShape({c->Dim(dims1, 0), c->Dim(dims1, 1)}); c->set_output(1, output2); - return Status::OK(); + return Status(); }) .Doc(R"doc( TODO )doc"); diff --git a/cpp/open3d/ml/tensorflow/pointnet/SamplingOps.cpp b/cpp/open3d/ml/tensorflow/pointnet/SamplingOps.cpp index 1c1cad2f3e2..b33a636d3f1 100644 --- a/cpp/open3d/ml/tensorflow/pointnet/SamplingOps.cpp +++ b/cpp/open3d/ml/tensorflow/pointnet/SamplingOps.cpp @@ -30,6 +30,6 @@ REGISTER_OP("Open3DFurthestPointSampling") ::tensorflow::shape_inference::ShapeHandle output = c->MakeShape({c->Dim(dims1, 0), npoint}); c->set_output(0, output); - return Status::OK(); + return Status(); }) .Doc(R"doc( TODO )doc"); diff --git a/cpp/open3d/ml/tensorflow/pvcnn/TrilinearDevoxelizeOps.cpp b/cpp/open3d/ml/tensorflow/pvcnn/TrilinearDevoxelizeOps.cpp index edf77e60268..7097f5f3287 100644 --- a/cpp/open3d/ml/tensorflow/pvcnn/TrilinearDevoxelizeOps.cpp +++ b/cpp/open3d/ml/tensorflow/pvcnn/TrilinearDevoxelizeOps.cpp @@ -51,7 +51,7 @@ REGISTER_OP("Open3DTrilinearDevoxelize") c->set_output(1, out2); c->set_output(2, out2); - return Status::OK(); + return Status(); }) .Doc(R"doc( Trilinear Devoxelize. @@ -167,7 +167,7 @@ REGISTER_OP("Open3DTrilinearDevoxelizeGrad") c->set_output(0, out); - return Status::OK(); + return Status(); }) .Doc(R"doc( Gradient function for Trilinear Devoxelize op. 
diff --git a/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvBackpropFilterOps.cpp b/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvBackpropFilterOps.cpp index 70916caf16e..5cc83701568 100644 --- a/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvBackpropFilterOps.cpp +++ b/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvBackpropFilterOps.cpp @@ -66,7 +66,7 @@ REGISTER_OP("Open3DSparseConvBackpropFilter") CHECK_SHAPE_HANDLE(c, out_features_gradient, num_out, out_channels); c->set_output(0, filters); - return Status::OK(); + return Status(); }) .Doc(R"doc( Computes the backprop for the filter of the SparseConv diff --git a/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvOps.cpp b/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvOps.cpp index 2dd55fd3551..264a443ef25 100644 --- a/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvOps.cpp +++ b/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvOps.cpp @@ -66,7 +66,7 @@ REGISTER_OP("Open3DSparseConv") ShapeHandle out_features_shape = MakeShapeHandle(c, num_out, out_channels); c->set_output(0, out_features_shape); - return Status::OK(); + return Status(); }) .Doc(R"doc( General sparse convolution. 
diff --git a/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvTransposeBackpropFilterOps.cpp b/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvTransposeBackpropFilterOps.cpp index a2482d19eab..af9741ad39e 100644 --- a/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvTransposeBackpropFilterOps.cpp +++ b/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvTransposeBackpropFilterOps.cpp @@ -71,7 +71,7 @@ REGISTER_OP("Open3DSparseConvTransposeBackpropFilter") CHECK_SHAPE_HANDLE(c, out_features_gradient, num_out, out_channels); c->set_output(0, filters); - return Status::OK(); + return Status(); }) .Doc(R"doc( Computes the backrop for the filter of the SparseConvTranspose diff --git a/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvTransposeOps.cpp b/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvTransposeOps.cpp index 2b5035521b2..a1daf49a16c 100644 --- a/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvTransposeOps.cpp +++ b/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvTransposeOps.cpp @@ -75,7 +75,7 @@ REGISTER_OP("Open3DSparseConvTranspose") ShapeHandle out_features_shape = MakeShapeHandle(c, num_out, out_channels); c->set_output(0, out_features_shape); - return Status::OK(); + return Status(); }) .Doc(R"doc( Sparse tranpose convolution of two pointclouds. 
diff --git a/cpp/open3d/ml/tensorflow/tf_subsampling/tf_batch_subsampling.cpp b/cpp/open3d/ml/tensorflow/tf_subsampling/tf_batch_subsampling.cpp index 3425a436146..74472a667f4 100644 --- a/cpp/open3d/ml/tensorflow/tf_subsampling/tf_batch_subsampling.cpp +++ b/cpp/open3d/ml/tensorflow/tf_subsampling/tf_batch_subsampling.cpp @@ -47,7 +47,7 @@ REGISTER_OP("Open3DBatchGridSubsampling") TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &input0_shape)); c->set_output(0, input0_shape); c->set_output(1, c->input(1)); - return Status::OK(); + return Status(); }); class BatchGridSubsamplingOp : public OpKernel { diff --git a/cpp/open3d/ml/tensorflow/tf_subsampling/tf_subsampling.cpp b/cpp/open3d/ml/tensorflow/tf_subsampling/tf_subsampling.cpp index 31b2d51571c..fd7162966f8 100644 --- a/cpp/open3d/ml/tensorflow/tf_subsampling/tf_subsampling.cpp +++ b/cpp/open3d/ml/tensorflow/tf_subsampling/tf_subsampling.cpp @@ -44,7 +44,7 @@ REGISTER_OP("Open3DGridSubsampling") ::tensorflow::shape_inference::ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &input)); c->set_output(0, input); - return Status::OK(); + return Status(); }); class GridSubsamplingOp : public OpKernel { diff --git a/cpp/open3d/t/geometry/BoundingVolume.cpp b/cpp/open3d/t/geometry/BoundingVolume.cpp index 8e437bd43b5..0e529f27d1d 100644 --- a/cpp/open3d/t/geometry/BoundingVolume.cpp +++ b/cpp/open3d/t/geometry/BoundingVolume.cpp @@ -318,7 +318,7 @@ OrientedBoundingBox::OrientedBoundingBox(const core::Tensor ¢er, utility::LogError( "Invalid oriented bounding box. 
Please make sure the values of " "extent are all positive and the rotation matrix is " - "othogonal."); + "orthogonal."); } } @@ -405,7 +405,7 @@ core::Tensor OrientedBoundingBox::GetMaxBound() const { core::Tensor OrientedBoundingBox::GetBoxPoints() const { const t::geometry::AxisAlignedBoundingBox aabb(GetExtent() * -0.5, GetExtent() * 0.5); - return aabb.GetBoxPoints().Matmul(GetRotation()).Add(GetCenter()); + return aabb.GetBoxPoints().Matmul(GetRotation().T()).Add(GetCenter()); } OrientedBoundingBox &OrientedBoundingBox::Translate( diff --git a/cpp/open3d/t/geometry/BoundingVolume.h b/cpp/open3d/t/geometry/BoundingVolume.h index 1eec772e898..27fe7e4aaa6 100644 --- a/cpp/open3d/t/geometry/BoundingVolume.h +++ b/cpp/open3d/t/geometry/BoundingVolume.h @@ -146,7 +146,7 @@ class AxisAlignedBoundingBox : public Geometry, public DrawableGeometry { const utility::optional ¢er = utility::nullopt); /// \brief Add operation for axis-aligned bounding box. - /// The device of ohter box must be the same as the device of the current + /// The device of other box must be the same as the device of the current /// box. AxisAlignedBoundingBox &operator+=(const AxisAlignedBoundingBox &other); @@ -223,7 +223,7 @@ class AxisAlignedBoundingBox : public Geometry, public DrawableGeometry { /// \brief A bounding box oriented along an arbitrary frame of reference. /// /// - (center, rotation, extent): The oriented bounding box is defined by its -/// center position, rotation maxtrix and extent. +/// center position, rotation matrix and extent. 
/// - Usage /// - OrientedBoundingBox::GetCenter() /// - OrientedBoundingBox::SetCenter(const core::Tensor ¢er) diff --git a/cpp/open3d/t/geometry/PointCloud.cpp b/cpp/open3d/t/geometry/PointCloud.cpp index 6ec721ffa4a..f4c5b47f068 100644 --- a/cpp/open3d/t/geometry/PointCloud.cpp +++ b/cpp/open3d/t/geometry/PointCloud.cpp @@ -275,30 +275,66 @@ PointCloud PointCloud::SelectByIndex( return pcd; } -PointCloud PointCloud::VoxelDownSample( - double voxel_size, const core::HashBackendType &backend) const { +PointCloud PointCloud::VoxelDownSample(double voxel_size, + const std::string &reduction) const { if (voxel_size <= 0) { utility::LogError("voxel_size must be positive."); } - core::Tensor points_voxeld = GetPointPositions() / voxel_size; - core::Tensor points_voxeli = points_voxeld.Floor().To(core::Int64); - - core::HashSet points_voxeli_hashset(points_voxeli.GetLength(), core::Int64, - {3}, device_, backend); - - core::Tensor buf_indices, masks; - points_voxeli_hashset.Insert(points_voxeli, buf_indices, masks); + if (reduction != "mean") { + utility::LogError("Reduction can only be 'mean' for VoxelDownSample."); + } - PointCloud pcd_down(GetPointPositions().GetDevice()); + // Discretize voxels. + core::Tensor voxeld = GetPointPositions() / voxel_size; + core::Tensor voxeli = voxeld.Floor().To(core::Int64); + + // Map discrete voxels to indices. + core::HashSet voxeli_hashset(voxeli.GetLength(), core::Int64, {3}, device_); + + // Index map: (0, original_points) -> (0, unique_points). + core::Tensor index_map_point2voxel, masks; + voxeli_hashset.Insert(voxeli, index_map_point2voxel, masks); + + // Insert and find are two different passes. + // In the insertion pass, -1/false is returned for already existing + // downsampled corresponding points. + // In the find pass, actual indices are returned corresponding downsampled + // points. 
+ voxeli_hashset.Find(voxeli, index_map_point2voxel, masks); + index_map_point2voxel = index_map_point2voxel.To(core::Int64); + + int64_t num_points = voxeli.GetLength(); + int64_t num_voxels = voxeli_hashset.Size(); + + // Count the number of points in each voxel. + auto voxel_num_points = + core::Tensor::Zeros({num_voxels}, core::Float32, device_); + voxel_num_points.IndexAdd_( + /*dim*/ 0, index_map_point2voxel, + core::Tensor::Ones({num_points}, core::Float32, device_)); + + // Create a new point cloud. + PointCloud pcd_down(device_); for (auto &kv : point_attr_) { - if (kv.first == "positions") { - pcd_down.SetPointAttr(kv.first, - points_voxeli.IndexGet({masks}).To( - GetPointPositions().GetDtype()) * - voxel_size); + auto point_attr = kv.second; + + std::string attr_string = kv.first; + auto attr_dtype = point_attr.GetDtype(); + + // Use float to avoid unsupported tensor types. + core::SizeVector attr_shape = point_attr.GetShape(); + attr_shape[0] = num_voxels; + auto voxel_attr = + core::Tensor::Zeros(attr_shape, core::Float32, device_); + if (reduction == "mean") { + voxel_attr.IndexAdd_(0, index_map_point2voxel, + point_attr.To(core::Float32)); + voxel_attr /= voxel_num_points.View({-1, 1}); + voxel_attr = voxel_attr.To(attr_dtype); } else { - pcd_down.SetPointAttr(kv.first, kv.second.IndexGet({masks})); + utility::LogError("Unsupported reduction type {}.", reduction); } + pcd_down.SetPointAttr(attr_string, voxel_attr); } return pcd_down; @@ -695,12 +731,15 @@ void PointCloud::OrientNormalsTowardsCameraLocation( } } -void PointCloud::OrientNormalsConsistentTangentPlane(size_t k) { +void PointCloud::OrientNormalsConsistentTangentPlane( + size_t k, + const double lambda /* = 0.0*/, + const double cos_alpha_tol /* = 1.0*/) { PointCloud tpcd(GetPointPositions()); tpcd.SetPointNormals(GetPointNormals()); open3d::geometry::PointCloud lpcd = tpcd.ToLegacy(); - lpcd.OrientNormalsConsistentTangentPlane(k); + lpcd.OrientNormalsConsistentTangentPlane(k, lambda, 
cos_alpha_tol); SetPointNormals(core::eigen_converter::EigenVector3dVectorToTensor( lpcd.normals_, GetPointPositions().GetDtype(), GetDevice())); diff --git a/cpp/open3d/t/geometry/PointCloud.h b/cpp/open3d/t/geometry/PointCloud.h index 68827fa489c..995e1b5ba50 100644 --- a/cpp/open3d/t/geometry/PointCloud.h +++ b/cpp/open3d/t/geometry/PointCloud.h @@ -329,9 +329,9 @@ class PointCloud : public Geometry, public DrawableGeometry { /// \brief Downsamples a point cloud with a specified voxel size. /// /// \param voxel_size Voxel size. A positive number. + /// \param reduction Reduction type. Currently only support "mean". PointCloud VoxelDownSample(double voxel_size, - const core::HashBackendType &backend = - core::HashBackendType::Default) const; + const std::string &reduction = "mean") const; /// \brief Downsamples a point cloud by selecting every kth index point and /// its attributes. @@ -517,10 +517,18 @@ class PointCloud : public Geometry, public DrawableGeometry { /// \brief Function to consistently orient estimated normals based on /// consistent tangent planes as described in Hoppe et al., "Surface /// Reconstruction from Unorganized Points", 1992. + /// Further details on parameters are described in + /// Piazza, Valentini, Varetti, "Mesh Reconstruction from Point Cloud", + /// 2023. /// /// \param k k nearest neighbour for graph reconstruction for normal /// propagation. - void OrientNormalsConsistentTangentPlane(size_t k); + /// \param lambda penalty constant on the distance of a point from the + /// tangent plane \param cos_alpha_tol treshold that defines the amplitude + /// of the cone spanned by the reference normal + void OrientNormalsConsistentTangentPlane(size_t k, + const double lambda = 0.0, + const double cos_alpha_tol = 1.0); /// \brief Function to compute point color gradients. If radius is provided, /// then HybridSearch is used, otherwise KNN-Search is used. 
diff --git a/cpp/open3d/t/geometry/RaycastingScene.cpp b/cpp/open3d/t/geometry/RaycastingScene.cpp index ce7982baac6..412d6130862 100644 --- a/cpp/open3d/t/geometry/RaycastingScene.cpp +++ b/cpp/open3d/t/geometry/RaycastingScene.cpp @@ -267,6 +267,19 @@ struct RaycastingScene::Impl { geometry_ptrs_; core::Device tensor_device_; // cpu + bool devprop_join_commit; + + void CommitScene() { + if (!scene_committed_) { + if (devprop_join_commit) { + rtcJoinCommitScene(scene_); + } else { + rtcCommitScene(scene_); + } + scene_committed_ = true; + } + } + template void CastRays(const float* const rays, const size_t num_rays, @@ -276,10 +289,7 @@ struct RaycastingScene::Impl { float* primitive_uvs, float* primitive_normals, const int nthreads) { - if (!scene_committed_) { - rtcCommitScene(scene_); - scene_committed_ = true; - } + CommitScene(); struct RTCIntersectContext context; rtcInitIntersectContext(&context); @@ -365,10 +375,7 @@ struct RaycastingScene::Impl { const float tfar, int8_t* occluded, const int nthreads) { - if (!scene_committed_) { - rtcCommitScene(scene_); - scene_committed_ = true; - } + CommitScene(); struct RTCIntersectContext context; rtcInitIntersectContext(&context); @@ -420,10 +427,7 @@ struct RaycastingScene::Impl { const size_t num_rays, int* intersections, const int nthreads) { - if (!scene_committed_) { - rtcCommitScene(scene_); - scene_committed_ = true; - } + CommitScene(); memset(intersections, 0, sizeof(int) * num_rays); @@ -486,10 +490,7 @@ struct RaycastingScene::Impl { float* primitive_uvs, float* primitive_normals, const int nthreads) { - if (!scene_committed_) { - rtcCommitScene(scene_); - scene_committed_ = true; - } + CommitScene(); auto LoopFn = [&](const tbb::blocked_range& range) { for (size_t i = range.begin(); i < range.end(); ++i) { @@ -552,6 +553,9 @@ RaycastingScene::RaycastingScene(int64_t nthreads) impl_->scene_, RTC_SCENE_FLAG_ROBUST | RTC_SCENE_FLAG_CONTEXT_FILTER_FUNCTION); + impl_->devprop_join_commit = 
rtcGetDeviceProperty( + impl_->device_, RTC_DEVICE_PROPERTY_JOIN_COMMIT_SUPPORTED); + impl_->scene_committed_ = false; } diff --git a/cpp/open3d/t/geometry/TensorMap.cpp b/cpp/open3d/t/geometry/TensorMap.cpp index 3eaefdecb5e..f47bcaf0955 100644 --- a/cpp/open3d/t/geometry/TensorMap.cpp +++ b/cpp/open3d/t/geometry/TensorMap.cpp @@ -55,8 +55,8 @@ void TensorMap::AssertSizeSynchronized() const { for (auto& kv : *this) { if (kv.first != primary_key_ && kv.second.GetLength() != primary_size) { - fmt::format(" > Tensor \"{}\" has size {}.\n", kv.first, - kv.second.GetLength()); + ss << fmt::format(" > Tensor \"{}\" has size {}.\n", + kv.first, kv.second.GetLength()); } } utility::LogError("{}", ss.str()); diff --git a/cpp/open3d/utility/FileSystem.cpp b/cpp/open3d/utility/FileSystem.cpp index 5a2ce2748cc..98ff4926f2b 100644 --- a/cpp/open3d/utility/FileSystem.cpp +++ b/cpp/open3d/utility/FileSystem.cpp @@ -32,11 +32,12 @@ #define _SILENCE_EXPERIMENTAL_FILESYSTEM_DEPRECATION_WARNING #endif #ifdef __APPLE__ -// CMAKE_OSX_DEPLOYMENT_TARGET "10.15" or newer -#define _LIBCPP_NO_EXPERIMENTAL_DEPRECATION_WARNING_FILESYSTEM -#endif +#include +namespace fs = std::__fs::filesystem; +#else #include namespace fs = std::experimental::filesystem; +#endif #include "open3d/utility/Logging.h" diff --git a/cpp/open3d/visualization/gui/Combobox.cpp b/cpp/open3d/visualization/gui/Combobox.cpp index 9db8a2f7159..2bba54cc0f5 100644 --- a/cpp/open3d/visualization/gui/Combobox.cpp +++ b/cpp/open3d/visualization/gui/Combobox.cpp @@ -147,8 +147,6 @@ Size Combobox::CalcPreferredSize(const LayoutContext& context, Combobox::DrawResult Combobox::Draw(const DrawContext& context) { bool value_changed = false; - bool was_open = ImGui::IsPopupOpen(impl_->imgui_id_.c_str()); - bool did_open = false; auto& frame = GetFrame(); ImGui::SetCursorScreenPos( @@ -166,10 +164,8 @@ Combobox::DrawResult Combobox::Draw(const DrawContext& context) { DrawImGuiPushEnabledState(); 
ImGui::PushItemWidth(float(frame.width)); + if (ImGui::BeginCombo(impl_->imgui_id_.c_str(), GetSelectedValue())) { - if (!was_open) { - did_open = true; - } for (size_t i = 0; i < impl_->items_.size(); ++i) { bool isSelected = false; if (ImGui::Selectable(impl_->items_[i].c_str(), &isSelected, 0)) { @@ -185,13 +181,14 @@ Combobox::DrawResult Combobox::Draw(const DrawContext& context) { } ImGui::EndCombo(); } + ImGui::PopItemWidth(); DrawImGuiPopEnabledState(); ImGui::PopStyleColor(3); - return ((value_changed || did_open) ? Widget::DrawResult::REDRAW - : Widget::DrawResult::NONE); + return value_changed ? Widget::DrawResult::REDRAW + : Widget::DrawResult::NONE; } } // namespace gui diff --git a/cpp/open3d/visualization/gui/Font.h b/cpp/open3d/visualization/gui/Font.h index ba6f7d38df6..62467154ac7 100644 --- a/cpp/open3d/visualization/gui/Font.h +++ b/cpp/open3d/visualization/gui/Font.h @@ -7,6 +7,7 @@ #pragma once +#include #include #include diff --git a/cpp/open3d/visualization/gui/ProgressBar.cpp b/cpp/open3d/visualization/gui/ProgressBar.cpp index a55e32bb090..0c930155a41 100644 --- a/cpp/open3d/visualization/gui/ProgressBar.cpp +++ b/cpp/open3d/visualization/gui/ProgressBar.cpp @@ -42,16 +42,19 @@ Widget::DrawResult ProgressBar::Draw(const DrawContext& context) { auto fg = context.theme.border_color; auto color = colorToImguiRGBA(fg); float rounding = frame.height / 2.0f; + ImGui::GetWindowDrawList()->AddRect( - ImVec2(float(frame.x), float(frame.y)), - ImVec2(float(frame.GetRight()), float(frame.GetBottom())), color, - rounding); + ImVec2(float(frame.x), float(frame.y) - ImGui::GetScrollY()), + ImVec2(float(frame.GetRight()), + float(frame.GetBottom()) - ImGui::GetScrollY()), + color, rounding); float x = float(frame.x) + float(frame.width) * impl_->value_; x = std::max(x, float(frame.x + rounding)); + ImGui::GetWindowDrawList()->AddRectFilled( - ImVec2(float(frame.x), float(frame.y)), - ImVec2(float(x), float(frame.GetBottom())), color, - frame.height / 
2.0f); + ImVec2(float(frame.x), float(frame.y) - ImGui::GetScrollY()), + ImVec2(float(x), float(frame.GetBottom()) - ImGui::GetScrollY()), + color, frame.height / 2.0f); return DrawResult::NONE; } diff --git a/cpp/open3d/visualization/gui/Util.h b/cpp/open3d/visualization/gui/Util.h index a1fce75796f..375933bd6dd 100644 --- a/cpp/open3d/visualization/gui/Util.h +++ b/cpp/open3d/visualization/gui/Util.h @@ -10,6 +10,7 @@ #include +#include #include #include diff --git a/cpp/open3d/visualization/gui/Window.cpp b/cpp/open3d/visualization/gui/Window.cpp index 0ee562b4310..bb023903792 100644 --- a/cpp/open3d/visualization/gui/Window.cpp +++ b/cpp/open3d/visualization/gui/Window.cpp @@ -707,7 +707,7 @@ Widget::DrawResult DrawChild(DrawContext& dc, const char* name, std::shared_ptr child, Mode mode) { - // Note: ImGUI's concept of a "window" is really a moveable child of the + // Note: ImGUI's concept of a "window" is really a movable child of the // OS window. We want a child to act like a child of the OS window, // like native UI toolkits, Qt, etc. So the top-level widgets of // a window are drawn using ImGui windows whose frame is specified diff --git a/cpp/open3d/visualization/visualizer/Visualizer.h b/cpp/open3d/visualization/visualizer/Visualizer.h index 76850aced4a..02126229bc3 100644 --- a/cpp/open3d/visualization/visualizer/Visualizer.h +++ b/cpp/open3d/visualization/visualizer/Visualizer.h @@ -173,7 +173,6 @@ class Visualizer { /// Function to retrieve the associated ViewControl ViewControl &GetViewControl() { return *view_control_ptr_; } - const ViewControl &GetViewControl() const { return *view_control_ptr_; } /// Function to retrieve the associated RenderOption. RenderOption &GetRenderOption() { return *render_option_ptr_; } /// \brief Function to capture screen and store RGB in a float buffer. 
@@ -210,11 +209,18 @@ class Visualizer { bool do_render = true, bool convert_to_world_coordinate = false); void CaptureRenderOption(const std::string &filename = ""); + /// Function to reset view point. void ResetViewPoint(bool reset_bounding_box = false); const std::string &GetWindowName() const { return window_name_; } + /// Get the current view status as a json string of ViewTrajectory. + std::string GetViewStatus(); + + /// Set the current view status from a json string of ViewTrajectory. + void SetViewStatus(const std::string &view_status_str); + protected: /// Function to initialize OpenGL virtual bool InitOpenGL(); @@ -230,11 +236,13 @@ class Visualizer { /// meshes individually). virtual void Render(bool render_screen = false); + /// Copy the current view status to clipboard. void CopyViewStatusToClipboard(); + /// Apply the view point from clipboard. void CopyViewStatusFromClipboard(); - // callback functions + /// Callback functions virtual void WindowRefreshCallback(GLFWwindow *window); virtual void WindowResizeCallback(GLFWwindow *window, int w, int h); virtual void MouseMoveCallback(GLFWwindow *window, double x, double y); diff --git a/cpp/open3d/visualization/visualizer/VisualizerRender.cpp b/cpp/open3d/visualization/visualizer/VisualizerRender.cpp index 9669f995249..175d11f99f8 100644 --- a/cpp/open3d/visualization/visualizer/VisualizerRender.cpp +++ b/cpp/open3d/visualization/visualizer/VisualizerRender.cpp @@ -143,34 +143,40 @@ void Visualizer::ResetViewPoint(bool reset_bounding_box /* = false*/) { } void Visualizer::CopyViewStatusToClipboard() { + glfwSetClipboardString(window_, GetViewStatus().c_str()); +} + +void Visualizer::CopyViewStatusFromClipboard() { + const char *clipboard_string_buffer = glfwGetClipboardString(window_); + if (clipboard_string_buffer != nullptr) { + std::string clipboard_string(clipboard_string_buffer); + SetViewStatus(clipboard_string); + } +} + +std::string Visualizer::GetViewStatus() { ViewParameters current_status; if 
(!view_control_ptr_->ConvertToViewParameters(current_status)) { - utility::LogWarning("Something is wrong copying view status."); + utility::LogWarning("Cannot convert to view parameters."); } ViewTrajectory trajectory; trajectory.view_status_.push_back(current_status); - std::string clipboard_string; - if (!io::WriteIJsonConvertibleToJSONString(clipboard_string, trajectory)) { - utility::LogWarning("Something is wrong copying view status."); + std::string view_status_str; + if (!io::WriteIJsonConvertibleToJSONString(view_status_str, trajectory)) { + utility::LogWarning("Cannot convert ViewTrajectory to json string."); } - glfwSetClipboardString(window_, clipboard_string.c_str()); + return view_status_str; } -void Visualizer::CopyViewStatusFromClipboard() { - const char *clipboard_string_buffer = glfwGetClipboardString(window_); - if (clipboard_string_buffer != NULL) { - std::string clipboard_string(clipboard_string_buffer); - ViewTrajectory trajectory; - if (!io::ReadIJsonConvertibleFromJSONString(clipboard_string, - trajectory)) { - utility::LogWarning("Something is wrong copying view status."); - } - if (trajectory.view_status_.size() != 1) { - utility::LogWarning("Something is wrong copying view status."); - } - view_control_ptr_->ConvertFromViewParameters( - trajectory.view_status_[0]); +void Visualizer::SetViewStatus(const std::string &view_status_str) { + ViewTrajectory trajectory; + if (!io::ReadIJsonConvertibleFromJSONString(view_status_str, trajectory)) { + utility::LogWarning("Cannot convert string to view status."); + } + if (trajectory.view_status_.size() != 1) { + utility::LogWarning("Cannot convert string to view status."); } + view_control_ptr_->ConvertFromViewParameters(trajectory.view_status_[0]); } std::shared_ptr Visualizer::CaptureScreenFloatBuffer( diff --git a/cpp/open3d/visualization/webrtc_server/PeerConnectionManager.cpp b/cpp/open3d/visualization/webrtc_server/PeerConnectionManager.cpp index aef757932f9..7b61ac144f5 100644 --- 
a/cpp/open3d/visualization/webrtc_server/PeerConnectionManager.cpp +++ b/cpp/open3d/visualization/webrtc_server/PeerConnectionManager.cpp @@ -140,19 +140,19 @@ PeerConnectionManager::PeerConnectionManager( // Register api in http server. func_["/api/getMediaList"] = [this](const struct mg_request_info *req_info, const Json::Value &in) -> Json::Value { - utility::LogInfo("[Called HTTP API] /api/getMediaList"); + utility::LogDebug("[Called HTTP API] /api/getMediaList"); return this->GetMediaList(); }; func_["/api/getIceServers"] = [this](const struct mg_request_info *req_info, const Json::Value &in) -> Json::Value { - utility::LogInfo("[Called HTTP API] /api/getIceServers"); + utility::LogDebug("[Called HTTP API] /api/getIceServers"); return this->GetIceServers(); }; func_["/api/call"] = [this](const struct mg_request_info *req_info, const Json::Value &in) -> Json::Value { - utility::LogInfo("[Called HTTP API] /api/call"); + utility::LogDebug("[Called HTTP API] /api/call"); std::string peerid; std::string url; // window_uid. 
std::string options; @@ -167,7 +167,7 @@ PeerConnectionManager::PeerConnectionManager( func_["/api/getIceCandidate"] = [this](const struct mg_request_info *req_info, const Json::Value &in) -> Json::Value { - utility::LogInfo("[Called HTTP API] /api/getIceCandidate"); + utility::LogDebug("[Called HTTP API] /api/getIceCandidate"); std::string peerid; if (req_info->query_string) { CivetServer::getParam(req_info->query_string, "peerid", peerid); @@ -178,7 +178,7 @@ PeerConnectionManager::PeerConnectionManager( func_["/api/addIceCandidate"] = [this](const struct mg_request_info *req_info, const Json::Value &in) -> Json::Value { - utility::LogInfo("[Called HTTP API] /api/addIceCandidate"); + utility::LogDebug("[Called HTTP API] /api/addIceCandidate"); std::string peerid; if (req_info->query_string) { CivetServer::getParam(req_info->query_string, "peerid", peerid); @@ -188,7 +188,7 @@ PeerConnectionManager::PeerConnectionManager( func_["/api/hangup"] = [this](const struct mg_request_info *req_info, const Json::Value &in) -> Json::Value { - utility::LogInfo("[Called HTTP API] /api/hangup"); + utility::LogDebug("[Called HTTP API] /api/hangup"); std::string peerid; if (req_info->query_string) { CivetServer::getParam(req_info->query_string, "peerid", peerid); @@ -713,8 +713,8 @@ void PeerConnectionManager::SendInitFramesToPeer(const std::string &peerid) { void PeerConnectionManager::CloseWindowConnections( const std::string &window_uid) { - utility::LogInfo("PeerConnectionManager::CloseWindowConnections: {}", - window_uid); + utility::LogDebug("PeerConnectionManager::CloseWindowConnections: {}", + window_uid); std::set peerids; { std::lock_guard mlock(window_uid_to_peerids_mutex_); diff --git a/cpp/open3d/visualization/webrtc_server/PeerConnectionManager.h b/cpp/open3d/visualization/webrtc_server/PeerConnectionManager.h index 173f642fae2..ceb0fc9df74 100644 --- a/cpp/open3d/visualization/webrtc_server/PeerConnectionManager.h +++ 
b/cpp/open3d/visualization/webrtc_server/PeerConnectionManager.h @@ -204,7 +204,7 @@ class PeerConnectionManager { const std::string state = webrtc::DataChannelInterface::DataStateString( data_channel_->state()); - utility::LogInfo( + utility::LogDebug( "DataChannelObserver::OnStateChange label: {}, state: {}, " "peerid: {}", label, state, peerid_); diff --git a/cpp/open3d/visualization/webrtc_server/WebRTCWindowSystem.cpp b/cpp/open3d/visualization/webrtc_server/WebRTCWindowSystem.cpp index e83939ee82f..4440f0d865b 100644 --- a/cpp/open3d/visualization/webrtc_server/WebRTCWindowSystem.cpp +++ b/cpp/open3d/visualization/webrtc_server/WebRTCWindowSystem.cpp @@ -419,7 +419,7 @@ void WebRTCWindowSystem::SendInitFrames(const std::string &window_uid) { std::string WebRTCWindowSystem::CallHttpAPI(const std::string &entry_point, const std::string &query_string, const std::string &data) const { - utility::LogInfo("[Called HTTP API (custom handshake)] {}", entry_point); + utility::LogDebug("[Called HTTP API (custom handshake)] {}", entry_point); std::string query_string_trimmed = ""; if (!query_string.empty() && query_string[0] == '?') { diff --git a/cpp/pybind/CMakeLists.txt b/cpp/pybind/CMakeLists.txt index ca5bcf16198..6d7991d44e6 100644 --- a/cpp/pybind/CMakeLists.txt +++ b/cpp/pybind/CMakeLists.txt @@ -66,6 +66,7 @@ elseif (UNIX) # Linux };]=]) target_link_options(pybind PRIVATE $<$: "-Wl,--version-script=${CMAKE_CURRENT_BINARY_DIR}/pybind.map" >) + target_link_options(pybind PRIVATE "-flto=auto") endif() diff --git a/cpp/pybind/data/dataset.cpp b/cpp/pybind/data/dataset.cpp index e1454c70315..b86b40c5464 100644 --- a/cpp/pybind/data/dataset.cpp +++ b/cpp/pybind/data/dataset.cpp @@ -974,28 +974,28 @@ void pybind_redwood_indoor_living_room1(py::module& m) { R"doc(RedwoodIndoorLivingRoom1 (Augmented ICL-NUIM Dataset) Data class for `RedwoodIndoorLivingRoom1`, containing dense point cloud, rgb sequence, clean depth sequence, noisy depth sequence, oni -sequence, and 
ground-truth camera trajectory. +sequence, and ground-truth camera trajectory. :: -RedwoodIndoorLivingRoom1 -├── colors -│  ├── 00000.jpg -│ ├── 00001.jpg -│ ├── ... -│ └── 02869.jpg -├── depth -│ ├── 00000.png -│ ├── 00001.png -│ ├── ... -│ └── 02869.png -├── depth_noisy -│ ├── 00000.png -│ ├── 00001.png -│ ├── ... -│ └── 02869.png -├── dist-model.txt -├── livingroom1.oni -├── livingroom1-traj.txt -└── livingroom.ply + RedwoodIndoorLivingRoom1 + |-- colors + | |-- 00000.jpg + | |-- 00001.jpg + | |-- ... + | '-- 02869.jpg + |-- depth + | |-- 00000.png + | |-- 00001.png + | |-- ... + | '-- 02869.png + |-- depth_noisy + | |-- 00000.png + | |-- 00001.png + | |-- ... + | '-- 02869.png + |-- dist-model.txt + |-- livingroom1.oni + |-- livingroom1-traj.txt + '-- livingroom.ply )doc"); dataset.def(py::init(), "data_root"_a = ""); dataset.def_property_readonly("point_cloud_path", @@ -1029,28 +1029,28 @@ void pybind_redwood_indoor_living_room2(py::module& m) { R"doc(RedwoodIndoorLivingRoom2 (Augmented ICL-NUIM Dataset) Data class for `RedwoodIndoorLivingRoom2`, containing dense point cloud, rgb sequence, clean depth sequence, noisy depth sequence, oni -sequence, and ground-truth camera trajectory. +sequence, and ground-truth camera trajectory. :: -RedwoodIndoorLivingRoom2 -├── colors -│  ├── 00000.jpg -│ ├── 00001.jpg -│ ├── ... -│ └── 02349.jpg -├── depth -│ ├── 00000.png -│ ├── 00001.png -│ ├── ... -│ └── 02349.png -├── depth_noisy -│ ├── 00000.png -│ ├── 00001.png -│ ├── ... -│ └── 02349.png -├── dist-model.txt -├── livingroom2.oni -├── livingroom2-traj.txt -└── livingroom.ply + RedwoodIndoorLivingRoom2 + |-- colors + | |-- 00000.jpg + | |-- 00001.jpg + | |-- ... + | '-- 02349.jpg + |-- depth + | |-- 00000.png + | |-- 00001.png + | |-- ... + | '-- 02349.png + |-- depth_noisy + | |-- 00000.png + | |-- 00001.png + | |-- ... 
+ | '-- 02349.png + |-- dist-model.txt + |-- livingroom2.oni + |-- livingroom2-traj.txt + '-- livingroom.ply )doc"); dataset.def(py::init(), "data_root"_a = ""); dataset.def_property_readonly("point_cloud_path", @@ -1083,28 +1083,28 @@ void pybind_redwood_indoor_office1(py::module& m) { R"doc(RedwoodIndoorOffice1 (Augmented ICL-NUIM Dataset) Data class for `RedwoodIndoorOffice1`, containing dense point cloud, rgb sequence, clean depth sequence, noisy depth sequence, oni -sequence, and ground-truth camera trajectory. +sequence, and ground-truth camera trajectory. :: -RedwoodIndoorOffice1 -├── colors -│  ├── 00000.jpg -│ ├── 00001.jpg -│ ├── ... -│ └── 02689.jpg -├── depth -│ ├── 00000.png -│ ├── 00001.png -│ ├── ... -│ └── 02689.png -├── depth_noisy -│ ├── 00000.png -│ ├── 00001.png -│ ├── ... -│ └── 02689.png -├── dist-model.txt -├── office1.oni -├── office1-traj.txt -└── office.ply + RedwoodIndoorOffice1 + |-- colors + | |-- 00000.jpg + | |-- 00001.jpg + | |-- ... + | '-- 02689.jpg + |-- depth + | |-- 00000.png + | |-- 00001.png + | |-- ... + | '-- 02689.png + |-- depth_noisy + | |-- 00000.png + | |-- 00001.png + | |-- ... + | '-- 02689.png + |-- dist-model.txt + |-- office1.oni + |-- office1-traj.txt + '-- office.ply )doc"); dataset.def(py::init(), "data_root"_a = ""); dataset.def_property_readonly("point_cloud_path", @@ -1136,28 +1136,28 @@ void pybind_redwood_indoor_office2(py::module& m) { R"doc(RedwoodIndoorOffice2 (Augmented ICL-NUIM Dataset) Data class for `RedwoodIndoorOffice2`, containing dense point cloud, rgb sequence, clean depth sequence, noisy depth sequence, oni -sequence, and ground-truth camera trajectory. +sequence, and ground-truth camera trajectory. :: -RedwoodIndoorOffice2 -├── colors -│  ├── 00000.jpg -│ ├── 00001.jpg -│ ├── ... -│ └── 02537.jpg -├── depth -│ ├── 00000.png -│ ├── 00001.png -│ ├── ... -│ └── 02537.png -├── depth_noisy -│ ├── 00000.png -│ ├── 00001.png -│ ├── ... 
-│ └── 02537.png -├── dist-model.txt -├── office2.oni -├── office2-traj.txt -└── office.ply + RedwoodIndoorOffice2 + |-- colors + | |-- 00000.jpg + | |-- 00001.jpg + | |-- ... + | '-- 02537.jpg + |-- depth + | |-- 00000.png + | |-- 00001.png + | |-- ... + | '-- 02537.png + |-- depth_noisy + | |-- 00000.png + | |-- 00001.png + | |-- ... + | '-- 02537.png + |-- dist-model.txt + |-- office2.oni + |-- office2-traj.txt + '-- office.ply )doc"); dataset.def(py::init(), "data_root"_a = ""); dataset.def_property_readonly("point_cloud_path", diff --git a/cpp/pybind/geometry/image.cpp b/cpp/pybind/geometry/image.cpp index 0a18be65e7f..70e63184e64 100644 --- a/cpp/pybind/geometry/image.cpp +++ b/cpp/pybind/geometry/image.cpp @@ -86,6 +86,13 @@ void pybind_image(py::module &m) { } height = (int)info.shape[0]; width = (int)info.shape[1]; + if (info.strides[1] != num_of_channels * bytes_per_channel || + info.strides[0] != + width * num_of_channels * bytes_per_channel) { + throw std::runtime_error( + "Image can only be initialized from a contiguous " + "buffer."); + } auto img = new Image(); img->Prepare(width, height, num_of_channels, bytes_per_channel); memcpy(img->data_.data(), info.ptr, img->data_.size()); diff --git a/cpp/pybind/geometry/pointcloud.cpp b/cpp/pybind/geometry/pointcloud.cpp index d9e2ce935c6..2af8319e93d 100644 --- a/cpp/pybind/geometry/pointcloud.cpp +++ b/cpp/pybind/geometry/pointcloud.cpp @@ -86,16 +86,16 @@ void pybind_pointcloud(py::module &m) { "num_samples"_a) .def("crop", (std::shared_ptr(PointCloud::*)( - const AxisAlignedBoundingBox &) const) & + const AxisAlignedBoundingBox &, bool) const) & PointCloud::Crop, "Function to crop input pointcloud into output pointcloud", - "bounding_box"_a) + "bounding_box"_a, "invert"_a = false) .def("crop", (std::shared_ptr(PointCloud::*)( - const OrientedBoundingBox &) const) & + const OrientedBoundingBox &, bool) const) & PointCloud::Crop, "Function to crop input pointcloud into output pointcloud", - 
"bounding_box"_a) + "bounding_box"_a, "invert"_a = false) .def("remove_non_finite_points", &PointCloud::RemoveNonFinitePoints, "Removes all points from the point cloud that have a nan " "entry, or infinite entries. It also removes the " @@ -139,7 +139,7 @@ void pybind_pointcloud(py::module &m) { &PointCloud::OrientNormalsConsistentTangentPlane, "Function to orient the normals with respect to consistent " "tangent planes", - "k"_a) + "k"_a, "lambda"_a = 0.0, "cos_alpha_tol"_a = 1.0) .def("compute_point_cloud_distance", &PointCloud::ComputePointCloudDistance, "For each point in the source point cloud, compute the " @@ -289,7 +289,8 @@ camera. Given depth value d at (u, v) image coordinate, the corresponding 3d poi "number of points[0-1]"}}); docstring::ClassMethodDocInject( m, "PointCloud", "crop", - {{"bounding_box", "AxisAlignedBoundingBox to crop points"}}); + {{"bounding_box", "AxisAlignedBoundingBox to crop points"}, + {"invert", "optional boolean to invert cropping"}}); docstring::ClassMethodDocInject( m, "PointCloud", "remove_non_finite_points", {{"remove_nan", "Remove NaN values from the PointCloud"}, diff --git a/cpp/pybind/geometry/voxelgrid.cpp b/cpp/pybind/geometry/voxelgrid.cpp index 7abe1872e91..0528979b0f3 100644 --- a/cpp/pybind/geometry/voxelgrid.cpp +++ b/cpp/pybind/geometry/voxelgrid.cpp @@ -77,6 +77,10 @@ void pybind_voxelgrid(py::module &m) { "Returns ``True`` if the voxel grid contains voxels.") .def("get_voxel", &VoxelGrid::GetVoxel, "point"_a, "Returns voxel index given query point.") + .def("add_voxel", &VoxelGrid::AddVoxel, "voxel"_a, + "Add a new voxel into the VoxelGrid.") + .def("remove_voxel", &VoxelGrid::RemoveVoxel, "idx"_a, + "Remove a voxel given index.") .def("check_if_included", &VoxelGrid::CheckIfIncluded, "queries"_a, "Element-wise check if a query in the list is included in " "the VoxelGrid. 
Queries are double precision and " @@ -155,6 +159,11 @@ void pybind_voxelgrid(py::module &m) { docstring::ClassMethodDocInject(m, "VoxelGrid", "has_voxels"); docstring::ClassMethodDocInject(m, "VoxelGrid", "get_voxel", {{"point", "The query point."}}); + docstring::ClassMethodDocInject(m, "VoxelGrid", "add_voxel", + {{"Voxel", "A new voxel."}}); + docstring::ClassMethodDocInject( + m, "VoxelGrid", "remove_voxel", + {{"idx", "The grid index of the target voxel."}}); docstring::ClassMethodDocInject( m, "VoxelGrid", "check_if_included", {{"query", "a list of voxel indices to check."}}); diff --git a/cpp/pybind/make_install_pip_package.cmake b/cpp/pybind/make_install_pip_package.cmake index a5c9fcdd05c..98754677d11 100644 --- a/cpp/pybind/make_install_pip_package.cmake +++ b/cpp/pybind/make_install_pip_package.cmake @@ -4,5 +4,5 @@ # Note: Since `make python-package` clears PYTHON_COMPILED_MODULE_DIR every time, # it is guaranteed that there is only one wheel in ${PYTHON_PACKAGE_DST_DIR}/pip_package/*.whl file(GLOB WHEEL_FILE "${PYTHON_PACKAGE_DST_DIR}/pip_package/*.whl") -execute_process(COMMAND ${Python3_EXECUTABLE} -m pip uninstall open3d --yes) +execute_process(COMMAND ${Python3_EXECUTABLE} -m pip uninstall open3d open3d-cpu --yes) execute_process(COMMAND ${Python3_EXECUTABLE} -m pip install ${WHEEL_FILE} -U) diff --git a/cpp/pybind/t/geometry/pointcloud.cpp b/cpp/pybind/t/geometry/pointcloud.cpp index cc7f98eca59..a36813fd353 100644 --- a/cpp/pybind/t/geometry/pointcloud.cpp +++ b/cpp/pybind/t/geometry/pointcloud.cpp @@ -72,8 +72,8 @@ The attributes of the point cloud have different levels:: # The shape must be (N, 3). The device of "positions" determines the device # of the point cloud. pcd.point.positions = o3d.core.Tensor([[0, 0, 0], - [1, 1, 1], - [2, 2, 2]], dtype, device) + [1, 1, 1], + [2, 2, 2]], dtype, device) # Common attributes: "normals", "colors". # Common attributes are used in built-in point cloud operations. 
The @@ -82,11 +82,11 @@ The attributes of the point cloud have different levels:: # "normals" and "colors" must have shape (N, 3) and must be on the same # device as the point cloud. pcd.point.normals = o3d.core.Tensor([[0, 0, 1], - [0, 1, 0], - [1, 0, 0]], dtype, device) + [0, 1, 0], + [1, 0, 0]], dtype, device) pcd.point.colors = o3d.core.Tensor([[0.0, 0.0, 0.0], - [0.1, 0.1, 0.1], - [0.2, 0.2, 0.2]], dtype, device) + [0.1, 0.1, 0.1], + [0.2, 0.2, 0.2]], dtype, device) # User-defined attributes. # You can also attach custom attributes. The value tensor must be on the @@ -211,12 +211,32 @@ The attributes of the point cloud have different levels:: "output point cloud."); pointcloud.def( "voxel_down_sample", - [](const PointCloud& pointcloud, const double voxel_size) { - return pointcloud.VoxelDownSample( - voxel_size, core::HashBackendType::Default); + [](const PointCloud& pointcloud, const double voxel_size, + const std::string& reduction) { + return pointcloud.VoxelDownSample(voxel_size, reduction); }, - "Downsamples a point cloud with a specified voxel size.", - "voxel_size"_a); + "Downsamples a point cloud with a specified voxel size and a " + "reduction type.", + "voxel_size"_a, "reduction"_a = "mean", + R"doc(Downsamples a point cloud with a specified voxel size. + +Args: + voxel_size (float): The size of the voxel used to downsample the point cloud. + + reduction (str): The approach to pool point properties in a voxel. Can only be "mean" at current. + +Return: + A downsampled point cloud with point properties reduced in each voxel. 
+ +Example: + + We will load the Eagle dataset, downsample it, and show the result:: + + eagle = o3d.data.EaglePointCloud() + pcd = o3d.t.io.read_point_cloud(eagle.path) + pcd_down = pcd.voxel_down_sample(voxel_size=0.05) + o3d.visualization.draw([{'name': 'pcd', 'geometry': pcd}, {'name': 'pcd_down', 'geometry': pcd_down}]) + )doc"); pointcloud.def("uniform_down_sample", &PointCloud::UniformDownSample, "Downsamples a point cloud by selecting every kth index " "point and its attributes.", @@ -239,8 +259,8 @@ The attributes of the point cloud have different levels:: sphere of a given search radius. Args: - nb_points. Number of neighbor points required within the radius. - search_radius. Radius of the sphere. + nb_points: Number of neighbor points required within the radius. + search_radius: Radius of the sphere. Return: Tuple of filtered point cloud and boolean mask tensor for selected values @@ -253,8 +273,8 @@ sphere of a given search radius. neighbors in average. This function is not recommended to use on GPU. Args: - nb_neighbors. Number of neighbors around the target point. - std_ratio. Standard deviation ratio. + nb_neighbors: Number of neighbors around the target point. + std_ratio: Standard deviation ratio. Return: Tuple of filtered point cloud and boolean mask tensor for selected values @@ -269,8 +289,8 @@ neighbors in average. This function is not recommended to use on GPU. infinite value. It also removes the corresponding attributes. Args: - remove_nan. Remove NaN values from the PointCloud. - remove_infinite. Remove infinite values from the PointCloud. + remove_nan: Remove NaN values from the PointCloud. + remove_infinite: Remove infinite values from the PointCloud. Return: Tuple of filtered point cloud and boolean mask tensor for selected values @@ -300,11 +320,70 @@ infinite value. It also removes the corresponding attributes. 
"Function to orient the normals of a point cloud.", "camera_location"_a = core::Tensor::Zeros( {3}, core::Float32, core::Device("CPU:0"))); - pointcloud.def("orient_normals_consistent_tangent_plane", - &PointCloud::OrientNormalsConsistentTangentPlane, - "Function to orient the normals with respect to consistent " - "tangent planes.", - "k"_a); + pointcloud.def( + "orient_normals_consistent_tangent_plane", + &PointCloud::OrientNormalsConsistentTangentPlane, "k"_a, + "lambda"_a = 0.0, "cos_alpha_tol"_a = 1.0, + R"(Function to consistently orient the normals of a point cloud based on tangent planes. + +The algorithm is described in Hoppe et al., "Surface Reconstruction from Unorganized Points", 1992. +Additional information about the choice of lambda and cos_alpha_tol for complex +point clouds can be found in Piazza, Valentini, Varetti, "Mesh Reconstruction from Point Cloud", 2023 +(https://eugeniovaretti.github.io/meshreco/Piazza_Valentini_Varetti_MeshReconstructionFromPointCloud_2023.pdf). + +Args: + k (int): Number of neighbors to use for tangent plane estimation. + lambda (float): A non-negative real parameter that influences the distance + metric used to identify the true neighbors of a point in complex + geometries. It penalizes the distance between a point and the tangent + plane defined by the reference point and its normal vector, helping to + mitigate misclassification issues encountered with traditional + Euclidean distance metrics. + cos_alpha_tol (float): Cosine threshold angle used to determine the + inclusion boundary of neighbors based on the direction of the normal + vector. + +Example: + We use Bunny point cloud to compute its normals and orient them consistently. + The initial reconstruction adheres to Hoppe's algorithm (raw), whereas the + second reconstruction utilises the lambda and cos_alpha_tol parameters. 
+ Due to the high density of the Bunny point cloud available in Open3D a larger + value of the parameter k is employed to test the algorithm. Usually you do + not have at disposal such a refined point clouds, thus you cannot find a + proper choice of k: refer to + https://eugeniovaretti.github.io/meshreco for these cases.:: + + import open3d as o3d + import numpy as np + # Load point cloud + data = o3d.data.BunnyMesh() + + # Case 1, Hoppe (raw): + pcd = o3d.io.read_point_cloud(data.path) + + # Compute normals and orient them consistently, using k=100 neighbours + pcd.estimate_normals() + pcd.orient_normals_consistent_tangent_plane(100) + + # Create mesh from point cloud using Poisson Algorithm + poisson_mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(pcd, depth=8, width=0, scale=1.1, linear_fit=False)[0] + poisson_mesh.paint_uniform_color(np.array([[0.5],[0.5],[0.5]])) + poisson_mesh.compute_vertex_normals() + o3d.visualization.draw_geometries([poisson_mesh]) + + # Case 2, reconstruction using lambda and cos_alpha_tol parameters: + pcd_robust = o3d.io.read_point_cloud(data.path) + + # Compute normals and orient them consistently, using k=100 neighbours + pcd_robust.estimate_normals() + pcd_robust.orient_normals_consistent_tangent_plane(100, 10, 0.5) + + # Create mesh from point cloud using Poisson Algorithm + poisson_mesh_robust = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(pcd_robust, depth=8, width=0, scale=1.1, linear_fit=False)[0] + poisson_mesh_robust.paint_uniform_color(np.array([[0.5],[0.5],[0.5]])) + poisson_mesh_robust.compute_vertex_normals() + + o3d.visualization.draw_geometries([poisson_mesh_robust]) )"); pointcloud.def( "estimate_color_gradients", &PointCloud::EstimateColorGradients, py::call_guard(), py::arg("max_nn") = 30, @@ -325,7 +404,7 @@ infinite value. It also removes the corresponding attributes. 
"with_normals"_a = false, "Factory function to create a pointcloud (with only 'points') from " "a depth image and a camera model.\n\n Given depth value d at (u, " - "v) image coordinate, the corresponding 3d point is:\n z = d / " + "v) image coordinate, the corresponding 3d point is:\n\n z = d / " "depth_scale\n\n x = (u - cx) * z / fx\n\n y = (v - cy) * z / fy"); pointcloud.def_static( "create_from_rgbd_image", &PointCloud::CreateFromRGBDImage, @@ -370,14 +449,16 @@ This is a wrapper for a CPU implementation and a copy of the point cloud data and resulting visible triangle mesh and indiecs will be made. Args: - camera_location. All points not visible from that location will be removed. - radius. The radius of the spherical projection. + camera_location: All points not visible from that location will be removed. + + radius: The radius of the spherical projection. Return: Tuple of visible triangle mesh and indices of visible points on the same device as the point cloud. Example: + We use armadillo mesh to compute the visible points from given camera:: # Convert mesh to a point cloud and estimate dimensions. @@ -406,15 +487,18 @@ with Noise', 1996. This is a wrapper for a CPU implementation and a copy of the point cloud data and resulting labels will be made. Args: - eps. Density parameter that is used to find neighbouring points. - min_points. Minimum number of points to form a cluster. - print_progress (default False). If 'True' the progress is visualized in the console. + eps: Density parameter that is used to find neighbouring points. + + min_points: Minimum number of points to form a cluster. + +print_progress (default False): If 'True' the progress is visualized in the console. Return: A Tensor list of point labels on the same device as the point cloud, -1 indicates noise according to the algorithm. Example: + We use Redwood dataset for demonstration:: import matplotlib.pyplot as plt @@ -433,23 +517,25 @@ point cloud data and resulting labels will be made. 
pointcloud.def( "segment_plane", &PointCloud::SegmentPlane, "distance_threshold"_a = 0.01, "ransac_n"_a = 3, - "num_iterations"_a = 100, "probability"_a = 0.99999999, + "num_iterations"_a = 100, "probability"_a = 0.999, R"(Segments a plane in the point cloud using the RANSAC algorithm. This is a wrapper for a CPU implementation and a copy of the point cloud data and resulting plane model and inlier indiecs will be made. Args: - distance_threshold (default 0.01). Max distance a point can be from the plane - model, and still be considered an inlier. - ransac_n (default 3). Number of initial points to be considered inliers in each iteration. - num_iterations (default 100). Maximum number of iterations. - probability (default 0.99999999). Expected probability of finding the optimal plane. + distance_threshold (default 0.01): Max distance a point can be from the plane model, and still be considered an inlier. + + ransac_n (default 3): Number of initial points to be considered inliers in each iteration. + num_iterations (default 100): Maximum number of iterations. + + probability (default 0.999): Expected probability of finding the optimal plane. Return: - Tuple of the plane model ax + by + cz + d = 0 and the indices of + Tuple of the plane model `ax + by + cz + d = 0` and the indices of the plane inliers on the same device as the point cloud. Example: + We use Redwood dataset to compute its plane model and inliers:: sample_pcd_data = o3d.data.PCDPointCloud() @@ -467,16 +553,11 @@ resulting plane model and inlier indiecs will be made. R"doc(Compute the convex hull of a triangle mesh using qhull. This runs on the CPU. Args: - joggle_inputs (default False). Handle precision problems by - randomly perturbing the input data. Set to True if perturbing the input - iis acceptable but you need convex simplicial output. If False, - neighboring facets may be merged in case of precision problems. See - `QHull docs `__ for more - details. 
+ joggle_inputs (default False): Handle precision problems by randomly perturbing the input data. Set to True if perturbing the input is acceptable but you need convex simplicial output. If False, neighboring facets may be merged in case of precision problems. See `QHull docs `__ for more details. Return: TriangleMesh representing the convexh hull. This contains an - extra vertex property "point_indices" that contains the index of the + extra vertex property `point_indices` that contains the index of the corresponding vertex in the original mesh. Example: @@ -495,9 +576,9 @@ The implementation is inspired by the PCL implementation. Reference: https://pointclouds.org/documentation/classpcl_1_1_boundary_estimation.html Args: - radius. Neighbor search radius parameter. - max_nn (default 30). Maximum number of neighbors to search. - angle_threshold (default 90.0). Angle threshold to decide if a point is on the boundary. + radius: Neighbor search radius parameter. + max_nn (default 30): Maximum number of neighbors to search. + angle_threshold (default 90.0): Angle threshold to decide if a point is on the boundary. Return: Tensor of boundary points and its boolean mask tensor. @@ -590,11 +671,6 @@ The implementation is inspired by the PCL implementation. Reference: m, "PointCloud", "orient_normals_towards_camera_location", {{"camera_location", "Normals are oriented with towards the camera_location."}}); - docstring::ClassMethodDocInject( - m, "PointCloud", "orient_normals_consistent_tangent_plane", - {{"k", - "Number of k nearest neighbors used in constructing the " - "Riemannian graph used to propagate normal orientation."}}); docstring::ClassMethodDocInject( m, "PointCloud", "crop", {{"aabb", "AxisAlignedBoundingBox to crop points."}, @@ -655,6 +731,7 @@ The implementation is inspired by the PCL implementation. 
Reference: Example: This code generates a set of straight lines from a point cloud:: + import open3d as o3d import numpy as np pcd = o3d.t.geometry.PointCloud(np.random.rand(10,3)) diff --git a/cpp/pybind/visualization/visualizer.cpp b/cpp/pybind/visualization/visualizer.cpp index cec58d606bc..7b76396c7c3 100644 --- a/cpp/pybind/visualization/visualizer.cpp +++ b/cpp/pybind/visualization/visualizer.cpp @@ -93,11 +93,9 @@ void pybind_visualizer(py::module &m) { "reset_bounding_box"_a = true) .def("clear_geometries", &Visualizer::ClearGeometries, "Function to clear geometries from the visualizer") - .def( - "get_view_control", - [](Visualizer &vis) { return vis.GetViewControl(); }, - "Function to retrieve the associated ``ViewControl``", - py::return_value_policy::reference_internal) + .def("get_view_control", &Visualizer::GetViewControl, + "Function to retrieve the associated ``ViewControl``", + py::return_value_policy::reference_internal) .def("get_render_option", &Visualizer::GetRenderOption, "Function to retrieve the associated ``RenderOption``", py::return_value_policy::reference_internal) @@ -119,7 +117,14 @@ void pybind_visualizer(py::module &m) { &Visualizer::CaptureDepthPointCloud, "Function to capture and save local point cloud", "filename"_a, "do_render"_a = false, "convert_to_world_coordinate"_a = false) - .def("get_window_name", &Visualizer::GetWindowName); + .def("get_window_name", &Visualizer::GetWindowName) + .def("get_view_status", &Visualizer::GetViewStatus, + "Get the current view status as a json string of " + "ViewTrajectory.") + .def("set_view_status", &Visualizer::SetViewStatus, + "Set the current view status from a json string of " + "ViewTrajectory.", + "view_status_str"_a); py::class_, diff --git a/cpp/tests/core/Tensor.cpp b/cpp/tests/core/Tensor.cpp index 46ca362735c..ad377274169 100644 --- a/cpp/tests/core/Tensor.cpp +++ b/cpp/tests/core/Tensor.cpp @@ -1415,6 +1415,40 @@ TEST_P(TensorPermuteDevicePairs, IndexSetBroadcast) { 0, 0, 0, 0, 
20, 20, 20, 0, 0, 0, 0, 0})); } +TEST_P(TensorPermuteDevices, IndexAdd_) { + core::Device device = GetParam(); + + const int tensor_size = 100; + + // Test one: dst_t[np.array([0, 1, 2, 3, 4])] += np.array([1, 1, 1, 1, 1]) + { + core::Tensor index = + core::Tensor::Arange(0, tensor_size, 1, core::Int64, device); + core::Tensor src = + core::Tensor::Zeros({tensor_size}, core::Float32, device); + src.IndexAdd_( + /*dim=*/0, index, + core::Tensor::Ones({tensor_size}, core::Float32, device)); + EXPECT_TRUE(src.AllClose( + core::Tensor::Ones({tensor_size}, core::Float32, device))); + } + + // Test two: dst_t[np.array([0, 0, 0, 0, 0])] += np.array([1, 1, 1, 1, 1]) + { + core::Tensor index = + core::Tensor::Zeros({tensor_size}, core::Int64, device); + core::Tensor src = + core::Tensor::Zeros({tensor_size}, core::Float32, device); + src.IndexAdd_( + /*dim=*/0, index, + core::Tensor::Ones({tensor_size}, core::Float32, device)); + EXPECT_EQ(src[0].Item(), tensor_size); + EXPECT_TRUE(src.Slice(0, 1, tensor_size) + .AllClose(core::Tensor::Zeros( + {tensor_size - 1}, core::Float32, device))); + } +} + TEST_P(TensorPermuteDevices, Permute) { core::Device device = GetParam(); diff --git a/cpp/tests/geometry/PointCloud.cpp b/cpp/tests/geometry/PointCloud.cpp index 2777b7d68a3..21bfe5659bf 100644 --- a/cpp/tests/geometry/PointCloud.cpp +++ b/cpp/tests/geometry/PointCloud.cpp @@ -18,6 +18,7 @@ #include "open3d/io/ImageIO.h" #include "open3d/io/PinholeCameraTrajectoryIO.h" #include "open3d/io/PointCloudIO.h" +#include "open3d/utility/Random.h" #include "open3d/visualization/utility/DrawGeometry.h" #include "tests/Tests.h" @@ -981,6 +982,38 @@ TEST(PointCloud, Crop_OrientedBoundingBox) { })); } +TEST(PointCloud, Crop_AxisAlignedBoundingBox_Invert) { + geometry::AxisAlignedBoundingBox aabb({0, 0, 0}, {2, 2, 2}); + geometry::PointCloud pcd({{0, 0, 0}, + {2, 2, 2}, + {1, 1, 1}, + {1, 1, 2}, + {3, 1, 1}, + {-1, 1, 1}}); + pcd.normals_ = {{0, 0, 0}, {1, 0, 0}, {2, 0, 0}, + {3, 0, 0}, {4, 
0, 0}, {5, 0, 0}}; + pcd.colors_ = {{0.0, 0.0, 0.0}, {0.1, 0.0, 0.0}, {0.2, 0.0, 0.0}, + {0.3, 0.0, 0.0}, {0.4, 0.0, 0.0}, {0.5, 0.0, 0.0}}; + pcd.covariances_ = { + 0.0 * Eigen::Matrix3d::Identity(), + 1.0 * Eigen::Matrix3d::Identity(), + 2.0 * Eigen::Matrix3d::Identity(), + 3.0 * Eigen::Matrix3d::Identity(), + 4.0 * Eigen::Matrix3d::Identity(), + 5.0 * Eigen::Matrix3d::Identity(), + }; + std::shared_ptr pc_crop = pcd.Crop(aabb, true); + ExpectEQ(pc_crop->points_, + std::vector({{3, 1, 1}, {-1, 1, 1}})); + ExpectEQ(pc_crop->normals_, + std::vector({{4, 0, 0}, {5, 0, 0}})); + ExpectEQ(pc_crop->colors_, + std::vector({{0.4, 0.0, 0.0}, {0.5, 0.0, 0.0}})); + ExpectEQ(pc_crop->covariances_, + std::vector({4.0 * Eigen::Matrix3d::Identity(), + 5.0 * Eigen::Matrix3d::Identity()})); +} + TEST(PointCloud, EstimateNormals) { geometry::PointCloud pcd({ {0, 0, 0}, @@ -1371,6 +1404,31 @@ TEST(PointCloud, SegmentPlaneSpecialCase) { EXPECT_ANY_THROW(pcd.SegmentPlane(0.01, 3, 10, 1.5)); } +TEST(PointCloud, SegmentPlaneDeterministic) { + geometry::PointCloud pcd; + data::PCDPointCloud pointcloud_pcd; + io::ReadPointCloud(pointcloud_pcd.GetPath(), pcd); + EXPECT_EQ(pcd.points_.size(), 113662); + + // Hard-coded test + Eigen::Vector4d plane_model; + std::vector inliers; + utility::random::Seed(0); + std::tie(plane_model, inliers) = pcd.SegmentPlane(0.01, 3, 1000, 1.0); + ExpectEQ(plane_model, Eigen::Vector4d(-0.06, -0.10, 0.99, -1.06), 0.1); + + // Test segment plane for 10 times with the same random seed. + for (int i = 0; i < 10; ++i) { + // Reset random seed. 
+ utility::random::Seed(0); + Eigen::Vector4d plane_model_d; + std::vector inliers_d; + std::tie(plane_model_d, inliers_d) = + pcd.SegmentPlane(0.01, 3, 1000, 1.0); + ExpectEQ(plane_model, plane_model_d); + } +} + TEST(PointCloud, DetectPlanarPatches) { geometry::PointCloud pcd; data::PCDPointCloud pointcloud_pcd; diff --git a/cpp/tests/t/geometry/OrientedBoundingBox.cpp b/cpp/tests/t/geometry/OrientedBoundingBox.cpp index d7450fabddb..95a0b4fe983 100644 --- a/cpp/tests/t/geometry/OrientedBoundingBox.cpp +++ b/cpp/tests/t/geometry/OrientedBoundingBox.cpp @@ -226,8 +226,8 @@ TEST_P(OrientedBoundingBoxPermuteDevices, Scale) { TEST_P(OrientedBoundingBoxPermuteDevices, GetBoxPoints) { core::Device device = GetParam(); - core::Tensor center = core::Tensor::Init({-1, -1, -1}, device); - core::Tensor extent = core::Tensor::Init({1.0, 1.0, 1.0}, device); + core::Tensor center = core::Tensor::Init({-1., -1., -1.}, device); + core::Tensor extent = core::Tensor::Init({0.0, 0.0, 1.0}, device); core::Tensor rotation = core::Tensor::Eye(3, core::Float32, device); t::geometry::OrientedBoundingBox obb(center, rotation, extent); @@ -235,14 +235,14 @@ TEST_P(OrientedBoundingBoxPermuteDevices, GetBoxPoints) { auto box_points = obb.GetBoxPoints(); EXPECT_TRUE( - box_points.AllClose(core::Tensor::Init({{-1.5, -1.5, -1.5}, - {-0.5, -1.5, -1.5}, - {-1.5, -0.5, -1.5}, - {-1.5, -1.5, -0.5}, - {-0.5, -0.5, -0.5}, - {-1.5, -0.5, -0.5}, - {-0.5, -1.5, -0.5}, - {-0.5, -0.5, -1.5}}, + box_points.AllClose(core::Tensor::Init({{-1.0, -1.0, -1.5}, + {-1.0, -1.0, -1.5}, + {-1.0, -1.0, -1.5}, + {-1.0, -1.0, -0.5}, + {-1.0, -1.0, -0.5}, + {-1.0, -1.0, -0.5}, + {-1.0, -1.0, -0.5}, + {-1.0, -1.0, -1.5}}, device))); } diff --git a/cpp/tests/t/geometry/PointCloud.cpp b/cpp/tests/t/geometry/PointCloud.cpp index 1134a33c472..0b4d7365de6 100644 --- a/cpp/tests/t/geometry/PointCloud.cpp +++ b/cpp/tests/t/geometry/PointCloud.cpp @@ -901,7 +901,7 @@ TEST_P(PointCloudPermuteDevices, VoxelDownSample) { device)); 
auto pcd_small_down = pcd_small.VoxelDownSample(1); EXPECT_TRUE(pcd_small_down.GetPointPositions().AllClose( - core::Tensor::Init({{0, 0, 0}}, device))); + core::Tensor::Init({{0.375, 0.375, 0.575}}, device))); } TEST_P(PointCloudPermuteDevices, UniformDownSample) { diff --git a/cpp/tests/t/geometry/TriangleMesh.cpp b/cpp/tests/t/geometry/TriangleMesh.cpp index 4748cb0cd0d..520848a5032 100644 --- a/cpp/tests/t/geometry/TriangleMesh.cpp +++ b/cpp/tests/t/geometry/TriangleMesh.cpp @@ -500,11 +500,8 @@ TEST_P(TriangleMeshPermuteDevices, ToLegacy) { Pointwise(FloatEq(), {1.0, 1.1})})); auto mat_iterator = std::find_if( - legacy_mesh.materials_.begin(), - legacy_mesh.materials_.end(), - [](const auto& pair)->bool{ - return pair.first == "Mat1"; - }); + legacy_mesh.materials_.begin(), legacy_mesh.materials_.end(), + [](const auto& pair) -> bool { return pair.first == "Mat1"; }); EXPECT_TRUE(mat_iterator != legacy_mesh.materials_.end()); auto& mat = mat_iterator->second; EXPECT_TRUE(Eigen::Vector4f(mat.baseColor.f4) == diff --git a/docker/Dockerfile.ci b/docker/Dockerfile.ci index 5474b1aee58..98d97c06f3a 100644 --- a/docker/Dockerfile.ci +++ b/docker/Dockerfile.ci @@ -201,7 +201,8 @@ RUN if [ "${BUILD_PYTORCH_OPS}" = "ON" ] || [ "${BUILD_TENSORFLOW_OPS}" = "ON" ] && make VERBOSE=1 -j$(nproc) \ && make install-pip-package -j$(nproc) \ && make install -j$(nproc) \ - && if [ "${PACKAGE}" = "ON" ]; then make package; fi + && if [ "${PACKAGE}" = "ON" ]; then make package; fi \ + && if [ "${PACKAGE}" = "VIEWER" ]; then make package-Open3DViewer-deb; fi # Compress ccache folder, move to / directory RUN ccache -s \ @@ -211,6 +212,7 @@ RUN ccache -s \ && cd ${CCACHE_DIR_PARENT} \ && tar -czf /${CCACHE_TAR_NAME}.tar.gz ${CCACHE_DIR_NAME} \ && if [ "${PACKAGE}" = "ON" ]; then mv /root/Open3D/build/package/open3d-devel*.tar.xz /; fi \ + && if [ "${PACKAGE}" = "VIEWER" ]; then mv /root/Open3D/build/package-Open3DViewer-deb/open3d-viewer-*-Linux.deb /; fi \ && ls -alh / RUN echo 
"Docker build done." diff --git a/docker/docker_build.sh b/docker/docker_build.sh index c30d6571788..a4e38cb84c3 100755 --- a/docker/docker_build.sh +++ b/docker/docker_build.sh @@ -249,8 +249,8 @@ ci_build() { popd docker run -v "${PWD}:/opt/mount" --rm "${DOCKER_TAG}" \ - bash -cx "cp /open3d*.tar* /opt/mount \ - && chown $(id -u):$(id -g) /opt/mount/open3d*.tar*" + bash -cx "cp /open3d* /opt/mount \ + && chown $(id -u):$(id -g) /opt/mount/open3d*" } 2-bionic_export_env() { @@ -354,7 +354,7 @@ cpu-static_export_env() { export BUILD_CUDA_MODULE=OFF export BUILD_TENSORFLOW_OPS=OFF export BUILD_PYTORCH_OPS=OFF - export PACKAGE=OFF + export PACKAGE=VIEWER export BUILD_SYCL_MODULE=OFF } diff --git a/docs/conf.in.py b/docs/conf.in.py index 0954849a09b..82a791e42b2 100644 --- a/docs/conf.in.py +++ b/docs/conf.in.py @@ -27,12 +27,10 @@ # import sys # sys.path.insert(0, os.path.abspath('.')) -import sys import os import re import subprocess -from pathlib import Path -import shutil +import sys def get_git_short_hash(): @@ -73,7 +71,7 @@ def get_git_short_hash(): 'm2r2', ] -if os.environ["skip_notebooks"] == "true": +if os.environ.get("skip_notebooks", "false") == "true": print("Skipping Jupyter notebooks") extensions = [e for e in extensions if e != "nbsphinx"] @@ -96,7 +94,7 @@ def get_git_short_hash(): # General information about the project. project = u"Open3D" -copyright = u"2018 - 2021, www.open3d.org" +copyright = u"2018 - 2023, www.open3d.org" author = u"www.open3d.org" # The version info for the project you're documenting, acts as replacement for @@ -120,7 +118,16 @@ def get_git_short_hash(): # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. 
# This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"] +exclude_patterns = [ + "_build", + "Thumbs.db", + ".DS_Store", + "**.ipynb_checkpoints", + "docker.in.rst", + "getting_started.in.rst", + "jupyter/*/*.ipynb", + "python_api_in/*.rst", +] # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" diff --git a/docs/contribute/contribution_recipes.rst b/docs/contribute/contribution_recipes.rst index b3ff00c4bfb..03102849214 100644 --- a/docs/contribute/contribution_recipes.rst +++ b/docs/contribute/contribution_recipes.rst @@ -264,7 +264,7 @@ Case 4: When adding a Python tutorial .. note:: When you commit a ipynb notebook file make sure to remove the output cells to keep the commit sizes small. - You can use the script ``docs/jupyter/jupyter_strip_output.sh`` for + You can use the script ``docs/jupyter/jupyter_strip_output.py`` for stripping the output cells of all tutorials. Dos diff --git a/docs/cpp_project.rst b/docs/cpp_project.rst index ef931912723..89bfd98b70f 100644 --- a/docs/cpp_project.rst +++ b/docs/cpp_project.rst @@ -10,7 +10,7 @@ We provide two example CMake projects to demonstrate how to use Open3D in your CMake projects. * `Find Pre-Installed Open3D Package in CMake `_ - This option can be used if you'd like Open3D build and install Open3D first, + This option can be used if you'd like to build and install Open3D first, then link your project to Open3D. * `Use Open3D as a CMake External Project `_ This option can be used if you'd like Open3D to build alongside with your diff --git a/docs/getting_started.in.rst b/docs/getting_started.in.rst index e3ff7529fa6..7bc974a87da 100644 --- a/docs/getting_started.in.rst +++ b/docs/getting_started.in.rst @@ -13,13 +13,10 @@ Open3D Python packages are distributed via Supported Python versions: -.. 
hlist:: - :columns: 4 - - * 3.7 - * 3.8 - * 3.9 - * 3.10 +* 3.7 +* 3.8 +* 3.9 +* 3.10 Supported operating systems: @@ -100,7 +97,7 @@ install the latest development version directly with pip: .. code-block:: bash - pip install --trusted-host www.open3d.org -f http://www.open3d.org/docs/latest/getting_started.html open3d + pip install -U --trusted-host www.open3d.org -f http://www.open3d.org/docs/latest/getting_started.html open3d .. note:: The development wheels for Linux are named according to PEP600. Please @@ -144,9 +141,7 @@ demonstrate the usage of Open3D Python interface. See ``examples/python`` for all Python examples. .. note:: Open3D's Python tutorial utilizes some external packages: ``numpy``, - ``matplotlib``, ``opencv-python``. OpenCV is only used for reconstruction - system. Please read ``util/install-deps-python.sh`` for installing these - packages. + ``matplotlib``, ``opencv-python``. .. _install_open3d_c++: diff --git a/docs/index.rst b/docs/index.rst index 391c0298e64..083cd7e23fa 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -26,43 +26,26 @@ Open3D: A Modern Library for 3D Data Processing compilation cpp_project builddocs - open3d_ml - arm docker + arm + open3d_ml .. toctree:: :maxdepth: 2 :caption: Tutorial + tutorial/core/index tutorial/geometry/index tutorial/t_geometry/index + tutorial/data/index + tutorial/visualization/index tutorial/pipelines/index tutorial/t_pipelines/index - tutorial/visualization/index - tutorial/core/index - tutorial/data/index tutorial/reconstruction_system/index tutorial/t_reconstruction_system/index tutorial/sensor/index tutorial/reference -.. toctree:: - :maxdepth: 1 - :caption: Contribute - - contribute/contribute - contribute/contribution_recipes - contribute/styleguide - -.. toctree:: - :maxdepth: 1 - :caption: C++ API - - cpp_api - -.. - Note: when adding new modules, please also update documented_modules.txt. - .. 
toctree:: :maxdepth: 1 :caption: Python API @@ -88,3 +71,20 @@ Open3D: A Modern Library for 3D Data Processing python_example/pipelines/index python_example/utility/index python_example/visualization/index + +.. toctree:: + :maxdepth: 1 + :caption: C++ API + + cpp_api + +.. toctree:: + :maxdepth: 1 + :caption: Contribute + + contribute/contribute + contribute/contribution_recipes + contribute/styleguide + +.. + Note: when adding new modules, please also update documented_modules.txt. diff --git a/docs/jupyter/core/hashmap.ipynb b/docs/jupyter/core/hashmap.ipynb index 31f57aace92..6d05755c4f9 100644 --- a/docs/jupyter/core/hashmap.ipynb +++ b/docs/jupyter/core/hashmap.ipynb @@ -88,19 +88,23 @@ "Next we show how to insert a batch of (key, value) pairs. You'll need to prepare two tensors:\n", "\n", "The `keys` tensor contains all keys. \n", + "\n", "- The `keys` tensor must be on the same device as the hash map. \n", "- The shape of the `keys` tensor is `key_elment_shape` with `N` prefixed to the front. \n", "\n", "For example \n", + " \n", "1. if `key_element_shape == ()`, `keys.shape == (N,)`; \n", "2. if `key_element_shape == (3,)`, `keys.shape == (N, 3).`; \n", "3. if `key_element_shape == (8, 8, 8)`, `keys.shape == (N, 8, 8, 8).`\n", " \n", "The `vals` tensor contains all values. \n", + " \n", "- The `vals` tensor must be on the same device as the hash map. \n", "- The shape of the `vals` tensor is `val_elment_shape` with `N` prefixed to the front. \n", "\n", "For example \n", + "\n", "1. if `val_elment_shape == ()`, `vals.shape == (N,)`; \n", "2. if `val_elment_shape == (3,)`, `vals.shape == (N, 3).`;\n", "3. 
if `val_elment_shape == (8, 8, 8)`, `vals.shape == (N, 8, 8, 8).`" diff --git a/docs/jupyter/t_pipelines/t_icp_registration.ipynb b/docs/jupyter/t_pipelines/t_icp_registration.ipynb index 44a21f39584..cb22159bc7f 100644 --- a/docs/jupyter/t_pipelines/t_icp_registration.ipynb +++ b/docs/jupyter/t_pipelines/t_icp_registration.ipynb @@ -405,7 +405,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "#### 1. Set Inputs and Parameters" + "### 1. Set Inputs and Parameters" ] }, { @@ -453,7 +453,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "#### 2. Get Registration Result from ICP" + "### 2. Get Registration Result from ICP" ] }, { @@ -602,7 +602,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "#### 1. Set Inputs and Parameters" + "### 1. Set Inputs and Parameters" ] }, { @@ -656,7 +656,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "#### 2. Get Registration Result from Multi-Scale ICP" + "### 2. Get Registration Result from Multi-Scale ICP" ] }, { @@ -884,7 +884,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "#### 1. Set Inputs and Parameters" + "### 1. Set Inputs and Parameters" ] }, { diff --git a/docs/jupyter/visualization/visualization.ipynb b/docs/jupyter/visualization/visualization.ipynb index 63dcf2ed863..9b7030599f6 100644 --- a/docs/jupyter/visualization/visualization.ipynb +++ b/docs/jupyter/visualization/visualization.ipynb @@ -129,7 +129,7 @@ "metadata": {}, "source": [ "## Geometry primitives\n", - "The code below generates a box, a sphere, and a cylinder using `create_box`, `create_sphere`, and `create_cylinder`. The box is painted in red, the sphere is painted in blue, and the cylinder is painted in green. Normals are computed for all meshes to support Phong shading (see [Visualize 3D mesh](mesh.ipynb#visualize-a-3d-mesh) and [Surface normal estimation](mesh.ipynb#surface-normal-estimation)). 
We can even create a coordinate axis using `create_coordinate_frame`, with its origin point set at (-2, -2, -2)." + "The code below generates a box, a sphere, and a cylinder using `create_box`, `create_sphere`, and `create_cylinder`. The box is painted in red, the sphere is painted in blue, and the cylinder is painted in green. Normals are computed for all meshes to support Phong shading (see [Visualize 3D mesh](../geometry/mesh.ipynb#visualize-a-3d-mesh) and [Surface normal estimation](../geometry/mesh.ipynb#surface-normal-estimation)). We can even create a coordinate axis using `create_coordinate_frame`, with its origin point set at (-2, -2, -2)." ] }, { diff --git a/docs/make_docs.py b/docs/make_docs.py index 64880410b03..8249f66b29e 100644 --- a/docs/make_docs.py +++ b/docs/make_docs.py @@ -11,20 +11,21 @@ # (3) make.py calls the actual `sphinx-build` import argparse -import subprocess -import sys import importlib -import os import inspect -import shutil +import multiprocessing +import os import re -from pathlib import Path -import nbformat -import nbconvert +import shutil import ssl -import certifi +import subprocess +import sys import urllib.request -import multiprocessing +from pathlib import Path + +import certifi +import nbconvert +import nbformat def _create_or_clear_dir(dir_path): @@ -66,10 +67,10 @@ def __init__(self, output_dir="python_api", input_dir="python_api_in"): self.output_dir = output_dir self.input_dir = input_dir self.module_names = PyAPIDocsBuilder._get_documented_module_names() - print("Generating *.rst Python API docs in directory: %s" % - self.output_dir) def generate_rst(self): + print(f"Generating *.rst Python API docs in directory: " + f"{self.output_dir}") _create_or_clear_dir(self.output_dir) for module_name in self.module_names: @@ -279,8 +280,6 @@ def __init__(self, input_dir, pwd, output_dir="python_example"): sys.path.append(os.path.join(pwd, "..", "python", "tools")) from cli import _get_all_examples_dict 
self.get_all_examples_dict = _get_all_examples_dict - print("Generating *.rst Python example docs in directory: %s" % - self.output_dir) def _get_examples_dict(self): examples_dict = self.get_all_examples_dict() @@ -305,11 +304,11 @@ def _generate_index(title, output_path): f.write(out_string) @staticmethod - def _add_example_to_docs(example, output_path): + def _add_example_to_docs(example: Path, output_path): shutil.copy(example, output_path) - out_string = (f"{example.stem}.py" - f"\n```````````````````````````````````````\n" - f"\n.. literalinclude:: {example.stem}.py" + out_string = (f"{example.name}" + f"\n{'`' * (len(example.name))}\n" + f"\n.. literalinclude:: {example.name}" f"\n :language: python" f"\n :linenos:" f"\n\n\n") @@ -318,6 +317,8 @@ def _add_example_to_docs(example, output_path): f.write(out_string) def generate_rst(self): + print(f"Generating *.rst Python example docs in directory: " + f"{self.output_dir}") _create_or_clear_dir(self.output_dir) examples_dict = self._get_examples_dict() @@ -390,6 +391,11 @@ def run(self): build_dir = os.path.join(self.html_output_dir, "html") nproc = multiprocessing.cpu_count() if self.parallel else 1 print(f"Building docs with {nproc} processes") + today = os.environ.get("SPHINX_TODAY", None) + if today: + cmd_args_today = ["-D", "today=" + today] + else: + cmd_args_today = [] if self.is_release: version_list = [ @@ -400,15 +406,10 @@ def run(self): print("Building docs for release:", release_version) cmd = [ - "sphinx-build", - "-j", - str(nproc), - "-b", - "html", - "-D", - "version=" + release_version, - "-D", - "release=" + release_version, + "sphinx-build", "-j", + str(nproc), "-b", "html", "-D", "version=" + release_version, + "-D", "release=" + release_version + ] + cmd_args_today + [ ".", build_dir, ] @@ -419,6 +420,7 @@ def run(self): str(nproc), "-b", "html", + ] + cmd_args_today + [ ".", build_dir, ] @@ -481,27 +483,26 @@ def run(self): # Jupyter notebooks os.environ["CI"] = "true" - # Copy and 
execute notebooks in the tutorial folder + # Copy from jupyter to the tutorial folder. nb_paths = [] - nb_direct_copy = [ - 'draw_plotly.ipynb', - 'hashmap.ipynb', - 'jupyter_visualization.ipynb', - 't_icp_registration.ipynb', - 'tensor.ipynb', - ] + nb_parent_src = Path(self.current_file_dir) / "jupyter" + nb_parent_dst = Path(self.current_file_dir) / "tutorial" example_dirs = [ - "geometry", "t_geometry", "core", "data", "pipelines", - "visualization", "t_pipelines" + name for name in os.listdir(nb_parent_src) + if os.path.isdir(nb_parent_src / name) ] + + print(f"Copying {nb_parent_src / 'open3d_tutorial.py'} " + f"to {nb_parent_dst / 'open3d_tutorial.py'}") + shutil.copy( + nb_parent_src / "open3d_tutorial.py", + nb_parent_dst / "open3d_tutorial.py", + ) + for example_dir in example_dirs: - in_dir = (Path(self.current_file_dir) / "jupyter" / example_dir) - out_dir = Path(self.current_file_dir) / "tutorial" / example_dir + in_dir = nb_parent_src / example_dir + out_dir = nb_parent_dst / example_dir out_dir.mkdir(parents=True, exist_ok=True) - shutil.copy( - in_dir.parent / "open3d_tutorial.py", - out_dir.parent / "open3d_tutorial.py", - ) if self.clean_notebooks: for nb_out_path in out_dir.glob("*.ipynb"): @@ -522,6 +523,15 @@ def run(self): shutil.copytree(in_dir / "images", out_dir / "images") # Execute Jupyter notebooks + # Files that should not be executed. + nb_direct_copy = [ + 'draw_plotly.ipynb', + 'hashmap.ipynb', + 'jupyter_visualization.ipynb', + 't_icp_registration.ipynb', + 'tensor.ipynb', + ] + for nb_path in nb_paths: if nb_path.name in nb_direct_copy: print("[Processing notebook {}, directly copied]".format( @@ -583,7 +593,7 @@ def run(self): action="store_true", default=False, help=("Whether to clean existing notebooks in docs/tutorial. 
" - "Notebooks are copied from examples/python to docs/tutorial."), + "Notebooks are copied from docs/jupyter to docs/tutorial."), ) parser.add_argument( "--execute_notebooks", diff --git a/docs/tutorial/geometry/index.rst b/docs/tutorial/geometry/index.rst index 2cade36c5ed..ea7292387c5 100644 --- a/docs/tutorial/geometry/index.rst +++ b/docs/tutorial/geometry/index.rst @@ -22,6 +22,7 @@ Geometry iss_keypoint_detector ray_casting distance_queries + uvmaps .. toctree:: :caption: Interface diff --git a/docs/tutorial/geometry/uvmaps.rst b/docs/tutorial/geometry/uvmaps.rst index 7fccce5008f..7d4c7aa7344 100644 --- a/docs/tutorial/geometry/uvmaps.rst +++ b/docs/tutorial/geometry/uvmaps.rst @@ -24,7 +24,8 @@ Quick Reference to default UV Maps for some primitive shapes provided by Open3D The examples below all assume the following code preamble: -.. code_block:: python +.. code-block:: python + import open3d as o3d import open3d.visualization.rendering as rendering @@ -45,7 +46,9 @@ Example Texture Map Box (map uv to each face = false) ************************************ -.. code_block:: python +.. code-block:: python + + box = o3d.geometry.TriangleMesh.create_box(create_uv_map=True) o3d.visualization.draw({'name': 'box', 'geometry': box, 'material': material}) @@ -62,7 +65,9 @@ Box (map uv to each face = false) Box (map uv to each face = true) ************************************** -.. code_block:: python +.. code-block:: python + + box = o3d.geometry.TriangleMesh.create_box(create_uv_map=True, map_texture_to_each_face=True) o3d.visualization.draw({'name': 'box', 'geometry': box, 'material': material}) @@ -80,7 +85,9 @@ Box (map uv to each face = true) Tetrahedral ************* -.. code_block:: python +.. code-block:: python + + tetra = o3d.geometry.TriangleMesh.create_tetrahedron(create_uv_map=True) o3d.visualization.draw({'name': 'tetrahedron', 'geometry': tetra, 'material': material}) @@ -98,7 +105,9 @@ Tetrahedral Octahedral *************** -.. 
code_block:: python +.. code-block:: python + + octo = o3d.geometry.TriangleMesh.create_octahedron(create_uv_map=True) o3d.visualization.draw({'name': 'octahedron', 'geometry': octo, 'material': material}) @@ -115,7 +124,9 @@ Octahedral Icosahedron ************** -.. code_block:: python +.. code-block:: python + + ico = o3d.geometry.TriangleMesh.create_icosahedron(create_uv_map=True) o3d.visualization.draw({'name': 'icosahedron', 'geometry': ico, 'material': material}) @@ -132,7 +143,9 @@ Icosahedron Cylinder ************* -.. code_block:: python +.. code-block:: python + + cylinder = o3d.geometry.TriangleMesh.create_cylinder(create_uv_map=True) o3d.visualization.draw({'name': 'cylinder', 'geometry': cylinder, 'material': material}) @@ -149,7 +162,9 @@ Cylinder Cone ******* -.. code_block:: python +.. code-block:: python + + cone = o3d.geometry.TriangleMesh.create_cone(create_uv_map=True) o3d.visualization.draw({'name': 'cone', 'geometry': cone, 'material': material}) @@ -166,7 +181,9 @@ Cone Sphere ******* -.. code_block:: python +.. code-block:: python + + sphere = o3d.geometry.TriangleMesh.create_sphere(create_uv_map=True) o3d.visualization.draw({'name': 'sphere', 'geometry': sphere, 'material': material}) diff --git a/docs/tutorial/reconstruction_system/integrate_scene.rst b/docs/tutorial/reconstruction_system/integrate_scene.rst index 4a56b188ab3..d480d782219 100644 --- a/docs/tutorial/reconstruction_system/integrate_scene.rst +++ b/docs/tutorial/reconstruction_system/integrate_scene.rst @@ -21,9 +21,10 @@ Integrate RGBD frames .. 
literalinclude:: ../../../examples/python/reconstruction_system/integrate_scene.py :language: python - :lineno-start: 38 - :lines: 27,40-72 + :pyobject: scalable_integrate_rgb_frames + :end-at: o3d.visualization.draw_geometries([mesh]) :linenos: + :lineno-match: This function first reads the alignment results from both :ref:`reconstruction_system_make_fragments` and diff --git a/docs/tutorial/reconstruction_system/make_fragments.rst b/docs/tutorial/reconstruction_system/make_fragments.rst index 2485bfad2b3..d3f53b0575f 100644 --- a/docs/tutorial/reconstruction_system/make_fragments.rst +++ b/docs/tutorial/reconstruction_system/make_fragments.rst @@ -25,9 +25,9 @@ Register RGBD image pairs .. literalinclude:: ../../../examples/python/reconstruction_system/make_fragments.py :language: python - :lineno-start: 46 - :lines: 27,47-76 + :pyobject: register_one_rgbd_pair :linenos: + :lineno-match: The function reads a pair of RGBD images and registers the ``source_rgbd_image`` to the ``target_rgbd_image``. The Open3D function ``compute_rgbd_odometry`` is @@ -45,9 +45,9 @@ Multiway registration .. literalinclude:: ../../../examples/python/reconstruction_system/make_fragments.py :language: python - :lineno-start: 76 - :lines: 27,77-123 + :pyobject: make_posegraph_for_fragment :linenos: + :lineno-match: This script uses the technique demonstrated in :ref:`/tutorial/pipelines/multiway_registration.ipynb`. The function @@ -61,9 +61,9 @@ function ``optimize_posegraph_for_fragment``. .. literalinclude:: ../../../examples/python/reconstruction_system/optimize_posegraph.py :language: python - :lineno-start: 51 - :lines: 27,52-63 + :pyobject: optimize_posegraph_for_fragment :linenos: + :lineno-match: This function calls ``global_optimization`` to estimate poses of the RGBD images. @@ -74,11 +74,11 @@ Make a fragment .. 
literalinclude:: ../../../examples/python/reconstruction_system/make_fragments.py :language: python - :lineno-start: 124 - :lines: 27,125-146 + :pyobject: integrate_rgb_frames_for_fragment :linenos: + :lineno-match: -Once the poses are estimates, :ref:`/tutorial/pipelines/rgbd_integration.ipynb` +Once the poses are estimated, :ref:`/tutorial/pipelines/rgbd_integration.ipynb` is used to reconstruct a colored fragment from each RGBD sequence. Batch processing @@ -86,11 +86,16 @@ Batch processing .. literalinclude:: ../../../examples/python/reconstruction_system/make_fragments.py :language: python - :lineno-start: 181 - :lines: 27,182-205 + :start-at: def process_single_fragment(fragment_id, color_files, depth_files, n_files, :linenos: + :lineno-match: + +The ``process_single_fragment`` function calls each individual function explained above. +The ``run`` function determines the number of fragments to generate based on the number +of images in the dataset and the configuration value ``n_frames_per_fragment``. +Subsequently, it invokes ``process_single_fragment`` for each of these fragments. +Furthermore, it leverages multiprocessing to speed up computation of all fragments. -The main function calls each individual function explained above. .. _reconstruction_system_make_fragments_results: diff --git a/docs/tutorial/reconstruction_system/refine_registration.rst b/docs/tutorial/reconstruction_system/refine_registration.rst index 2afcb24b6bc..e512255f671 100644 --- a/docs/tutorial/reconstruction_system/refine_registration.rst +++ b/docs/tutorial/reconstruction_system/refine_registration.rst @@ -20,9 +20,9 @@ Fine-grained registration .. literalinclude:: ../../../examples/python/reconstruction_system/refine_registration.py :language: python - :lineno-start: 63 - :lines: 27,64-136 :linenos: + :pyobject: multiscale_icp + :lineno-match: Two options are given for the fine-grained registration. 
The ``color`` option is recommended since it uses color information to prevent drift. See [Park2017]_ @@ -33,9 +33,9 @@ Multiway registration .. literalinclude:: ../../../examples/python/reconstruction_system/refine_registration.py :language: python - :lineno-start: 40 - :lines: 27,41-63 :linenos: + :pyobject: update_posegraph_for_scene + :lineno-match: This script uses the technique demonstrated in :ref:`/tutorial/pipelines/multiway_registration.ipynb`. Function ``update_posegraph_for_scene`` builds a pose graph for multiway registration of all fragments. Each graph node represents a fragment and its pose which transforms the geometry to the global space. @@ -44,21 +44,20 @@ for multiway registration. .. literalinclude:: ../../../examples/python/reconstruction_system/optimize_posegraph.py :language: python - :lineno-start: 63 - :lines: 27,64-73 :linenos: + :pyobject: optimize_posegraph_for_scene + :lineno-match: Main registration loop `````````````````````````````````````` -The function ``make_posegraph_for_refined_scene`` below calls all the functions - introduced above. +The function ``make_posegraph_for_refined_scene`` below calls all the functions introduced above. .. literalinclude:: ../../../examples/python/reconstruction_system/refine_registration.py :language: python - :lineno-start: 173 - :lines: 27,174-223 :linenos: + :pyobject: make_posegraph_for_refined_scene + :lineno-match: The main workflow is: pairwise local refinement -> multiway registration. diff --git a/docs/tutorial/reconstruction_system/register_fragments.rst b/docs/tutorial/reconstruction_system/register_fragments.rst index 7e3486589a8..b86adc62fda 100644 --- a/docs/tutorial/reconstruction_system/register_fragments.rst +++ b/docs/tutorial/reconstruction_system/register_fragments.rst @@ -20,9 +20,9 @@ Preprocess point cloud .. 
literalinclude:: ../../../examples/python/reconstruction_system/register_fragments.py :language: python - :lineno-start: 41 - :lines: 27,42-54 + :pyobject: preprocess_point_cloud :linenos: + :lineno-match: This function downsamples a point cloud to make it sparser and regularly distributed. Normals and FPFH feature are precomputed. See @@ -36,9 +36,9 @@ Compute initial registration .. literalinclude:: ../../../examples/python/reconstruction_system/register_fragments.py :language: python - :lineno-start: 85 - :lines: 27,86-114 + :pyobject: compute_initial_registration :linenos: + :lineno-match: This function computes a rough alignment between two fragments. If the fragments are neighboring fragments, the rough alignment is determined by an aggregating @@ -53,9 +53,9 @@ Pairwise global registration .. literalinclude:: ../../../examples/python/reconstruction_system/register_fragments.py :language: python - :lineno-start: 54 - :lines: 27,55-85 + :pyobject: register_point_cloud_fpfh :linenos: + :lineno-match: This function uses :ref:`/tutorial/pipelines/global_registration.ipynb#RANSAC` or :ref:`/tutorial/pipelines/global_registration.ipynb#fast-global-registration` for pairwise global registration. @@ -66,9 +66,9 @@ Multiway registration .. literalinclude:: ../../../examples/python/reconstruction_system/register_fragments.py :language: python - :lineno-start: 114 - :lines: 27,115-137 + :pyobject: update_posegraph_for_scene :linenos: + :lineno-match: This script uses the technique demonstrated in :ref:`/tutorial/pipelines/multiway_registration.ipynb`. The function @@ -81,9 +81,9 @@ called for multiway registration. .. literalinclude:: ../../../examples/python/reconstruction_system/optimize_posegraph.py :language: python - :lineno-start: 63 - :lines: 27,64-73 + :pyobject: optimize_posegraph_for_scene :linenos: + :lineno-match: Main registration loop `````````````````````````````````````` @@ -94,9 +94,9 @@ multiway registration. .. 
literalinclude:: ../../../examples/python/reconstruction_system/register_fragments.py :language: python - :lineno-start: 167 - :lines: 27,168-210 + :pyobject: make_posegraph_for_scene :linenos: + :lineno-match: Results `````````````````````````````````````` diff --git a/docs/tutorial/reconstruction_system/system_overview.rst b/docs/tutorial/reconstruction_system/system_overview.rst index 2cae7ed387e..0394d629c1e 100644 --- a/docs/tutorial/reconstruction_system/system_overview.rst +++ b/docs/tutorial/reconstruction_system/system_overview.rst @@ -48,7 +48,7 @@ Getting the example code .. code-block:: sh - # Activate your conda enviroment, where you have installed open3d pip package. + # Activate your conda environment, where you have installed open3d pip package. # Clone the Open3D github repository and go to the example. cd examples/python/reconstruction_system/ @@ -69,7 +69,7 @@ Running the example with default dataset. python run_system.py --make --register --refine --integrate Changing the default dataset. -One may change the default dataset to other avaialble datasets. +One may change the default dataset to other available datasets. Currently the following datasets are available: 1. Lounge (keyword: ``lounge``) (Default) @@ -81,7 +81,7 @@ Currently the following datasets are available: .. code-block:: sh - # Using jack_jack as the default dataset. + # Using bedroom as the default dataset. python run_system.py --default_dataset 'bedroom' --make --register --refine --integrate Running the example with custom dataset using config file. diff --git a/docs/tutorial/t_geometry/index.rst b/docs/tutorial/t_geometry/index.rst index e1f1e9a4bc8..7c24fb565cb 100644 --- a/docs/tutorial/t_geometry/index.rst +++ b/docs/tutorial/t_geometry/index.rst @@ -1,5 +1,5 @@ Geometry (Tensor) -======== +================= .. 
toctree:: :caption: Basics diff --git a/docs/tutorial/t_pipelines/index.rst b/docs/tutorial/t_pipelines/index.rst index b687d2ce428..b10fa1b0e09 100644 --- a/docs/tutorial/t_pipelines/index.rst +++ b/docs/tutorial/t_pipelines/index.rst @@ -1,7 +1,7 @@ .. _t_pipelines: Pipelines (Tensor) -========= +================== .. toctree:: diff --git a/docs/tutorial/t_reconstruction_system/customized_integration.rst b/docs/tutorial/t_reconstruction_system/customized_integration.rst index 79625fd5ec2..78e252fa8d5 100644 --- a/docs/tutorial/t_reconstruction_system/customized_integration.rst +++ b/docs/tutorial/t_reconstruction_system/customized_integration.rst @@ -10,8 +10,8 @@ The frustum **block** selection remains the same, but then we manually activate .. literalinclude:: ../../../examples/python/t_reconstruction_system/integrate_custom.py :language: python - :lineno-start: 78 - :lines: 27,79-87 + :lineno-start: 60 + :lines: 8,61-68 Voxel Indices `````````````` @@ -19,8 +19,8 @@ We can then unroll **voxel** indices in these **blocks** into a flattened array, .. literalinclude:: ../../../examples/python/t_reconstruction_system/integrate_custom.py :language: python - :lineno-start: 91 - :lines: 27,92-93 + :lineno-start: 72 + :lines: 8,73-74 Up to now we have finished preparation. Then we can perform customized geometry transformation in the Tensor interface, with the same fashion as we conduct in numpy or pytorch. @@ -30,8 +30,8 @@ We first transform the voxel coordinates to the frame's coordinate system, proje .. literalinclude:: ../../../examples/python/t_reconstruction_system/integrate_custom.py :language: python - :lineno-start: 99 - :lines: 27,100-118 + :lineno-start: 80 + :lines: 8,81-98 Customized integration ```````````````````````` @@ -43,7 +43,7 @@ With the data association, we are able to conduct integration. In this example, .. 
literalinclude:: ../../../examples/python/t_reconstruction_system/integrate_custom.py :language: python - :lineno-start: 118 - :lines: 27,119-128,133-151 + :lineno-start: 98 + :lines: 8,99-108,113-132 You may follow the example and adapt it to your customized properties. Open3D supports conversion from and to PyTorch tensors without memory any copy, see :ref:`/tutorial/core/tensor.ipynb#PyTorch-I/O-with-DLPack-memory-map`. This can be use to leverage PyTorch's capabilities such as automatic differentiation and other operators. diff --git a/docs/tutorial/t_reconstruction_system/dense_slam.rst b/docs/tutorial/t_reconstruction_system/dense_slam.rst index b11a76ed374..84b2c382fca 100644 --- a/docs/tutorial/t_reconstruction_system/dense_slam.rst +++ b/docs/tutorial/t_reconstruction_system/dense_slam.rst @@ -13,8 +13,8 @@ In a SLAM system, we maintain a ``model`` built upon a :ref:`voxel_block_grid`, .. literalinclude:: ../../../examples/python/t_reconstruction_system/dense_slam.py :language: python - :lineno-start: 45 - :lines: 27,46-54 + :lineno-start: 26 + :lines: 8,27-35 Frame-to-model tracking ```````````````````````` @@ -22,8 +22,8 @@ The frame-to-model tracking runs in a loop: .. literalinclude:: ../../../examples/python/t_reconstruction_system/dense_slam.py :language: python - :lineno-start: 57 - :lines: 27,58-78 + :lineno-start: 38 + :lines: 8,39,42-61 where we iteratively update the synthesized frame via ray-casting from the model, and perform the tensor version of :ref:`/tutorial/pipelines/rgbd_odometry.ipynb` between the input frame and the synthesized frame. @@ -44,7 +44,7 @@ If all above have been correctly set but still no luck, please file an issue. **Q**: So WHY did my tracking fail? -**A**: For the front end, we are using direct RGB-D odometry. Comparing to feature-based odometry, RGB-D odometry is more accurate when it completes successfully but is less robust. We will add support for feature-based tracking in the future. 
For the backend, unlike our offline reconstruction system, we do not detect loop closures, and do not perform pose graph optimization or bundle adjustment at the moment. +**A**: For the front end, we are using direct RGB-D odometry. Compared to feature-based odometry, RGB-D odometry is more accurate when it completes successfully but is less robust. We will add support for feature-based tracking in the future. For the backend, unlike our offline reconstruction system, we do not detect loop closures, and do not perform pose graph optimization or bundle adjustment at the moment. **Q**: Why don't you implement loop closure or relocalization? diff --git a/docs/tutorial/t_reconstruction_system/index.rst b/docs/tutorial/t_reconstruction_system/index.rst index 31322f613c0..57fb76950b1 100644 --- a/docs/tutorial/t_reconstruction_system/index.rst +++ b/docs/tutorial/t_reconstruction_system/index.rst @@ -25,7 +25,7 @@ Getting the example code .. code-block:: sh - # Activate your conda enviroment, where you have installed open3d pip package. + # Activate your conda environment, where you have installed open3d pip package. # Clone the Open3D github repository and go to the example. cd examples/python/t_reconstruction_system/ @@ -40,7 +40,7 @@ Running the example with default dataset. # which is ``lounge`` dataset from stanford. python dense_slam_gui.py -It is recommended to use CUDA if avaialble. +It is recommended to use CUDA if available. .. code-block:: sh @@ -49,7 +49,7 @@ It is recommended to use CUDA if avaialble. python dense_slam_gui.py --device 'cuda:0' Changing the default dataset. -One may change the default dataset to other avaialble datasets. +One may change the default dataset to other available datasets. Currently the following datasets are available: 1. 
Lounge (keyword: ``lounge``) (Default) @@ -74,7 +74,7 @@ Example config file for online reconstruction system has been provided in ``examples/python/t_reconstruction_system/default_config.yml``, which looks like the following: .. literalinclude:: ../../../examples/python/t_reconstruction_system/default_config.yml - :language: yml + :language: yaml :lineno-start: 1 :lines: 1- :linenos: @@ -87,7 +87,7 @@ images using the Intel RealSense camera. For more details, please see :ref:`capture_your_own_dataset`. Getting started with online reconstruction system -`````````````````````````````````````` +````````````````````````````````````````````````` .. toctree:: diff --git a/docs/tutorial/t_reconstruction_system/integration.rst b/docs/tutorial/t_reconstruction_system/integration.rst index e62d9bf3cfc..417f723205d 100644 --- a/docs/tutorial/t_reconstruction_system/integration.rst +++ b/docs/tutorial/t_reconstruction_system/integration.rst @@ -12,8 +12,8 @@ In the activation step, we first locate blocks that contain points unprojected f .. literalinclude:: ../../../examples/python/t_reconstruction_system/integrate.py :language: python - :lineno-start: 82 - :lines: 27,83-85 + :lineno-start: 51 + :lines: 8,52-54 Integration ```````````` @@ -23,24 +23,24 @@ We may use optimized functions, along with raw depth images with calibration par .. literalinclude:: ../../../examples/python/t_reconstruction_system/integrate.py :language: python - :lineno-start: 86 - :lines: 27,87-93 + :lineno-start: 55 + :lines: 8,56-63 Currently, to use our optimized function, we assume the below combinations of data types, in the order of ``depth image``, ``color image``, ``tsdf in voxel grid``, ``weight in voxel grid``, ``color in voxel grid`` in CPU .. literalinclude:: ../../../cpp/open3d/t/geometry/kernel/VoxelBlockGridCPU.cpp :language: cpp - :lineno-start: 229 - :lines: 230-236 + :lineno-start: 212 + :lines: 212-218 and CUDA .. 
literalinclude:: ../../../cpp/open3d/t/geometry/kernel/VoxelBlockGridCUDA.cu :language: cpp - :lineno-start: 255 - :lines: 256-262 + :lineno-start: 238 + :lines: 238-244 -For more generalized functionalities, you may extend the macros and/or the kernel functions and compile Open3D from scratch achieve the maximal performance (~100Hz on a GTX 1070), or follow :ref:`customized_integration` and implement a fast prototype (~25Hz). +For more generalized functionalities, you may extend the macros and/or the kernel functions and compile Open3D from scratch to achieve the maximal performance (~100Hz on a GTX 1070), or follow :ref:`customized_integration` and implement a fast prototype (~25Hz). Surface extraction `````````````````` @@ -48,10 +48,10 @@ You may use the provided APIs to extract surface points. .. literalinclude:: ../../../examples/python/t_reconstruction_system/integrate.py :language: python - :lineno-start: 135 - :lines: 27,136-140 + :lineno-start: 105 + :lines: 8, 106-110 -Note ``extract_triangle_mesh`` applies marching cubes and generate mesh. ``extract_point_cloud`` uses the similar algorithm, but skips the triangle face generation step. +Note ``extract_triangle_mesh`` applies marching cubes and generates mesh. ``extract_point_cloud`` uses a similar algorithm, but skips the triangle face generation step. Save and load `````````````` @@ -60,7 +60,7 @@ The voxel block grids can be saved to and loaded from `.npz` files that are acce .. 
literalinclude:: ../../../examples/python/t_reconstruction_system/integrate.py :language: python :lineno-start: 47 - :lines: 27,48,98 + :lines: 8,48,98 The ``.npz`` file of the aforementioned voxel block grid contains the following entries: diff --git a/docs/tutorial/t_reconstruction_system/ray_casting.rst b/docs/tutorial/t_reconstruction_system/ray_casting.rst index ca74ee98b02..3809a7941a7 100644 --- a/docs/tutorial/t_reconstruction_system/ray_casting.rst +++ b/docs/tutorial/t_reconstruction_system/ray_casting.rst @@ -12,19 +12,19 @@ We provide optimized conventional rendering, and basic support for customized re Conventional rendering `````````````````````` -From a reconstructed voxel block grid from :ref:`optimized_integration`, we can efficiently render the scene given the input depth as a rough range estimate. +From a reconstructed voxel block grid :code:`vbg` from :ref:`optimized_integration`, we can efficiently render the scene given the input depth as a rough range estimate. .. literalinclude:: ../../../examples/python/t_reconstruction_system/ray_casting.py :language: python - :lineno-start: 76 - :lines: 27,77-92 + :lineno-start: 68 + :lines: 8,69-82 The results could be directly obtained and visualized by .. literalinclude:: ../../../examples/python/t_reconstruction_system/ray_casting.py :language: python - :lineno-start: 90 - :lines: 27,91,93-95,105-112 + :lineno-start: 83 + :lines: 8,84,86-88,98-105 Customized rendering ````````````````````` @@ -32,7 +32,7 @@ In customized rendering, we manually perform trilinear-interpolation by accessin .. literalinclude:: ../../../examples/python/t_reconstruction_system/ray_casting.py :language: python - :lineno-start: 97 - :lines: 27,98-103,114-115 + :lineno-start: 90 + :lines: 8,91-96,107-108 Since the output is rendered via indices, the rendering process could be rewritten in differentiable engines like PyTorch seamlessly via :ref:`/tutorial/core/tensor.ipynb#PyTorch-I/O-with-DLPack-memory-map`. 
diff --git a/docs/tutorial/t_reconstruction_system/voxel_block_grid.rst b/docs/tutorial/t_reconstruction_system/voxel_block_grid.rst index 0836bd471f0..cf8eed1f2fa 100644 --- a/docs/tutorial/t_reconstruction_system/voxel_block_grid.rst +++ b/docs/tutorial/t_reconstruction_system/voxel_block_grid.rst @@ -14,21 +14,21 @@ A voxel block grid can be constructed by: .. literalinclude:: ../../../examples/python/t_reconstruction_system/integrate.py :language: python - :lineno-start: 56 - :lines: 27,57-74 + :lineno-start: 27 + :lines: 8,28-45 -In this example, the multi-value hash map has key shape shape ``(3,)`` and dtype ``int32``. The hash map values are organized as a structure of array (SoA). The hash map values include: +In this example, the multi-value hash map has key of shape ``(3,)`` and dtype ``float32``. The hash map values are organized as a structure of array (SoA). The hash map values include: By default it contains: -- Truncated Signed Distance Function (TSDF) of element shape ``(8, 8, 8, 1)`` -- Weight of element shape ``(8, 8, 8, 1)`` -- (Optionally) RGB color of element shape ``(8, 8, 8, 3)`` +- Truncated Signed Distance Function (TSDF) of element shape ``(16, 16, 16, 1)`` +- Weight of element shape ``(16, 16, 16, 1)`` +- (Optionally) RGB color of element shape ``(16, 16, 16, 3)`` -where ``8`` stands for the local voxel block grid resolution. +where ``16`` stands for the local voxel block grid resolution. By convention, we use ``3.0 / 512`` as the voxel resolution. This spatial resolution is equivalent to representing a ``3m x 3m x 3m`` (m = meter) room with a dense ``512 x 512 x 512`` voxel grid. -The voxel block grid is optimized to run fast on GPU. On CPU the it runs slower. Empirically, we reserve ``100000`` such blocks for a living room-scale scene to avoid frequent rehashing. +The voxel block grid is optimized to run fast on GPU. On CPU, it runs slower. 
Empirically, we reserve ``50000`` such blocks for a living room-scale scene to avoid frequent rehashing. -You can always customize your own properties, e.g., ``intensity`` of element shape ``(8, 8, 8, 1)`` in ``float32``, ``label`` of element shape ``(8, 8, 8, 1)`` in ``int32``, etc. To know how to process the data, please refer to :ref:`customized_integration`. +You can always customize your own properties, e.g., ``intensity`` of element shape ``(16, 16, 16, 1)`` in ``float32``, ``label`` of element shape ``(16, 16, 16, 1)`` in ``int32``, etc. To know how to process the data, please refer to :ref:`customized_integration`. diff --git a/docs/tutorial/visualization/cpu_rendering.rst b/docs/tutorial/visualization/cpu_rendering.rst index b748ae2f4a8..f504382469e 100644 --- a/docs/tutorial/visualization/cpu_rendering.rst +++ b/docs/tutorial/visualization/cpu_rendering.rst @@ -46,7 +46,7 @@ Here are the different ways to do that: # In Python code import os - os.environ['EGL_PLATFORM'] = 'surfaceless' # Ubunu 20.04+ + os.environ['EGL_PLATFORM'] = 'surfaceless' # Ubuntu 20.04+ os.environ['OPEN3D_CPU_RENDERING'] = 'true' # Ubuntu 18.04 import open3d as o3d @@ -91,7 +91,24 @@ The method for enabling interactive CPU rendering depends on your system: them installed is not sufficient. You can check the drivers in use with the ``glxinfo`` command. -2. **You use Nvidia or AMD drivers or old Mesa drivers (< v20.2).** We provide +2. **You use Nvidia or AMD drivers, but your OS comes with recent Mesa drivers (>= v20.2).** + Install Mesa drivers if they are not installed in your system (e.g. `sudo apt install libglx0-mesa` + in Ubuntu). Preload the Mesa driver library before running any Open3D application requiring CPU rendering. + For example: + + .. code:: bash + + export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libGLX_mesa.so.0 + Open3D + + Or with Python code: + + .. 
code:: bash + + export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libGLX_mesa.so.0 + python examples/python/visualization/draw.py + +3. **Your OS has old Mesa drivers (< v20.2).** We provide the Mesa software rendering library binary for download `here `__. This is automatically downloaded to @@ -115,5 +132,4 @@ The method for enabling interactive CPU rendering depends on your system: .. code:: bash - LD_PRELOAD=$HOME/.local/lib/libGL.so.1.5.0 python - examples/python/visualization/draw.py + LD_PRELOAD=$HOME/.local/lib/libGL.so.1.5.0 python examples/python/visualization/draw.py diff --git a/docs/tutorial/visualization/customized_visualization.rst b/docs/tutorial/visualization/customized_visualization.rst index c410571e03e..b437fbb932b 100644 --- a/docs/tutorial/visualization/customized_visualization.rst +++ b/docs/tutorial/visualization/customized_visualization.rst @@ -12,8 +12,8 @@ Mimic draw_geometries() with Visualizer class .. literalinclude:: ../../../examples/python/visualization/customized_visualization.py :language: python - :lineno-start: 37 - :lines: 37-44 + :lineno-start: 39 + :lines: 39-47 :linenos: This function produces exactly the same functionality as the convenience function ``draw_geometries``. @@ -25,8 +25,8 @@ Class ``Visualizer`` has a couple of variables such as a ``ViewControl`` and a ` .. literalinclude:: ../../../examples/python/visualization/customized_visualization.py :language: python - :lineno-start: 70 - :lines: 70-76 + :lineno-start: 50 + :lines: 50-56 :linenos: Outputs: @@ -40,8 +40,8 @@ To change field of view of the camera, it is first necessary to get an instance .. literalinclude:: ../../../examples/python/visualization/customized_visualization.py :language: python - :lineno-start: 47 - :lines: 47-56 + :lineno-start: 27 + :lines: 27-36 :linenos: The field of view (FoV) can be set to a degree in the range [5,90]. Note that ``change_field_of_view`` adds the specified FoV to the current FoV. 
By default, the visualizer has an FoV of 60 degrees. Calling the following code @@ -71,8 +71,8 @@ Callback functions .. literalinclude:: ../../../examples/python/visualization/customized_visualization.py :language: python - :lineno-start: 59 - :lines: 59-67 + :lineno-start: 39 + :lines: 39-47 :linenos: Function ``draw_geometries_with_animation_callback`` registers a Python callback function ``rotate_view`` as the idle function of the main loop. It rotates the view along the x-axis whenever the visualizer is idle. This defines an animation behavior. @@ -82,8 +82,8 @@ Function ``draw_geometries_with_animation_callback`` registers a Python callback .. literalinclude:: ../../../examples/python/visualization/customized_visualization.py :language: python - :lineno-start: 79 - :lines: 79-108 + :lineno-start: 59 + :lines: 59-87 :linenos: Callback functions can also be registered upon key press event. This script registered four keys. For example, pressing :kbd:`k` changes the background color to black. @@ -96,8 +96,8 @@ Capture images in a customized animation .. literalinclude:: ../../../examples/python/visualization/customized_visualization.py :language: python - :lineno-start: 109 - :lines: 111-162 + :lineno-start: 90 + :lines: 90-141 :linenos: This function reads a camera trajectory, then defines an animation function ``move_forward`` to travel through the camera trajectory. In this animation function, both color image and depth image are captured using ``Visualizer.capture_depth_float_buffer`` and ``Visualizer.capture_screen_float_buffer`` respectively. The images are saved as png files. 
diff --git a/docs/tutorial/visualization/interactive_visualization.rst b/docs/tutorial/visualization/interactive_visualization.rst index d886466e568..8dfb03edebb 100644 --- a/docs/tutorial/visualization/interactive_visualization.rst +++ b/docs/tutorial/visualization/interactive_visualization.rst @@ -3,13 +3,16 @@ Interactive visualization ------------------------------------- -This tutorial introduces user interaction features of the visualizer window. +This tutorial introduces user interaction features of the visualizer window provided by:- + +#. ``open3d.visualization.draw_geometries_with_editing`` +#. ``open3d.visualization.VisualizerWithEditing`` .. literalinclude:: ../../../examples/python/visualization/interactive_visualization.py :language: python - :lineno-start: 27 - :lines: 27- + :start-at: # examples/python/visualization/interactive_visualization.py :linenos: + :lineno-match: This script executes two applications of user interaction: ``demo_crop_geometry`` and ``demo_manual_registration``. @@ -20,9 +23,9 @@ Crop geometry .. literalinclude:: ../../../examples/python/visualization/interactive_visualization.py :language: python - :lineno-start: 37 - :lines: 37-51 + :pyobject: demo_crop_geometry :linenos: + :lineno-match: This function simply reads a point cloud and calls ``draw_geometries_with_editing``. This function provides vertex selection and cropping. @@ -58,27 +61,30 @@ To finish selection mode, press ``F`` to switch to freeview mode. Manual registration ````````````````````````````````````````````` -Select correspondences -===================================== - The following script registers two point clouds using point-to-point ICP. It gets initial alignment via user interaction. +Prepare data +===================================== + .. 
literalinclude:: ../../../examples/python/visualization/interactive_visualization.py :language: python - :lineno-start: 61 - :lines: 61-76 + :pyobject: prepare_data :linenos: + :lineno-match: -The script reads two point clouds, and visualizes the point clouds before alignment. +This function reads two point clouds, and visualizes the point clouds before performing manual alignment. .. image:: ../../_static/visualization/interactive_visualization/manual_icp_initial.png :width: 400px +Select correspondences +===================================== + .. literalinclude:: ../../../examples/python/visualization/interactive_visualization.py :language: python - :lineno-start: 52 - :lines: 52-60 + :pyobject: pick_points :linenos: + :lineno-match: The function ``pick_points(pcd)`` makes an instance of ``VisualizerWithEditing``. To mimic ``draw_geometries``, it creates windows, adds the geometry, visualizes the geometry, and then terminates. A novel interface function from ``VisualizerWithEditing`` is ``get_picked_points()`` that returns the indices of user-picked vertices. @@ -115,9 +121,9 @@ Registration using user correspondences .. literalinclude:: ../../../examples/python/visualization/interactive_visualization.py :language: python - :lineno-start: 77 - :lines: 77-110 + :pyobject: register_via_correspondences :linenos: + :lineno-match: The later part of the demo computes an initial transformation based on the user-provided correspondences. This script builds pairs of correspondences using ``Vector2iVector(corr)``. It utilizes ``TransformationEstimationPointToPoint.compute_transformation`` to compute the initial transformation from the correspondences. The initial transformation is refined using ``registration_icp``. 
diff --git a/docs/tutorial/visualization/non_blocking_visualization.rst b/docs/tutorial/visualization/non_blocking_visualization.rst index 2a3397f1578..9d3e932646a 100644 --- a/docs/tutorial/visualization/non_blocking_visualization.rst +++ b/docs/tutorial/visualization/non_blocking_visualization.rst @@ -28,6 +28,7 @@ This rendering loop can be readily customized. For example, a custom loop can be vis = Visualizer() vis.create_window() + vis.add_geometry(geometry) for i in range(icp_iteration): # do ICP single iteration # transform geometry using ICP @@ -39,9 +40,9 @@ The full script implementing this idea is displayed below. .. literalinclude:: ../../../examples/python/visualization/non_blocking_visualization.py :language: python - :lineno-start: 27 - :lines: 27- + :start-at: # examples/python/visualization/non_blocking_visualization.py :linenos: + :lineno-match: The following sections explain this script. @@ -50,9 +51,9 @@ Prepare example data .. literalinclude:: ../../../examples/python/visualization/non_blocking_visualization.py :language: python - :lineno-start: 35 - :lines: 35-46 + :pyobject: prepare_data :linenos: + :lineno-match: This part reads two point clouds and downsamples them. The source point cloud is intentionally transformed for the misalignment. Both point clouds are flipped for better visualization. @@ -61,9 +62,10 @@ Initialize Visualizer class .. literalinclude:: ../../../examples/python/visualization/non_blocking_visualization.py :language: python - :lineno-start: 47 - :lines: 47-59 + :start-at: def demo_non_blocking_visualization(): + :end-at: save_image = False :linenos: + :lineno-match: These lines make an instance of the visualizer class, open a visualizer window, and add two geometries to the visualizer. @@ -72,9 +74,10 @@ Transform geometry and visualize it .. 
literalinclude:: ../../../examples/python/visualization/non_blocking_visualization.py :language: python - :lineno-start: 59 - :lines: 59-72 + :start-at: for i in range(icp_iteration): + :end-at: vis.destroy_window() :linenos: + :lineno-match: This script calls ``registration_icp`` for every iteration. Note that it explicitly forces only one ICP iteration via ``ICPConvergenceCriteria(max_iteration = 1)``. This is a trick to retrieve a slight pose update from a single ICP iteration. After ICP, source geometry is transformed accordingly. diff --git a/examples/python/io/realsense_io.py b/examples/python/io/realsense_io.py index 3e64a76dd1f..fa386554034 100644 --- a/examples/python/io/realsense_io.py +++ b/examples/python/io/realsense_io.py @@ -4,20 +4,159 @@ # Copyright (c) 2018-2023 www.open3d.org # SPDX-License-Identifier: MIT # ---------------------------------------------------------------------------- -"""Demonstrate RealSense camera discovery and frame capture""" +"""Demonstrate RealSense camera discovery and frame capture. An RS bag file is +used if a RealSense camera is not available. Captured frames are +displayed as a live point cloud. Also frames are saved to ./capture/{color,depth} +folders. +Usage: + + - Display live point cloud from RS camera: + python realsense_io.py + + - Display live point cloud from RS bag file: + python realsense_io.py rgbd.bag + + If no device is detected and no bag file is supplied, uses the Open3D + example JackJackL515Bag dataset. 
+""" + +import sys +import os +from concurrent.futures import ThreadPoolExecutor import open3d as o3d +import open3d.t.io as io3d +from open3d.t.geometry import PointCloud +import open3d.visualization.gui as gui +import open3d.visualization.rendering as rendering -if __name__ == "__main__": +DEPTH_MAX = 3 + + +def point_cloud_video(executor, rgbd_frame, mdata, timestamp, o3dvis): + """Update window to display the next point cloud frame.""" + app = gui.Application.instance + update_flag = rendering.Scene.UPDATE_POINTS_FLAG | rendering.Scene.UPDATE_COLORS_FLAG - o3d.t.io.RealSenseSensor.list_devices() - rscam = o3d.t.io.RealSenseSensor() + executor.submit(io3d.write_image, + f"capture/color/{point_cloud_video.fid:05d}.jpg", + rgbd_frame.color) + executor.submit(io3d.write_image, + f"capture/depth/{point_cloud_video.fid:05d}.png", + rgbd_frame.depth) + print(f"Frame: {point_cloud_video.fid}, timestamp: {timestamp * 1e-6:.3f}s", + end="\r") + if point_cloud_video.fid == 0: + # Start with a dummy max sized point cloud to allocate GPU buffers + # for update_geometry() + max_pts = rgbd_frame.color.rows * rgbd_frame.color.columns + pcd = PointCloud(o3d.core.Tensor.zeros((max_pts, 3))) + pcd.paint_uniform_color([1., 1., 1.]) + app.post_to_main_thread(o3dvis, + lambda: o3dvis.add_geometry("Point Cloud", pcd)) + pcd = PointCloud.create_from_rgbd_image( + rgbd_frame, + # Intrinsic matrix: Tensor([[fx, 0., cx], [0., fy, cy], [0., 0., 1.]]) + mdata.intrinsics.intrinsic_matrix, + depth_scale=mdata.depth_scale, + depth_max=DEPTH_MAX) + # GUI operations MUST run in the main thread. + app.post_to_main_thread( + o3dvis, lambda: o3dvis.update_geometry("Point Cloud", pcd, update_flag)) + point_cloud_video.fid += 1 + + +point_cloud_video.fid = 0 + + +def pcd_video_from_camera(executor, o3dvis): + """Show point cloud video coming from a RealSense camera. Save frames to + disk in capture/{color,depth} folders. 
+ """ + rscam = io3d.RealSenseSensor() rscam.start_capture() - print(rscam.get_metadata()) - for fid in range(5): - rgbd_frame = rscam.capture_frame() - o3d.io.write_image(f"color{fid:05d}.jpg", rgbd_frame.color.to_legacy()) - o3d.io.write_image(f"depth{fid:05d}.png", rgbd_frame.depth.to_legacy()) - print("Frame: {}, time: {}s".format(fid, rscam.get_timestamp() * 1e-6)) - - rscam.stop_capture() + mdata = rscam.get_metadata() + print(mdata) + os.makedirs("capture/color") + os.makedirs("capture/depth") + rgbd_frame_future = executor.submit(rscam.capture_frame) + + def on_window_close(): + nonlocal rscam, executor + executor.shutdown() + rscam.stop_capture() + return True # OK to close window + + o3dvis.set_on_close(on_window_close) + + while True: + rgbd_frame = rgbd_frame_future.result() + # Run each IO operation in the threadpool + rgbd_frame_future = executor.submit(rscam.capture_frame) + point_cloud_video(executor, rgbd_frame, mdata, rscam.get_timestamp(), + o3dvis) + + +def pcd_video_from_bag(rsbagfile, executor, o3dvis): + """Show point cloud video coming from a RealSense bag file. Save frames to + disk in capture/{color,depth} folders. + """ + rsbag = io3d.RSBagReader.create(rsbagfile) + if not rsbag.is_opened(): + raise RuntimeError(f"RS bag file {rsbagfile} could not be opened.") + mdata = rsbag.metadata + print(mdata) + os.makedirs("capture/color") + os.makedirs("capture/depth") + + def on_window_close(): + nonlocal rsbag, executor + executor.shutdown() + rsbag.close() + return True # OK to close window + + o3dvis.set_on_close(on_window_close) + + rgbd_frame = rsbag.next_frame() + while not rsbag.is_eof(): + # Run each IO operation in the threadpool + rgbd_frame_future = executor.submit(rsbag.next_frame) + point_cloud_video(executor, rgbd_frame, mdata, rsbag.get_timestamp(), + o3dvis) + rgbd_frame = rgbd_frame_future.result() + + +def main(): + if os.path.exists("capture"): + raise RuntimeError( + "Frames saving destination folder 'capture' already exists. 
Please move it." + ) + + # Initialize app and create GUI window + app = gui.Application.instance + app.initialize() + o3dvis = o3d.visualization.O3DVisualizer("Open3D: PointCloud video", 1024, + 768) + o3dvis.show_axes = True + # set view: fov, eye, lookat, up + o3dvis.setup_camera(45, [0., 0., 0.], [0., 0., -1.], [0., -1., 0.]) + app.add_window(o3dvis) + + have_cam = io3d.RealSenseSensor.list_devices() + have_bag = (len(sys.argv) == 2) + + with ThreadPoolExecutor(max_workers=4) as executor: + # Run IO and compute in threadpool + if have_bag: + executor.submit(pcd_video_from_bag, sys.argv[1], executor, o3dvis) + elif have_cam: + executor.submit(pcd_video_from_camera, executor, o3dvis) + else: + executor.submit(pcd_video_from_bag, + o3d.data.JackJackL515Bag().path, executor, o3dvis) + + app.run() # GUI runs in the main thread. + + +if __name__ == "__main__": + main() diff --git a/examples/python/reconstruction_system/debug/visualize_fragments.py b/examples/python/reconstruction_system/debug/visualize_fragments.py index c969a86e074..2c05bd19142 100644 --- a/examples/python/reconstruction_system/debug/visualize_fragments.py +++ b/examples/python/reconstruction_system/debug/visualize_fragments.py @@ -5,7 +5,7 @@ # SPDX-License-Identifier: MIT # ---------------------------------------------------------------------------- -# examples/python/reconstruction_system/debug/visualize_fragment.py +# examples/python/reconstruction_system/debug/visualize_fragments.py import argparse import json diff --git a/examples/python/reconstruction_system/make_fragments.py b/examples/python/reconstruction_system/make_fragments.py index 138fe567aaf..dc4e59d0780 100644 --- a/examples/python/reconstruction_system/make_fragments.py +++ b/examples/python/reconstruction_system/make_fragments.py @@ -8,6 +8,7 @@ # examples/python/reconstruction_system/make_fragments.py import math +import multiprocessing import os, sys import numpy as np import open3d as o3d @@ -172,13 +173,14 @@ def run(config): 
math.ceil(float(n_files) / config['n_frames_per_fragment'])) if config["python_multi_threading"] is True: - from joblib import Parallel, delayed - import multiprocessing - import subprocess - MAX_THREAD = min(multiprocessing.cpu_count(), n_fragments) - Parallel(n_jobs=MAX_THREAD)(delayed(process_single_fragment)( - fragment_id, color_files, depth_files, n_files, n_fragments, config) - for fragment_id in range(n_fragments)) + max_workers = min(max(1, multiprocessing.cpu_count() - 1), n_fragments) + # Prevent over allocation of open mp threads in child processes + os.environ['OMP_NUM_THREADS'] = '1' + mp_context = multiprocessing.get_context('spawn') + with mp_context.Pool(processes=max_workers) as pool: + args = [(fragment_id, color_files, depth_files, n_files, + n_fragments, config) for fragment_id in range(n_fragments)] + pool.starmap(process_single_fragment, args) else: for fragment_id in range(n_fragments): process_single_fragment(fragment_id, color_files, depth_files, diff --git a/examples/python/reconstruction_system/opencv_pose_estimation.py b/examples/python/reconstruction_system/opencv_pose_estimation.py index fc42b22f5ad..47714065858 100644 --- a/examples/python/reconstruction_system/opencv_pose_estimation.py +++ b/examples/python/reconstruction_system/opencv_pose_estimation.py @@ -178,7 +178,7 @@ def estimate_3D_transform_RANSAC(pts_xyz_s, pts_xyz_t): if (n_inlier > max_inlier) and (np.linalg.det(R_approx) != 0.0) and \ (R_approx[0,0] > 0 and R_approx[1,1] > 0 and R_approx[2,2] > 0): Transform_good[:3, :3] = R_approx - Transform_good[:3, 3] = [t_approx[0], t_approx[1], t_approx[2]] + Transform_good[:3, 3] = t_approx.squeeze(1) max_inlier = n_inlier inlier_vec = [id_iter for diff_iter, id_iter \ in zip(diff, range(n_points)) \ diff --git a/examples/python/reconstruction_system/refine_registration.py b/examples/python/reconstruction_system/refine_registration.py index 85cb5311624..bd555b150f9 100644 --- 
a/examples/python/reconstruction_system/refine_registration.py +++ b/examples/python/reconstruction_system/refine_registration.py @@ -7,9 +7,12 @@ # examples/python/reconstruction_system/refine_registration.py +import multiprocessing +import os +import sys + import numpy as np import open3d as o3d -import os, sys pyexample_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(pyexample_path) @@ -163,29 +166,28 @@ def make_posegraph_for_refined_scene(ply_file_names, config): s = edge.source_node_id t = edge.target_node_id matching_results[s * n_files + t] = \ - matching_result(s, t, edge.transformation) - - if config["python_multi_threading"] == True: - from joblib import Parallel, delayed - import multiprocessing - import subprocess - MAX_THREAD = min(multiprocessing.cpu_count(), - max(len(pose_graph.edges), 1)) - results = Parallel(n_jobs=MAX_THREAD)( - delayed(register_point_cloud_pair)( - ply_file_names, matching_results[r].s, matching_results[r].t, - matching_results[r].transformation, config) - for r in matching_results) + matching_result(s, t, edge.transformation) + + if config["python_multi_threading"] is True: + os.environ['OMP_NUM_THREADS'] = '1' + max_workers = max( + 1, min(multiprocessing.cpu_count() - 1, len(pose_graph.edges))) + mp_context = multiprocessing.get_context('spawn') + with mp_context.Pool(processes=max_workers) as pool: + args = [(ply_file_names, v.s, v.t, v.transformation, config) + for k, v in matching_results.items()] + results = pool.starmap(register_point_cloud_pair, args) + for i, r in enumerate(matching_results): matching_results[r].transformation = results[i][0] matching_results[r].information = results[i][1] else: for r in matching_results: (matching_results[r].transformation, - matching_results[r].information) = \ - register_point_cloud_pair(ply_file_names, - matching_results[r].s, matching_results[r].t, - matching_results[r].transformation, config) + matching_results[r].information) = \ + 
register_point_cloud_pair(ply_file_names, + matching_results[r].s, matching_results[r].t, + matching_results[r].transformation, config) pose_graph_new = o3d.pipelines.registration.PoseGraph() odometry = np.identity(4) diff --git a/examples/python/reconstruction_system/register_fragments.py b/examples/python/reconstruction_system/register_fragments.py index 82e31fba9d7..f7d3c72e664 100644 --- a/examples/python/reconstruction_system/register_fragments.py +++ b/examples/python/reconstruction_system/register_fragments.py @@ -7,9 +7,12 @@ # examples/python/reconstruction_system/register_fragments.py +import multiprocessing +import os +import sys + import numpy as np import open3d as o3d -import os, sys pyexample_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(pyexample_path) @@ -158,16 +161,16 @@ def make_posegraph_for_scene(ply_file_names, config): for t in range(s + 1, n_files): matching_results[s * n_files + t] = matching_result(s, t) - if config["python_multi_threading"] == True: - from joblib import Parallel, delayed - import multiprocessing - import subprocess - MAX_THREAD = min(multiprocessing.cpu_count(), - max(len(matching_results), 1)) - results = Parallel(n_jobs=MAX_THREAD)(delayed( - register_point_cloud_pair)(ply_file_names, matching_results[r].s, - matching_results[r].t, config) - for r in matching_results) + if config["python_multi_threading"] is True: + os.environ['OMP_NUM_THREADS'] = '1' + max_workers = max( + 1, min(multiprocessing.cpu_count() - 1, len(matching_results))) + mp_context = multiprocessing.get_context('spawn') + with mp_context.Pool(processes=max_workers) as pool: + args = [(ply_file_names, v.s, v.t, config) + for k, v in matching_results.items()] + results = pool.starmap(register_point_cloud_pair, args) + for i, r in enumerate(matching_results): matching_results[r].success = results[i][0] matching_results[r].transformation = results[i][1] @@ -175,9 +178,9 @@ def make_posegraph_for_scene(ply_file_names, 
config): else: for r in matching_results: (matching_results[r].success, matching_results[r].transformation, - matching_results[r].information) = \ - register_point_cloud_pair(ply_file_names, - matching_results[r].s, matching_results[r].t, config) + matching_results[r].information) = \ + register_point_cloud_pair(ply_file_names, + matching_results[r].s, matching_results[r].t, config) for r in matching_results: if matching_results[r].success: diff --git a/examples/python/reconstruction_system/run_system.py b/examples/python/reconstruction_system/run_system.py index 91fd073b3c4..dedfc5552d2 100644 --- a/examples/python/reconstruction_system/run_system.py +++ b/examples/python/reconstruction_system/run_system.py @@ -86,7 +86,7 @@ initialize_config(config) check_folder_structure(config['path_dataset']) else: - # load deafult dataset. + # load default dataset. config = dataset_loader(args.default_dataset) assert config is not None diff --git a/examples/python/reconstruction_system/sensors/realsense_helper.py b/examples/python/reconstruction_system/sensors/realsense_helper.py index 65c68014553..5e37b0c50bb 100644 --- a/examples/python/reconstruction_system/sensors/realsense_helper.py +++ b/examples/python/reconstruction_system/sensors/realsense_helper.py @@ -5,7 +5,7 @@ # SPDX-License-Identifier: MIT # ---------------------------------------------------------------------------- -# examples/python/reconstruction_system/sensors/realsense_pcd_visualizer.py +# examples/python/reconstruction_system/sensors/realsense_helper.py # pyrealsense2 is required. 
# Please see instructions in https://github.com/IntelRealSense/librealsense/tree/master/wrappers/python diff --git a/examples/python/t_reconstruction_system/common.py b/examples/python/t_reconstruction_system/common.py index 1831545388b..d6331920d87 100644 --- a/examples/python/t_reconstruction_system/common.py +++ b/examples/python/t_reconstruction_system/common.py @@ -5,7 +5,7 @@ # SPDX-License-Identifier: MIT # ---------------------------------------------------------------------------- -# examples/python/reconstruction_system/common.py +# examples/python/t_reconstruction_system/common.py import open3d as o3d @@ -95,7 +95,7 @@ def get_default_dataset(config): def load_depth_file_names(config): if not os.path.exists(config.path_dataset): print( - 'Path \'{}\' not found.'.format(config.path_dataset), + f"Path '{config.path_dataset}' not found.", 'Please provide --path_dataset in the command line or the config file.' ) return [], [] @@ -106,7 +106,7 @@ def load_depth_file_names(config): depth_file_names = glob.glob(os.path.join(depth_folder, '*.png')) n_depth = len(depth_file_names) if n_depth == 0: - print('Depth image not found in {}, abort!'.format(depth_folder)) + print(f'Depth image not found in {depth_folder}, abort!') return [] return sorted(depth_file_names) diff --git a/examples/python/t_reconstruction_system/config.py b/examples/python/t_reconstruction_system/config.py index 5d87a561c9f..59b9ec40e2f 100644 --- a/examples/python/t_reconstruction_system/config.py +++ b/examples/python/t_reconstruction_system/config.py @@ -6,7 +6,9 @@ # ---------------------------------------------------------------------------- import os + import configargparse +import open3d as o3d class ConfigParser(configargparse.ArgParser): @@ -107,9 +109,8 @@ def __init__(self): integration_parser = self.add_argument_group('integration') integration_parser.add( - '--integration_mode',type=str, - choices=['color', 'depth'], - help='Volumetric integration mode.') + '--integrate_color', 
action='store_true', + default=False, help='Volumetric integration mode.') integration_parser.add( '--voxel_size', type=float, help='Voxel size in meter for volumetric integration.') @@ -141,7 +142,7 @@ def get_config(self): 'Fallback to hybrid odometry.') config.odometry_method = 'hybrid' - if config.engine == 'tensor': + elif config.engine == 'tensor': if config.icp_method == 'generalized': print('Tensor engine does not support generalized ICP.', 'Fallback to colored ICP.') @@ -152,6 +153,13 @@ def get_config(self): 'Disabled.') config.multiprocessing = False + if (config.device.lower().startswith('cuda') and + (not o3d.core.cuda.is_available())): + print( + 'Open3d not built with cuda support or no cuda device available. ', + 'Fallback to CPU.') + config.device = 'CPU:0' + return config diff --git a/examples/python/t_reconstruction_system/default_config.yml b/examples/python/t_reconstruction_system/default_config.yml index 36e4030efce..3e32515cbf4 100644 --- a/examples/python/t_reconstruction_system/default_config.yml +++ b/examples/python/t_reconstruction_system/default_config.yml @@ -8,7 +8,6 @@ depth_folder: depth color_folder: color path_intrinsic: '' path_color_intrinsic: '' -fragment_size: 100 depth_min: 0.1 depth_max: 3.0 depth_scale: 1000.0 @@ -21,7 +20,7 @@ icp_voxelsize: 0.05 icp_distance_thr: 0.07 global_registration_method: ransac registration_loop_weight: 0.1 -integration_mode: color +integrate_color: true voxel_size: 0.0058 trunc_voxel_multiplier: 8.0 block_count: 40000 diff --git a/examples/python/t_reconstruction_system/dense_slam.py b/examples/python/t_reconstruction_system/dense_slam.py index a2e03795af2..374b6dce91c 100644 --- a/examples/python/t_reconstruction_system/dense_slam.py +++ b/examples/python/t_reconstruction_system/dense_slam.py @@ -5,7 +5,7 @@ # SPDX-License-Identifier: MIT # ---------------------------------------------------------------------------- -# examples/python/t_reconstruction_system/ray_casting.py +# 
examples/python/t_reconstruction_system/dense_slam.py # P.S. This example is used in documentation, so, please ensure the changes are # synchronized. @@ -16,7 +16,8 @@ import time from config import ConfigParser -from common import get_default_dataset, load_rgbd_file_names, save_poses, load_intrinsic, extract_trianglemesh, get_default_testdata, extract_rgbd_frames +from common import (get_default_dataset, load_rgbd_file_names, save_poses, + load_intrinsic, extract_trianglemesh, extract_rgbd_frames) def slam(depth_file_names, color_file_names, intrinsic, config): @@ -90,7 +91,7 @@ def slam(depth_file_names, color_file_names, intrinsic, config): # Extract RGB-D frames and intrinsic from bag file. if config.path_dataset.endswith(".bag"): assert os.path.isfile( - config.path_dataset), (f"File {config.path_dataset} not found.") + config.path_dataset), f"File {config.path_dataset} not found." print("Extracting frames from RGBD video file") config.path_dataset, config.path_intrinsic, config.depth_scale = extract_rgbd_frames( config.path_dataset) diff --git a/examples/python/t_reconstruction_system/dense_slam_gui.py b/examples/python/t_reconstruction_system/dense_slam_gui.py index a4e68526754..24744b9e87d 100644 --- a/examples/python/t_reconstruction_system/dense_slam_gui.py +++ b/examples/python/t_reconstruction_system/dense_slam_gui.py @@ -17,7 +17,7 @@ from config import ConfigParser -import os, sys +import os import numpy as np import threading import time @@ -459,7 +459,7 @@ def update_main(self): # Extract RGB-D frames and intrinsic from bag file. if config.path_dataset.endswith(".bag"): assert os.path.isfile( - config.path_dataset), (f"File {config.path_dataset} not found.") + config.path_dataset), f"File {config.path_dataset} not found." 
print("Extracting frames from RGBD video file") config.path_dataset, config.path_intrinsic, config.depth_scale = extract_rgbd_frames( config.path_dataset) diff --git a/examples/python/t_reconstruction_system/integrate.py b/examples/python/t_reconstruction_system/integrate.py index 2c3e011cdea..3d86ec02943 100644 --- a/examples/python/t_reconstruction_system/integrate.py +++ b/examples/python/t_reconstruction_system/integrate.py @@ -10,25 +10,22 @@ # P.S. This example is used in documentation, so, please ensure the changes are # synchronized. -import os -import numpy as np +import time + import open3d as o3d import open3d.core as o3c -import time -import matplotlib.pyplot as plt from tqdm import tqdm -from config import ConfigParser from common import load_rgbd_file_names, load_depth_file_names, load_intrinsic, load_extrinsics, get_default_dataset +from config import ConfigParser def integrate(depth_file_names, color_file_names, depth_intrinsic, - color_intrinsic, extrinsics, integrate_color, config): - + color_intrinsic, extrinsics, config): n_files = len(depth_file_names) device = o3d.core.Device(config.device) - if integrate_color: + if config.integrate_color: vbg = o3d.t.geometry.VoxelBlockGrid( attr_names=('tsdf', 'weight', 'color'), attr_dtypes=(o3c.float32, o3c.float32, o3c.float32), @@ -56,7 +53,7 @@ def integrate(depth_file_names, color_file_names, depth_intrinsic, depth, depth_intrinsic, extrinsic, config.depth_scale, config.depth_max) - if integrate_color: + if config.integrate_color: color = o3d.t.io.read_image(color_file_names[i]).to(device) vbg.integrate(frustum_block_coords, depth, color, depth_intrinsic, color_intrinsic, extrinsic, config.depth_scale, @@ -83,7 +80,6 @@ def integrate(depth_file_names, color_file_names, depth_intrinsic, 'Default dataset may be selected from the following options: ' '[lounge, jack_jack]', default='lounge') - parser.add('--integrate_color', action='store_true') parser.add('--path_trajectory', help='path to the trajectory 
.log or .json file.') parser.add('--path_npz', diff --git a/examples/python/t_reconstruction_system/integrate_custom.py b/examples/python/t_reconstruction_system/integrate_custom.py index c6b8df3c7f4..8b352780b04 100644 --- a/examples/python/t_reconstruction_system/integrate_custom.py +++ b/examples/python/t_reconstruction_system/integrate_custom.py @@ -5,139 +5,137 @@ # SPDX-License-Identifier: MIT # ---------------------------------------------------------------------------- -# examples/python/t_reconstruction_system/ray_casting.py +# examples/python/t_reconstruction_system/integrate_custom.py # P.S. This example is used in documentation, so, please ensure the changes are # synchronized. import os -import numpy as np +import time + import open3d as o3d import open3d.core as o3c -import time -import matplotlib.pyplot as plt +from tqdm import tqdm +from common import get_default_dataset, load_rgbd_file_names, load_depth_file_names, load_intrinsic, load_extrinsics, extract_rgbd_frames from config import ConfigParser -from common import get_default_dataset, load_rgbd_file_names, load_depth_file_names, save_poses, load_intrinsic, load_extrinsics, extract_rgbd_frames - -from tqdm import tqdm def integrate(depth_file_names, color_file_names, intrinsic, extrinsics, config): if os.path.exists(config.path_npz): - print('Voxel block grid npz file {} found, trying to load...'.format( - config.path_npz)) + print( + f'Voxel block grid npz file {config.path_npz} found, trying to load...' + ) vbg = o3d.t.geometry.VoxelBlockGrid.load(config.path_npz) print('Loading finished.') + return vbg + + print( + f'Voxel block grid npz file {config.path_npz} not found, trying to integrate...' 
+ ) + + n_files = len(depth_file_names) + device = o3d.core.Device(config.device) + + voxel_size = 3.0 / 512 + trunc = voxel_size * 4 + res = 8 + + if config.integrate_color: + vbg = o3d.t.geometry.VoxelBlockGrid( + ('tsdf', 'weight', 'color'), + (o3c.float32, o3c.float32, o3c.float32), ((1), (1), (3)), 3.0 / 512, + 8, 100000, device) else: - print('Voxel block grid npz file {} not found, trying to integrate...'. - format(config.path_npz)) + vbg = o3d.t.geometry.VoxelBlockGrid( + ('tsdf', 'weight'), (o3c.float32, o3c.float32), ((1), (1)), + 3.0 / 512, 8, 100000, device) - n_files = len(depth_file_names) - device = o3d.core.Device(config.device) + start = time.time() + for i in tqdm(range(n_files)): + depth = o3d.t.io.read_image(depth_file_names[i]).to(device) + extrinsic = extrinsics[i] - voxel_size = 3.0 / 512 - trunc = voxel_size * 4 - res = 8 + start = time.time() + # Get active frustum block coordinates from input + frustum_block_coords = vbg.compute_unique_block_coordinates( + depth, intrinsic, extrinsic, config.depth_scale, config.depth_max) + # Activate them in the underlying hash map (may have been inserted) + vbg.hashmap().activate(frustum_block_coords) - if config.integrate_color: - vbg = o3d.t.geometry.VoxelBlockGrid( - ('tsdf', 'weight', 'color'), - (o3c.float32, o3c.float32, o3c.float32), ((1), (1), (3)), - 3.0 / 512, 8, 100000, device) - else: - vbg = o3d.t.geometry.VoxelBlockGrid( - ('tsdf', 'weight'), (o3c.float32, o3c.float32), ((1), (1)), - 3.0 / 512, 8, 100000, device) + # Find buf indices in the underlying engine + buf_indices, masks = vbg.hashmap().find(frustum_block_coords) + o3d.core.cuda.synchronize() + end = time.time() + + start = time.time() + voxel_coords, voxel_indices = vbg.voxel_coordinates_and_flattened_indices( + buf_indices) + o3d.core.cuda.synchronize() + end = time.time() + # Now project them to the depth and find association + # (3, N) -> (2, N) start = time.time() - for i in tqdm(range(n_files)): - depth = 
o3d.t.io.read_image(depth_file_names[i]).to(device) - extrinsic = extrinsics[i] - - start = time.time() - # Get active frustum block coordinates from input - frustum_block_coords = vbg.compute_unique_block_coordinates( - depth, intrinsic, extrinsic, config.depth_scale, - config.depth_max) - # Activate them in the underlying hash map (may have been inserted) - vbg.hashmap().activate(frustum_block_coords) - - # Find buf indices in the underlying engine - buf_indices, masks = vbg.hashmap().find(frustum_block_coords) - o3d.core.cuda.synchronize() - end = time.time() - - start = time.time() - voxel_coords, voxel_indices = vbg.voxel_coordinates_and_flattened_indices( - buf_indices) - o3d.core.cuda.synchronize() - end = time.time() - - # Now project them to the depth and find association - # (3, N) -> (2, N) - start = time.time() - extrinsic_dev = extrinsic.to(device, o3c.float32) - xyz = extrinsic_dev[:3, :3] @ voxel_coords.T() + extrinsic_dev[:3, - 3:] - - intrinsic_dev = intrinsic.to(device, o3c.float32) - uvd = intrinsic_dev @ xyz - d = uvd[2] - u = (uvd[0] / d).round().to(o3c.int64) - v = (uvd[1] / d).round().to(o3c.int64) - o3d.core.cuda.synchronize() - end = time.time() - - start = time.time() - mask_proj = (d > 0) & (u >= 0) & (v >= 0) & (u < depth.columns) & ( - v < depth.rows) - - v_proj = v[mask_proj] - u_proj = u[mask_proj] - d_proj = d[mask_proj] - depth_readings = depth.as_tensor()[v_proj, u_proj, 0].to( - o3c.float32) / config.depth_scale - sdf = depth_readings - d_proj - - mask_inlier = (depth_readings > 0) \ - & (depth_readings < config.depth_max) \ - & (sdf >= -trunc) - - sdf[sdf >= trunc] = trunc - sdf = sdf / trunc - o3d.core.cuda.synchronize() - end = time.time() - - start = time.time() - weight = vbg.attribute('weight').reshape((-1, 1)) - tsdf = vbg.attribute('tsdf').reshape((-1, 1)) - - valid_voxel_indices = voxel_indices[mask_proj][mask_inlier] - w = weight[valid_voxel_indices] - wp = w + 1 - - tsdf[valid_voxel_indices] \ - = 
(tsdf[valid_voxel_indices] * w + - sdf[mask_inlier].reshape(w.shape)) / (wp) - if config.integrate_color: - color = o3d.t.io.read_image(color_file_names[i]).to(device) - color_readings = color.as_tensor()[v_proj, - u_proj].to(o3c.float32) - - color = vbg.attribute('color').reshape((-1, 3)) - color[valid_voxel_indices] \ - = (color[valid_voxel_indices] * w + - color_readings[mask_inlier]) / (wp) - - weight[valid_voxel_indices] = wp - o3d.core.cuda.synchronize() - end = time.time() - - print('Saving to {}...'.format(config.path_npz)) - vbg.save(config.path_npz) - print('Saving finished') + extrinsic_dev = extrinsic.to(device, o3c.float32) + xyz = extrinsic_dev[:3, :3] @ voxel_coords.T() + extrinsic_dev[:3, 3:] + + intrinsic_dev = intrinsic.to(device, o3c.float32) + uvd = intrinsic_dev @ xyz + d = uvd[2] + u = (uvd[0] / d).round().to(o3c.int64) + v = (uvd[1] / d).round().to(o3c.int64) + o3d.core.cuda.synchronize() + end = time.time() + + start = time.time() + mask_proj = (d > 0) & (u >= 0) & (v >= 0) & (u < depth.columns) & ( + v < depth.rows) + + v_proj = v[mask_proj] + u_proj = u[mask_proj] + d_proj = d[mask_proj] + depth_readings = depth.as_tensor()[v_proj, u_proj, 0].to( + o3c.float32) / config.depth_scale + sdf = depth_readings - d_proj + + mask_inlier = (depth_readings > 0) \ + & (depth_readings < config.depth_max) \ + & (sdf >= -trunc) + + sdf[sdf >= trunc] = trunc + sdf = sdf / trunc + o3d.core.cuda.synchronize() + end = time.time() + + start = time.time() + weight = vbg.attribute('weight').reshape((-1, 1)) + tsdf = vbg.attribute('tsdf').reshape((-1, 1)) + + valid_voxel_indices = voxel_indices[mask_proj][mask_inlier] + w = weight[valid_voxel_indices] + wp = w + 1 + + tsdf[valid_voxel_indices] \ + = (tsdf[valid_voxel_indices] * w + + sdf[mask_inlier].reshape(w.shape)) / (wp) + if config.integrate_color: + color = o3d.t.io.read_image(color_file_names[i]).to(device) + color_readings = color.as_tensor()[v_proj, u_proj].to(o3c.float32) + + color = 
vbg.attribute('color').reshape((-1, 3)) + color[valid_voxel_indices] \ + = (color[valid_voxel_indices] * w + + color_readings[mask_inlier]) / (wp) + + weight[valid_voxel_indices] = wp + o3d.core.cuda.synchronize() + end = time.time() + + print(f'Saving to {config.path_npz}...') + vbg.save(config.path_npz) + print('Saving finished') return vbg @@ -170,7 +168,7 @@ def integrate(depth_file_names, color_file_names, intrinsic, extrinsics, # Extract RGB-D frames and intrinsic from bag file. if config.path_dataset.endswith(".bag"): assert os.path.isfile( - config.path_dataset), (f"File {config.path_dataset} not found.") + config.path_dataset), f"File {config.path_dataset} not found." print("Extracting frames from RGBD video file") config.path_dataset, config.path_intrinsic, config.depth_scale = extract_rgbd_frames( config.path_dataset) diff --git a/examples/python/t_reconstruction_system/pose_graph_optim.py b/examples/python/t_reconstruction_system/pose_graph_optim.py index 151f55a941f..94abaf34711 100644 --- a/examples/python/t_reconstruction_system/pose_graph_optim.py +++ b/examples/python/t_reconstruction_system/pose_graph_optim.py @@ -57,8 +57,8 @@ def _dicts2graph(self): for (i, j) in self.dict_edges: if not (i in self.dict_nodes) or not (j in self.dict_nodes): print( - 'Edge node ({} {}) not found, abort pose graph construction' - .format(i, j)) + f'Edge node ({i} {j}) not found, abort pose graph construction' + ) trans, info, is_loop = self.dict_edges[(i, j)] ki = nodes2indices[i] kj = nodes2indices[j] diff --git a/examples/python/t_reconstruction_system/ray_casting.py b/examples/python/t_reconstruction_system/ray_casting.py index 84af9a97af1..5bae7301a30 100644 --- a/examples/python/t_reconstruction_system/ray_casting.py +++ b/examples/python/t_reconstruction_system/ray_casting.py @@ -50,7 +50,7 @@ # Extract RGB-D frames and intrinsic from bag file. 
if config.path_dataset.endswith(".bag"): assert os.path.isfile( - config.path_dataset), (f"File {config.path_dataset} not found.") + config.path_dataset), f"File {config.path_dataset} not found." print("Extracting frames from RGBD video file") config.path_dataset, config.path_intrinsic, config.depth_scale = extract_rgbd_frames( config.path_dataset) diff --git a/examples/python/t_reconstruction_system/run_system.py b/examples/python/t_reconstruction_system/run_system.py index 55b5dbcb160..51e94257ace 100644 --- a/examples/python/t_reconstruction_system/run_system.py +++ b/examples/python/t_reconstruction_system/run_system.py @@ -104,7 +104,6 @@ intrinsic, intrinsic, extrinsics, - integrate_color=True, config=config) pcd = vbg.extract_point_cloud() diff --git a/examples/python/visualization/draw.py b/examples/python/visualization/draw.py index 3c2de61889c..f354f55783e 100644 --- a/examples/python/visualization/draw.py +++ b/examples/python/visualization/draw.py @@ -319,6 +319,8 @@ def main(): multi_objects() actions() selections() + groups() + time_animation() if __name__ == "__main__": diff --git a/examples/python/visualization/interactive_visualization.py b/examples/python/visualization/interactive_visualization.py index 29e98dd6c08..1b3ff5ed051 100644 --- a/examples/python/visualization/interactive_visualization.py +++ b/examples/python/visualization/interactive_visualization.py @@ -37,6 +37,15 @@ def draw_registration_result(source, target, transformation): o3d.visualization.draw_geometries([source_temp, target_temp]) +def prepare_data(): + pcd_data = o3d.data.DemoICPPointClouds() + source = o3d.io.read_point_cloud(pcd_data.paths[0]) + target = o3d.io.read_point_cloud(pcd_data.paths[2]) + print("Visualization of two point clouds before manual alignment") + draw_registration_result(source, target, np.identity(4)) + return source, target + + def pick_points(pcd): print("") print( @@ -53,29 +62,15 @@ def pick_points(pcd): return vis.get_picked_points() -def 
demo_manual_registration(): - print("Demo for manual ICP") - pcd_data = o3d.data.DemoICPPointClouds() - source = o3d.io.read_point_cloud(pcd_data.paths[0]) - target = o3d.io.read_point_cloud(pcd_data.paths[2]) - print("Visualization of two point clouds before manual alignment") - draw_registration_result(source, target, np.identity(4)) - - # pick points from two point clouds and builds correspondences - picked_id_source = pick_points(source) - picked_id_target = pick_points(target) - assert (len(picked_id_source) >= 3 and len(picked_id_target) >= 3) - assert (len(picked_id_source) == len(picked_id_target)) - corr = np.zeros((len(picked_id_source), 2)) - corr[:, 0] = picked_id_source - corr[:, 1] = picked_id_target - +def register_via_correspondences(source, target, source_points, target_points): + corr = np.zeros((len(source_points), 2)) + corr[:, 0] = source_points + corr[:, 1] = target_points # estimate rough transformation using correspondences print("Compute a rough transform using the correspondences given by user") p2p = o3d.pipelines.registration.TransformationEstimationPointToPoint() trans_init = p2p.compute_transformation(source, target, o3d.utility.Vector2iVector(corr)) - # point-to-point ICP for refinement print("Perform point-to-point ICP refinement") threshold = 0.03 # 3cm distance threshold @@ -83,6 +78,18 @@ def demo_manual_registration(): source, target, threshold, trans_init, o3d.pipelines.registration.TransformationEstimationPointToPoint()) draw_registration_result(source, target, reg_p2p.transformation) + + +def demo_manual_registration(): + print("Demo for manual ICP") + source, target = prepare_data() + + # pick points from two point clouds and builds correspondences + source_points = pick_points(source) + target_points = pick_points(target) + assert (len(source_points) >= 3 and len(target_points) >= 3) + assert (len(source_points) == len(target_points)) + register_via_correspondences(source, target, source_points, target_points) print("") diff 
--git a/examples/python/visualization/non_blocking_visualization.py b/examples/python/visualization/non_blocking_visualization.py index cc8bd74697c..04baaf85574 100644 --- a/examples/python/visualization/non_blocking_visualization.py +++ b/examples/python/visualization/non_blocking_visualization.py @@ -10,22 +10,27 @@ import open3d as o3d import numpy as np -if __name__ == "__main__": - o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug) + +def prepare_data(): pcd_data = o3d.data.DemoICPPointClouds() source_raw = o3d.io.read_point_cloud(pcd_data.paths[0]) target_raw = o3d.io.read_point_cloud(pcd_data.paths[1]) - source = source_raw.voxel_down_sample(voxel_size=0.02) target = target_raw.voxel_down_sample(voxel_size=0.02) + trans = [[0.862, 0.011, -0.507, 0.0], [-0.139, 0.967, -0.215, 0.7], [0.487, 0.255, 0.835, -1.4], [0.0, 0.0, 0.0, 1.0]] source.transform(trans) - flip_transform = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]] source.transform(flip_transform) target.transform(flip_transform) + return source, target + +def demo_non_blocking_visualization(): + o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug) + + source, target = prepare_data() vis = o3d.visualization.Visualizer() vis.create_window() vis.add_geometry(source) @@ -46,4 +51,9 @@ if save_image: vis.capture_screen_image("temp_%04d.jpg" % i) vis.destroy_window() + o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Info) + + +if __name__ == '__main__': + demo_non_blocking_visualization() diff --git a/python/js/amd-public-path.js b/python/js/amd-public-path.js new file mode 100644 index 00000000000..71f9372e44f --- /dev/null +++ b/python/js/amd-public-path.js @@ -0,0 +1,8 @@ +// In an AMD module, we set the public path using the magic requirejs "module" dependency +// See https://github.com/requirejs/requirejs/wiki/Differences-between-the-simplified-CommonJS-wrapper-and-standard-AMD-define#module +// Since "module" is a requirejs magic module, we must include 
"module" in the webpack externals configuration. +var module = require("module"); +var url = new URL(module.uri, document.location); +// Using lastIndexOf("/")+1 gives us the empty string if there is no "/", so pathname becomes "/" +url.pathname = url.pathname.slice(0, url.pathname.lastIndexOf("/") + 1); +__webpack_public_path__ = `${url.origin}${url.pathname}`; diff --git a/python/js/lib/embed.js b/python/js/lib/embed.js deleted file mode 100644 index ff852e762cf..00000000000 --- a/python/js/lib/embed.js +++ /dev/null @@ -1,9 +0,0 @@ -// Entry point for the unpkg bundle containing custom model definitions. -// -// It differs from the notebook bundle in that it does not need to define a -// dynamic baseURL for the static assets and may load some css that would -// already be loaded by the notebook otherwise. - -// Export widget models and views, and the npm package version number. -module.exports = require("./web_visualizer.js"); -module.exports["version"] = require("../package.json").version; diff --git a/python/js/lib/extension.js b/python/js/lib/extension.js index 7c220feaed0..991c9a9a596 100644 --- a/python/js/lib/extension.js +++ b/python/js/lib/extension.js @@ -1,12 +1,6 @@ // This file contains the javascript that is run when the notebook is loaded. // It contains some requirejs configuration and the `load_ipython_extension` // which is required for any notebook extension. -// -// Some static assets may be required by the custom widget javascript. The base -// url for the notebook is not known at build time and is therefore computed -// dynamically. 
-__webpack_public_path__ = document.querySelector('body').getAttribute('data-base-url') + 'nbextensions/open3d'; - // Configure requirejs if (window.require) { @@ -21,5 +15,5 @@ if (window.require) { // Export the required load_ipython_extension module.exports = { - load_ipython_extension: function() {} + load_ipython_extension() {} }; diff --git a/python/js/lib/labplugin.js b/python/js/lib/labplugin.js index 50a8fda4b40..95c8cd49ad6 100644 --- a/python/js/lib/labplugin.js +++ b/python/js/lib/labplugin.js @@ -4,13 +4,12 @@ var base = require('@jupyter-widgets/base'); module.exports = { id: 'open3d:plugin', requires: [base.IJupyterWidgetRegistry], - activate: function(app, widgets) { - widgets.registerWidget({ - name: 'open3d', - version: plugin.version, - exports: plugin - }); + activate: (app, widgets) => { + widgets.registerWidget({ + name: 'open3d', + version: plugin.version, + exports: plugin + }); }, autoStart: true }; - diff --git a/python/js/lib/web_visualizer.js b/python/js/lib/web_visualizer.js index fd68ad6c307..3ccdff725a3 100644 --- a/python/js/lib/web_visualizer.js +++ b/python/js/lib/web_visualizer.js @@ -1,27 +1,8 @@ // ---------------------------------------------------------------------------- -// - Open3D: www.open3d.org - +// - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- -// The MIT License (MIT) -// -// Copyright (c) 2018-2023 www.open3d.org -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all 
copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -// IN THE SOFTWARE. +// Copyright (c) 2018-2023 www.open3d.org +// SPDX-License-Identifier: MIT // ---------------------------------------------------------------------------- // Jupyter widget for Open3D WebRTC visualizer. See web_visualizer.py for the @@ -44,49 +25,51 @@ let WebRtcStreamer = require("./webrtcstreamer"); // // When serializing the entire widget state for embedding, only values that // differ from the defaults will be specified. -let WebVisualizerModel = widgets.DOMWidgetModel.extend({ - defaults: _.extend(widgets.DOMWidgetModel.prototype.defaults(), { - _model_name: "WebVisualizerModel", - _view_name: "WebVisualizerView", - _model_module: "open3d", - _view_module: "open3d", - // @...@ is configured by cpp/pybind/make_python_package.cmake. - _model_module_version: "@PROJECT_VERSION_THREE_NUMBER@", - _view_module_version: "@PROJECT_VERSION_THREE_NUMBER@", - }), -}); +class WebVisualizerModel extends widgets.DOMWidgetModel { + defaults() { + return _.extend(widgets.DOMWidgetModel.prototype.defaults(), { + _model_name: "WebVisualizerModel", + _view_name: "WebVisualizerView", + _model_module: "open3d", + _view_module: "open3d", + // @...@ is configured by cpp/pybind/make_python_package.cmake. + _model_module_version: "@PROJECT_VERSION_THREE_NUMBER@", + _view_module_version: "@PROJECT_VERSION_THREE_NUMBER@", + }); + } +} // Custom View. Renders the widget model. 
-let WebVisualizerView = widgets.DOMWidgetView.extend({ - sleep: function (time_ms) { +class WebVisualizerView extends widgets.DOMWidgetView { + sleep(time_ms) { return new Promise((resolve) => setTimeout(resolve, time_ms)); - }, + } - logAndReturn: function (value) { + logAndReturn(value) { console.log("logAndReturn: ", value); return value; - }, + } - callResultReady: function (callId) { + callResultReady(callId) { let pyjs_channel = this.model.get("pyjs_channel"); console.log("Current pyjs_channel:", pyjs_channel); let callResultMap = JSON.parse(this.model.get("pyjs_channel")); return callId in callResultMap; - }, + } - extractCallResult: function (callId) { + extractCallResult(callId) { if (!this.callResultReady(callId)) { throw "extractCallResult not ready yet."; } let callResultMap = JSON.parse(this.model.get("pyjs_channel")); return callResultMap[callId]; - }, + } /** * Hard-coded to call "call_http_api". Args and return value are all * strings. */ - callPython: async function (func, args = []) { + async callPython(func, args = []) { let callId = this.callId.toString(); this.callId++; let message = { @@ -116,9 +99,9 @@ let WebVisualizerView = widgets.DOMWidgetView.extend({ json_result ); return json_result; - }, + } - commsCall: function (url, data = {}) { + commsCall(url, data = {}) { // https://stackoverflow.com/a/736970/1255535 // parseUrl(url).hostname // parseUrl(url).entryPoint @@ -180,9 +163,9 @@ let WebVisualizerView = widgets.DOMWidgetView.extend({ } else { throw "Unsupported entryPoint: " + entryPoint; } - }, + } - render: function () { + render() { let windowUID = this.model.get("window_uid"); let onClose = function () { console.log("onClose() called for window_uid:", windowUID); @@ -214,8 +197,8 @@ let WebVisualizerView = widgets.DOMWidgetView.extend({ this.commsCall.bind(this) ); this.webRtcClient.connect(windowUID); - }, -}); + } +} module.exports = { WebVisualizerModel: WebVisualizerModel, diff --git a/python/js/package.json 
b/python/js/package.json index 091a632aae6..73a76b3a8af 100644 --- a/python/js/package.json +++ b/python/js/package.json @@ -1,8 +1,8 @@ { "name": "open3d", "version": "@PROJECT_VERSION_THREE_NUMBER@", - "description": "Open3D: A Modern Library for 3D Data Processing", - "author": "Open3D.org", + "description": "@PROJECT_DESCRIPTION@", + "author": "@PROJECT_EMAIL@", "main": "lib/index.js", "repository": { "type": "git", @@ -36,7 +36,7 @@ "rimraf": "^2.6.1" }, "dependencies": { - "@jupyter-widgets/base": "^1.1 || ^2 || ^3 || ^4", + "@jupyter-widgets/base": "^2 || ^3 || ^4 || ^5 || ^6", "lodash": "^4.17.4", "webrtc-adapter": "^4.2.2" }, diff --git a/python/js/webpack.config.js b/python/js/webpack.config.js index 7a7d378b956..05c2bee12aa 100644 --- a/python/js/webpack.config.js +++ b/python/js/webpack.config.js @@ -16,13 +16,11 @@ module.exports = (env, argv) => { // some configuration for requirejs, and provides the legacy // "load_ipython_extension" function which is required for any notebook // extension. - // entry: "./lib/extension.js", output: { filename: "extension.js", path: path.resolve(__dirname, "..", "open3d", "nbextension"), libraryTarget: "amd", - publicPath: "", // publicPath is set in extension.js }, devtool, }, @@ -32,47 +30,43 @@ module.exports = (env, argv) => { // This bundle contains the implementation for the custom widget views and // custom widget. 
// It must be an amd module - // - entry: "./lib/index.js", + entry: ["./amd-public-path.js", "./lib/index.js"], output: { filename: "index.js", path: path.resolve(__dirname, "..", "open3d", "nbextension"), libraryTarget: "amd", - publicPath: "", + publicPath: "", // Set in amd-public-path.js }, devtool, module: { rules: rules, }, - externals: ["@jupyter-widgets/base"], + // "module" is the magic requirejs dependency used to set the publicPath + externals: ["@jupyter-widgets/base", "module"] }, { // Embeddable open3d bundle // - // This bundle is generally almost identical to the notebook bundle - // containing the custom widget views and models. - // - // The only difference is in the configuration of the webpack public path - // for the static assets. - // - // It will be automatically distributed by unpkg to work with the static - // widget embedder. - // - // The target bundle is always `dist/index.js`, which is the path required - // by the custom widget embedder. + // This bundle is identical to the notebook bundle containing the custom + // widget views and models. The only difference is it is placed in the + // dist/ directory and shipped with the npm package for use from a CDN + // like jsdelivr. // - entry: "./lib/embed.js", + // The target bundle is always `dist/index.js`, which is the path + // required by the custom widget embedder. 
+ entry: ["./amd-public-path.js", "./lib/index.js"], output: { filename: "index.js", path: path.resolve(__dirname, "dist"), libraryTarget: "amd", - publicPath: "https://unpkg.com/open3d@" + version + "/dist/", + publicPath: "", // Set in amd-public-path.js }, devtool, module: { rules: rules, }, - externals: ["@jupyter-widgets/base"], + // "module" is the magic requirejs dependency used to set the publicPath + externals: ["@jupyter-widgets/base", "module"] }, ]; }; diff --git a/python/open3d/visualization/draw.py b/python/open3d/visualization/draw.py index 70c5eb06ba0..f7764b41400 100644 --- a/python/open3d/visualization/draw.py +++ b/python/open3d/visualization/draw.py @@ -36,6 +36,121 @@ def draw(geometry=None, on_animation_frame=None, on_animation_tick=None, non_blocking_and_return_uid=False): + """Draw 3D geometry types and 3D models. This is a high level interface to + :class:`open3d.visualization.O3DVisualizer`. + + The initial view may be specified either as a combination of (lookat, eye, + up, and field of view) or (intrinsic matrix, extrinsic matrix) pair. A + simple pinhole camera model is used. + + Args: + geometry (List[Geometry] or List[Dict]): The 3D data to be displayed can be provided in different types: + - A list of any Open3D geometry types (``PointCloud``, ``TriangleMesh``, ``LineSet`` or ``TriangleMeshModel``). + - A list of dictionaries with geometry data and additional metadata. The following keys are used: + - **name** (str): Geometry name. + - **geometry** (Geometry): Open3D geometry to be drawn. + - **material** (:class:`open3d.visualization.rendering.MaterialRecord`): PBR material for the geometry. + - **group** (str): Assign the geometry to a group. Groups are shown in the settings panel and users can take take joint actions on a group as a whole. + - **time** (float): If geometry elements are assigned times, a time bar is displayed and the elements can be animated. + - **is_visible** (bool): Show this geometry? 
+ title (str): Window title. + width (int): Viewport width. + height (int): Viewport height. + actions (List[(str, Callable)]): A list of pairs of action names and the + corresponding functions to execute. These actions are presented as + buttons in the settings panel. Each callable receives the window + (``O3DVisualizer``) as an argument. + lookat (array of shape (3,)): Camera principal axis direction. + eye (array of shape (3,)): Camera location. + up (array of shape (3,)): Camera up direction. + field_of_view (float): Camera horizontal field of view (degrees). + intrinsic_matrix (array of shape (3,3)): Camera intrinsic matrix. + extrinsic_matrix (array of shape (4,4)): Camera extrinsic matrix (world + to camera transformation). + bg_color (array of shape (4,)): Background color float with range [0,1], + default white. + bg_image (open3d.geometry.Image): Background image. + ibl (open3d.geometry.Image): Environment map for image based lighting + (IBL). + ibl_intensity (float): IBL intensity. + show_skybox (bool): Show skybox as scene background (default False). + show_ui (bool): Show settings user interface (default False). This can + be toggled from the Actions menu. + raw_mode (bool): Use raw mode for simpler rendering of the basic + geometry (Default false). + point_size (int): 3D point size (default 3). + line_width (int): 3D line width (default 1). + animation_time_step (float): Duration in seconds for each animation + frame. + animation_duration (float): Total animation duration in seconds. + rpc_interface (bool): Start an RPC interface at http://localhost:51454 and + listen for drawing requests. The requests can be made with + :class:`open3d.visualization.ExternalVisualizer`. + on_init (Callable): Extra initialization procedure for the underlying + GUI window. The procedure receives a single argument of type + :class:`open3d.visualization.O3DVisualizer`. 
+ on_animation_frame (Callable): Callback for each animation frame update + with signature:: + + Callback(O3DVisualizer, double time) -> None + + on_animation_tick (Callable): Callback for each animation time step with + signature:: + + Callback(O3DVisualizer, double tick_duration, double time) -> TickResult + + If the callback returns ``TickResult.REDRAW``, the scene is redrawn. + It should return ``TickResult.NOCHANGE`` if redraw is not required. + non_blocking_and_return_uid (bool): Do not block waiting for the user + to close the window. Instead return the window ID. This is useful + for embedding the visualizer and is used in the WebRTC interface and + Tensorboard plugin. + + Example: + See `examples/visualization/draw.py` for examples of advanced usage. The ``actions()`` + example from that file is shown below:: + + import open3d as o3d + import open3d.visualization as vis + + SOURCE_NAME = "Source" + RESULT_NAME = "Result (Poisson reconstruction)" + TRUTH_NAME = "Ground truth" + + bunny = o3d.data.BunnyMesh() + bunny_mesh = o3d.io.read_triangle_mesh(bunny.path) + bunny_mesh.compute_vertex_normals() + + bunny_mesh.paint_uniform_color((1, 0.75, 0)) + bunny_mesh.compute_vertex_normals() + cloud = o3d.geometry.PointCloud() + cloud.points = bunny_mesh.vertices + cloud.normals = bunny_mesh.vertex_normals + + def make_mesh(o3dvis): + mesh, _ = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson( + cloud) + mesh.paint_uniform_color((1, 1, 1)) + mesh.compute_vertex_normals() + o3dvis.add_geometry({"name": RESULT_NAME, "geometry": mesh}) + o3dvis.show_geometry(SOURCE_NAME, False) + + def toggle_result(o3dvis): + truth_vis = o3dvis.get_geometry(TRUTH_NAME).is_visible + o3dvis.show_geometry(TRUTH_NAME, not truth_vis) + o3dvis.show_geometry(RESULT_NAME, truth_vis) + + vis.draw([{ + "name": SOURCE_NAME, + "geometry": cloud + }, { + "name": TRUTH_NAME, + "geometry": bunny_mesh, + "is_visible": False + }], + actions=[("Create Mesh", make_mesh), + ("Toggle 
truth/result", toggle_result)]) + """ gui.Application.instance.initialize() w = O3DVisualizer(title, width, height) w.set_background(bg_color, bg_image) diff --git a/python/pyproject.toml b/python/pyproject.toml index 25cafb616b9..3d229011876 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,3 +1,3 @@ [build-system] -requires = ["ipywidgets>=7.6.0", "pygments>=2.7.4", "jupyter_packaging~=0.10", "jupyterlab>=3.0.0,==3.*", "setuptools>=40.8.0", "wheel"] +requires = ["ipywidgets>=8.0.3", "pygments>=2.7.4", "jupyter_packaging~=0.12", "jupyterlab>=3.0.0,==3.*", "setuptools>=50.3.2", "wheel==0.38.4"] build-backend = "setuptools.build_meta" diff --git a/python/requirements.txt b/python/requirements.txt index 1affea6046d..8677bc49f79 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -1,5 +1,5 @@ numpy>=1.18.0 dash>=2.6.0 werkzeug>=2.2.3 -nbformat==5.7.0 +nbformat>=5.7.0 configargparse diff --git a/python/requirements_jupyter_build.txt b/python/requirements_jupyter_build.txt index 9d43e772d4d..492acf451d4 100644 --- a/python/requirements_jupyter_build.txt +++ b/python/requirements_jupyter_build.txt @@ -1,5 +1,5 @@ pywinpty==2.0.2; sys_platform=='win32' and python_version=='3.6' ipywidgets>=8.0.4 pygments>=2.7.4 -jupyter_packaging~=0.10 +jupyter_packaging~=0.12 jupyterlab>=3.0.0,==3.* diff --git a/python/requirements_test.txt b/python/requirements_test.txt index 7eafb00b60a..9b3710ef1e4 100644 --- a/python/requirements_test.txt +++ b/python/requirements_test.txt @@ -3,4 +3,4 @@ pytest-randomly==3.8.0 scipy==1.7.3 tensorboard==2.8.0 oauthlib==3.2.2 -certifi==2022.12.7 +certifi==2023.7.22 diff --git a/python/setup.py b/python/setup.py index 5e69df23932..5322d230915 100644 --- a/python/setup.py +++ b/python/setup.py @@ -35,7 +35,7 @@ import jupyterlab # noqa # pylint: disable=unused-import except ImportError as error: print(error.__class__.__name__ + ": " + error.message) - print("Run `pip install jupyter_packaging ipywidgets 
jupyterlab`.") + print("Run `pip install -r requirements_jupyter_build.txt`.") here = os.path.dirname(os.path.abspath(__file__)) js_dir = os.path.join(here, "js") @@ -178,6 +178,9 @@ def finalize_options(self): description="@PROJECT_DESCRIPTION@", long_description=long_description, long_description_content_type="text/x-rst", + # Metadata below is valid but currently ignored by pip (<=v23) + obsoletes=["open3d_python"], + provides=["open3d", "open3d_cpu"], # For open3d-cpu ) setup(**setup_args) diff --git a/python/test/t/geometry/test_pointcloud.py b/python/test/t/geometry/test_pointcloud.py index 87db1ff7dd4..e6c194477ca 100644 --- a/python/test/t/geometry/test_pointcloud.py +++ b/python/test/t/geometry/test_pointcloud.py @@ -161,7 +161,7 @@ def test_member_functions(device): pcd_small_down = pcd.voxel_down_sample(1) assert pcd_small_down.point.positions.allclose( - o3c.Tensor([[0, 0, 0]], dtype, device)) + o3c.Tensor([[0.375, 0.375, 0.575]], dtype, device)) def test_extrude_rotation(): diff --git a/util/ci_utils.sh b/util/ci_utils.sh index 887e507fff5..80ca4469a14 100644 --- a/util/ci_utils.sh +++ b/util/ci_utils.sh @@ -244,8 +244,8 @@ build_pip_package() { echo "Packaging Open3D full pip package..." make VERBOSE=1 -j"$NPROC" pip-package - mv open3d*.whl lib/python_package/pip_package/ # restore CPU wheel - popd # PWD=Open3D + mv open3d*.whl lib/python_package/pip_package/ # restore CPU wheel + popd # PWD=Open3D } # Test wheel in blank virtual environment @@ -464,10 +464,13 @@ build_docs() { } maximize_ubuntu_github_actions_build_space() { - df -h - $SUDO rm -rf /usr/share/dotnet - $SUDO rm -rf /usr/local/lib/android - $SUDO rm -rf /opt/ghc + # https://github.com/easimon/maximize-build-space/blob/master/action.yml + df -h . 
# => 26GB + $SUDO rm -rf /usr/share/dotnet # ~17GB + $SUDO rm -rf /usr/local/lib/android # ~11GB + $SUDO rm -rf /opt/ghc # ~2.7GB + $SUDO rm -rf /opt/hostedtoolcache/CodeQL # ~5.4GB + $SUDO docker image prune --all --force # ~4.5GB $SUDO rm -rf "$AGENT_TOOLSDIRECTORY" - df -h + df -h . # => 53GB }