From 7ee28b23ff0a8a34d491609bdec658c5f097d137 Mon Sep 17 00:00:00 2001 From: Fabien Castan Date: Tue, 13 Apr 2021 17:31:15 +0200 Subject: [PATCH 1/5] [nodes] use dspsift instead of sift by default --- meshroom/nodes/aliceVision/CameraLocalization.py | 2 +- meshroom/nodes/aliceVision/CameraRigCalibration.py | 2 +- meshroom/nodes/aliceVision/CameraRigLocalization.py | 4 ++-- meshroom/nodes/aliceVision/ConvertSfMFormat.py | 2 +- meshroom/nodes/aliceVision/ExportMatches.py | 2 +- meshroom/nodes/aliceVision/FeatureExtraction.py | 2 +- meshroom/nodes/aliceVision/FeatureMatching.py | 2 +- meshroom/nodes/aliceVision/GlobalSfM.py | 2 +- meshroom/nodes/aliceVision/StructureFromMotion.py | 2 +- 9 files changed, 10 insertions(+), 10 deletions(-) diff --git a/meshroom/nodes/aliceVision/CameraLocalization.py b/meshroom/nodes/aliceVision/CameraLocalization.py index 484304d6aa..3afe30709a 100644 --- a/meshroom/nodes/aliceVision/CameraLocalization.py +++ b/meshroom/nodes/aliceVision/CameraLocalization.py @@ -42,7 +42,7 @@ class CameraLocalization(desc.CommandLineNode): name='matchDescTypes', label='Match Desc Types', description='''Describer types to use for the matching.''', - value=['sift'], + value=['dspsift'], values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'], exclusive=False, uid=[0], diff --git a/meshroom/nodes/aliceVision/CameraRigCalibration.py b/meshroom/nodes/aliceVision/CameraRigCalibration.py index f12ca6c401..9b5eecc61c 100644 --- a/meshroom/nodes/aliceVision/CameraRigCalibration.py +++ b/meshroom/nodes/aliceVision/CameraRigCalibration.py @@ -49,7 +49,7 @@ class CameraRigCalibration(desc.CommandLineNode): name='matchDescTypes', label='Match Describer Types', description='''The describer types to use for the matching''', - value=['sift'], + value=['dspsift'], values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 
'akaze_ocv'], exclusive=False, uid=[0], diff --git a/meshroom/nodes/aliceVision/CameraRigLocalization.py b/meshroom/nodes/aliceVision/CameraRigLocalization.py index 94e9514f96..e5a4dd37c7 100644 --- a/meshroom/nodes/aliceVision/CameraRigLocalization.py +++ b/meshroom/nodes/aliceVision/CameraRigLocalization.py @@ -49,8 +49,8 @@ class CameraRigLocalization(desc.CommandLineNode): name='matchDescTypes', label='Match Describer Types', description='''The describer types to use for the matching''', - value=['sift'], - values=['sift', 'sift_float', 'sift_upright', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'], + value=['dspsift'], + values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'], exclusive=False, uid=[0], joinChar=',', diff --git a/meshroom/nodes/aliceVision/ConvertSfMFormat.py b/meshroom/nodes/aliceVision/ConvertSfMFormat.py index 324329fbba..a5c71aa7ca 100644 --- a/meshroom/nodes/aliceVision/ConvertSfMFormat.py +++ b/meshroom/nodes/aliceVision/ConvertSfMFormat.py @@ -35,7 +35,7 @@ class ConvertSfMFormat(desc.CommandLineNode): name='describerTypes', label='Describer Types', description='Describer types to keep.', - value=['sift'], + value=['dspsift'], values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv', 'unknown'], exclusive=False, uid=[0], diff --git a/meshroom/nodes/aliceVision/ExportMatches.py b/meshroom/nodes/aliceVision/ExportMatches.py index 24a45d584e..b90c635898 100644 --- a/meshroom/nodes/aliceVision/ExportMatches.py +++ b/meshroom/nodes/aliceVision/ExportMatches.py @@ -21,7 +21,7 @@ class ExportMatches(desc.CommandLineNode): name='describerTypes', label='Describer Types', description='Describer types used to describe an image.', - value=['sift'], + value=['dspsift'], values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 
'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'], exclusive=False, uid=[0], diff --git a/meshroom/nodes/aliceVision/FeatureExtraction.py b/meshroom/nodes/aliceVision/FeatureExtraction.py index c733fc61f3..6dd487831f 100644 --- a/meshroom/nodes/aliceVision/FeatureExtraction.py +++ b/meshroom/nodes/aliceVision/FeatureExtraction.py @@ -42,7 +42,7 @@ class FeatureExtraction(desc.CommandLineNode): name='describerTypes', label='Describer Types', description='Describer types used to describe an image.', - value=['sift'], + value=['dspsift'], values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'], exclusive=False, uid=[0], diff --git a/meshroom/nodes/aliceVision/FeatureMatching.py b/meshroom/nodes/aliceVision/FeatureMatching.py index f9b6b1f8a6..bef82041ec 100644 --- a/meshroom/nodes/aliceVision/FeatureMatching.py +++ b/meshroom/nodes/aliceVision/FeatureMatching.py @@ -63,7 +63,7 @@ class FeatureMatching(desc.CommandLineNode): name='describerTypes', label='Describer Types', description='Describer types used to describe an image.', - value=['sift'], + value=['dspsift'], values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'], exclusive=False, uid=[0], diff --git a/meshroom/nodes/aliceVision/GlobalSfM.py b/meshroom/nodes/aliceVision/GlobalSfM.py index 7a0242d452..a400570a39 100644 --- a/meshroom/nodes/aliceVision/GlobalSfM.py +++ b/meshroom/nodes/aliceVision/GlobalSfM.py @@ -52,7 +52,7 @@ class GlobalSfM(desc.CommandLineNode): name='describerTypes', label='Describer Types', description='Describer types used to describe an image.', - value=['sift'], + value=['dspsift'], values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'], exclusive=False, diff --git a/meshroom/nodes/aliceVision/StructureFromMotion.py 
b/meshroom/nodes/aliceVision/StructureFromMotion.py index afc50f943d..e06b6a9eb6 100644 --- a/meshroom/nodes/aliceVision/StructureFromMotion.py +++ b/meshroom/nodes/aliceVision/StructureFromMotion.py @@ -97,7 +97,7 @@ class StructureFromMotion(desc.CommandLineNode): name='describerTypes', label='Describer Types', description='Describer types used to describe an image.', - value=['sift'], + value=['dspsift'], values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'], exclusive=False, uid=[0], From e330201077f5bf1d761850e71d0de0cb12bef0ca Mon Sep 17 00:00:00 2001 From: Fabien Castan Date: Tue, 13 Apr 2021 17:49:37 +0200 Subject: [PATCH 2/5] [nodes] sfm: change default value for observationConstraint to scale --- meshroom/nodes/aliceVision/StructureFromMotion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meshroom/nodes/aliceVision/StructureFromMotion.py b/meshroom/nodes/aliceVision/StructureFromMotion.py index e06b6a9eb6..2a3926932a 100644 --- a/meshroom/nodes/aliceVision/StructureFromMotion.py +++ b/meshroom/nodes/aliceVision/StructureFromMotion.py @@ -119,7 +119,7 @@ class StructureFromMotion(desc.CommandLineNode): description='Observation contraint mode used in the optimization:\n' ' * Basic: Use standard reprojection error in pixel coordinates\n' ' * Scale: Use reprojection error in pixel coordinates but relative to the feature scale', - value='Basic', + value='Scale', values=['Basic', 'Scale'], exclusive=True, uid=[0], From 8cf9dae33fd96f2eac5b60679a5fedace96783d5 Mon Sep 17 00:00:00 2001 From: Fabien Castan Date: Tue, 13 Apr 2021 18:09:43 +0200 Subject: [PATCH 3/5] [nodes] ImageMatching: use method SequentialAndVocTree by default --- meshroom/nodes/aliceVision/ImageMatching.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/meshroom/nodes/aliceVision/ImageMatching.py b/meshroom/nodes/aliceVision/ImageMatching.py index 
62b9e2323c..c09c701d9d 100644 --- a/meshroom/nodes/aliceVision/ImageMatching.py +++ b/meshroom/nodes/aliceVision/ImageMatching.py @@ -65,7 +65,7 @@ class ImageMatching(desc.CommandLineNode): ' * Exhaustive: Export all image pairs.\n' ' * Frustum: If images have known poses, computes the intersection between cameras frustums to create the list of image pairs.\n' ' * FrustumOrVocabularyTree: If images have known poses, use frustum intersection else use VocabularyTree.\n', - value='VocabularyTree', + value='SequentialAndVocabularyTree', values=['VocabularyTree', 'Sequential', 'SequentialAndVocabularyTree', 'Exhaustive', 'Frustum', 'FrustumOrVocabularyTree'], exclusive=True, uid=[0], @@ -111,7 +111,7 @@ class ImageMatching(desc.CommandLineNode): name='nbMatches', label='Voc Tree: Nb Matches', description='The number of matches to retrieve for each image (If 0 it will retrieve all the matches).', - value=50, + value=40, range=(0, 1000, 1), uid=[0], advanced=True, @@ -121,7 +121,7 @@ class ImageMatching(desc.CommandLineNode): name='nbNeighbors', label='Sequential: Nb Neighbors', description='The number of neighbors to retrieve for each image (If 0 it will retrieve all the neighbors).', - value=50, + value=5, range=(0, 1000, 1), uid=[0], advanced=True, From 2f18e89a9dd91d62a6847c686b8c491212e80937 Mon Sep 17 00:00:00 2001 From: Fabien Castan Date: Fri, 16 Apr 2021 10:39:07 +0200 Subject: [PATCH 4/5] [nodes] KeyframeSelection: extract all frames by default --- meshroom/nodes/aliceVision/KeyframeSelection.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/meshroom/nodes/aliceVision/KeyframeSelection.py b/meshroom/nodes/aliceVision/KeyframeSelection.py index db035ec1c6..c8635a8c6b 100644 --- a/meshroom/nodes/aliceVision/KeyframeSelection.py +++ b/meshroom/nodes/aliceVision/KeyframeSelection.py @@ -109,14 +109,14 @@ class KeyframeSelection(desc.CommandLineNode): name='useSparseDistanceSelection', label='Use Sparse Distance Selection', 
description='Use sparseDistance selection in order to avoid similar keyframes.', - value=True, + value=False, uid=[0], ), desc.BoolParam( name='useSharpnessSelection', label='Use Sharpness Selection', description='Use frame sharpness score for keyframe selection.', - value=True, + value=False, uid=[0], ), desc.FloatParam( @@ -148,7 +148,7 @@ class KeyframeSelection(desc.CommandLineNode): name='minFrameStep', label='Min Frame Step', description='''minimum number of frames between two keyframes''', - value=12, + value=1, range=(1, 100, 1), uid=[0], ), @@ -156,7 +156,7 @@ class KeyframeSelection(desc.CommandLineNode): name='maxFrameStep', label='Max Frame Step', description='''maximum number of frames after which a keyframe can be taken''', - value=36, + value=2, range=(2, 1000, 1), uid=[0], ), From 0ddda9f9822c50a81c613c8366c2d51696d070ce Mon Sep 17 00:00:00 2001 From: Fabien Castan Date: Fri, 16 Apr 2021 10:40:20 +0200 Subject: [PATCH 5/5] [multiview] new experimental pipeline for camera tracking --- bin/meshroom_batch | 5 ++- meshroom/multiview.py | 63 +++++++++++++++++++++++++++++++++-- meshroom/ui/qml/main.qml | 4 +++ meshroom/ui/reconstruction.py | 3 ++ 4 files changed, 72 insertions(+), 3 deletions(-) mode change 100644 => 100755 bin/meshroom_batch diff --git a/bin/meshroom_batch b/bin/meshroom_batch old mode 100644 new mode 100755 index 2fc12b673b..bfa388c401 --- a/bin/meshroom_batch +++ b/bin/meshroom_batch @@ -21,7 +21,7 @@ parser.add_argument('-I', '--inputRecursive', metavar='FOLDERS/IMAGES', type=str help='Input folders containing all images recursively.') parser.add_argument('-p', '--pipeline', metavar='photogrammetry/panoramaHdr/panoramaFisheyeHdr/MG_FILE', type=str, default='photogrammetry', - help='"photogrammetry" pipeline, "panoramaHdr" pipeline, "panoramaFisheyeHdr" pipeline or a Meshroom file containing a custom pipeline to run on input images. 
' + help='"photogrammetry", "panoramaHdr", "panoramaFisheyeHdr", "cameraTracking" pipeline or a Meshroom file containing a custom pipeline to run on input images. ' 'Requirements: the graph must contain one CameraInit node, ' 'and one Publish node if --output is set.') @@ -119,6 +119,9 @@ with multiview.GraphModification(graph): elif args.pipeline.lower() == "panoramafisheyehdr": # default panorama Fisheye Hdr pipeline multiview.panoramaFisheyeHdr(inputViewpoints=views, inputIntrinsics=intrinsics, output=args.output, graph=graph) + elif args.pipeline.lower() == "cameratracking": + # default camera tracking pipeline + multiview.cameraTracking(inputViewpoints=views, inputIntrinsics=intrinsics, output=args.output, graph=graph) else: # custom pipeline graph.load(args.pipeline) diff --git a/meshroom/multiview.py b/meshroom/multiview.py index c6807a0ae0..9798a4fbd3 100644 --- a/meshroom/multiview.py +++ b/meshroom/multiview.py @@ -182,9 +182,11 @@ def panoramaFisheyeHdr(inputImages=None, inputViewpoints=None, inputIntrinsics=N panoramaHdr(inputImages, inputViewpoints, inputIntrinsics, output, graph) for panoramaInit in graph.nodesOfType("PanoramaInit"): panoramaInit.attribute("useFisheye").value = True - # when using fisheye images, the overlap between images can be small - # and thus requires many features to get enough correspondances for cameras estimation for featureExtraction in graph.nodesOfType("FeatureExtraction"): + # when using fisheye images, 'sift' performs better than 'dspsift' + featureExtraction.attribute("describerTypes").value = ['sift'] + # when using fisheye images, the overlap between images can be small + # and thus requires many features to get enough correspondences for cameras estimation featureExtraction.attribute("describerPreset").value = 'high' return graph @@ -468,3 +470,60 @@ def sfmAugmentation(graph, sourceSfm, withMVS=False): mvsNodes = mvsPipeline(graph, structureFromMotion) return sfmNodes, mvsNodes + + +def 
cameraTrackingPipeline(graph): + """ + Instantiate a camera tracking pipeline inside 'graph'. + + Args: + graph (Graph/UIGraph): the graph in which nodes should be instantiated + + Returns: + list of Node: the created nodes + """ + + with GraphModification(graph): + + cameraInit, featureExtraction, imageMatching, featureMatching, structureFromMotion = sfmPipeline(graph) + + imageMatching.attribute("nbMatches").value = 5 # voctree nb matches + imageMatching.attribute("nbNeighbors").value = 10 + + structureFromMotion.attribute("minNumberOfMatches").value = 0 + structureFromMotion.attribute("minInputTrackLength").value = 5 + structureFromMotion.attribute("minNumberOfObservationsForTriangulation").value = 3 + structureFromMotion.attribute("minAngleForTriangulation").value = 1.0 + structureFromMotion.attribute("minAngleForLandmark").value = 0.5 + + exportAnimatedCamera = graph.addNewNode('ExportAnimatedCamera', input=structureFromMotion.output) + + # store current pipeline version in graph header + graph.header.update({'pipelineVersion': __version__}) + + return [ + cameraInit, + featureExtraction, + imageMatching, + featureMatching, + structureFromMotion, + exportAnimatedCamera, + ] + + +def cameraTracking(inputImages=list(), inputViewpoints=list(), inputIntrinsics=list(), output='', graph=None): + if not graph: + graph = Graph('Camera Tracking') + with GraphModification(graph): + trackingNodes = cameraTrackingPipeline(graph) + cameraInit = trackingNodes[0] + cameraInit.viewpoints.extend([{'path': image} for image in inputImages]) + cameraInit.viewpoints.extend(inputViewpoints) + cameraInit.intrinsics.extend(inputIntrinsics) + + if output: + exportNode = trackingNodes[-1] + graph.addNewNode('Publish', output=output, inputFiles=[exportNode.output]) + + return graph + diff --git a/meshroom/ui/qml/main.qml b/meshroom/ui/qml/main.qml index 523ea39926..270fae10aa 100755 --- a/meshroom/ui/qml/main.qml +++ b/meshroom/ui/qml/main.qml @@ -420,6 +420,10 @@ ApplicationWindow { 
text: "Panorama Fisheye HDR" onTriggered: ensureSaved(function() { _reconstruction.new("panoramafisheyehdr") }) } + Action { + text: "Camera Tracking (experimental)" + onTriggered: ensureSaved(function() { _reconstruction.new("cameratracking") }) + } } Action { id: openActionItem diff --git a/meshroom/ui/reconstruction.py b/meshroom/ui/reconstruction.py index c37ffafc67..56ce0092f3 100755 --- a/meshroom/ui/reconstruction.py +++ b/meshroom/ui/reconstruction.py @@ -490,6 +490,9 @@ def new(self, pipeline=None): elif p.lower() == "panoramafisheyehdr": # default panorama fisheye hdr pipeline self.setGraph(multiview.panoramaFisheyeHdr()) + elif p.lower() == "cameratracking": + # default camera tracking pipeline + self.setGraph(multiview.cameraTracking()) else: # use the user-provided default photogrammetry project file self.load(p, setupProjectFile=False)