From 8064f2fe7f8defc2189fa540618a5002d3db9928 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Wed, 23 Aug 2023 16:23:33 -0400 Subject: [PATCH 01/84] ruff --- docs/conf.py | 3 +- docs/examples.py | 8 +-- examples/convexify.py | 3 +- examples/docker/render/render.py | 2 +- examples/nricp.py | 7 ++- examples/offscreen_render.py | 2 +- examples/outlined.py | 3 +- examples/ray.py | 3 +- examples/raytrace.py | 3 +- examples/scan_register.py | 3 +- examples/shortest.py | 4 +- examples/sinter.py | 8 ++- examples/viewcallback.py | 4 +- examples/voxel.py | 10 ++-- examples/voxel_silhouette.py | 6 ++- examples/widget.py | 7 ++- pyproject.toml | 26 +++++++--- tests/corpus.py | 5 +- tests/helpers/dxfhelp.py | 8 +-- tests/helpers/id_helper.py | 19 +++---- tests/notebooks.py | 18 +++---- tests/test_base.py | 6 +-- tests/test_binvox.py | 3 +- tests/test_bounds.py | 2 +- tests/test_cache.py | 23 ++++----- tests/test_convex.py | 2 +- tests/test_creation.py | 6 +-- tests/test_dxf.py | 2 +- tests/test_export.py | 6 +-- tests/test_gltf.py | 10 ++-- tests/test_graph.py | 2 +- tests/test_grouping.py | 8 +-- tests/test_identifier.py | 10 ++-- tests/test_inertia.py | 2 +- tests/test_integrate.py | 3 +- tests/test_mesh.py | 3 +- tests/test_minimal.py | 5 +- tests/test_obj.py | 8 +-- tests/test_pbr.py | 3 +- tests/test_permutate.py | 6 +-- tests/test_polygons.py | 5 +- tests/test_primitives.py | 4 +- tests/test_proximity.py | 2 +- tests/test_resolvers.py | 12 ++--- tests/test_runlength.py | 1 + tests/test_scene.py | 4 +- tests/test_scenegraph.py | 4 +- tests/test_simplify.py | 4 +- tests/test_texture.py | 6 +-- tests/test_transformations.py | 7 +-- tests/test_util.py | 8 +-- tests/test_vhacd.py | 2 +- tests/test_voxel.py | 2 +- trimesh/__init__.py | 29 ++++------- trimesh/base.py | 69 +++++++++++++------------ trimesh/bounds.py | 11 ++-- trimesh/caching.py | 11 ++-- trimesh/collision.py | 14 ++--- trimesh/comparison.py | 4 +- trimesh/constants.py | 6 +-- trimesh/convex.py | 5 +- trimesh/creation.py | 28 ++++------ trimesh/exceptions.py | 6 +-- trimesh/exchange/binvox.py | 24 ++++----- trimesh/exchange/dae.py | 14 +++-- trimesh/exchange/export.py | 19 ++++--- trimesh/exchange/gltf.py | 42 ++++++--------- trimesh/exchange/load.py | 44 +++++++--------- trimesh/exchange/obj.py | 42 +++++++-------- trimesh/exchange/off.py | 3 +- trimesh/exchange/openctm.py | 5 +- trimesh/exchange/ply.py | 34 +++++------- trimesh/exchange/stl.py | 7 ++- trimesh/exchange/threedxml.py | 10 ++-- trimesh/exchange/threemf.py | 28 +++------- trimesh/exchange/urdf.py | 35 ++++++------- trimesh/exchange/xaml.py | 9 ++-- trimesh/graph.py | 10 ++-- trimesh/grouping.py | 3 +- trimesh/interfaces/blender.py | 10 ++-- trimesh/interfaces/generic.py | 14 ++--- trimesh/interfaces/gmsh.py | 3 +- trimesh/interfaces/scad.py | 4 +- trimesh/interfaces/vhacd.py | 5 +- trimesh/intersections.py | 14 +++-- trimesh/nsphere.py | 4 +- trimesh/parent.py | 15 +++--- trimesh/path/arc.py | 5 +- trimesh/path/creation.py | 8 ++- trimesh/path/entities.py | 8 +-- trimesh/path/exchange/dxf.py | 27 ++++------ trimesh/path/exchange/export.py | 5 +- trimesh/path/exchange/load.py | 10 ++-- trimesh/path/exchange/misc.py | 7 +-- trimesh/path/exchange/svg_io.py | 40 ++++++-------- trimesh/path/intersections.py | 1 - trimesh/path/packing.py | 8 +-- trimesh/path/path.py | 48 +++++++---------- trimesh/path/polygons.py | 8 +-- trimesh/path/raster.py | 6 +-- trimesh/path/repair.py | 8 +-- trimesh/path/segments.py | 7 +-- trimesh/path/simplify.py | 14 ++--- 
trimesh/path/traversal.py | 6 +-- trimesh/path/util.py | 3 +- trimesh/permutate.py | 3 +- trimesh/points.py | 14 ++--- trimesh/primitives.py | 37 ++++++------- trimesh/proximity.py | 5 +- trimesh/ray/__init__.py | 2 +- trimesh/ray/ray_pyembree.py | 18 +++---- trimesh/ray/ray_triangle.py | 14 ++--- trimesh/ray/ray_util.py | 4 +- trimesh/registration.py | 12 ++--- trimesh/remesh.py | 9 ++-- trimesh/rendering.py | 4 +- trimesh/repair.py | 7 +-- trimesh/resolvers.py | 10 ++-- trimesh/resources/__init__.py | 4 +- trimesh/resources/javascript/compile.py | 8 +-- trimesh/sample.py | 6 +-- trimesh/scene/__init__.py | 1 - trimesh/scene/cameras.py | 7 ++- trimesh/scene/lighting.py | 12 ++--- trimesh/scene/scene.py | 29 ++++------- trimesh/scene/transforms.py | 22 ++++---- trimesh/smoothing.py | 4 +- trimesh/transformations.py | 7 ++- trimesh/triangles.py | 5 +- trimesh/units.py | 2 +- trimesh/util.py | 34 ++++++------ trimesh/version.py | 2 +- trimesh/viewer/__init__.py | 8 +-- trimesh/viewer/notebook.py | 5 +- trimesh/viewer/trackball.py | 2 +- trimesh/viewer/widget.py | 5 +- trimesh/viewer/windowed.py | 27 +++++----- trimesh/visual/base.py | 1 + trimesh/visual/color.py | 13 ++--- trimesh/visual/gloss.py | 2 +- trimesh/visual/material.py | 33 ++++++------ trimesh/visual/objects.py | 2 +- trimesh/visual/texture.py | 12 ++--- trimesh/voxel/base.py | 16 ++---- trimesh/voxel/creation.py | 7 +-- trimesh/voxel/encoding.py | 23 ++++----- trimesh/voxel/morphology.py | 6 +-- trimesh/voxel/ops.py | 1 + trimesh/voxel/runlength.py | 1 + trimesh/voxel/transforms.py | 6 +-- 150 files changed, 674 insertions(+), 901 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index e33ea433f..fd4cbbfdb 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,7 +1,6 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- -import os import inspect +import os def abspath(rel): diff --git a/docs/examples.py b/docs/examples.py index 4c511db88..a1e90570a 100644 --- a/docs/examples.py +++ b/docs/examples.py @@ -6,10 +6,10 @@ of `../examples/*.ipynb` """ -import os -import sys import json import logging +import os +import sys log = logging.getLogger('trimesh') log.addHandler(logging.StreamHandler(sys.stdout)) @@ -62,10 +62,10 @@ def extract_docstring(loaded): if not fn.lower().endswith('.ipynb'): continue path = os.path.join(source, fn) - with open(path, 'r') as f: + with open(path) as f: raw = json.load(f) doc = extract_docstring(raw) - log.info('`{}`: "{}"\n'.format(fn, doc)) + log.info(f'`{fn}`: "{doc}"\n') link = f'examples.{fn.split(".")[0]}.html' markdown.append(f'### [{fn}]({link})') diff --git a/examples/convexify.py b/examples/convexify.py index 484b14323..c1b675b40 100644 --- a/examples/convexify.py +++ b/examples/convexify.py @@ -8,9 +8,10 @@ Useful for generating collision models of an object. 
""" -import trimesh import numpy as np +import trimesh + if __name__ == '__main__': # attach to trimesh logs diff --git a/examples/docker/render/render.py b/examples/docker/render/render.py index 172ccba18..fed7a97ad 100644 --- a/examples/docker/render/render.py +++ b/examples/docker/render/render.py @@ -1,6 +1,6 @@ -import trimesh from pyglet import gl +import trimesh if __name__ == '__main__': # print logged messages diff --git a/examples/nricp.py b/examples/nricp.py index 53e98243f..e95e608fd 100644 --- a/examples/nricp.py +++ b/examples/nricp.py @@ -9,12 +9,11 @@ """ -import trimesh import numpy as np -from trimesh.registration import (nricp_amberg, - nricp_sumner, - procrustes) + +import trimesh from trimesh.proximity import closest_point +from trimesh.registration import nricp_amberg, nricp_sumner, procrustes from trimesh.triangles import points_to_barycentric diff --git a/examples/offscreen_render.py b/examples/offscreen_render.py index e7930db34..1c855ea5b 100644 --- a/examples/offscreen_render.py +++ b/examples/offscreen_render.py @@ -1,7 +1,7 @@ import numpy as np -import trimesh +import trimesh if __name__ == '__main__': # print logged messages diff --git a/examples/outlined.py b/examples/outlined.py index da0da16e6..79401f5f5 100644 --- a/examples/outlined.py +++ b/examples/outlined.py @@ -5,9 +5,10 @@ Show a mesh with edges highlighted using GL_LINES """ -import trimesh import numpy as np +import trimesh + if __name__ == '__main__': mesh = trimesh.load('../models/featuretype.STL') diff --git a/examples/ray.py b/examples/ray.py index c6bb60522..372e456ab 100644 --- a/examples/ray.py +++ b/examples/ray.py @@ -7,9 +7,10 @@ same API with a roughly 50x speedup. """ -import trimesh import numpy as np +import trimesh + if __name__ == '__main__': # test on a sphere mesh diff --git a/examples/raytrace.py b/examples/raytrace.py index becbcd7f6..daf193568 100644 --- a/examples/raytrace.py +++ b/examples/raytrace.py @@ -7,12 +7,11 @@ Install `pyembree` for a speedup (600k+ rays per second) """ -from __future__ import division +import numpy as np import PIL.Image import trimesh -import numpy as np if __name__ == '__main__': diff --git a/examples/scan_register.py b/examples/scan_register.py index 9b5600acc..613c7a222 100644 --- a/examples/scan_register.py +++ b/examples/scan_register.py @@ -6,9 +6,10 @@ it to a "truth" mesh. """ -import trimesh import numpy as np +import trimesh + def simulated_brick(face_count, extents, noise, max_iter=10): """ diff --git a/examples/shortest.py b/examples/shortest.py index cc82810f2..4b96b91ac 100644 --- a/examples/shortest.py +++ b/examples/shortest.py @@ -7,10 +7,10 @@ of the mesh. """ -import trimesh - import networkx as nx +import trimesh + if __name__ == '__main__': # test on a sphere mesh diff --git a/examples/sinter.py b/examples/sinter.py index 02af16fe0..006bd13ce 100644 --- a/examples/sinter.py +++ b/examples/sinter.py @@ -3,14 +3,12 @@ might for a powder volume in a sintered printing process. 
""" import os -import trimesh import numpy as np - -from trimesh.path import packing - from pyinstrument import Profiler +import trimesh +from trimesh.path import packing # path with our sample models models = os.path.abspath(os.path.join( @@ -65,7 +63,7 @@ def collect_meshes(count=None, max_size=20.0): # get some sample data meshes = collect_meshes(max_size=size) - log.debug('loaded {} meshes'.format(len(meshes))) + log.debug(f'loaded {len(meshes)} meshes') # place the meshes into the volume with Profiler() as P: diff --git a/examples/viewcallback.py b/examples/viewcallback.py index 80af678c3..82ccff58a 100644 --- a/examples/viewcallback.py +++ b/examples/viewcallback.py @@ -7,9 +7,11 @@ """ import time -import trimesh + import numpy as np +import trimesh + def sinwave(scene): """ diff --git a/examples/voxel.py b/examples/voxel.py index 7e3a4fe89..e85ac7f08 100644 --- a/examples/voxel.py +++ b/examples/voxel.py @@ -1,14 +1,12 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +import inspect import os + import numpy as np -import inspect -import trimesh -from trimesh.exchange.binvox import voxelize_mesh +import trimesh from trimesh import voxel as v +from trimesh.exchange.binvox import voxelize_mesh log = trimesh.util.log diff --git a/examples/voxel_silhouette.py b/examples/voxel_silhouette.py index fd8551574..fd76a8476 100644 --- a/examples/voxel_silhouette.py +++ b/examples/voxel_silhouette.py @@ -1,8 +1,10 @@ import os -import trimesh + import numpy as np from PIL import Image +import trimesh + def vis(): # separate function to delay plt import @@ -42,7 +44,7 @@ def vis(): closest = np.min(dists) farthest = np.max(dists) z = np.linspace(closest, farthest, resolution) - log.debug('z range: %f, %f' % (closest, farthest)) + log.debug(f'z range: {closest:f}, {farthest:f}') vox = mesh.voxelized(1. / resolution, method='binvox') diff --git a/examples/widget.py b/examples/widget.py index 2e4ddd8bc..08011e1d8 100644 --- a/examples/widget.py +++ b/examples/widget.py @@ -7,13 +7,12 @@ import glooey import numpy as np - +import PIL.Image import pyglet + import trimesh -import trimesh.viewer import trimesh.transformations as tf -import PIL.Image - +import trimesh.viewer here = pathlib.Path(__file__).resolve().parent diff --git a/pyproject.toml b/pyproject.toml index 7818fa9c6..f8cc629bb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,12 +3,26 @@ build-backend = "setuptools.build_meta" requires = ["setuptools >= 40.8", "wheel"] [tool.ruff] -select = ["E", "F", # the default rules - "T201", # disallow print statements - "B"] # pass bugbear -ignore = ["B905", # `zip()` without an explicit `strict=` - "B904", # `raise ... from err` seems a bit silly - "B018"] # useless expression ideally ignore only on `tests` +# See https://github.com/charliermarsh/ruff#rules for error code definitions. 
+select = [ + # "ANN", # annotations + "B", # bugbear + "C", # comprehensions + "E", # style errors + "F", # flakes + "I", # import sorting + "RUF100", # meta + "UP", # pyupgrade + "W", # style warnings + "YTT", # sys.version +] +ignore = [ + "C901", # Function is too complex (11 > 10) + "N802", # Function name should be lowercase + "N806", # Variable in function should be lowercase + "E501", # Line too long ({width} > {limit} characters) + "B905", # zip() without an explicit strict= parameter +] line-length = 90 [tool.autopep8] diff --git a/tests/corpus.py b/tests/corpus.py index f5d16acee..c4f1127ae 100644 --- a/tests/corpus.py +++ b/tests/corpus.py @@ -5,12 +5,11 @@ Test loaders against large corpuses of test data from github: will download more than a gigabyte to your home directory! """ -import trimesh -from trimesh.util import wrap_as_stream, log import numpy as np - from pyinstrument import Profiler +import trimesh +from trimesh.util import log, wrap_as_stream # get a set with available extension available = trimesh.available_formats() diff --git a/tests/helpers/dxfhelp.py b/tests/helpers/dxfhelp.py index c881c6cee..088331791 100644 --- a/tests/helpers/dxfhelp.py +++ b/tests/helpers/dxfhelp.py @@ -6,8 +6,9 @@ than strings inside a JSON blob """ -import os import json +import os + import numpy as np @@ -15,7 +16,7 @@ def get_json(file_name='../templates/dxf.json'): """ Load the JSON blob into native objects """ - with open(file_name, 'r') as f: + with open(file_name) as f: t = json.load(f) return t @@ -82,7 +83,7 @@ def read_files(path): # skip emacs buffers if '~' in file_name: continue - with open(os.path.join(path, file_name), 'r') as f: + with open(os.path.join(path, file_name)) as f: template[file_name] = replace_whitespace( f.read(), reformat=False, insert=True) @@ -92,6 +93,7 @@ def read_files(path): if __name__ == '__main__': import sys + import trimesh trimesh.util.attach_to_log() diff --git a/tests/helpers/id_helper.py b/tests/helpers/id_helper.py index 3d656d8b8..b33dbb5c4 100644 --- a/tests/helpers/id_helper.py +++ b/tests/helpers/id_helper.py @@ -10,16 +10,15 @@ changes. We use this to generate the arbitrary sigfig thresholds. 
""" -import trimesh -import numpy as np - - -import time +import collections import json +import logging import os +import time -import collections -import logging +import numpy as np + +import trimesh log = trimesh.util.log TOL_ZERO = 1e-12 @@ -74,9 +73,7 @@ def permutations(mesh, identifiers.append(identifier) if (time.time() - start) > cutoff: - log.debug('bailing for time:{} count:{}'.format( - time.time() - start, - i)) + log.debug(f'bailing for time:{time.time() - start} count:{i}') return np.array(identifiers) return np.array(identifiers) @@ -182,7 +179,7 @@ def data_stats(data): result.append({'mean': mean.tolist(), 'percent': percent.tolist()}) - log.debug('\n\n{}/{}'.format(i, len(meshes) - 1)) + log.debug(f'\n\n{i}/{len(meshes) - 1}') log.debug('mean', mean) log.debug('percent', percent) log.debug('oom', mean / percent) diff --git a/tests/notebooks.py b/tests/notebooks.py index c82b96365..fdf0a8717 100644 --- a/tests/notebooks.py +++ b/tests/notebooks.py @@ -1,9 +1,10 @@ -import os -import sys -import json import inspect +import json import logging +import os import subprocess +import sys + import numpy as np # current working directory @@ -166,7 +167,7 @@ def main(): file_name = sys.argv[sys.argv.index("exec") + 1].strip() # we want to skip some of these examples in CI if 'ci' in sys.argv and os.path.basename(file_name) in ci_blacklist: - log.debug('{} in CI blacklist: skipping!'.format(file_name)) + log.debug(f'{file_name} in CI blacklist: skipping!') return # skip files that don't exist @@ -175,23 +176,22 @@ def main(): if file_name.lower().endswith('.ipynb'): # ipython notebooks - with open(file_name, 'r') as file_obj: + with open(file_name) as file_obj: script = load_notebook(file_obj) elif file_name.lower().endswith('.py'): # regular python files - with open(file_name, 'r') as file_obj: + with open(file_name) as file_obj: script = exclude_calls(file_obj.read().split('\n')) else: # skip other types of files return - log.debug('running {}'.format(file_name)) + log.debug(f'running {file_name}') try: exec(script, globals()) except BaseException as E: log.debug( - 'failed {}!\n\nscript was:\n{}\n\n'.format( - file_name, script)) + f'failed {file_name}!\n\nscript was:\n{script}\n\n') raise E diff --git a/tests/test_base.py b/tests/test_base.py index 722daaee2..11ced68ce 100644 --- a/tests/test_base.py +++ b/tests/test_base.py @@ -73,7 +73,7 @@ def test_none(self): if method.startswith('_'): continue # a string expression to evaluate - expr = 'mesh.{}'.format(method) + expr = f'mesh.{method}' try: # get the value of that expression @@ -94,12 +94,12 @@ def test_none(self): if method.startswith('_') or method in blacklist: continue # a string expression to evaluate - expr = 'scene.{}'.format(method) + expr = f'scene.{method}' # get the value of that expression res = eval(expr) # shouldn't be None! 
if res is None: - raise ValueError('"{}" is None!!'.format(expr)) + raise ValueError(f'"{expr}" is None!!') if __name__ == '__main__': diff --git a/tests/test_binvox.py b/tests/test_binvox.py index 9c1077d1b..dc19b3392 100644 --- a/tests/test_binvox.py +++ b/tests/test_binvox.py @@ -4,9 +4,10 @@ import generic as g from io import BytesIO + +from trimesh import voxel as v from trimesh.exchange import binvox from trimesh.voxel import runlength as rl -from trimesh import voxel as v class BinvoxTest(g.unittest.TestCase): diff --git a/tests/test_bounds.py b/tests/test_bounds.py index fef025720..6e9b1b6c4 100644 --- a/tests/test_bounds.py +++ b/tests/test_bounds.py @@ -291,7 +291,7 @@ def test_obb_corpus(self): meshes = list(g.get_meshes(split=True, min_volume=min_volume, only_watertight=True)) - g.log.debug('loaded {} meshes'.format(len(meshes))) + g.log.debug(f'loaded {len(meshes)} meshes') if g.PY3: # our models corpus should have 200+ models diff --git a/tests/test_cache.py b/tests/test_cache.py index cb8cd629d..f4b97cdd4 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -20,8 +20,7 @@ def test_track(self): g.trimesh.caching.sha256] for option in options: - g.log.info('testing hash function: {}'.format( - option.__name__)) + g.log.info(f'testing hash function: {option.__name__}') g.trimesh.caching.hash_fast = option # generate test data and perform numpy operations @@ -108,8 +107,7 @@ def test_contiguous(self): g.trimesh.caching.sha256] for option in options: - g.log.info('testing hash function: {}'.format( - option.__name__)) + g.log.info(f'testing hash function: {option.__name__}') g.trimesh.caching.hash_fast = option # hashing will fail on non- contiguous arrays # make sure our utility function has handled this @@ -196,7 +194,9 @@ def test_method_combinations(self): return import itertools + import numpy as np + from trimesh.caching import tracked_array dim = (100, 3) @@ -218,7 +218,7 @@ def test_method_combinations(self): 'shape'] # start with no arguments - attempts = [tuple()] + attempts = [()] # add a single argument from our guesses attempts.extend([(A,) for A in flat]) # add 2 and 3 length permutations of our guesses @@ -228,9 +228,9 @@ def test_method_combinations(self): # are suspicious of a method caching you could uncomment this out: # attempts.extend([tuple(G) for G in itertools.permutations(flat, 3)]) - skip = set(['__array_ufunc__', # segfaulting when called with `(2.3, 1)` + skip = {'__array_ufunc__', # segfaulting when called with `(2.3, 1)` 'astype', - ]) + } # collect functions which mutate arrays but don't change our hash broken = [] @@ -241,14 +241,14 @@ def test_method_combinations(self): continue failures = [] - g.log.debug('hash check: `{}`'.format(method)) + g.log.debug(f'hash check: `{method}`') for A in attempts: m = g.random((100, 3)) true_pre = m.tobytes() m = tracked_array(m) hash_pre = hash(m) try: - eval('m.{method}(*A)'.format(method=method)) + eval(f'm.{method}(*A)') except BaseException as J: failures.append(str(J)) @@ -261,10 +261,9 @@ def test_method_combinations(self): broken.append((method, A)) if len(broken) > 0: - method_busted = set([method for method, _ in broken]) + method_busted = {method for method, _ in broken} raise ValueError( - '`TrackedArray` incorrectly hashing methods: {}'.format( - method_busted)) + f'`TrackedArray` incorrectly hashing methods: {method_busted}') def test_validate(self): # create a mesh with two duplicate triangles diff --git a/tests/test_convex.py b/tests/test_convex.py index 7bf530bec..33968233f 100644 --- 
a/tests/test_convex.py +++ b/tests/test_convex.py @@ -56,7 +56,7 @@ def test_convex(self): close_ok = ratio > .9 if not close_ok: - g.log.error('volume inconsistent: {}'.format(volume)) + g.log.error(f'volume inconsistent: {volume}') raise ValueError('volume is inconsistent on {}'.format( mesh.metadata['file_name'])) assert min(volume) > 0.0 diff --git a/tests/test_creation.py b/tests/test_creation.py index f6511fa5f..2781f4d23 100644 --- a/tests/test_creation.py +++ b/tests/test_creation.py @@ -126,8 +126,7 @@ def test_camera_marker(self): assert isinstance(meshes, list) # all meshes should be viewable type for mesh in meshes: - assert isinstance(mesh, (g.trimesh.Trimesh, - g.trimesh.path.Path3D)) + assert isinstance(mesh, g.trimesh.Trimesh | g.trimesh.path.Path3D) def test_axis(self): # specify the size of the origin radius @@ -290,8 +289,7 @@ def test_triangulate(self): g.log.error( 'failed to benchmark triangle', exc_info=True) g.log.info( - 'benchmarked triangulation on {} polygons: {}'.format( - len(bench), str(times))) + f'benchmarked triangulation on {len(bench)} polygons: {str(times)}') def test_triangulate_plumbing(self): """ diff --git a/tests/test_dxf.py b/tests/test_dxf.py index c63351052..dd10e1cbb 100644 --- a/tests/test_dxf.py +++ b/tests/test_dxf.py @@ -32,7 +32,7 @@ def test_dxf(self): # try using ezdxf as a simple validator # it raises exceptions aggressively if ezdxf is not None: - with open(temp_name, 'r') as f: + with open(temp_name) as f: ezdxf.read(f) # export to a string diff --git a/tests/test_export.py b/tests/test_export.py index 14ea1b093..3ffd72f44 100644 --- a/tests/test_export.py +++ b/tests/test_export.py @@ -12,9 +12,9 @@ def test_export(self): from trimesh.exceptions import ExceptionWrapper - export_types = set(k for k, v in + export_types = {k for k, v in g.trimesh.exchange.export._mesh_exporters.items() - if not isinstance(v, ExceptionWrapper)) + if not isinstance(v, ExceptionWrapper)} meshes = list(g.get_meshes(8)) # make sure we've got something with texture @@ -252,7 +252,7 @@ def test_parse_file_args(self): RET_COUNT = 5 # a path that doesn't exist - nonexists = '/banana{}'.format(g.random()) + nonexists = f'/banana{g.random()}' assert not g.os.path.exists(nonexists) # loadable OBJ model diff --git a/tests/test_gltf.py b/tests/test_gltf.py index 767fefe53..c11a0daa5 100644 --- a/tests/test_gltf.py +++ b/tests/test_gltf.py @@ -829,11 +829,11 @@ def test_primitive_geometry_meta(self): # Model with primitives s = g.get_mesh('CesiumMilkTruck.glb') # check to see if names are somewhat sane - assert set(s.geometry.keys()) == set([ + assert set(s.geometry.keys()) == { 'Cesium_Milk_Truck', 'Cesium_Milk_Truck_1', 'Cesium_Milk_Truck_2', - 'Wheels']) + 'Wheels'} # Assert that primitive geometries are marked as such assert s.geometry['Cesium_Milk_Truck'].metadata[ 'from_gltf_primitive'] @@ -850,8 +850,8 @@ def test_primitive_geometry_meta(self): m = g.get_mesh('CesiumMilkTruck.glb', merge_primitives=True) # names should be non-insane - assert set(m.geometry.keys()) == set([ - 'Cesium_Milk_Truck', 'Wheels']) + assert set(m.geometry.keys()) == { + 'Cesium_Milk_Truck', 'Wheels'} assert not s.geometry['Wheels'].metadata[ 'from_gltf_primitive'] assert s.geometry['Cesium_Milk_Truck'].metadata[ @@ -902,7 +902,7 @@ def test_bulk(self): if hasattr(geom, 'geometry') and len(geom.geometry) == 0: continue - g.log.info('Testing: {}'.format(fn)) + g.log.info(f'Testing: {fn}') # check a roundtrip which will validate on export # and crash on reload if we've done anything 
screwey # unitize normals will unitize any normals to comply with diff --git a/tests/test_graph.py b/tests/test_graph.py index 53545933d..402ab2f2c 100644 --- a/tests/test_graph.py +++ b/tests/test_graph.py @@ -260,7 +260,7 @@ def check_engines(edges, nodes): diff = g.np.setdiff1d(g.np.hstack(c), nodes) assert len(diff) == 0 # store the result as a set of tuples so we can compare - results.append(set([tuple(sorted(i)) for i in c])) + results.append({tuple(sorted(i)) for i in c}) # make sure different engines are returning the same thing try: diff --git a/tests/test_grouping.py b/tests/test_grouping.py index c23eddcc6..711d391a6 100644 --- a/tests/test_grouping.py +++ b/tests/test_grouping.py @@ -77,7 +77,7 @@ def test_blocks(self): a[2] = False result = blocks(a, min_len=1, only_nonzero=True) assert len(result) == 2 - assert set(result[0]) == set([1]) + assert set(result[0]) == {1} assert all(a[i].all() for i in result) def test_block_wrap(self): @@ -175,7 +175,7 @@ def test_block_wrap(self): 'only_nonzero': True} r = blocks(**kwargs) assert len(r) == 1 - assert set(r[0]) == set([0, 4]) + assert set(r[0]) == {0, 4} check_roll_wrap(**kwargs) def test_runs(self): @@ -338,8 +338,8 @@ def check_roll_wrap(**kwargs): g.np.roll(data, -i), **kwargs) # get result as a set of tuples with the rolling index # removed through a modulus, so we can compare equality - check = set([tuple(((j + i) % len(data)).tolist()) - for j in block]) + check = {tuple(((j + i) % len(data)).tolist()) + for j in block} if current is None: current = check # all values should be the same diff --git a/tests/test_identifier.py b/tests/test_identifier.py index 030e69b85..16ef93483 100644 --- a/tests/test_identifier.py +++ b/tests/test_identifier.py @@ -66,7 +66,7 @@ def test_scene_id(self): if not all(meshes[0].identifier_hash == i.identifier_hash for i in meshes): raise ValueError( - '{} differs after transform!'.format(geom_name)) + f'{geom_name} differs after transform!') # check an example for a mirrored part assert (scenes[0].geometry['disc_cam_B'].identifier_hash != @@ -110,10 +110,10 @@ def clean_name(name): # should be the same in both forms assert len(a) == len(b) - a_set = set([tuple(sorted([clean_name(i) for i in group])) - for group in a]) - b_set = set([tuple(sorted([clean_name(i) for i in group])) - for group in b]) + a_set = {tuple(sorted([clean_name(i) for i in group])) + for group in a} + b_set = {tuple(sorted([clean_name(i) for i in group])) + for group in b} assert a_set == b_set ptp = [] diff --git a/tests/test_inertia.py b/tests/test_inertia.py index 040b7ca5e..a5302db57 100644 --- a/tests/test_inertia.py +++ b/tests/test_inertia.py @@ -405,7 +405,7 @@ class MassTests(g.unittest.TestCase): def setUp(self): # inertia numbers pulled from solidworks self.truth = g.data['mass_properties'] - self.meshes = dict() + self.meshes = {} for data in self.truth: filename = data['filename'] self.meshes[filename] = g.get_mesh(filename) diff --git a/tests/test_integrate.py b/tests/test_integrate.py index ca677f143..d8d0222a8 100644 --- a/tests/test_integrate.py +++ b/tests/test_integrate.py @@ -8,8 +8,9 @@ class IntegrateTest(g.unittest.TestCase): def test_integrate(self): try: - from trimesh.integrate import symbolic_barycentric import sympy as sp + + from trimesh.integrate import symbolic_barycentric except BaseException: g.log.warning('no sympy', exc_info=True) return diff --git a/tests/test_mesh.py b/tests/test_mesh.py index 49cd0cca6..1ebb2790f 100644 --- a/tests/test_mesh.py +++ b/tests/test_mesh.py @@ -119,8 +119,7 
@@ def test_meshes(self): # nothing in the cache should be writeable if cached.flags['WRITEABLE']: - raise ValueError('{} is writeable!'.format( - name)) + raise ValueError(f'{name} is writeable!') # only check int, float, and bool if cached.dtype.kind not in 'ibf': diff --git a/tests/test_minimal.py b/tests/test_minimal.py index 9d0e43448..a3c38ddc5 100644 --- a/tests/test_minimal.py +++ b/tests/test_minimal.py @@ -7,11 +7,12 @@ """ import os - import unittest -import trimesh + import numpy as np +import trimesh + # the path of the current directory _pwd = os.path.dirname( os.path.abspath(os.path.expanduser(__file__))) diff --git a/tests/test_obj.py b/tests/test_obj.py index b57343b7c..90db07741 100644 --- a/tests/test_obj.py +++ b/tests/test_obj.py @@ -115,7 +115,7 @@ def test_obj_simple_order(self): # load a simple OBJ file without merging vertices m = g.trimesh.load(file_name, process=False) # use trivial loading to compare with fancy performant one - with open(file_name, 'r') as f: + with open(file_name) as f: f, v, vt = simple_load(f.read()) # trimesh loader should return the same face order assert g.np.allclose(f, m.faces) @@ -132,7 +132,7 @@ def test_order_tex(self): process=False, maintain_order=True) # use trivial loading to compare with fancy performant one - with open(file_name, 'r') as f: + with open(file_name) as f: f, v, vt = simple_load(f.read()) # trimesh loader should return the same face order assert g.np.allclose(f, m.faces) @@ -361,8 +361,8 @@ def test_scene_export_material_name(self): mtl = r['mystuff.mtl'].decode('utf-8') assert mtl.count('newmtl') == 1 - assert 'newmtl {}'.format(dummy) in mtl - assert '{}.jpeg'.format(dummy) in r + assert f'newmtl {dummy}' in mtl + assert f'{dummy}.jpeg' in r def test_compound_scene_export(self): diff --git a/tests/test_pbr.py b/tests/test_pbr.py index 89365aa02..721599420 100644 --- a/tests/test_pbr.py +++ b/tests/test_pbr.py @@ -1,8 +1,9 @@ import unittest -import trimesh import numpy as np +import trimesh + class PBRTest(unittest.TestCase): diff --git a/tests/test_permutate.py b/tests/test_permutate.py index 3ef3b341b..7c701599b 100644 --- a/tests/test_permutate.py +++ b/tests/test_permutate.py @@ -23,8 +23,7 @@ def make_assertions(mesh, test, rigid=False): mesh.face_adjacency) and len(mesh.faces) > MIN_FACES): g.log.error( - 'face_adjacency unchanged: {}'.format( - str(test.face_adjacency))) + f'face_adjacency unchanged: {str(test.face_adjacency)}') raise ValueError( 'face adjacency of %s the same after permutation!', mesh.metadata['file_name']) @@ -33,8 +32,7 @@ def make_assertions(mesh, test, rigid=False): mesh.face_adjacency_edges) and len(mesh.faces) > MIN_FACES): g.log.error( - 'face_adjacency_edges unchanged: {}'.format( - str(test.face_adjacency_edges))) + f'face_adjacency_edges unchanged: {str(test.face_adjacency_edges)}') raise ValueError( 'face adjacency edges of %s the same after permutation!', mesh.metadata['file_name']) diff --git a/tests/test_polygons.py b/tests/test_polygons.py index 808d9fb12..2e93102f0 100644 --- a/tests/test_polygons.py +++ b/tests/test_polygons.py @@ -187,11 +187,10 @@ def truth_corner(bh): h * b**3 / 3.0, 0.5 * b**2 * 0.5 * h**2], dtype=g.np.float64) - from trimesh.path.polygons import second_moments - from trimesh.path.polygons import transform_polygon - from shapely.geometry import Polygon + from trimesh.path.polygons import second_moments, transform_polygon + heights = g.np.array([[0.01, 0.01], [1, 1], [10, 2], diff --git a/tests/test_primitives.py b/tests/test_primitives.py index 
0fb4bdde2..9ed1001b5 100644 --- a/tests/test_primitives.py +++ b/tests/test_primitives.py @@ -171,8 +171,8 @@ def test_mesh_schema(self): def test_primitives(self): - kind = set([i.__class__.__name__ - for i in self.primitives]) + kind = {i.__class__.__name__ + for i in self.primitives} # make sure our test data has every primitive kinds = {'Box', 'Capsule', 'Cylinder', 'Sphere'} if has_triangle: diff --git a/tests/test_proximity.py b/tests/test_proximity.py index 63be516ae..32bc58bec 100644 --- a/tests/test_proximity.py +++ b/tests/test_proximity.py @@ -61,7 +61,7 @@ def test_nearest_naive(self): assert g.np.ptp(data_points, axis=0).max() < g.tol.merge assert g.np.ptp(data_dist, axis=0).max() < g.tol.merge - log_msg = '\n'.join("{}: {}s".format(i, j) + log_msg = '\n'.join(f"{i}: {j}s" for i, j in zip( [i.__name__ for i in funs], g.np.diff(tic))) diff --git a/tests/test_resolvers.py b/tests/test_resolvers.py index f10b5dba8..ba1255f47 100644 --- a/tests/test_resolvers.py +++ b/tests/test_resolvers.py @@ -18,7 +18,7 @@ def test_filepath_namespace(self): assert len(resolver.get('rabbit.obj')) > 0 # check a few file path keys - check = set(['ballA.off', 'featuretype.STL']) + check = {'ballA.off', 'featuretype.STL'} assert set(resolver.keys()).issuperset(check) # try a namespaced resolver @@ -58,22 +58,22 @@ def test_items(self): assert len(set(resolver.keys())) == 0 resolver['hi'] = b'what' # should have one item - assert set(resolver.keys()) == set(['hi']) + assert set(resolver.keys()) == {'hi'} # should have the right value assert resolver['hi'] == b'what' # original archive should have been modified - assert set(archive.keys()) == set(['hi']) + assert set(archive.keys()) == {'hi'} # add a subdirectory key resolver['stuff/nah'] = b'sup' - assert set(archive.keys()) == set(['hi', 'stuff/nah']) - assert set(resolver.keys()) == set(['hi', 'stuff/nah']) + assert set(archive.keys()) == {'hi', 'stuff/nah'} + assert set(resolver.keys()) == {'hi', 'stuff/nah'} # try namespacing ns = resolver.namespaced('stuff') assert ns['nah'] == b'sup' g.log.debug(ns.keys()) - assert set(ns.keys()) == set(['nah']) + assert set(ns.keys()) == {'nah'} if __name__ == '__main__': diff --git a/tests/test_runlength.py b/tests/test_runlength.py index 6a38bacd5..c63d7136c 100644 --- a/tests/test_runlength.py +++ b/tests/test_runlength.py @@ -3,6 +3,7 @@ except BaseException: import generic as g from trimesh.voxel import runlength as rl + np = g.np diff --git a/tests/test_scene.py b/tests/test_scene.py index 12029c3a9..448c6017f 100644 --- a/tests/test_scene.py +++ b/tests/test_scene.py @@ -367,8 +367,8 @@ def test_doubling(self): r.extents) # duplicate node groups should be twice as long - set_ori = set([len(i) * 2 for i in s.duplicate_nodes]) - set_dbl = set([len(i) for i in r.duplicate_nodes]) + set_ori = {len(i) * 2 for i in s.duplicate_nodes} + set_dbl = {len(i) for i in r.duplicate_nodes} assert set_ori == set_dbl diff --git a/tests/test_scenegraph.py b/tests/test_scenegraph.py index 038a3eaf6..6a35b4d37 100644 --- a/tests/test_scenegraph.py +++ b/tests/test_scenegraph.py @@ -67,8 +67,8 @@ def test_nodes(self): # get a scene graph graph = g.get_mesh('cycloidal.3DXML').graph # get any non-root node - node = next(iter((set(graph.nodes).difference( - [graph.base_frame])))) + node = next(iter(set(graph.nodes).difference( + [graph.base_frame]))) # remove that node graph.transforms.remove_node(node) # should have dumped the cache and removed the node diff --git a/tests/test_simplify.py b/tests/test_simplify.py index 
a237132dd..50aaf26fc 100644 --- a/tests/test_simplify.py +++ b/tests/test_simplify.py @@ -33,9 +33,7 @@ def polygon_simplify(self, polygon, arc_count): g.log.debug(new_count, arc_count) if arc_count > 1: - g.log.info('originally were {} arcs, simplify found {}'.format( - arc_count, - new_count)) + g.log.info(f'originally were {arc_count} arcs, simplify found {new_count}') assert new_count > 0 assert new_count <= arc_count diff --git a/tests/test_texture.py b/tests/test_texture.py index 71db2c471..f590978b4 100644 --- a/tests/test_texture.py +++ b/tests/test_texture.py @@ -33,7 +33,7 @@ def test_order_kwarg(self): for file_name in ['ico4.obj', 'ico4uv.obj']: # get the location of the model file file_path = g.get_path(file_name) - with open(file_path, 'r') as f: + with open(file_path) as f: # get the raw ordered vertices from the file with basic string # ops v_raw = g.np.array( @@ -183,8 +183,8 @@ def test_concatentate_multi(self): unique = vertex_c[g.trimesh.grouping.unique_rows(vertex_c)[0]] # roundtripped colors should be a superset of original colors - assert set(tuple(c) for c in unique).issuperset( - set(tuple(c) for c in colors)) + assert {tuple(c) for c in unique}.issuperset( + {tuple(c) for c in colors}) def test_to_tex(self): m = g.trimesh.creation.box() diff --git a/tests/test_transformations.py b/tests/test_transformations.py index a32fd9e26..672fc3954 100644 --- a/tests/test_transformations.py +++ b/tests/test_transformations.py @@ -16,9 +16,10 @@ def test_doctest(self): but it depends on numpy string formatting and is very flaky. """ - import trimesh - import random import doctest + import random + + import trimesh # make sure formatting is the same as their docstrings g.np.set_printoptions(suppress=True, precision=5) @@ -31,7 +32,7 @@ def test_doctest(self): results = doctest.testmod(trimesh.transformations, verbose=False, raise_on_error=True) - g.log.info('transformations {}'.format(str(results))) + g.log.info(f'transformations {str(results)}') def test_downstream(self): """ diff --git a/tests/test_util.py b/tests/test_util.py index 04cf63782..398dae83a 100644 --- a/tests/test_util.py +++ b/tests/test_util.py @@ -1,8 +1,10 @@ -import trimesh -import unittest import logging +import unittest + import numpy as np +import trimesh + try: from . import generic as g except BaseException: @@ -272,7 +274,7 @@ def test_io_wrap(self): assert res_s == test_s # check __enter__ and __exit__ - hi = 'hi'.encode('utf-8') + hi = b'hi' with util.BytesIO(hi) as f: assert f.read() == hi diff --git a/tests/test_vhacd.py b/tests/test_vhacd.py index a2e75bbb2..3a24a38c9 100644 --- a/tests/test_vhacd.py +++ b/tests/test_vhacd.py @@ -23,7 +23,7 @@ def test_vhacd(self): if len(decomposed) != 10: # it should return the correct number of meshes - raise ValueError('{} != 10'.format(len(decomposed))) + raise ValueError(f'{len(decomposed)} != 10') # make sure everything is convex # also this will fail if the type is returned incorrectly diff --git a/tests/test_voxel.py b/tests/test_voxel.py index 334e2ecc6..9ea20dbf4 100644 --- a/tests/test_voxel.py +++ b/tests/test_voxel.py @@ -282,7 +282,7 @@ def _test_equiv(self, v0, v1, query_points=None): `is_filled` are tested for consistency. 
""" def array_as_set(array2d): - return set(tuple(x) for x in array2d) + return {tuple(x) for x in array2d} # all points are filled assert g.np.all(v0.is_filled(v1.points)) diff --git a/trimesh/__init__.py b/trimesh/__init__.py index 67de65cdd..591a78225 100644 --- a/trimesh/__init__.py +++ b/trimesh/__init__.py @@ -9,35 +9,24 @@ """ # current version -from .version import __version__ +# avoid a circular import in trimesh.base +from . import bounds, collision, nsphere, primitives, smoothing, voxel # geometry objects from .base import Trimesh -from .points import PointCloud -from .scene.scene import Scene - -# utility functions -from .util import unitize -from .transformations import transform_points # general numeric tolerances from .constants import tol # loader functions -from .exchange.load import ( - load, - load_mesh, - load_path, - load_remote, - available_formats) +from .exchange.load import available_formats, load, load_mesh, load_path, load_remote +from .points import PointCloud +from .scene.scene import Scene +from .transformations import transform_points -# avoid a circular import in trimesh.base -from . import voxel -from . import bounds -from . import nsphere -from . import collision -from . import smoothing -from . import primitives +# utility functions +from .util import unitize +from .version import __version__ try: # handle vector paths diff --git a/trimesh/base.py b/trimesh/base.py index 3385cd025..41b25ce2c 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -5,42 +5,43 @@ Library for importing, exporting and doing simple operations on triangular meshes. """ -from . import ray -from . import util -from . import units -from . import poses -from . import graph -from . import sample -from . import repair -from . import convex -from . import remesh -from . import caching -from . import inertia -from . import boolean -from . import grouping -from . import geometry -from . import permutate -from . import proximity -from . import triangles -from . import curvature -from . import smoothing # noqa -from . import comparison -from . import registration -from . import decomposition -from . import intersections -from . import transformations - -from .visual import create_visual, TextureVisuals -from .exchange.export import export_mesh -from .constants import log, log_time, tol - -from .scene import Scene -from .parent import Geometry3D - import copy import warnings + import numpy as np +from . 
import ( + boolean, + caching, + comparison, + convex, + curvature, + decomposition, + geometry, + graph, + grouping, + inertia, + intersections, + permutate, + poses, + proximity, + ray, + registration, + remesh, + repair, + sample, + smoothing, # noqa + transformations, + triangles, + units, + util, +) +from .constants import log, log_time, tol +from .exchange.export import export_mesh +from .parent import Geometry3D +from .scene import Scene +from .visual import TextureVisuals, create_visual + class Trimesh(Geometry3D): @@ -162,7 +163,7 @@ def __init__(self, self.nearest = proximity.ProximityQuery(self) # store metadata about the mesh in a dictionary - self.metadata = dict() + self.metadata = {} # update the mesh metadata with passed metadata if isinstance(metadata, dict): self.metadata.update(metadata) @@ -2641,8 +2642,8 @@ def projected(self, projected : trimesh.path.Path2D Outline of source mesh """ - from .path import Path2D from .exchange.load import load_path + from .path import Path2D from .path.polygons import projected projection = projected( diff --git a/trimesh/bounds.py b/trimesh/bounds.py index c7bee0936..377a2aaa3 100644 --- a/trimesh/bounds.py +++ b/trimesh/bounds.py @@ -1,17 +1,12 @@ import numpy as np -from .constants import log, now -from . import util -from . import convex -from . import nsphere -from . import geometry -from . import grouping -from . import transformations +from . import convex, geometry, grouping, nsphere, transformations, util +from .constants import log, now try: # scipy is a soft dependency - from scipy.spatial import ConvexHull from scipy import optimize + from scipy.spatial import ConvexHull except BaseException as E: # raise the exception when someone tries to use it from . import exceptions diff --git a/trimesh/caching.py b/trimesh/caching.py index a821bab41..ce429507f 100644 --- a/trimesh/caching.py +++ b/trimesh/caching.py @@ -22,16 +22,17 @@ import os import time import warnings +from functools import wraps + import numpy as np -from functools import wraps from .constants import log from .util import is_sequence try: from collections.abc import Mapping except BaseException: - from collections import Mapping + from collections.abc import Mapping # sha256 is always available @@ -372,7 +373,7 @@ def __setslice__(self, *args, **kwargs): *args, **kwargs) -class Cache(object): +class Cache: """ Class to cache values which will be stored until the result of an ID function changes. @@ -524,7 +525,7 @@ def __exit__(self, *args): self.id_current = self._id_function() -class DiskCache(object): +class DiskCache: """ Store results of expensive operations on disk with an option to expire the results. This is used @@ -580,7 +581,7 @@ def get(self, key, fetch): with open(path, 'rb') as f: return f.read() - log.debug('not in cache fetching: `{}`'.format(key)) + log.debug(f'not in cache fetching: `{key}`') # since we made it here our data isn't cached # run the expensive function to fetch the file raw = fetch() diff --git a/trimesh/collision.py b/trimesh/collision.py index 243125f3f..fd546de8a 100644 --- a/trimesh/collision.py +++ b/trimesh/collision.py @@ -1,7 +1,7 @@ -import numpy as np - import collections +import numpy as np + try: # pip install python-fcl import fcl @@ -9,7 +9,7 @@ fcl = None -class ContactData(object): +class ContactData: """ Data structure for holding information about a collision contact. 
""" @@ -88,7 +88,7 @@ def index(self, name): return self._inds[name] -class DistanceData(object): +class DistanceData: """ Data structure for holding information about a distance query. """ @@ -161,7 +161,7 @@ def point(self, name): return self._points[name] -class CollisionManager(object): +class CollisionManager: """ A mesh-mesh collision manager. """ @@ -245,7 +245,7 @@ def remove_object(self, name): # remove names self._names.pop(geom_id) else: - raise ValueError('{} not in collision manager!'.format(name)) + raise ValueError(f'{name} not in collision manager!') def set_transform(self, name, transform): """ @@ -265,7 +265,7 @@ def set_transform(self, name, transform): o.setTranslation(transform[:3, 3]) self._manager.update(o) else: - raise ValueError('{} not in collision manager!'.format(name)) + raise ValueError(f'{name} not in collision manager!') def in_collision_single(self, mesh, diff --git a/trimesh/comparison.py b/trimesh/comparison.py index 68fa1dc68..02ac50ebc 100644 --- a/trimesh/comparison.py +++ b/trimesh/comparison.py @@ -5,11 +5,11 @@ Provide methods for quickly hashing and comparing meshes. """ +from hashlib import sha256 + import numpy as np from . import util - -from hashlib import sha256 from .constants import tol # how many significant figures to use for each diff --git a/trimesh/constants.py b/trimesh/constants.py index 375289fb2..30bc2a0da 100644 --- a/trimesh/constants.py +++ b/trimesh/constants.py @@ -3,7 +3,7 @@ from .util import log, now -class ToleranceMesh(object): +class ToleranceMesh: """ ToleranceMesh objects hold tolerance information about meshes. @@ -41,7 +41,7 @@ def __init__(self, **kwargs): self.__dict__.update(kwargs) -class TolerancePath(object): +class TolerancePath: """ TolerancePath objects contain tolerance information used in Path objects. @@ -102,7 +102,7 @@ def __init__(self, **kwargs): self.__dict__.update(kwargs) -class ResolutionPath(object): +class ResolutionPath: """ res.seg_frac : float When discretizing curves, what percentage of the drawing diff --git a/trimesh/convex.py b/trimesh/convex.py index a5a20b64f..6e0ccc05d 100644 --- a/trimesh/convex.py +++ b/trimesh/convex.py @@ -11,12 +11,9 @@ import numpy as np +from . import triangles, util from .constants import tol -from . import util -from . import triangles - - try: from scipy.spatial import ConvexHull except ImportError as E: diff --git a/trimesh/creation.py b/trimesh/creation.py index 4c7657680..39a7965ff 100644 --- a/trimesh/creation.py +++ b/trimesh/creation.py @@ -5,22 +5,16 @@ Create meshes from primitives, or with operations. """ -from .base import Trimesh -from .constants import log, tol -from .geometry import (faces_to_edges, - align_vectors, - plane_transform) - -from . import util -from . import grouping -from . import triangles -from . import exceptions -from . import transformations as tf +import collections +import warnings import numpy as np -import warnings -import collections +from . import exceptions, grouping, triangles, util +from . 
import transformations as tf +from .base import Trimesh +from .constants import log, tol +from .geometry import align_vectors, faces_to_edges, plane_transform try: # shapely is a soft dependency @@ -632,7 +626,7 @@ def box(extents=None, transform=None, bounds=None, **kwargs): dtype=np.float64).reshape(-1, 3) if 'metadata' not in kwargs: - kwargs['metadata'] = dict() + kwargs['metadata'] = {} kwargs['metadata'].update( {'shape': 'box', 'extents': extents}) @@ -850,7 +844,7 @@ def cone(radius, [0, height]] # revolve the profile to create a cone if 'metadata' not in kwargs: - kwargs['metadata'] = dict() + kwargs['metadata'] = {} kwargs['metadata'].update( {'shape': 'cone', 'radius': radius, @@ -907,7 +901,7 @@ def cylinder(radius, [radius, half], [0, half]] if 'metadata' not in kwargs: - kwargs['metadata'] = dict() + kwargs['metadata'] = {} kwargs['metadata'].update( {'shape': 'cylinder', 'height': height, @@ -977,7 +971,7 @@ def annulus(r_min, [r_min, -half]] if 'metadata' not in kwargs: - kwargs['metadata'] = dict() + kwargs['metadata'] = {} kwargs['metadata'].update( {'shape': 'annulus', 'r_min': r_min, diff --git a/trimesh/exceptions.py b/trimesh/exceptions.py index 11916d40f..1e4500262 100644 --- a/trimesh/exceptions.py +++ b/trimesh/exceptions.py @@ -6,7 +6,7 @@ """ -class ExceptionWrapper(object): +class ExceptionWrapper: """ Create a dummy object which will raise an exception when attributes are accessed (i.e. when used as a module) or when called (i.e. @@ -27,8 +27,8 @@ def __getattribute__(self, *args, **kwargs): if args[0] == '__class__': return None.__class__ # otherwise raise our original exception - raise super(ExceptionWrapper, self).__getattribute__('exception') + raise super().__getattribute__('exception') def __call__(self, *args, **kwargs): # will raise when this object is called like a function - raise super(ExceptionWrapper, self).__getattribute__('exception') + raise super().__getattribute__('exception') diff --git a/trimesh/exchange/binvox.py b/trimesh/exchange/binvox.py index 16195dd73..54b9342fa 100644 --- a/trimesh/exchange/binvox.py +++ b/trimesh/exchange/binvox.py @@ -6,10 +6,11 @@ Exporting meshes as binvox files requires the `binvox` executable to be in your path. """ +import collections import os import subprocess + import numpy as np -import collections from .. import util from ..base import Trimesh @@ -54,7 +55,7 @@ def parse_binvox_header(fp): binvox = '#binvox' space = ' ' if not line.startswith(binvox): - raise IOError('Not a binvox file') + raise OSError('Not a binvox file') shape = tuple( int(s) for s in fp.readline().strip().split(space)[1:]) translate = tuple( @@ -182,11 +183,10 @@ def voxel_from_binvox( Loaded voxels """ # shape must be uniform else scale is ambiguous + from .. import transformations from ..voxel import encoding as enc from ..voxel.base import VoxelGrid - from .. import transformations - if isinstance(rle_data, enc.RunLengthEncoding): encoding = rle_data else: @@ -267,7 +267,7 @@ def export_binvox(voxel, axis_order='xzy'): Representation according to binvox spec """ translate = voxel.translation - scale = voxel.scale * ((np.array(voxel.shape) - 1)) + scale = voxel.scale * (np.array(voxel.shape) - 1) neg_scale, = np.where(scale < 0) encoding = voxel.encoding.flip(neg_scale) scale = np.abs(scale) @@ -283,7 +283,7 @@ def export_binvox(voxel, axis_order='xzy'): rle_data, shape=voxel.shape, translate=translate, scale=scale) -class Binvoxer(object): +class Binvoxer: """ Interface for binvox CL tool. 
@@ -420,7 +420,7 @@ def __init__( encoder = binvox_path if encoder is None: - raise IOError(' '.join([ + raise OSError(' '.join([ 'No `binvox_path` provided and no binvox executable found', 'on PATH, please go to https://www.patrickmin.com/binvox/ and', 'download the appropriate version.'])) @@ -430,8 +430,7 @@ def __init__( 'Maximum dimension using exact is 1024, got %d' % dimension) if file_type not in Binvoxer.SUPPORTED_OUTPUT_TYPES: raise ValueError( - 'file_type %s not in set of supported output types %s' % - (file_type, str(Binvoxer.SUPPORTED_OUTPUT_TYPES))) + 'file_type {} not in set of supported output types {}'.format(file_type, str(Binvoxer.SUPPORTED_OUTPUT_TYPES))) args = [encoder, '-d', str(dimension), '-t', file_type] if exact: args.append('-e') @@ -516,11 +515,10 @@ def __call__(self, path, overwrite=False): ext = ext[1:].lower() if ext not in Binvoxer.SUPPORTED_INPUT_TYPES: raise ValueError( - 'file_type %s not in set of supported input types %s' % - (ext, str(Binvoxer.SUPPORTED_INPUT_TYPES))) - out_path = '%s.%s' % (head, self._file_type) + 'file_type {} not in set of supported input types {}'.format(ext, str(Binvoxer.SUPPORTED_INPUT_TYPES))) + out_path = f'{head}.{self._file_type}' if os.path.isfile(out_path) and not overwrite: - raise IOError('Attempted to voxelize object at existing path') + raise OSError('Attempted to voxelize object at existing path') self._args[-1] = path # generalizes to python2 and python3 diff --git a/trimesh/exchange/dae.py b/trimesh/exchange/dae.py index 3831c9e45..95356bdb9 100644 --- a/trimesh/exchange/dae.py +++ b/trimesh/exchange/dae.py @@ -1,14 +1,12 @@ -import io import copy +import io import uuid import numpy as np -from .. import util -from .. import visual - -from ..util import unique_name +from .. import util, visual from ..constants import log +from ..util import unique_name _EYE = np.eye(4) _EYE.flags.writeable = False @@ -101,7 +99,7 @@ def export_collada(mesh, **kwargs): import collada meshes = mesh - if not isinstance(mesh, (list, tuple, set, np.ndarray)): + if not isinstance(mesh, list | tuple | set | np.ndarray): meshes = [mesh] c = collada.Collada() @@ -148,14 +146,14 @@ def export_collada(mesh, **kwargs): ) indices = np.repeat(m.faces.flatten(), len(arrays)) - matref = u'material{}'.format(i) + matref = f'material{i}' triset = geom.createTriangleSet(indices, input_list, matref) geom.primitives.append(triset) c.geometries.append(geom) matnode = collada.scene.MaterialNode(matref, mat, inputs=[]) geomnode = collada.scene.GeometryNode(geom, [matnode]) - node = collada.scene.Node(u'node{}'.format(i), children=[geomnode]) + node = collada.scene.Node(f'node{i}', children=[geomnode]) nodes.append(node) scene = collada.scene.Scene('scene', nodes) c.scenes.append(scene) diff --git a/trimesh/exchange/export.py b/trimesh/exchange/export.py index b4fd127fd..d65bd2da0 100644 --- a/trimesh/exchange/export.py +++ b/trimesh/exchange/export.py @@ -1,20 +1,19 @@ -import os import json +import os + import numpy as np +from .. import resolvers, util from ..constants import log -from .. import util -from .. 
import resolvers - -from .urdf import export_urdf # NOQA +from .dae import _collada_exporters from .gltf import export_glb, export_gltf from .obj import export_obj from .off import _off_exporters -from .stl import export_stl, export_stl_ascii from .ply import _ply_exporters -from .dae import _collada_exporters -from .xyz import _xyz_exporters +from .stl import export_stl, export_stl_ascii from .threemf import _3mf_exporters +from .urdf import export_urdf # NOQA +from .xyz import _xyz_exporters def export_mesh(mesh, @@ -68,7 +67,7 @@ def export_mesh(mesh, if file_type not in _mesh_exporters: raise ValueError('%s exporter not available!', file_type) - if isinstance(mesh, (list, tuple, set, np.ndarray)): + if isinstance(mesh, list | tuple | set | np.ndarray): faces = 0 for m in mesh: faces += len(m.faces) @@ -283,7 +282,7 @@ def export_scene(scene, data = _mesh_exporters['3mf'](scene, **kwargs) else: raise ValueError( - 'unsupported export format: {}'.format(file_type)) + f'unsupported export format: {file_type}') # now write the data or return bytes of result if isinstance(data, dict): diff --git a/trimesh/exchange/gltf.py b/trimesh/exchange/gltf.py index 822989db8..4c155510b 100644 --- a/trimesh/exchange/gltf.py +++ b/trimesh/exchange/gltf.py @@ -6,22 +6,16 @@ as GL_TRIANGLES, and trimesh.Path2D/Path3D as GL_LINES """ -import json import base64 import collections +import json import numpy as np -from .. import util -from .. import visual -from .. import rendering -from .. import resources -from .. import transformations - -from ..util import unique_name +from .. import rendering, resources, transformations, util, visual from ..caching import hash_fast from ..constants import log, tol - +from ..util import unique_name from ..visual.gloss import specular_to_pbr # magic numbers which have meaning in GLTF @@ -125,7 +119,7 @@ def export_gltf(scene, base64_buffer_format = "data:application/octet-stream;base64,{}" if merge_buffers: views = _build_views(buffer_items) - buffer_data = bytes().join(buffer_items.values()) + buffer_data = b"".join(buffer_items.values()) if embed_buffers: buffer_name = base64_buffer_format.format( base64.b64encode(buffer_data).decode()) @@ -148,7 +142,7 @@ def export_gltf(scene, buffer_name = base64_buffer_format.format( base64.b64encode(item).decode()) else: - buffer_name = "gltf_buffer_{}.bin".format(i) + buffer_name = f"gltf_buffer_{i}.bin" files[buffer_name] = item buffers[i] = {"uri": buffer_name, "byteLength": len(item)} @@ -211,7 +205,7 @@ def export_glb( views = _build_views(buffer_items) # combine bytes into a single blob - buffer_data = bytes().join(buffer_items.values()) + buffer_data = b"".join(buffer_items.values()) # add the information about the buffer data if len(buffer_data) > 0: @@ -247,7 +241,7 @@ def export_glb( np.array([len(buffer_data), 0x004E4942], dtype="{} skipping!'.format( - len(split))) + f'face needs more values 3>{len(split)} skipping!') continue # f is like: '76/558/76' @@ -557,7 +553,7 @@ def _parse_vertices(text): # up to the location of out our first vertex but we # are going to use this check for "do we have texture" # determination later so search the whole stupid file - starts = {k: text.find('\n{} '.format(k)) for k in + starts = {k: text.find(f'\n{k} ') for k in ['v', 'vt', 'vn']} # no valid values so exit early @@ -566,7 +562,7 @@ def _parse_vertices(text): # find the last position of each valid value ends = {k: text.find( - '\n', text.rfind('\n{} '.format(k)) + 2 + len(k)) + '\n', text.rfind(f'\n{k} ') + 2 + len(k)) for k, v in 
starts.items() if v >= 0} # take the first and last position of any vertex property @@ -577,7 +573,7 @@ def _parse_vertices(text): # get the clean-ish data from the file as python lists data = {k: [i.split('\n', 1)[0] - for i in chunk.split('\n{} '.format(k))[1:]] + for i in chunk.split(f'\n{k} ')[1:]] for k, v in starts.items() if v >= 0} # count the number of data values per row on a sample row @@ -909,7 +905,7 @@ def export_obj(mesh, # add the uv coordinates export.append('vt ' + converted) # add the directive to use the exported material - export.appendleft('usemtl {}'.format(tex_name)) + export.appendleft(f'usemtl {tex_name}') except BaseException: log.debug('failed to convert UV coordinates', exc_info=True) @@ -950,7 +946,7 @@ def export_obj(mesh, # things like images mtl_data[file_name] = file_data else: - log.warning('not writing {}'.format(file_name)) + log.warning(f'not writing {file_name}') if mtl_name is None: # if no name passed set a default @@ -958,18 +954,18 @@ def export_obj(mesh, # prepend a header to the MTL text if requested if header is not None: - prepend = '# {}\n\n'.format(header).encode('utf-8') + prepend = f'# {header}\n\n'.encode() else: prepend = b'' # save the material data mtl_data[mtl_name] = prepend + b'\n\n'.join(mtl_lib) # add the reference to the MTL file - objects.appendleft('mtllib {}'.format(mtl_name)) + objects.appendleft(f'mtllib {mtl_name}') if header is not None: # add a created-with header to the top of the file - objects.appendleft('# {}'.format(header)) + objects.appendleft(f'# {header}') # combine elements into a single string text = '\n'.join(objects) diff --git a/trimesh/exchange/off.py b/trimesh/exchange/off.py index d9a625513..cf8bfb2c6 100644 --- a/trimesh/exchange/off.py +++ b/trimesh/exchange/off.py @@ -1,4 +1,5 @@ import re + import numpy as np from .. import util @@ -29,7 +30,7 @@ def load_off(file_obj, **kwargs): _, header, raw = re.split('(COFF|OFF)', text, maxsplit=1) if header.upper() not in ['OFF', 'COFF']: raise NameError( - 'Not an OFF file! Header was: `{}`'.format(header)) + f'Not an OFF file! Header was: `{header}`') # split into lines and remove whitespace splits = [i.strip() for i in str.splitlines(str(raw))] diff --git a/trimesh/exchange/openctm.py b/trimesh/exchange/openctm.py index 5ccc5c3a2..323833fb1 100644 --- a/trimesh/exchange/openctm.py +++ b/trimesh/exchange/openctm.py @@ -28,11 +28,12 @@ # distribution. # ------------------------------------------------------------------------------ -import os import ctypes import ctypes.util +import os import numpy as np + _ctm_loaders = {} try: @@ -129,7 +130,7 @@ def load_ctm(file_obj, file_type=None, **kwargs): err = ctmGetError(ctm) if err != CTM_NONE: - raise IOError("Error loading file: " + str(ctmErrorString(err))) + raise OSError("Error loading file: " + str(ctmErrorString(err))) # get vertices vertex_count = ctmGetInteger(ctm, CTM_VERTEX_COUNT) diff --git a/trimesh/exchange/ply.py b/trimesh/exchange/ply.py index f450a7503..a6d11779a 100644 --- a/trimesh/exchange/ply.py +++ b/trimesh/exchange/ply.py @@ -1,18 +1,13 @@ -import numpy as np - -from string import Template - -import tempfile -import subprocess import collections +import subprocess +import tempfile +from string import Template -from .. import util -from .. import visual -from .. import grouping -from .. import resources +import numpy as np -from ..geometry import triangulate_quads +from .. 
import grouping, resources, util, visual from ..constants import log +from ..geometry import triangulate_quads # from ply specification, and additional dtypes found in the wild _dtypes = { @@ -149,7 +144,7 @@ def _add_attributes_to_dtype(dtype, attributes): else: attribute_dtype = data.dtype if len( data.dtype) == 0 else data.dtype[0] - dtype.append(('{}_count'.format(name), 'u1')) + dtype.append((f'{name}_count', 'u1')) dtype.append( (name, _numpy_type_to_ply_type(attribute_dtype), data.shape[1])) return dtype @@ -174,12 +169,10 @@ def _add_attributes_to_header(header, attributes): for name, data in attributes.items(): if data.ndim == 1: header.append( - 'property {} {}\n'.format( - _numpy_type_to_ply_type(data.dtype), name)) + f'property {_numpy_type_to_ply_type(data.dtype)} {name}\n') else: header.append( - 'property list uchar {} {}\n'.format( - _numpy_type_to_ply_type(data.dtype), name)) + f'property list uchar {_numpy_type_to_ply_type(data.dtype)} {name}\n') return header @@ -201,8 +194,7 @@ def _add_attributes_to_data_array(data_array, attributes): """ for name, data in attributes.items(): if data.ndim > 1: - data_array['{}_count'.format( - name)] = data.shape[1] * np.ones(data.shape[0]) + data_array[f'{name}_count'] = data.shape[1] * np.ones(data.shape[0]) data_array[name] = data return data_array @@ -487,7 +479,7 @@ def _elements_to_kwargs(elements, # return empty geometry if there are no vertices kwargs['geometry'] = {} return kwargs - + try: vertex_normals = np.column_stack([elements['vertex']['data'][j] for j in ('nx', 'ny', 'nz')]) @@ -495,7 +487,7 @@ def _elements_to_kwargs(elements, kwargs['vertex_normals'] = vertex_normals except BaseException: pass - + if 'face' in elements and elements['face']['length']: face_data = elements['face']['data'] else: @@ -876,7 +868,7 @@ def populate_data(file_obj, elements): elements[key]['data'] = np.frombuffer( data, dtype=dtype) except BaseException: - log.warning('PLY failed to populate: {}'.format(key)) + log.warning(f'PLY failed to populate: {key}') elements[key]['data'] = None return elements diff --git a/trimesh/exchange/stl.py b/trimesh/exchange/stl.py index 77dc58dc5..ff08b757b 100644 --- a/trimesh/exchange/stl.py +++ b/trimesh/exchange/stl.py @@ -1,7 +1,7 @@ -from .. import util - import numpy as np +from .. import util + class HeaderError(Exception): # the exception raised if an STL file object doesn't match its header @@ -114,8 +114,7 @@ def load_stl_binary(file_obj): # so it's much better to raise an exception here. if len_data != len_expected: raise HeaderError( - 'Binary STL has incorrect length in header: {} vs {}'.format( - len_data, len_expected)) + f'Binary STL has incorrect length in header: {len_data} vs {len_expected}') blob = np.frombuffer(file_obj.read(), dtype=_stl_dtype) diff --git a/trimesh/exchange/threedxml.py b/trimesh/exchange/threedxml.py index d9fe5b060..b47f516bd 100644 --- a/trimesh/exchange/threedxml.py +++ b/trimesh/exchange/threedxml.py @@ -16,8 +16,8 @@ from ..exceptions import ExceptionWrapper Image = ExceptionWrapper(E) -import json import collections +import json from .. 
import util from ..visual.texture import TextureVisuals @@ -136,7 +136,7 @@ def get_rgba(color): references[reference_id]['color'] = rgba # geometries will hold meshes - geometries = dict() + geometries = {} # get geometry for ReferenceRep in tree.iter(tag='{*}ReferenceRep'): @@ -147,7 +147,7 @@ def get_rgba(color): # the format of the geometry file part_format = ReferenceRep.attrib['format'] if part_format not in ('TESSELLATED', ): - util.log.warning('ReferenceRep %r unsupported format %r' % ( + util.log.warning('ReferenceRep {!r} unsupported format {!r}'.format( part_file, part_format)) continue @@ -246,7 +246,7 @@ def get_rgba(color): # save each mesh as the kwargs for a trimesh.Trimesh constructor # aka, a Trimesh object can be created with trimesh.Trimesh(**mesh) # this avoids needing trimesh- specific imports in this IO function - mesh = dict() + mesh = {} (mesh['vertices'], mesh['faces']) = util.append_faces(mesh_vertices, mesh_faces) @@ -395,8 +395,8 @@ def print_element(element): try: # soft dependencies - from lxml import etree import networkx as nx + from lxml import etree _threedxml_loaders = {'3dxml': load_3DXML} except BaseException as E: # set loader to exception wrapper diff --git a/trimesh/exchange/threemf.py b/trimesh/exchange/threemf.py index ee8d89bf4..69dc19490 100644 --- a/trimesh/exchange/threemf.py +++ b/trimesh/exchange/threemf.py @@ -1,14 +1,11 @@ +import collections import io -import sys import uuid import zipfile -import collections import numpy as np -from .. import util -from .. import graph - +from .. import graph, util from ..constants import log @@ -161,7 +158,7 @@ def load_3MF(file_obj, last = path[-1][0] # if someone included an undefined component, skip it if last not in id_name: - log.debug('id {} included but not defined!'.format(last)) + log.debug(f'id {last} included but not defined!') continue # frame names unique name = id_name[last] + util.unique_id() @@ -232,11 +229,6 @@ def export_3MF(mesh, Represents geometry as a 3MF file. 
""" - if sys.version_info < (3, 6): - # Python only added 'w' mode to `zipfile` in Python 3.6 - # and it is not worth the effort to work around - raise NotImplementedError( - "3MF export requires Python >= 3.6") from ..scene.scene import Scene if not isinstance(mesh, Scene): @@ -274,8 +266,7 @@ def model_id(x): # specify the parameters for the zip container zip_kwargs = {'compression': compression} # compresslevel was added in Python 3.7 - if sys.version_info >= (3, 7): - zip_kwargs['compresslevel'] = compresslevel + zip_kwargs['compresslevel'] = compresslevel with zipfile.ZipFile(file_obj, mode='w', **zip_kwargs) as z: # 3dmodel.model @@ -458,17 +449,10 @@ def _attrib_to_transform(attrib): # do import here to keep lxml a soft dependency try: - from lxml import etree import networkx as nx + from lxml import etree _three_loaders = {'3mf': load_3MF} - if sys.version_info < (3, 6): - # Python only added 'w' mode to `zipfile` in Python 3.6 - # and it is not worth the effort to work around - from ..exceptions import ExceptionWrapper - _3mf_exporters = {'3mf': ExceptionWrapper( - NotImplementedError("3MF export requires Python >= 3.6"))} - else: - _3mf_exporters = {'3mf': export_3MF} + _3mf_exporters = {'3mf': export_3MF} except BaseException as E: from ..exceptions import ExceptionWrapper _three_loaders = {'3mf': ExceptionWrapper(E)} diff --git a/trimesh/exchange/urdf.py b/trimesh/exchange/urdf.py index 81d52ef75..b60072201 100644 --- a/trimesh/exchange/urdf.py +++ b/trimesh/exchange/urdf.py @@ -31,9 +31,11 @@ def export_urdf(mesh, """ import lxml.etree as et + + from ..resources import get + # TODO: fix circular import from .export import export_mesh - from ..resources import get # Extract the save directory and the file name fullpath = os.path.abspath(directory) @@ -71,8 +73,8 @@ def export_urdf(mesh, for i, piece in enumerate(convex_pieces): # Save each nearly convex mesh out to a file - piece_name = '{}_convex_piece_{}'.format(name, i) - piece_filename = '{}.obj'.format(piece_name) + piece_name = f'{name}_convex_piece_{i}' + piece_filename = f'{piece_name}.obj' piece_filepath = os.path.join(fullpath, piece_filename) export_mesh(piece, piece_filepath) @@ -80,8 +82,8 @@ def export_urdf(mesh, piece.center_mass = mesh.center_mass piece.density = effective_density * mesh.density - link_name = 'link_{}'.format(piece_name) - geom_name = '{}'.format(piece_filename) + link_name = f'link_{piece_name}' + geom_name = f'{piece_filename}' I = [['{:.2E}'.format(y) for y in x] # NOQA for x in piece.moment_inertia] @@ -91,7 +93,7 @@ def export_urdf(mesh, # Inertial information inertial = et.SubElement(link, 'inertial') et.SubElement(inertial, 'origin', xyz="0 0 0", rpy="0 0 0") - et.SubElement(inertial, 'mass', value='{:.2E}'.format(piece.mass)) + et.SubElement(inertial, 'mass', value=f'{piece.mass:.2E}') et.SubElement( inertial, 'inertia', @@ -106,29 +108,23 @@ def export_urdf(mesh, et.SubElement(visual, 'origin', xyz="0 0 0", rpy="0 0 0") geometry = et.SubElement(visual, 'geometry') et.SubElement(geometry, 'mesh', filename=geom_name, - scale="{:.4E} {:.4E} {:.4E}".format(scale, - scale, - scale)) + scale=f"{scale:.4E} {scale:.4E} {scale:.4E}") material = et.SubElement(visual, 'material', name='') if color is not None: et.SubElement(material, 'color', - rgba="{:.2E} {:.2E} {:.2E} 1".format(color[0], - color[1], - color[2])) + rgba=f"{color[0]:.2E} {color[1]:.2E} {color[2]:.2E} 1") # Collision Information collision = et.SubElement(link, 'collision') et.SubElement(collision, 'origin', xyz="0 0 0", rpy="0 0 
0") geometry = et.SubElement(collision, 'geometry') et.SubElement(geometry, 'mesh', filename=geom_name, - scale="{:.4E} {:.4E} {:.4E}".format(scale, - scale, - scale)) + scale=f"{scale:.4E} {scale:.4E} {scale:.4E}") # Create rigid joint to previous link if prev_link_name is not None: - joint_name = '{}_joint'.format(link_name) + joint_name = f'{link_name}_joint' joint = et.SubElement(root, 'joint', name=joint_name, @@ -141,7 +137,7 @@ def export_urdf(mesh, # Write URDF file tree = et.ElementTree(root) - urdf_filename = '{}.urdf'.format(name) + urdf_filename = f'{name}.urdf' tree.write(os.path.join(fullpath, urdf_filename), pretty_print=True) @@ -152,11 +148,10 @@ def export_urdf(mesh, version = et.SubElement(root, 'version') version.text = '1.0' sdf = et.SubElement(root, 'sdf', version='1.4') - sdf.text = '{}.urdf'.format(name) + sdf.text = f'{name}.urdf' author = et.SubElement(root, 'author') - et.SubElement(author, 'name').text = 'trimesh {}'.format( - __version__) + et.SubElement(author, 'name').text = f'trimesh {__version__}' et.SubElement(author, 'email').text = 'blank@blank.blank' description = et.SubElement(root, 'description') diff --git a/trimesh/exchange/xaml.py b/trimesh/exchange/xaml.py index 8db0aef79..be7e21633 100644 --- a/trimesh/exchange/xaml.py +++ b/trimesh/exchange/xaml.py @@ -4,13 +4,12 @@ Load 3D XAMl files, an export option from Solidworks. """ -import numpy as np - import collections -from .. import util -from .. import visual +import numpy as np + from .. import transformations as tf +from .. import util, visual def load_XAML(file_obj, *args, **kwargs): @@ -139,7 +138,7 @@ def element_to_transform(element): normals.append(c_normals) # compile the results into clean numpy arrays - result = dict() + result = {} result['vertices'], result['faces'] = util.append_faces(vertices, faces) result['face_colors'] = np.vstack(colors) diff --git a/trimesh/graph.py b/trimesh/graph.py index 093d7da4c..6ce4f1652 100644 --- a/trimesh/graph.py +++ b/trimesh/graph.py @@ -8,18 +8,16 @@ Currently uses networkx or scipy.sparse.csgraph backend. """ -import numpy as np import collections -from . import util -from . import grouping -from . import exceptions +import numpy as np +from . import exceptions, grouping, util from .constants import log, tol from .geometry import faces_to_edges try: - from scipy.sparse import csgraph, coo_matrix + from scipy.sparse import coo_matrix, csgraph except BaseException as E: # re-raise exception when used csgraph = exceptions.ExceptionWrapper(E) @@ -918,8 +916,8 @@ def graph_to_svg(graph): svg: string, pictoral layout in SVG format """ - import tempfile import subprocess + import tempfile with tempfile.NamedTemporaryFile() as dot_file: nx.drawing.nx_agraph.write_dot(graph, dot_file.name) svg = subprocess.check_output(['dot', dot_file.name, '-Tsvg']) diff --git a/trimesh/grouping.py b/trimesh/grouping.py index ea634d969..d9d21b91f 100644 --- a/trimesh/grouping.py +++ b/trimesh/grouping.py @@ -8,7 +8,6 @@ import numpy as np from . import util - from .constants import log, tol try: @@ -557,7 +556,7 @@ def group_dict(): The loop and appends make this rather slow on large arrays but it works on irregular groups. 
""" - observed = dict() + observed = {} hashable = hashable_rows(data, digits=digits) for index, key in enumerate(hashable): key_string = key.tobytes() diff --git a/trimesh/interfaces/blender.py b/trimesh/interfaces/blender.py index b0448554d..e640c3d14 100644 --- a/trimesh/interfaces/blender.py +++ b/trimesh/interfaces/blender.py @@ -1,12 +1,10 @@ -from .. import util -from .. import resources - -from .generic import MeshScript -from ..constants import log - import os import platform +from .. import resources, util +from ..constants import log +from .generic import MeshScript + _search_path = os.environ.get('PATH', '') if platform.system() == 'Windows': # try to find Blender install on Windows diff --git a/trimesh/interfaces/generic.py b/trimesh/interfaces/generic.py index 3b2a26794..21785643b 100644 --- a/trimesh/interfaces/generic.py +++ b/trimesh/interfaces/generic.py @@ -1,10 +1,9 @@ import os import platform import subprocess - from string import Template +from subprocess import CalledProcessError, check_output from tempfile import NamedTemporaryFile -from subprocess import check_output, CalledProcessError from .. import exchange from ..util import log @@ -33,14 +32,12 @@ def __enter__(self): digit_count = len(str(len(self.meshes))) self.mesh_pre = [ NamedTemporaryFile( - suffix='.{}'.format( - self.exchange), - prefix='{}_'.format(str(i).zfill(digit_count)), + suffix=f'.{self.exchange}', + prefix=f'{str(i).zfill(digit_count)}_', mode='wb', delete=False) for i in range(len(self.meshes))] self.mesh_post = NamedTemporaryFile( - suffix='.{}'.format( - self.exchange), + suffix=f'.{self.exchange}', mode='rb', delete=False) self.script_out = NamedTemporaryFile( @@ -102,8 +99,7 @@ def run(self, command): def __exit__(self, *args, **kwargs): if self.debug: - log.info('MeshScript.debug: not deleting {}'.format( - self.script_out.name)) + log.info(f'MeshScript.debug: not deleting {self.script_out.name}') return # delete all the temporary files by name # they are closed but their names are still available diff --git a/trimesh/interfaces/gmsh.py b/trimesh/interfaces/gmsh.py index ebabcee0c..c36674ec1 100644 --- a/trimesh/interfaces/gmsh.py +++ b/trimesh/interfaces/gmsh.py @@ -32,10 +32,11 @@ def load_gmsh(file_name, gmsh_args=None): Surface mesh of input geometry """ # use STL as an intermediate format - from ..exchange.stl import load_stl # do import here to avoid very occasional segfaults import gmsh + from ..exchange.stl import load_stl + # start with default args for the meshing step # Mesh.Algorithm=2 MeshAdapt/Delaunay, there are others but they may include quads # With this planes are meshed using Delaunay and cylinders are meshed diff --git a/trimesh/interfaces/scad.py b/trimesh/interfaces/scad.py index 372f9b8f6..2400fdb91 100644 --- a/trimesh/interfaces/scad.py +++ b/trimesh/interfaces/scad.py @@ -2,9 +2,9 @@ import platform from subprocess import CalledProcessError +from ..constants import log from ..util import which from .generic import MeshScript -from ..constants import log # start the search with the user's PATH _search_path = os.environ.get('PATH', '') @@ -48,7 +48,7 @@ def interface_scad(meshes, script, debug=False, **kwargs): # OFF is a simple text format that references vertices by-index # making it slightly preferable to STL for this kind of exchange duty try: - with MeshScript(meshes=meshes, script=script, + with MeshScript(meshes=meshes, script=script, debug=debug, exchange='off') as scad: result = scad.run(_scad_executable + ' $SCRIPT -o $MESH_POST') except 
CalledProcessError as e: diff --git a/trimesh/interfaces/vhacd.py b/trimesh/interfaces/vhacd.py index 2cbdb21c0..5c7a15549 100644 --- a/trimesh/interfaces/vhacd.py +++ b/trimesh/interfaces/vhacd.py @@ -1,9 +1,9 @@ import os import platform -from .generic import MeshScript from ..constants import log from ..util import which +from .generic import MeshScript _search_path = os.environ.get("PATH", "") @@ -49,8 +49,7 @@ def convex_decomposition(mesh, debug=False, **kwargs): # pass through extra arguments from the input dictionary for key, value in kwargs.items(): - argstring += ' --{} {}'.format(str(key), - str(value)) + argstring += f' --{str(key)} {str(value)}' with MeshScript(meshes=[mesh], script='', diff --git a/trimesh/intersections.py b/trimesh/intersections.py index 2784e50c0..b3bb5f24c 100644 --- a/trimesh/intersections.py +++ b/trimesh/intersections.py @@ -6,12 +6,9 @@ """ import numpy as np -from . import util -from . import geometry -from . import grouping -from . import triangles as tm +from . import geometry, grouping, util from . import transformations as tf - +from . import triangles as tm from .constants import tol from .triangles import points_to_barycentric @@ -716,11 +713,12 @@ def slice_mesh_plane(mesh, return None # avoid circular import - from .base import Trimesh - from .visual import TextureVisuals - from .path import polygons from scipy.spatial import cKDTree + + from .base import Trimesh from .creation import triangulate_polygon + from .path import polygons + from .visual import TextureVisuals # check input plane plane_normal = np.asanyarray( diff --git a/trimesh/nsphere.py b/trimesh/nsphere.py index 3532ac27f..1a760a6c3 100644 --- a/trimesh/nsphere.py +++ b/trimesh/nsphere.py @@ -7,9 +7,7 @@ """ import numpy as np -from . import util -from . import convex - +from . import convex, util from .constants import log, tol try: diff --git a/trimesh/parent.py b/trimesh/parent.py index b50b7d1ef..5f63b7eb9 100644 --- a/trimesh/parent.py +++ b/trimesh/parent.py @@ -134,17 +134,14 @@ def __repr__(self): elements = [] if hasattr(self, 'vertices'): # for Trimesh and PointCloud - elements.append('vertices.shape={}'.format( - self.vertices.shape)) + elements.append(f'vertices.shape={self.vertices.shape}') if hasattr(self, 'faces'): # for Trimesh - elements.append('faces.shape={}'.format( - self.faces.shape)) + elements.append(f'faces.shape={self.faces.shape}') if hasattr(self, 'geometry') and isinstance( self.geometry, dict): # for Scene - elements.append('len(geometry)={}'.format( - len(self.geometry))) + elements.append(f'len(geometry)={len(self.geometry)}') if 'Voxel' in type(self).__name__: # for VoxelGrid objects elements.append(str(self.shape)[1:-1]) @@ -252,7 +249,7 @@ def bounding_box_oriented(self): representing the minimum volume oriented bounding box of the mesh """ - from . import primitives, bounds + from . import bounds, primitives to_origin, extents = bounds.oriented_bounds(self) obb = primitives.Box(transform=np.linalg.inv(to_origin), extents=extents, @@ -275,7 +272,7 @@ def bounding_sphere(self): minball : trimesh.primitives.Sphere Sphere primitive containing current mesh """ - from . import primitives, nsphere + from . import nsphere, primitives center, radius = nsphere.minimum_nsphere(self) minball = primitives.Sphere(center=center, radius=radius, @@ -292,7 +289,7 @@ def bounding_cylinder(self): mincyl : trimesh.primitives.Cylinder Cylinder primitive containing current mesh """ - from . import primitives, bounds + from . 
import bounds, primitives kwargs = bounds.minimum_cylinder(self) mincyl = primitives.Cylinder(mutable=False, **kwargs) return mincyl diff --git a/trimesh/path/arc.py b/trimesh/path/arc.py index 1e121a9e2..936e1c167 100644 --- a/trimesh/path/arc.py +++ b/trimesh/path/arc.py @@ -1,10 +1,9 @@ import numpy as np from .. import util - from ..constants import log -from ..constants import tol_path as tol from ..constants import res_path as res +from ..constants import tol_path as tol # floating point zero _TOL_ZERO = 1e-12 @@ -158,7 +157,7 @@ def discretize_arc(points, # the number of facets, based on the angle criteria count_a = angle / res.seg_angle - count_l = ((R * angle)) / (res.seg_frac * scale) + count_l = (R * angle) / (res.seg_frac * scale) # figure out the number of line segments count = np.max([count_a, count_l]) diff --git a/trimesh/path/creation.py b/trimesh/path/creation.py index e376dabc3..6ab272d1b 100644 --- a/trimesh/path/creation.py +++ b/trimesh/path/creation.py @@ -1,11 +1,9 @@ import numpy as np -from . import arc -from .entities import Line, Arc +from .. import transformations, util from ..geometry import plane_transform - -from .. import util -from .. import transformations +from . import arc +from .entities import Arc, Line def circle_pattern(pattern_radius, diff --git a/trimesh/path/entities.py b/trimesh/path/entities.py index 081369eed..e8a92dd76 100644 --- a/trimesh/path/entities.py +++ b/trimesh/path/entities.py @@ -5,14 +5,14 @@ Basic geometric primitives which only store references to vertex indices rather than vertices themselves. """ -import numpy as np - from copy import deepcopy -from .arc import discretize_arc, arc_center -from .curve import discretize_bezier, discretize_bspline + +import numpy as np from .. import util from ..util import ABC +from .arc import arc_center, discretize_arc +from .curve import discretize_bezier, discretize_bspline class Entity(ABC): diff --git a/trimesh/path/exchange/dxf.py b/trimesh/path/exchange/dxf.py index 9b968a73a..8768f55d2 100644 --- a/trimesh/path/exchange/dxf.py +++ b/trimesh/path/exchange/dxf.py @@ -1,19 +1,14 @@ -import numpy as np - from collections import defaultdict -from ..arc import to_threepoint -from ..entities import Line, Arc, BSpline, Text +import numpy as np -from ... import resources -from ...util import multi_dict +from ... import grouping, resources, util +from ... import transformations as tf from ...constants import log from ...constants import tol_path as tol - -from ... import util -from ... import grouping -from ... 
import transformations as tf - +from ...util import multi_dict +from ..arc import to_threepoint +from ..entities import Arc, BSpline, Line, Text # unit codes _DXF_UNITS = {1: 'inches', @@ -560,7 +555,7 @@ def convert_insert(e): unsupported[entity_type] += 1 if len(unsupported) > 0: log.debug('skipping dxf entities: {}'.format( - ', '.join('{}: {}'.format(k, v) for k, v + ', '.join(f'{k}: {v}' for k, v in unsupported.items()))) # stack vertices into single array vertices = util.vstack_empty(vertices).astype(np.float64) @@ -632,7 +627,7 @@ def format_points(points, group = group[:, :2] three = three[:, :2] # join into result string - packed = '\n'.join('{:d}\n{:.12g}'.format(g, v) + packed = '\n'.join(f'{g:d}\n{v:.12g}' for g, v in zip(group.reshape(-1), three.reshape(-1))) @@ -743,7 +738,7 @@ def convert_bspline(spline, vertices): normal = [0.0, 0.0, 1.0] n_code = [210, 220, 230] - n_str = '\n'.join('{:d}\n{:.12g}'.format(i, j) + n_str = '\n'.join(f'{i:d}\n{j:.12g}' for i, j in zip(n_code, normal)) subs = entity_info(spline) @@ -815,9 +810,9 @@ def convert_generic(entity, vertices): entities_str = '\n'.join(collected) # add in the extents of the document as explicit XYZ lines - hsub = {'EXTMIN_{}'.format(k): v for k, v in zip( + hsub = {f'EXTMIN_{k}': v for k, v in zip( 'XYZ', np.append(path.bounds[0], 0.0))} - hsub.update({'EXTMAX_{}'.format(k): v for k, v in zip( + hsub.update({f'EXTMAX_{k}': v for k, v in zip( 'XYZ', np.append(path.bounds[1], 0.0))}) # apply a units flag defaulting to `1` hsub['LUNITS'] = _UNITS_TO_DXF.get(path.units, 1) diff --git a/trimesh/path/exchange/export.py b/trimesh/path/exchange/export.py index b76e47677..96b1eb0df 100644 --- a/trimesh/path/exchange/export.py +++ b/trimesh/path/exchange/export.py @@ -1,8 +1,7 @@ import os -from ... import util -from . import dxf -from . import svg_io +from ... import util +from . import dxf, svg_io def export_path(path, diff --git a/trimesh/path/exchange/load.py b/trimesh/path/exchange/load.py index d2d0aec88..3cfb18278 100644 --- a/trimesh/path/exchange/load.py +++ b/trimesh/path/exchange/load.py @@ -1,11 +1,10 @@ import os -from .dxf import _dxf_loaders -from .svg_io import svg_to_path +from ... import util from ..path import Path - from . import misc -from ... import util +from .dxf import _dxf_loaders +from .svg_io import svg_to_path def load_path(file_obj, file_type=None, **kwargs): @@ -68,8 +67,7 @@ def load_path(file_obj, file_type=None, **kwargs): raise ValueError('Not a supported object type!') result = load_kwargs(kwargs) - util.log.debug('loaded {} in {:0.4f}s'.format( - str(result), util.now() - tic)) + util.log.debug(f'loaded {str(result)} in {util.now() - tic:0.4f}s') return result diff --git a/trimesh/path/exchange/misc.py b/trimesh/path/exchange/misc.py index 4d05c9863..57c958a7e 100644 --- a/trimesh/path/exchange/misc.py +++ b/trimesh/path/exchange/misc.py @@ -1,10 +1,7 @@ import numpy as np -from ... import util -from ... import graph -from ... import grouping - -from ..entities import Line, Arc +from ... import graph, grouping, util +from ..entities import Arc, Line def dict_to_path(as_dict): diff --git a/trimesh/path/exchange/svg_io.py b/trimesh/path/exchange/svg_io.py index 3c7bd73fd..111838fef 100644 --- a/trimesh/path/exchange/svg_io.py +++ b/trimesh/path/exchange/svg_io.py @@ -1,23 +1,16 @@ -import json import base64 import collections - -import numpy as np - +import json from copy import deepcopy -from ..arc import arc_center -from ..entities import Line, Arc, Bezier +import numpy as np +from ... 
import exceptions, grouping, resources, util
 from ...constants import log, tol
-
-from ... import util
-from ... import grouping
-from ... import resources
-from ... import exceptions
-
+from ...transformations import planar_matrix, transform_points
 from ...util import jsonify
-from ...transformations import transform_points, planar_matrix
+from ..arc import arc_center
+from ..entities import Arc, Bezier, Line

 try:
 # pip install svg.path
@@ -37,7 +30,7 @@
 # store any additional properties using a trimesh namespace
 _ns_name = 'trimesh'
 _ns_url = 'https://github.com/mikedh/trimesh'
-_ns = '{{{}}}'.format(_ns_url)
+_ns = f'{{{_ns_url}}}'

 _IDENTITY = np.eye(3)
 _IDENTITY.flags['WRITEABLE'] = False
@@ -209,7 +202,7 @@ def transform_to_matrices(transform):
 mat[:2, :2] *= values
 matrices.append(mat)
 else:
- log.debug('unknown SVG transform: {}'.format(key))
+ log.debug(f'unknown SVG transform: {key}')
 return matrices
@@ -268,7 +261,7 @@ def load_cubic(svg_cubic):
 svg_cubic.end])
 return Bezier(np.arange(4) + counts[name]), points

- class MultiLine(object):
+ class MultiLine:
 # An object to hold one or multiple Line entities.
 def __init__(self, lines):
 if tol.strict:
@@ -434,7 +427,7 @@ def _entities_to_str(entities,
 points = vertices.copy()

 # generate a format string with the requested digits
- temp_digits = '0.{}f'.format(int(digits))
+ temp_digits = f'0.{int(digits)}f'
 # generate a format string for circles as two arc segments
 temp_circle = ('M {x:DI},{y:DI}a{r:DI},{r:DI},0,1,0,{d:DI},' +
 '0a{r:DI},{r:DI},0,1,0,-{d:DI},0Z').replace('DI', temp_digits)
@@ -573,9 +566,7 @@ def export_svg(drawing,
 elements = []
 for meta, path_string in pairs:
 # create a simple path element
- elements.append('<path d="{d}" {attr}/>'.format(
- d=path_string,
- attr=_format_attrib(meta)))
+ elements.append(f'<path d="{path_string}" {_format_attrib(meta)}/>')

 # format as XML
 if 'stroke_width' in kwargs:
@@ -610,8 +601,7 @@ def _format_attrib(attrib):
 Bag of keys and values.
 """
 bag = {k: _encode(v) for k, v in attrib.items()}
- return '\n'.join('{ns}:{key}="{value}"'.format(
- ns=_ns_name, key=k, value=v)
+ return '\n'.join(f'{_ns_name}:{k}="{v}"'
 for k, v in bag.items()
 if len(k) > 0 and v is not None and len(v) > 0)
@@ -664,7 +654,7 @@ def _deep_same(original, other):
 # ndarrays will be converted to lists
 # but otherwise types should be identical
 if isinstance(original, np.ndarray):
- assert isinstance(other, (list, np.ndarray))
+ assert isinstance(other, list | np.ndarray)
 elif util.is_string(original):
 # handle python 2+3 unicode vs str
 assert util.is_string(other)
@@ -672,11 +662,11 @@ def _deep_same(original, other):
 # otherwise they should be the same type
 assert isinstance(original, type(other))

- if isinstance(original, (str, bytes)):
+ if isinstance(original, str | bytes):
 # string and bytes should just be identical
 assert original == other
 return
- elif isinstance(original, (float, int, np.ndarray)):
+ elif isinstance(original, float | int | np.ndarray):
 # for numeric classes use numpy magic comparison
 # which includes an epsilon for floating point
 assert np.allclose(original, other)
diff --git a/trimesh/path/intersections.py b/trimesh/path/intersections.py
index 60a7ece61..fd70c7a02 100644
--- a/trimesh/path/intersections.py
+++ b/trimesh/path/intersections.py
@@ -1,7 +1,6 @@
 import numpy as np

 from ..
import util - from ..constants import tol_path as tol diff --git a/trimesh/path/packing.py b/trimesh/path/packing.py index a72849e13..08bf3f8f8 100644 --- a/trimesh/path/packing.py +++ b/trimesh/path/packing.py @@ -6,8 +6,8 @@ """ import numpy as np -from ..util import allclose, bounds_tree from ..constants import log, tol +from ..util import allclose, bounds_tree # floating point zero _TOL_ZERO = 1e-12 @@ -377,7 +377,7 @@ def polygons(polygons, **kwargs): i.e. `consume.sum() == m` """ - from .polygons import polygons_obb, polygon_bounds + from .polygons import polygon_bounds, polygons_obb # find the oriented bounding box of the polygons obb, extents = polygons_obb(polygons) @@ -486,7 +486,7 @@ def rectangles(extents, # shrink the bounds by spacing result[0] += [[[spacing], [-spacing]]] - log.debug('packed with density {:0.5f}'.format(best_density)) + log.debug(f'packed with density {best_density:0.5f}') return result @@ -599,8 +599,8 @@ def visualize(extents, bounds): scene : trimesh.Scene Scene with boxes at requested locations. """ - from ..scene import Scene from ..creation import box + from ..scene import Scene from ..visual import random_color # use a roll transform to verify extents diff --git a/trimesh/path/path.py b/trimesh/path/path.py index 5c454c9af..4ba418698 100644 --- a/trimesh/path/path.py +++ b/trimesh/path/path.py @@ -5,38 +5,29 @@ A module designed to work with vector paths such as those stored in a DXF or SVG file. """ -import numpy as np - +import collections import copy import warnings -import collections - from hashlib import sha256 -from ..points import plane_fit -from ..geometry import plane_transform -from ..visual import to_rgba -from ..constants import log -from ..constants import tol_path as tol - -from .util import concatenate +import numpy as np -from .. import parent -from .. import util -from .. import units -from .. import bounds -from .. import caching -from .. import grouping -from .. import exceptions +from .. import bounds, caching, exceptions, grouping, parent, units, util from .. import transformations as tf - -from . import raster -from . import simplify -from . import creation # NOQA -from . import segments # NOQA -from . import traversal - +from ..constants import log +from ..constants import tol_path as tol +from ..geometry import plane_transform +from ..points import plane_fit +from ..visual import to_rgba +from . 
import ( + creation, # NOQA + raster, + segments, # NOQA + simplify, + traversal, +) from .exchange.export import export_path +from .util import concatenate # now import things which require non-minimal install of Trimesh # create a dummy module which will raise the ImportError @@ -102,7 +93,7 @@ def __init__(self, # assign each color to each entity self.colors = colors # collect metadata into new dictionary - self.metadata = dict() + self.metadata = {} if metadata.__class__.__name__ == 'dict': self.metadata.update(metadata) @@ -777,7 +768,7 @@ def copy(self): metadata[key] = copy.deepcopy(self.metadata[key]) except RuntimeError: # multiple threads - log.warning('key {} changed during copy'.format(key)) + log.warning(f'key {key} changed during copy') # copy the core data copied = type(self)(entities=copy.deepcopy(self.entities), @@ -892,8 +883,7 @@ def to_planar(self, N = normal else: log.debug( - "passed normal not used: {}".format( - normal.shape)) + f"passed normal not used: {normal.shape}") # create a transform from fit plane to XY to_2D = plane_transform(origin=C, normal=N) diff --git a/trimesh/path/polygons.py b/trimesh/path/polygons.py index e46140fe0..96315502a 100644 --- a/trimesh/path/polygons.py +++ b/trimesh/path/polygons.py @@ -1,17 +1,11 @@ import numpy as np - from shapely import ops from shapely.geometry import Polygon -from .. import bounds -from .. import graph -from .. import geometry -from .. import grouping - +from .. import bounds, geometry, graph, grouping from ..constants import log from ..constants import tol_path as tol from ..transformations import transform_points - from .simplify import fit_circle_check from .traversal import resample_path diff --git a/trimesh/path/raster.py b/trimesh/path/raster.py index ae5d7b70c..5967d308e 100644 --- a/trimesh/path/raster.py +++ b/trimesh/path/raster.py @@ -8,9 +8,7 @@ try: # keep pillow as a soft dependency - from PIL import (Image, - ImageDraw, - ImageChops) + from PIL import Image, ImageChops, ImageDraw except BaseException as E: from .. import exceptions # re-raise the useful exception when called @@ -55,7 +53,7 @@ def rasterize(path, if origin is None: origin = path.bounds[0] - (pitch * 2.0) - + # check inputs pitch = np.asanyarray(pitch, dtype=np.float64) origin = np.asanyarray(origin, dtype=np.float64) diff --git a/trimesh/path/repair.py b/trimesh/path/repair.py index c6b6a783a..504a029fd 100644 --- a/trimesh/path/repair.py +++ b/trimesh/path/repair.py @@ -4,12 +4,12 @@ Try to fix problems with closed regions. """ -from . import segments -from .. import util - import numpy as np from scipy.spatial import cKDTree +from .. import util +from . import segments + def fill_gaps(path, distance=.025): """ @@ -103,7 +103,7 @@ def fill_gaps(path, distance=.025): # add line segments in as line entities entities = [] - for i in range(len((final_seg))): + for i in range(len(final_seg)): entities.append( line_class( points=np.arange(2) + (i * 2) + len(path.vertices))) diff --git a/trimesh/path/segments.py b/trimesh/path/segments.py index 5475aa05c..7281ef60a 100644 --- a/trimesh/path/segments.py +++ b/trimesh/path/segments.py @@ -7,12 +7,7 @@ import numpy as np -from .. import util -from .. import grouping -from .. import geometry -from .. import interval -from .. import transformations - +from .. 
import geometry, grouping, interval, transformations, util from ..constants import tol diff --git a/trimesh/path/simplify.py b/trimesh/path/simplify.py index fb3657454..60b5f38ac 100644 --- a/trimesh/path/simplify.py +++ b/trimesh/path/simplify.py @@ -1,17 +1,13 @@ -import numpy as np - -import copy import collections +import copy -from . import arc -from . import entities +import numpy as np from .. import util - -from ..nsphere import fit_nsphere - from ..constants import log from ..constants import tol_path as tol +from ..nsphere import fit_nsphere +from . import arc, entities def fit_circle_check(points, @@ -243,7 +239,7 @@ def resample_spline(points, smooth=.001, count=None, degree=3): resampled : (count, dimension) float Points in space """ - from scipy.interpolate import splprep, splev + from scipy.interpolate import splev, splprep if count is None: count = len(points) points = np.asanyarray(points) diff --git a/trimesh/path/traversal.py b/trimesh/path/traversal.py index 4630ec291..75f256e66 100644 --- a/trimesh/path/traversal.py +++ b/trimesh/path/traversal.py @@ -1,12 +1,10 @@ import copy + import numpy as np +from .. import constants, grouping, util from .util import is_ccw -from .. import util -from .. import grouping -from .. import constants - try: import networkx as nx except BaseException as E: diff --git a/trimesh/path/util.py b/trimesh/path/util.py index 19d890e8b..683ff450d 100644 --- a/trimesh/path/util.py +++ b/trimesh/path/util.py @@ -1,4 +1,5 @@ import numpy as np + from ..util import is_ccw # NOQA @@ -21,7 +22,7 @@ def concatenate(paths): return paths[0].copy() # upgrade to 3D if we have mixed 2D and 3D paths - dimensions = set(i.vertices.shape[1] for i in paths) + dimensions = {i.vertices.shape[1] for i in paths} if len(dimensions) > 1: paths = [i.to_3D() if hasattr(i, 'to_3D') else i for i in paths] diff --git a/trimesh/permutate.py b/trimesh/permutate.py index d2928624b..24ae58aa5 100644 --- a/trimesh/permutate.py +++ b/trimesh/permutate.py @@ -7,9 +7,8 @@ import numpy as np -from . import transformations +from . import transformations, util from . import triangles as triangles_module -from . import util def transform(mesh, translation_scale=1000.0): diff --git a/trimesh/points.py b/trimesh/points.py index 01d8504f0..76909ec13 100644 --- a/trimesh/points.py +++ b/trimesh/points.py @@ -8,18 +8,13 @@ import numpy as np -from .parent import Geometry3D -from .geometry import plane_transform +from . import caching, grouping, transformations, util from .constants import tol +from .geometry import plane_transform +from .parent import Geometry3D from .visual.color import VertexColor -from . import util -from . import caching -from . import grouping -from . import transformations - - def point_plane_distance(points, plane_normal, plane_origin=None): @@ -394,8 +389,7 @@ def plot_points(points, show=True): elif points.shape[1] == 2: plt.scatter(*points.T) else: - raise ValueError('points not 2D/3D: {}'.format( - points.shape)) + raise ValueError(f'points not 2D/3D: {points.shape}') if show: plt.show() diff --git a/trimesh/primitives.py b/trimesh/primitives.py index 965d82742..33768741e 100644 --- a/trimesh/primitives.py +++ b/trimesh/primitives.py @@ -7,17 +7,12 @@ Useful because you can move boxes and spheres around and then use trimesh operations on them at any point. """ -import numpy as np import abc -from . import util -from . import sample -from . import caching -from . import inertia -from . import creation -from . import triangles -from . 
import transformations as tf
+import numpy as np
+from . import caching, creation, inertia, sample, triangles, util
+from . import transformations as tf
 from .base import Trimesh
 from .constants import log, tol
@@ -38,7 +33,7 @@ class _Primitive(Trimesh):

 def __init__(self):
 # run the Trimesh constructor with no arguments
- super(_Primitive, self).__init__()
+ super().__init__()

 # remove any data
 self._data.clear()
@@ -49,8 +44,7 @@ def __init__(self):
 self._cache.force_immutable = True

 def __repr__(self):
- return '<trimesh.primitives.{}>'.format(
- type(self).__name__)
+ return f'<trimesh.primitives.{type(self).__name__}>'

 @property
 def faces(self):
@@ -225,7 +219,7 @@ def _create_mesh(self):
 raise ValueError('Primitive doesn\'t define mesh creation!')


-class _PrimitiveAttributes(object):
+class _PrimitiveAttributes:
 """
 Hold the mutable data which defines a primitive.
 """
@@ -287,7 +281,7 @@ def __doc__(self):

 def __getattr__(self, key):
 if key.startswith('_'):
- return super(_PrimitiveAttributes, self).__getattr__(key)
+ return super().__getattr__(key)
 elif key == 'center':
 # this whole __getattr__ is a little hacky
 return self._data['transform'][:3, 3]
@@ -295,12 +289,11 @@ def __getattr__(self, key):
 return util.convert_like(self._data[key],
 self._defaults[key])
 raise AttributeError(
- "primitive object has no attribute '{}' ".format(key))
+ f"primitive object has no attribute '{key}' ")

 def __setattr__(self, key, value):
 if key.startswith('_'):
- return super(_PrimitiveAttributes,
- self).__setattr__(key, value)
+ return super().__setattr__(key, value)
 elif key == 'center':
 value = np.array(value, dtype=np.float64)
 transform = np.eye(4)
@@ -317,7 +310,7 @@ def __setattr__(self, key, value):
 else:
 keys = list(self._defaults.keys())
 raise ValueError(
- 'Only default attributes {} can be set!'.format(keys))
+ f'Only default attributes {keys} can be set!')

 def __dir__(self):
 result = sorted(dir(type(self)) +
@@ -349,7 +342,7 @@ def __init__(self,
 mutable : bool
 Are extents and transform mutable after creation.
 """
- super(Cylinder, self).__init__()
+ super().__init__()

 defaults = {'height': 10.0,
 'radius': 1.0,
@@ -503,7 +496,7 @@ def __init__(self,
 mutable : bool
 Are extents and transform mutable after creation.
 """
- super(Capsule, self).__init__()
+ super().__init__()

 defaults = {'height': 1.0,
 'radius': 1.0,
@@ -589,7 +582,7 @@ def __init__(self,
 Are extents and transform mutable after creation.
 """
- super(Sphere, self).__init__()
+ super().__init__()

 defaults = {'radius': 1.0,
 'transform': np.eye(4),
@@ -725,7 +718,7 @@ def __init__(self,
 mutable : bool
 Are extents and transform mutable after creation.
 """
- super(Box, self).__init__()
+ super().__init__()

 defaults = {'transform': np.eye(4),
 'extents': np.ones(3)}
@@ -903,7 +896,7 @@ def __init__(self,
 from shapely.geometry import Point

 # run the Trimesh init
- super(Extrusion, self).__init__()
+ super().__init__()

 # set default values
 defaults = {'polygon': Point([0, 0]).buffer(1.0),
 'transform': np.eye(4),
diff --git a/trimesh/proximity.py b/trimesh/proximity.py
index c341d761c..e3f19a3e4 100644
--- a/trimesh/proximity.py
+++ b/trimesh/proximity.py
@@ -7,9 +7,8 @@

 import numpy as np

 from .
import util - +from .constants import log_time, tol from .grouping import group_min -from .constants import tol, log_time from .triangles import closest_point as _corresponding from .triangles import points_to_barycentric @@ -302,7 +301,7 @@ def has_normals(self): return self.normals is not None or self.interpolated_normals is not None -class ProximityQuery(object): +class ProximityQuery: """ Proximity queries for the current mesh. """ diff --git a/trimesh/ray/__init__.py b/trimesh/ray/__init__.py index 23ee5dfdd..a71eba216 100644 --- a/trimesh/ray/__init__.py +++ b/trimesh/ray/__init__.py @@ -1,4 +1,4 @@ -from .import ray_triangle +from . import ray_triangle # optionally load an interface to the embree raytracer try: diff --git a/trimesh/ray/ray_pyembree.py b/trimesh/ray/ray_pyembree.py index 77a014051..a8975fa1b 100644 --- a/trimesh/ray/ray_pyembree.py +++ b/trimesh/ray/ray_pyembree.py @@ -2,18 +2,13 @@ Ray queries using the embreex package with the API wrapped to match our native raytracer. """ -import numpy as np - from copy import deepcopy +import numpy as np -from .ray_util import contains_points - -from .. import util -from .. import caching -from .. import intersections - +from .. import caching, intersections, util from ..constants import log_time +from .ray_util import contains_points # the factor of geometry.scale to offset a ray from a triangle # to reliably not hit its origin triangle @@ -31,9 +26,8 @@ except BaseException as E: try: # this will be deprecated at some point hopefully soon - from pyembree import rtcore_scene + from pyembree import __version__, rtcore_scene from pyembree.mesh_construction import TriangleMesh - from pyembree import __version__ # see if we're using a newer version of the pyembree wrapper _embree_new = tuple([int(i) for i in __version__.split('.')]) >= (0, 1, 4) # both old and new versions require exact but different type @@ -43,7 +37,7 @@ raise E -class RayMeshIntersector(object): +class RayMeshIntersector: def __init__(self, geometry, @@ -330,7 +324,7 @@ def contains_points(self, points): return contains_points(self, points) -class _EmbreeWrap(object): +class _EmbreeWrap: """ A light wrapper for Embreex scene objects which allows queries to be scaled to help with precision diff --git a/trimesh/ray/ray_triangle.py b/trimesh/ray/ray_triangle.py index b502ad6f3..7a7a02593 100644 --- a/trimesh/ray/ray_triangle.py +++ b/trimesh/ray/ray_triangle.py @@ -3,19 +3,13 @@ """ import numpy as np - -from .ray_util import contains_points - -from ..constants import tol - -from .. import util -from .. import caching -from .. import grouping -from .. import intersections +from .. import caching, grouping, intersections, util from .. import triangles as triangles_mod +from ..constants import tol +from .ray_util import contains_points -class RayMeshIntersector(object): +class RayMeshIntersector: """ An object to query a mesh for ray intersections. Precomputes an r-tree for each triangle on the mesh. diff --git a/trimesh/ray/ray_util.py b/trimesh/ray/ray_util.py index fac7b58d4..68276213b 100644 --- a/trimesh/ray/ray_util.py +++ b/trimesh/ray/ray_util.py @@ -1,8 +1,6 @@ import numpy as np -from .. import util -from .. import bounds -from .. import constants +from .. import bounds, constants, util @constants.log_time diff --git a/trimesh/registration.py b/trimesh/registration.py index 42f5e2dc8..997d7759a 100644 --- a/trimesh/registration.py +++ b/trimesh/registration.py @@ -7,19 +7,15 @@ import numpy as np - -from . import util -from . import bounds -from . 
import transformations - -from .points import PointCloud, plane_fit +from . import bounds, transformations, util from .geometry import weighted_vertex_normals -from .triangles import normals, angles, cross +from .points import PointCloud, plane_fit from .transformations import transform_points +from .triangles import angles, cross, normals try: - from scipy.spatial import cKDTree import scipy.sparse as sparse + from scipy.spatial import cKDTree except BaseException as E: # wrapping just ImportError fails in some cases # will raise the error when someone tries to use KDtree diff --git a/trimesh/remesh.py b/trimesh/remesh.py index a27cbc5e8..3b2cde28a 100644 --- a/trimesh/remesh.py +++ b/trimesh/remesh.py @@ -6,12 +6,9 @@ """ import numpy as np -from . import util -from . import grouping -from . import graph - -from .geometry import faces_to_edges +from . import graph, grouping, util from .constants import tol +from .geometry import faces_to_edges def subdivide(vertices, @@ -111,7 +108,7 @@ def subdivide(vertices, stack = np.arange( start, start + len(f) * 4).reshape((-1, 4)) # reformat into a slightly silly dict for some reason - index_dict = {k: v for k, v in zip(nonzero, stack)} + index_dict = dict(zip(nonzero, stack)) return new_vertices, new_faces, index_dict diff --git a/trimesh/rendering.py b/trimesh/rendering.py index 9297d1139..155a7b49c 100644 --- a/trimesh/rendering.py +++ b/trimesh/rendering.py @@ -271,7 +271,7 @@ def colors_to_gl(colors, count): if dtype is not None and util.is_shape(colors, (count, (3, 4))): # save the shape and dtype for opengl color string - colors_type = 'c{}{}/static'.format(colors.shape[1], dtype) + colors_type = f'c{colors.shape[1]}{dtype}/static' # reshape the 2D array into a 1D one and then convert to a python list gl_colors = colors.reshape(-1).tolist() elif dtype is not None and colors.shape in [(3,), (4,)]: @@ -279,7 +279,7 @@ def colors_to_gl(colors, count): gl_colors = (np.ones((count, colors.size), dtype=colors.dtype) * colors).reshape(-1).tolist() # we know we're tiling - colors_type = 'c{}{}/static'.format(colors.size, dtype) + colors_type = f'c{colors.size}{dtype}/static' else: # case where colors are wrong shape # use black as the default color diff --git a/trimesh/repair.py b/trimesh/repair.py index b1a6913ad..a9fd7f70e 100644 --- a/trimesh/repair.py +++ b/trimesh/repair.py @@ -7,13 +7,10 @@ import numpy as np -from . import graph -from . import triangles - +from . import graph, triangles from .constants import log -from .grouping import group_rows from .geometry import faces_to_edges - +from .grouping import group_rows try: import networkx as nx diff --git a/trimesh/resolvers.py b/trimesh/resolvers.py index 09ead18f1..532c0c67d 100644 --- a/trimesh/resolvers.py +++ b/trimesh/resolvers.py @@ -7,12 +7,11 @@ archives, web assets, or a local file path. """ -import os import abc import itertools +import os -from . import util -from . import caching +from . 
import caching, util # URL parsing for remote resources via WebResolver try: @@ -84,8 +83,7 @@ def __init__(self, source): # exit if directory doesn't exist if not os.path.isdir(self.parent): raise ValueError( - 'path `{} `not a directory!'.format( - self.parent)) + f'path `{self.parent} `not a directory!') def keys(self): """ @@ -259,7 +257,7 @@ def get(self, name): # get the stored data obj = archive[name] # if the dict is storing data as bytes just return - if isinstance(obj, (bytes, str)): + if isinstance(obj, bytes | str): return obj # otherwise get it as a file object # read file object from beginning diff --git a/trimesh/resources/__init__.py b/trimesh/resources/__init__.py index 0d8ea9c3d..d01a0a091 100644 --- a/trimesh/resources/__init__.py +++ b/trimesh/resources/__init__.py @@ -1,5 +1,5 @@ -import os import json +import os from ..util import decode_text, wrap_as_stream @@ -76,8 +76,8 @@ def get_schema(name): schema : dict Loaded and resolved schema. """ - from ..schemas import resolve from ..resolvers import FilePathResolver + from ..schemas import resolve # get a resolver for our base path resolver = FilePathResolver( os.path.join(_pwd, 'schema', name)) diff --git a/trimesh/resources/javascript/compile.py b/trimesh/resources/javascript/compile.py index 2cecb624a..b5fc690a1 100644 --- a/trimesh/resources/javascript/compile.py +++ b/trimesh/resources/javascript/compile.py @@ -8,9 +8,9 @@ generate the template used in the trimesh viewer. """ import os + import jsmin import requests - from lxml import html @@ -26,7 +26,7 @@ def minify(path): if path.startswith('http'): data = requests.get(path).content.decode( 'ascii', errors='ignore') - print('downloaded', path, len(data)) # noqa + print('downloaded', path, len(data)) else: with open(path, 'rb') as f: # some upstream JS uses unicode spaces -_- @@ -55,12 +55,12 @@ def minify(path): # get a blob of file path = s.attrib['src'].strip() - print('minifying:', path) # noqa + print('minifying:', path) mini = minify(path) # replace test data in our file if path == 'load_base64.js': - print('replacing test data with "$B64GLTF"') # noqa + print('replacing test data with "$B64GLTF"') start = mini.find('base64_data') end = mini.find(';', start) # replace test data with a string we can replace diff --git a/trimesh/sample.py b/trimesh/sample.py index edf002eb8..ca1e1b28c 100644 --- a/trimesh/sample.py +++ b/trimesh/sample.py @@ -7,8 +7,7 @@ import numpy as np -from . import util -from . import transformations +from . import transformations, util from .visual import uv_to_interpolated_color if hasattr(np.random, 'default_rng'): @@ -219,8 +218,7 @@ def sample_surface_even(mesh, count, radius=None, seed=None): return points[:count], index[mask][:count] # warn if we didn't get all the samples we expect - util.log.warning('only got {}/{} samples!'.format( - len(points), count)) + util.log.warning(f'only got {len(points)}/{count} samples!') return points, index[mask] diff --git a/trimesh/scene/__init__.py b/trimesh/scene/__init__.py index 303754a90..1610837b9 100644 --- a/trimesh/scene/__init__.py +++ b/trimesh/scene/__init__.py @@ -1,5 +1,4 @@ from .cameras import Camera - from .scene import Scene, split_scene # add to __all__ as per pep8 diff --git a/trimesh/scene/cameras.py b/trimesh/scene/cameras.py index 6ce420af1..5fcd00bf8 100644 --- a/trimesh/scene/cameras.py +++ b/trimesh/scene/cameras.py @@ -5,7 +5,7 @@ from .. 
import util


-class Camera(object):
+class Camera:

 def __init__(
 self,
@@ -39,7 +39,7 @@ def __init__(

 if name is None:
 # if name is not passed, make it something unique
- self.name = 'camera_{}'.format(util.unique_id(6).upper())
+ self.name = f'camera_{util.unique_id(6).upper()}'
 else:
 # otherwise assign it
 self.name = name
@@ -283,8 +283,7 @@ def look_at(self, points, **kwargs):
 **kwargs)

 def __repr__(self):
- return '<trimesh.scene.Camera> FOV: {} Resolution: {}'.format(
- self.fov, self.resolution)
+ return f'<trimesh.scene.Camera> FOV: {self.fov} Resolution: {self.resolution}'


 def look_at(points, fov, rotation=None, distance=None, center=None, pad=None):
diff --git a/trimesh/scene/lighting.py b/trimesh/scene/lighting.py
index 83c90ca3c..27f36367e 100644
--- a/trimesh/scene/lighting.py
+++ b/trimesh/scene/lighting.py
@@ -10,9 +10,7 @@

 import numpy as np

-from .. import util
-from .. import visual
-from .. import transformations
+from .. import transformations, util, visual

 # default light color
 _DEFAULT_RGBA = np.array([60, 60, 60, 255], dtype=np.uint8)
@@ -48,7 +46,7 @@ def __init__(self,

 if name is None:
 # if name is not passed, make it something unique
- self.name = 'light_{}'.format(util.unique_id(6).upper())
+ self.name = f'light_{util.unique_id(6).upper()}'
 else:
 # otherwise assign it
 self.name = name
@@ -129,7 +127,7 @@ def __init__(self,
 color=None,
 intensity=None,
 radius=None):
- super(DirectionalLight, self).__init__(
+ super().__init__(
 name=name,
 color=color,
 intensity=intensity,
@@ -168,7 +166,7 @@ def __init__(self,
 color=None,
 intensity=None,
 radius=None):
- super(PointLight, self).__init__(
+ super().__init__(
 name=name,
 color=color,
 intensity=intensity,
@@ -220,7 +218,7 @@ def __init__(self,
 radius=None,
 innerConeAngle=0.0,
 outerConeAngle=np.pi / 4.0):
- super(SpotLight, self).__init__(
+ super().__init__(
 name=name,
 color=color,
 intensity=intensity,
diff --git a/trimesh/scene/scene.py b/trimesh/scene/scene.py
index 940a8428e..392f30e96 100644
--- a/trimesh/scene/scene.py
+++ b/trimesh/scene/scene.py
@@ -1,22 +1,13 @@
-import numpy as np
 import collections
 import uuid

-from . import cameras
-from . import lighting
-
-from .. import util
-from .. import units
-from .. import convex
-from .. import inertia
-from .. import caching
-from .. import grouping
-from .. import transformations
+import numpy as np

-from ..util import unique_name
+from .. import caching, convex, grouping, inertia, transformations, units, util
 from ..exchange import export
 from ..parent import Geometry3D
-
+from ..util import unique_name
+from .
import cameras, lighting from .transforms import SceneGraph @@ -167,8 +158,7 @@ def add_geometry(self, return if not hasattr(geometry, 'vertices'): - util.log.debug('unknown type ({}) added to scene!'.format( - type(geometry).__name__)) + util.log.debug(f'unknown type ({type(geometry).__name__}) added to scene!') return # get or create a name to reference the geometry by @@ -897,8 +887,8 @@ def subscene(self, node): graph = SceneGraph(base_frame=node) graph.from_edgelist(edges) - geometry_names = set([e[2]['geometry'] for e in edges - if 'geometry' in e[2]]) + geometry_names = {e[2]['geometry'] for e in edges + if 'geometry' in e[2]} geometry = {k: self.geometry[k] for k in geometry_names} result = Scene(geometry=geometry, graph=graph) return result @@ -1102,7 +1092,7 @@ def scaled(self, scale): """ # convert 2D geometries to 3D for 3D scaling factors scale_is_3D = isinstance( - scale, (list, tuple, np.ndarray)) and len(scale) == 3 + scale, list | tuple | np.ndarray) and len(scale) == 3 if scale_is_3D and np.all(np.asarray(scale) == scale[0]): # scale is uniform @@ -1415,8 +1405,7 @@ def node_remap(node): s = s.scene() # if we don't have a scene raise an exception if not isinstance(s, Scene): - raise ValueError('{} is not a scene!'.format( - type(s).__name__)) + raise ValueError(f'{type(s).__name__} is not a scene!') # remap geometries if they have been consumed map_geom = {} diff --git a/trimesh/scene/transforms.py b/trimesh/scene/transforms.py index 28eac393e..09ffce6d4 100644 --- a/trimesh/scene/transforms.py +++ b/trimesh/scene/transforms.py @@ -1,22 +1,18 @@ -import numpy as np import collections - from copy import deepcopy -from .. import util -from .. import caching +import numpy as np -from ..transformations import (rotation_matrix, - quaternion_matrix, - fix_rigid) +from .. import caching, util from ..caching import hash_fast +from ..transformations import fix_rigid, quaternion_matrix, rotation_matrix # we compare to identity a lot _identity = np.eye(4) _identity.flags['WRITEABLE'] = False -class SceneGraph(object): +class SceneGraph: """ Hold data about positions and instances of geometry in a scene. This includes a forest (i.e. multi-root tree) @@ -172,7 +168,7 @@ def get(self, frame_to, frame_from=None): np.linalg.inv(backward['matrix'])) # filter out any identity matrices matrices = [m for m in matrices if - np.abs((m - _identity)).max() > 1e-8] + np.abs(m - _identity).max() > 1e-8] if len(matrices) == 0: matrix = _identity elif len(matrices) == 1: @@ -425,8 +421,8 @@ def show(self, **kwargs): kwargs : dict Passed to `networkx.draw_networkx` """ - import networkx import matplotlib.pyplot as plt + import networkx # default kwargs will only be set if not # passed explicitly to the show command defaults = {'with_labels': True} @@ -536,7 +532,7 @@ def clear(self): self._cache.clear() -class EnforcedForest(object): +class EnforcedForest: """ A simple forest graph data structure: every node is allowed to have exactly one parent. 
This makes @@ -708,7 +704,7 @@ def shortest_path(self, u, v): common = set(backward).intersection( forward).difference({None}) if len(common) == 0: - raise ValueError('No path from {}->{}!'.format(u, v)) + raise ValueError(f'No path from {u}->{v}!') elif len(common) > 1: # get the first occurring common element in "forward" link = next(f for f in forward if f in common) @@ -785,7 +781,7 @@ def successors(self, node): children = self.children # if node doesn't exist return early if node not in children: - return set([node]) + return {node} # children we need to collect queue = [node] diff --git a/trimesh/smoothing.py b/trimesh/smoothing.py index a8d9c5091..3830c6e2d 100644 --- a/trimesh/smoothing.py +++ b/trimesh/smoothing.py @@ -1,15 +1,15 @@ import numpy as np try: - from scipy.sparse.linalg import spsolve from scipy.sparse import coo_matrix, eye + from scipy.sparse.linalg import spsolve except ImportError: pass from . import triangles -from .util import unitize from .geometry import index_sparse from .triangles import mass_properties +from .util import unitize def filter_laplacian(mesh, diff --git a/trimesh/transformations.py b/trimesh/transformations.py index 48d25498e..0cfc67198 100644 --- a/trimesh/transformations.py +++ b/trimesh/transformations.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # transformations.py # Modified for inclusion in the `trimesh` library @@ -196,10 +195,10 @@ True """ -from __future__ import division, print_function import math + import numpy as np __version__ = '2017.02.17' @@ -1590,7 +1589,7 @@ def random_rotation_matrix(rand=None, num=1, translate=False): return matrix -class Arcball(object): +class Arcball: """Virtual Trackball Control. >>> ball = Arcball() @@ -1761,7 +1760,7 @@ def arcball_nearest_axis(point, axes): 'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1), 'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)} -_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items()) +_TUPLE2AXES = {v: k for k, v in _AXES2TUPLE.items()} def vector_norm(data, axis=None, out=None): diff --git a/trimesh/triangles.py b/trimesh/triangles.py index ed47cf424..a5835ebf3 100644 --- a/trimesh/triangles.py +++ b/trimesh/triangles.py @@ -7,10 +7,9 @@ import numpy as np from . import util - -from .util import unitize, diagonal_dot -from .points import point_plane_distance from .constants import tol +from .points import point_plane_distance +from .util import diagonal_dot, unitize def cross(triangles): diff --git a/trimesh/units.py b/trimesh/units.py index fc6f76dd4..ea09ee188 100644 --- a/trimesh/units.py +++ b/trimesh/units.py @@ -6,8 +6,8 @@ Very basic conversions, and no requirement for sympy.physics.units or pint. """ -from .constants import log from . 
import resources +from .constants import log def unit_conversion(current, desired): diff --git a/trimesh/util.py b/trimesh/util.py index 0be3003fd..872317031 100644 --- a/trimesh/util.py +++ b/trimesh/util.py @@ -10,26 +10,21 @@ """ import abc -import sys +import base64 +import collections import copy import json -import uuid -import base64 +import logging import random import shutil -import logging -import zipfile +import sys import tempfile -import collections +import uuid +import zipfile import numpy as np -if sys.version_info >= (3, 4): - # for newer version of python - ABC = abc.ABC -else: - # an abstract base class that works on older versions - ABC = abc.ABCMeta('ABC', (), {}) +ABC = abc.ABC # a flag we can check elsewhere for Python 3 PY3 = sys.version_info.major >= 3 @@ -39,12 +34,13 @@ basestring = str # Python 3 from io import BytesIO, StringIO - from shutil import which # noqa - from time import perf_counter as now # noqa + from shutil import which + from time import perf_counter as now else: # Python 2 - from StringIO import StringIO from distutils.spawn import find_executable as which # noqa + + from StringIO import StringIO # monkey patch StringIO so `with` statements work StringIO.__enter__ = lambda a: a StringIO.__exit__ = lambda a, b, c, d: a.close() @@ -55,7 +51,7 @@ try: from collections.abc import Mapping except ImportError: - from collections import Mapping + from collections.abc import Mapping # create a default logger log = logging.getLogger('trimesh') @@ -1254,7 +1250,7 @@ def array_to_encoded(array, dtype=None, encoding='base64'): elif encoding == 'binary': encoded['binary'] = array.tobytes(order='C') else: - raise ValueError('encoding {} is not available!'.format(encoding)) + raise ValueError(f'encoding {encoding} is not available!') return encoded @@ -1829,7 +1825,7 @@ def wrap_as_stream(item): return StringIO(item) elif isinstance(item, bytes): return BytesIO(item) - raise ValueError('{} is not wrappable!'.format(type(item).__name__)) + raise ValueError(f'{type(item).__name__} is not wrappable!') def sigfig_round(values, sigfig=1): @@ -2318,7 +2314,7 @@ def __call__(self, key, *args, **kwargs): return self[key](*args, **kwargs) -class TemporaryDirectory(object): +class TemporaryDirectory: """ Same basic usage as tempfile.TemporaryDirectory but functional in Python 2.7+. diff --git a/trimesh/version.py b/trimesh/version.py index b21b2f52f..85b1ed9b8 100644 --- a/trimesh/version.py +++ b/trimesh/version.py @@ -2,4 +2,4 @@ if __name__ == '__main__': # print version if run directly i.e. in a CI script - print(__version__) # noqa + print(__version__) diff --git a/trimesh/viewer/__init__.py b/trimesh/viewer/__init__.py index f8aaeded5..d235d200f 100644 --- a/trimesh/viewer/__init__.py +++ b/trimesh/viewer/__init__.py @@ -6,17 +6,13 @@ """ -from .notebook import (in_notebook, - scene_to_notebook, - scene_to_html) - from .. 
import exceptions +from .notebook import in_notebook, scene_to_html, scene_to_notebook try: # try importing windowed which will fail # if we can't create an openGL context - from .windowed import (SceneViewer, - render_scene) + from .windowed import SceneViewer, render_scene except BaseException as E: # if windowed failed to import only raise # the exception if someone tries to use them diff --git a/trimesh/viewer/notebook.py b/trimesh/viewer/notebook.py index bcb49915d..451471b1e 100644 --- a/trimesh/viewer/notebook.py +++ b/trimesh/viewer/notebook.py @@ -5,12 +5,11 @@ Render trimesh.Scene objects in HTML and jupyter notebooks using three.js """ -import os import base64 +import os # for our template -from .. import util -from .. import resources +from .. import resources, util def scene_to_html(scene): diff --git a/trimesh/viewer/trackball.py b/trimesh/viewer/trackball.py index cc2de2e8a..cf32cdefb 100644 --- a/trimesh/viewer/trackball.py +++ b/trimesh/viewer/trackball.py @@ -30,7 +30,7 @@ from .. import transformations -class Trackball(object): +class Trackball: """A trackball class for creating camera transforms from mouse movements. """ STATE_ROTATE = 0 diff --git a/trimesh/viewer/widget.py b/trimesh/viewer/widget.py index e37d9cd13..464a90f4f 100644 --- a/trimesh/viewer/widget.py +++ b/trimesh/viewer/widget.py @@ -13,8 +13,7 @@ from .. import rendering from .trackball import Trackball -from .windowed import geometry_hash -from .windowed import SceneViewer +from .windowed import SceneViewer, geometry_hash class SceneGroup(pyglet.graphics.Group): @@ -144,7 +143,7 @@ def __init__(self, scene, **kwargs): self._background = kwargs.pop('background', None) self._smooth = kwargs.pop('smooth', True) if kwargs: - raise TypeError('unexpected kwargs: {}'.format(kwargs)) + raise TypeError(f'unexpected kwargs: {kwargs}') @property def scene_group(self): diff --git a/trimesh/viewer/windowed.py b/trimesh/viewer/windowed.py index 7a7731abc..b728d53cc 100644 --- a/trimesh/viewer/windowed.py +++ b/trimesh/viewer/windowed.py @@ -8,8 +8,8 @@ Works on all major platforms: Windows, Linux, and OSX. """ import collections -import numpy as np +import numpy as np import pyglet # pyglet 2.0 is close to a re-write moving from fixed-function @@ -20,13 +20,10 @@ raise ImportError( '`trimesh.viewer.windowed` requires `pip install "pyglet<2"`') -from .trackball import Trackball - -from .. import util -from .. import rendering - -from ..visual import to_rgba +from .. 
import rendering, util from ..transformations import translation_matrix +from ..visual import to_rgba +from .trackball import Trackball pyglet.options['shadow_window'] = False @@ -172,7 +169,7 @@ def __init__(self, samples=4, depth_size=24, double_buffer=True) - super(SceneViewer, self).__init__(config=conf, + super().__init__(config=conf, visible=visible, resizable=True, width=resolution[0], @@ -180,7 +177,7 @@ def __init__(self, caption=caption) except pyglet.window.NoSuchConfigException: conf = gl.Config(double_buffer=True) - super(SceneViewer, self).__init__(config=conf, + super().__init__(config=conf, resizable=True, visible=visible, width=resolution[0], @@ -188,7 +185,7 @@ def __init__(self, caption=caption) else: # window config was manually passed - super(SceneViewer, self).__init__(config=window_conf, + super().__init__(config=window_conf, resizable=True, visible=visible, width=resolution[0], @@ -257,7 +254,7 @@ def add_geometry(self, name, geometry, **kwargs): # convert geometry to constructor args args = rendering.convert_to_vertexlist(geometry, **kwargs) except BaseException: - util.log.warning('failed to add geometry `{}`'.format(name), + util.log.warning(f'failed to add geometry `{name}`', exc_info=True) return @@ -286,8 +283,8 @@ def cleanup_geometries(self): # shorthand to scene graph graph = self.scene.graph # which parts of the graph still have geometry - geom_keep = set([graph[node][1] for - node in graph.nodes_geometry]) + geom_keep = {graph[node][1] for + node in graph.nodes_geometry} # which geometries no longer need to be kept geom_delete = [geom for geom in self.vertex_list if geom not in geom_keep] @@ -450,7 +447,7 @@ def _gl_enable_lighting(scene): # opengl only supports 7 lights? for i, light in enumerate(scene.lights[:7]): # the index of which light we have - lightN = eval('gl.GL_LIGHT{}'.format(i)) + lightN = eval(f'gl.GL_LIGHT{i}') # get the transform for the light by name matrix = scene.graph.get(light.name)[0] @@ -813,7 +810,7 @@ def on_draw(self): util.log.debug(profiler.output_text(unicode=True, color=True)) def flip(self): - super(SceneViewer, self).flip() + super().flip() if self._record: # will save a PNG-encoded bytes img = self.save_image(util.BytesIO()) diff --git a/trimesh/visual/base.py b/trimesh/visual/base.py index 02246625a..38f567fc6 100644 --- a/trimesh/visual/base.py +++ b/trimesh/visual/base.py @@ -5,6 +5,7 @@ The base class for `Visual` objects """ import abc + from ..util import ABC diff --git a/trimesh/visual/color.py b/trimesh/visual/color.py index dd85cc313..7ac684569 100644 --- a/trimesh/visual/color.py +++ b/trimesh/visual/color.py @@ -22,18 +22,15 @@ and setting or altering a value should automatically change the mode. """ -import numpy as np - -import copy import colorsys +import copy -from .base import Visuals - -from .. import util -from .. import caching +import numpy as np -from ..grouping import unique_rows +from .. 
import caching, util from ..constants import tol +from ..grouping import unique_rows +from .base import Visuals class ColorVisuals(Visuals): diff --git a/trimesh/visual/gloss.py b/trimesh/visual/gloss.py index 7ab55d0d3..edeb28b71 100644 --- a/trimesh/visual/gloss.py +++ b/trimesh/visual/gloss.py @@ -122,7 +122,7 @@ def get_diffuse(diffuseFactor, diffuseTexture): else: log.warning( '`diffuseFactor` and `diffuseTexture` have incompatible shapes: ' + - '{0} and {1}'.format(diffuseFactor.shape, diffuse.shape)) + f'{diffuseFactor.shape} and {diffuse.shape}') else: diffuse = diffuseFactor if diffuseFactor is not None else [1, 1, 1, 1] diffuse = np.array(diffuse, dtype=np.float32) diff --git a/trimesh/visual/material.py b/trimesh/visual/material.py index 4dc745b30..d330debf2 100644 --- a/trimesh/visual/material.py +++ b/trimesh/visual/material.py @@ -6,13 +6,12 @@ """ import abc import copy -import numpy as np -from . import color -from .. import util -from .. import exceptions +import numpy as np +from .. import exceptions, util from ..constants import tol +from . import color # epsilon for comparing floating point _eps = 1e-5 @@ -109,11 +108,11 @@ def to_obj(self, name=None): name = self.name # create an MTL file - mtl = ['newmtl {}'.format(name), + mtl = [f'newmtl {name}', 'Ka {:0.8f} {:0.8f} {:0.8f}'.format(*Ka), 'Kd {:0.8f} {:0.8f} {:0.8f}'.format(*Kd), 'Ks {:0.8f} {:0.8f} {:0.8f}'.format(*Ks), - 'Ns {:0.8f}'.format(self.glossiness)] + f'Ns {self.glossiness:0.8f}'] # collect the OBJ data into files data = {} @@ -123,9 +122,9 @@ def to_obj(self, name=None): # what is the name of the export image to save if image_type is None: image_type = 'png' - image_name = '{}.{}'.format(name, image_type.lower()) + image_name = f'{name}.{image_type.lower()}' # save the reference to the image - mtl.append('map_Kd {}'.format(image_name)) + mtl.append(f'map_Kd {image_name}') # save the image texture as bytes in the original format f_obj = util.BytesIO() @@ -133,7 +132,7 @@ def to_obj(self, name=None): f_obj.seek(0) data[image_name] = f_obj.read() - data['{}.mtl'.format(name)] = '\n'.join(mtl).encode('utf-8') + data[f'{name}.mtl'] = '\n'.join(mtl).encode('utf-8') return data, name @@ -525,7 +524,7 @@ def normalTexture(self): Normal texture. """ return self._data.get('normalTexture') - + @normalTexture.setter def normalTexture(self, value): if value is None: @@ -545,7 +544,7 @@ def emissiveTexture(self): Emissive texture. """ return self._data.get('emissiveTexture') - + @emissiveTexture.setter def emissiveTexture(self, value): if value is None: @@ -565,7 +564,7 @@ def occlusionTexture(self): Occlusion texture. """ return self._data.get('occlusionTexture') - + @occlusionTexture.setter def occlusionTexture(self, value): if value is None: @@ -606,7 +605,7 @@ def metallicRoughnessTexture(self): Metallic-roughness texture. """ return self._data.get('metallicRoughnessTexture') - + @metallicRoughnessTexture.setter def metallicRoughnessTexture(self, value): if value is None: @@ -618,7 +617,7 @@ def metallicRoughnessTexture(self, value): @property def name(self): return self._data.get('name') - + @name.setter def name(self, value): if value is None: @@ -692,7 +691,7 @@ def __hash__(self): hash : int Hash of image and parameters """ - return hash(bytes().join( + return hash(b''.join( np.asanyarray(v).tobytes() for v in self._data.values() if v is not None)) @@ -747,9 +746,11 @@ def pack(materials, uvs, deduplicate=True): Combined UV coordinates in the 0.0-1.0 range. 
""" + import collections + from PIL import Image + from ..path import packing - import collections def material_to_img(mat): """ diff --git a/trimesh/visual/objects.py b/trimesh/visual/objects.py index df10957d0..18ff302f3 100644 --- a/trimesh/visual/objects.py +++ b/trimesh/visual/objects.py @@ -7,9 +7,9 @@ """ import numpy as np +from .color import ColorVisuals, color_to_uv from .material import pack from .texture import TextureVisuals -from .color import ColorVisuals, color_to_uv def create_visual(**kwargs): diff --git a/trimesh/visual/texture.py b/trimesh/visual/texture.py index fcc02c341..10da4296a 100644 --- a/trimesh/visual/texture.py +++ b/trimesh/visual/texture.py @@ -2,14 +2,10 @@ import numpy as np -from .base import Visuals +from .. import caching, grouping, util from . import color - -from .. import util -from .. import caching -from .. import grouping - -from .material import SimpleMaterial, PBRMaterial, empty_material # NOQA +from .base import Visuals +from .material import PBRMaterial, SimpleMaterial, empty_material # NOQA class TextureVisuals(Visuals): @@ -192,7 +188,7 @@ def update_vertices(self, mask): updates[key] = value[mask] except BaseException: # usual reason is an incorrect size or index - util.log.warning('failed to update visual: `{}`'.format(key)) + util.log.warning(f'failed to update visual: `{key}`') # clear all values from the vertex attributes self.vertex_attributes.clear() # apply the updated values diff --git a/trimesh/voxel/base.py b/trimesh/voxel/base.py index d3b795d58..58ec95422 100644 --- a/trimesh/voxel/base.py +++ b/trimesh/voxel/base.py @@ -6,20 +6,14 @@ """ import numpy as np -from . import ops -from . import transforms -from . import morphology - -from .encoding import Encoding, DenseEncoding -from .. import util -from .. import caching from .. import bounds as bounds_module +from .. import caching, util from .. import transformations as tr - -from ..parent import Geometry from ..constants import log - from ..exchange.binvox import export_binvox +from ..parent import Geometry +from . import morphology, ops, transforms +from .encoding import DenseEncoding, Encoding class VoxelGrid(Geometry): @@ -40,7 +34,7 @@ def __init__(self, encoding, transform=None, metadata=None): self._cache = caching.Cache( id_function=self._data.__hash__) - self.metadata = dict() + self.metadata = {} # update the mesh metadata with passed metadata if isinstance(metadata, dict): self.metadata.update(metadata) diff --git a/trimesh/voxel/creation.py b/trimesh/voxel/creation.py index de87523a3..55a67e8ee 100644 --- a/trimesh/voxel/creation.py +++ b/trimesh/voxel/creation.py @@ -1,11 +1,8 @@ import numpy as np -from ..constants import log_time -from .. import remesh -from .. import grouping -from .. import util +from .. import grouping, remesh, util from .. import transformations as tr - +from ..constants import log_time from . import base from . import encoding as enc diff --git a/trimesh/voxel/encoding.py b/trimesh/voxel/encoding.py index 02c5134e3..723ea8642 100644 --- a/trimesh/voxel/encoding.py +++ b/trimesh/voxel/encoding.py @@ -1,11 +1,11 @@ """OO interfaces to encodings for ND arrays which caching.""" +import abc + import numpy as np -import abc +from .. import caching from ..util import ABC, log - from . import runlength -from .. 
import caching try: from scipy import sparse as sp @@ -201,7 +201,7 @@ def __init__(self, data): if not isinstance(data, np.ndarray): raise ValueError('DenseEncoding data must be a numpy array') data = caching.tracked_array(data) - super(DenseEncoding, self).__init__(data=data) + super().__init__(data=data) @property def dtype(self): @@ -292,7 +292,7 @@ def __init__(self, indices, values, shape=None): + 1 is used. """ data = caching.DataStore() - super(SparseEncoding, self).__init__(data) + super().__init__(data) data['indices'] = indices data['values'] = values indices = data['indices'] @@ -301,8 +301,7 @@ def __init__(self, indices, values, shape=None): 'indices must be 2D, got shaped %s' % str(indices.shape)) if data['values'].shape != (indices.shape[0],): raise ValueError( - 'values and indices shapes inconsistent: %s and %s' - % (data['values'], data['indices'])) + 'values and indices shapes inconsistent: {} and {}'.format(data['values'], data['indices'])) if shape is None: self._shape = tuple(data['indices'].max(axis=0) + 1) else: @@ -446,7 +445,7 @@ def __init__(self, data, dtype=None): dtype: dtype of encoded data. Each second value of data is cast will be cast to this dtype if provided. """ - super(RunLengthEncoding, self).__init__( + super().__init__( data=caching.tracked_array(data)) if dtype is None: dtype = self._data.dtype @@ -595,7 +594,7 @@ def __init__(self, data): ------------ data: binary run length encoded data. """ - super(BinaryRunLengthEncoding, self).__init__(data=data, dtype=bool) + super().__init__(data=data, dtype=bool) @caching.cache_decorator def is_empty(self): @@ -783,7 +782,7 @@ def __init__(self, encoding, shape): encoding = encoding.flat else: raise ValueError('encoding must be an Encoding') - super(ShapedEncoding, self).__init__(data=encoding) + super().__init__(data=encoding) self._shape = tuple(shape) nn = self._shape.count(-1) size = np.prod(self._shape) @@ -844,7 +843,7 @@ def __init__(self, base_encoding, perm): raise ValueError( 'base_encoding has %d ndims - cannot transpose with perm %s' % (base_encoding.ndims, str(perm))) - super(TransposedEncoding, self).__init__(base_encoding) + super().__init__(base_encoding) perm = np.array(perm, dtype=np.int64) if not all(i in perm for i in range(base_encoding.ndims)): raise ValueError('perm %s is not a valid permutation' % str(perm)) @@ -921,7 +920,7 @@ def __init__(self, encoding, axes): if len(set(self._axes)) != len(self._axes): raise ValueError( "Axes cannot contain duplicates, got %s" % str(self._axes)) - super(FlippedEncoding, self).__init__(encoding) + super().__init__(encoding) if not all(0 <= a < self._data.ndims for a in axes): raise ValueError( 'Invalid axes %s for %d-d encoding' diff --git a/trimesh/voxel/morphology.py b/trimesh/voxel/morphology.py index fa61587c6..fc3cf41c5 100644 --- a/trimesh/voxel/morphology.py +++ b/trimesh/voxel/morphology.py @@ -1,13 +1,11 @@ """Basic morphology operations that create new encodings.""" import numpy as np +from .. import util +from ..constants import log_time from . import encoding as enc from . import ops -from ..constants import log_time -from .. 
import util - - try: from scipy import ndimage except BaseException as E: diff --git a/trimesh/voxel/ops.py b/trimesh/voxel/ops.py index 2187fb875..af69ec453 100644 --- a/trimesh/voxel/ops.py +++ b/trimesh/voxel/ops.py @@ -114,6 +114,7 @@ def matrix_to_marching_cubes(matrix, pitch=1.0): the marching cubes algorithm in skimage """ from skimage import measure + from ..base import Trimesh matrix = np.asanyarray(matrix, dtype=bool) diff --git a/trimesh/voxel/runlength.py b/trimesh/voxel/runlength.py index 6b911a378..ae8a0abd6 100644 --- a/trimesh/voxel/runlength.py +++ b/trimesh/voxel/runlength.py @@ -41,6 +41,7 @@ This module contains implementations of various RLE/BRLE operations. """ import functools + import numpy as np diff --git a/trimesh/voxel/transforms.py b/trimesh/voxel/transforms.py index 03e6f8b25..f2b50a7cb 100644 --- a/trimesh/voxel/transforms.py +++ b/trimesh/voxel/transforms.py @@ -1,12 +1,10 @@ import numpy as np -from .. import util -from .. import caching - +from .. import caching, util from .. import transformations as tr -class Transform(object): +class Transform: """ Class for caching metadata associated with 4x4 transformations. From 63affb21cc8faa1ff85157470a441d8ade88ed26 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Wed, 23 Aug 2023 16:33:32 -0400 Subject: [PATCH 02/84] passing ruff with upgrades --- MANIFEST.in | 3 - pyproject.toml | 95 +++++++++++++- setup.py | 249 ------------------------------------- tests/test_copy.py | 8 +- tests/test_mutate.py | 144 ++++++++++----------- tests/test_paths.py | 2 +- trimesh/version.py | 5 +- trimesh/viewer/notebook.py | 3 +- trimesh/visual/color.py | 6 +- 9 files changed, 180 insertions(+), 335 deletions(-) delete mode 100644 MANIFEST.in delete mode 100644 setup.py diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index dee9803ed..000000000 --- a/MANIFEST.in +++ /dev/null @@ -1,3 +0,0 @@ -include LICENSE.md -include README.md -include trimesh/resources diff --git a/pyproject.toml b/pyproject.toml index f8cc629bb..ecb079149 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,98 @@ [build-system] build-backend = "setuptools.build_meta" -requires = ["setuptools >= 40.8", "wheel"] +requires = ["setuptools >= 60", "wheel"] + +[project] +name = "trimesh" +version = "3.23.3" +authors = [{name = "Michael Dawson-Haggerty", email = "mikedh@kerfed.com"}] +license = {text = "MIT"} +description = "Import, export, process, analyze and view triangular meshes." 
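# a quick orientation note for the tables that follow, since this
# patch deletes the dynamic setup.py: the only hard runtime dependency
# kept in `dependencies` is numpy, and everything the old script
# assembled at install time is declared statically as extras, so e.g.
# `pip install trimesh[easy]` pulls in the soft dependencies that
# install without compiling anything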
+keywords = ["graphics", "mesh", "geometry", "3D"] +classifiers = [ + "Development Status :: 4 - Beta", + "License :: OSI Approved :: MIT License", + "Programming Language :: Python", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Natural Language :: English", + "Topic :: Scientific/Engineering", +] +urls = {Homepage = "https://github.com/mikedh/trimesh"} +dependencies = ["numpy"] + +[project.readme] +file = "README.md" +content-type = "text/markdown" + +[project.optional-dependencies] +test = [ + "pytest-cov", + "coveralls", + "pytest", + "pymeshlab", + "pyinstrument", + "ruff", + "ezdxf", + "autopep8<2", +] +easy = [ + "colorlog", + "mapbox-earcut", + "chardet", + "lxml", + "jsonschema", + "networkx", + "svg.path", + "pycollada", + "setuptools", + "shapely", + "xxhash", + "rtree", + "requests", + "scipy", + "embreex", + "pillow", +] +all = [ + "colorlog", + "mapbox-earcut", + "lxml", + "networkx", + "svg.path", + "pycollada", + "shapely", + "xatlas", + "python-fcl", + "scipy", + "embreex", + "chardet", + "pyglet<2", + "jsonschema", + "setuptools", + "xxhash", + "scikit-image", + "rtree", + "psutil", + "requests", + "pillow", +] +recommends = ["glooey", "sympy", "meshio"] + +[tool.setuptools.packages.find] +include = ["trimesh"] + +[tool.setuptools.package-data] +trimesh = [ + "resources/templates/*", + "resources/*.json", + "resources/schema/*", + "resources/schema/primitive/*.json", + "resources/*.zip", +] + [tool.ruff] # See https://github.com/charliermarsh/ruff#rules for error code definitions. @@ -21,6 +113,7 @@ ignore = [ "N802", # Function name should be lowercase "N806", # Variable in function should be lowercase "E501", # Line too long ({width} > {limit} characters) + "B904", # raise ... 
from err "B905", # zip() without an explicit strict= parameter ] line-length = 90 diff --git a/setup.py b/setup.py deleted file mode 100644 index 617ca81c5..000000000 --- a/setup.py +++ /dev/null @@ -1,249 +0,0 @@ -#!/usr/bin/env python -# ruff: noqa -import os -import sys -from setuptools import setup - -# load __version__ without importing anything -_version_file = os.path.join(os.path.dirname(__file__), "trimesh", "version.py") - -if os.path.isfile(_version_file): - with open(_version_file, "r") as f: - _version_raw = f.read() - # use eval to get a clean string of version from file - __version__ = eval( - next( - line.strip().split("=")[-1] - for line in str.splitlines(_version_raw) - if "_version_" in line - ) - ) -else: - __version__ = None - -# load README.md as long_description -long_description = "" -if os.path.exists("README.md"): - with open("README.md", "r") as f: - long_description = f.read() - -# minimal requirements for installing trimesh -requirements_default = set(["numpy"]) - -# "easy" requirements should install without compiling -# anything on Windows, Linux, and Mac, for Python >= 3.6 -requirements_easy = set( - [ - "scipy", # provide convex hulls, fast graph ops, etc - "networkx", # provide slow graph ops with a nice API - "lxml", # handle XML better and faster than built- in XML - "shapely", # handle 2D polygons robustly - "rtree", # create N-dimension trees for broad-phase queries - "svg.path", # handle SVG format path strings - "pillow", # load images - "embreex", # Intel's Embree ray check engine with wheels - "requests", # do network requests - "xxhash", # hash ndarrays faster than built-in MD5/CRC - "setuptools", # do setuptools stuff - "jsonschema", # validate JSON schemas like GLTF - "pycollada", # parse collada/dae/zae files - "chardet", # figure out if someone used UTF-16 - "mapbox-earcut", # fast 2D triangulations of polygons - "colorlog", - ] -) # log in pretty colors - -# "all" requirements only need to be installable -# through some mechanism on Linux with Python 3.5+ -# and are allowed to compile code -requirements_all = requirements_easy.union( - [ - "python-fcl", # do fast 3D collision queries - "psutil", # figure out how much memory we have - "scikit-image", # marching cubes and other nice stuff - "xatlas", # texture unwrapping - "pyglet<2", # render preview windows nicely : note pyglet 2.0 is basically a re-write - ] -) -# requirements for running unit tests -requirements_test = set( - [ - "pytest", # run all unit tests - "pytest-cov", # coverage plugin - "pyinstrument", # profile code - "coveralls", # report coverage stats - "autopep8<2", # check and autoformat - "ruff", # static code analysis - "pymeshlab", # used as a validator for exports - "ezdxf", - ] -) # used as a validator for exports - -# things that are used implicitly -requirements_recommends = set(["meshio", "sympy", "glooey"]) - -# Python 2.7 and 3.4 support has been dropped from packages -# version lock those packages here so install succeeds -current = (sys.version_info.major, sys.version_info.minor) - -# packages that no longer support old Python -lock = [ - ((3, 4), "lxml", "4.3.5"), - ((3, 4), "shapely", "1.6.4"), - ((3, 4), "pyglet", "1.4.10"), - ((3, 5), "sympy", None), - ((3, 6), "pyglet<2", None), - ((3, 6), "autopep8", None), - ((3, 6), "ruff", None), - ((3, 7), "pymeshlab", None), - ((3, 5), "embreex", None), - ((3, 6), "svg.path", "4.1"), -] -for max_python, name, version in lock: - if current <= max_python: - # remove version-free requirements - 
requirements_easy.discard(name) - requirements_test.discard(name) - - # if version is None drop that package - if version is not None: - # add working version locked requirements - requirements_easy.add("{}=={}".format(name, version)) - - -def format_all(): - """ - A shortcut to run automatic formatting and complaining - on all of the trimesh subdirectories. - """ - import subprocess - - def run_on(target): - # words that codespell hates - # note that it always checks against the lower case - word_skip = "datas,coo,nd,files',filetests,ba,childs,whats" - # files to skip spelling on - file_skip = "*.pyc,*.zip,.DS_Store,*.js,./trimesh/resources" - spell = [ - "codespell", - "-i", - "3", - "--skip=" + file_skip, - "-L", - word_skip, - "-w", - target, - ] - print("Running: \n {} \n\n\n".format(" ".join(spell))) - subprocess.check_call(spell) - - formatter = [ - "autopep8", - "--recursive", - "--verbose", - "--in-place", - "--aggressive", - target, - ] - print("Running: \n {} \n\n\n".format(" ".join(formatter))) - subprocess.check_call(formatter) - - flake = ["flake8", target] - print("Running: \n {} \n\n\n".format(" ".join(flake))) - subprocess.check_call(flake) - - # run on our target locations - for t in ["trimesh", "tests", "examples"]: - run_on(t) - - -# if someone wants to output a requirements file -# `python setup.py --list-all > requirements.txt` -if "--list-all" in sys.argv: - # will not include default requirements (numpy) - print("\n".join(requirements_all)) - exit() -elif "--list-easy" in sys.argv: - # again will not include numpy+setuptools - print("\n".join(requirements_easy)) - exit() -elif "--list-test" in sys.argv: - # again will not include numpy+setuptools - print("\n".join(requirements_test)) - exit() -elif "--format" in sys.argv: - format_all() - exit() -elif "--bump" in sys.argv: - # bump the version number - # convert current version to integers - bumped = [int(i) for i in __version__.split(".")] - # increment the last field by one - bumped[-1] += 1 - # re-combine into a version string - version_new = ".".join(str(i) for i in bumped) - print("version bump `{}` => `{}`".format(__version__, version_new)) - # write back the original version file with - # just the value replaced with the new one - raw_new = _version_raw.replace(__version__, version_new) - with open(_version_file, "w") as f: - f.write(raw_new) - exit() - - -# call the magical setuptools setup -setup( - name="trimesh", - version=__version__, - description="Import, export, process, analyze and view triangular meshes.", - long_description=long_description, - long_description_content_type="text/markdown", - author="Michael Dawson-Haggerty", - author_email="mikedh@kerfed.com", - license="MIT", - url="https://github.com/mikedh/trimesh", - keywords="graphics mesh geometry 3D", - classifiers=[ - "Development Status :: 4 - Beta", - "License :: OSI Approved :: MIT License", - "Programming Language :: Python", - "Programming Language :: Python :: 2.7", - "Programming Language :: Python :: 3.5", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Natural Language :: English", - "Topic :: Scientific/Engineering", - ], - packages=[ - "trimesh", - "trimesh.ray", - "trimesh.path", - "trimesh.path.exchange", - "trimesh.scene", - "trimesh.voxel", - "trimesh.visual", - "trimesh.viewer", - "trimesh.exchange", - "trimesh.resources", - "trimesh.interfaces", - ], - 
package_data={ - "trimesh": [ - "resources/templates/*", - "resources/*.json", - "resources/schema/*", - "resources/schema/primitive/*.json", - "resources/*.zip", - ] - }, - install_requires=list(requirements_default), - extras_require={ - "test": list(requirements_test), - "easy": list(requirements_easy), - "all": list(requirements_all), - "recommends": list(requirments_recommends), - }, -) diff --git a/tests/test_copy.py b/tests/test_copy.py index ae8c566f0..9e6bde033 100644 --- a/tests/test_copy.py +++ b/tests/test_copy.py @@ -16,10 +16,10 @@ def test_copy(self): start = hash(mesh) # make sure some stuff is populated - mesh.kdtree - mesh.triangles_tree - mesh.face_adjacency_angles - mesh.facets + _ = mesh.kdtree + _ = mesh.triangles_tree + _ = mesh.face_adjacency_angles + _ = mesh.facets assert 'triangles_tree' in mesh._cache assert len(mesh._cache) > 0 diff --git a/tests/test_mutate.py b/tests/test_mutate.py index 71d3ff8d9..139b3a8fb 100644 --- a/tests/test_mutate.py +++ b/tests/test_mutate.py @@ -68,78 +68,78 @@ def _test_not_mutated(self, mesh, verts, faces): faces = g.np.copy(faces) lo, hi = mesh.bounds - mesh.faces_sparse - mesh.face_normals - mesh.vertex_normals - mesh.extents - mesh.scale - mesh.centroid - mesh.center_mass - mesh.density - mesh.volume - mesh.mass - mesh.moment_inertia - mesh.principal_inertia_components - mesh.principal_inertia_vectors - mesh.principal_inertia_transform - mesh.symmetry - mesh.symmetry_axis - mesh.symmetry_section - mesh.triangles - mesh.triangles_tree - mesh.triangles_center - mesh.triangles_cross - mesh.edges - mesh.edges_face - mesh.edges_unique - mesh.edges_unique_length - mesh.edges_unique_inverse - mesh.edges_sorted - mesh.edges_sparse - mesh.body_count - mesh.faces_unique_edges - mesh.euler_number - mesh.referenced_vertices - mesh.units - mesh.face_adjacency - mesh.face_adjacency_edges - mesh.face_adjacency_angles - mesh.face_adjacency_projections - mesh.face_adjacency_convex - mesh.face_adjacency_unshared - mesh.face_adjacency_radius - mesh.face_adjacency_span - mesh.vertex_adjacency_graph - mesh.vertex_neighbors - mesh.is_winding_consistent - mesh.is_watertight - mesh.is_volume - mesh.is_empty - mesh.is_convex - mesh.kdtree - mesh.facets - mesh.facets_area - mesh.facets_normal - mesh.facets_origin - mesh.facets_boundary - mesh.facets_on_hull - mesh.visual - mesh.convex_hull - mesh.sample(500, False) - mesh.voxelized((hi[0] - lo[0]) / 100.0) - mesh.outline() - mesh.area - mesh.area_faces - mesh.mass_properties - mesh.scene() - mesh.identifier - mesh.identifier_hash - mesh.to_dict() - mesh.face_angles - mesh.face_angles_sparse - mesh.vertex_defects - mesh.face_adjacency_tree - mesh.copy() + _ = mesh.faces_sparse + _ = mesh.face_normals + _ = mesh.vertex_normals + _ = mesh.extents + _ = mesh.scale + _ = mesh.centroid + _ = mesh.center_mass + _ = mesh.density + _ = mesh.volume + _ = mesh.mass + _ = mesh.moment_inertia + _ = mesh.principal_inertia_components + _ = mesh.principal_inertia_vectors + _ = mesh.principal_inertia_transform + _ = mesh.symmetry + _ = mesh.symmetry_axis + _ = mesh.symmetry_section + _ = mesh.triangles + _ = mesh.triangles_tree + _ = mesh.triangles_center + _ = mesh.triangles_cross + _ = mesh.edges + _ = mesh.edges_face + _ = mesh.edges_unique + _ = mesh.edges_unique_length + _ = mesh.edges_unique_inverse + _ = mesh.edges_sorted + _ = mesh.edges_sparse + _ = mesh.body_count + _ = mesh.faces_unique_edges + _ = mesh.euler_number + _ = mesh.referenced_vertices + _ = mesh.units + _ = mesh.face_adjacency + _ = 
mesh.face_adjacency_edges + _ = mesh.face_adjacency_angles + _ = mesh.face_adjacency_projections + _ = mesh.face_adjacency_convex + _ = mesh.face_adjacency_unshared + _ = mesh.face_adjacency_radius + _ = mesh.face_adjacency_span + _ = mesh.vertex_adjacency_graph + _ = mesh.vertex_neighbors + _ = mesh.is_winding_consistent + _ = mesh.is_watertight + _ = mesh.is_volume + _ = mesh.is_empty + _ = mesh.is_convex + _ = mesh.kdtree + _ = mesh.facets + _ = mesh.facets_area + _ = mesh.facets_normal + _ = mesh.facets_origin + _ = mesh.facets_boundary + _ = mesh.facets_on_hull + _ = mesh.visual + _ = mesh.convex_hull + _ = mesh.sample(500, False) + _ = mesh.voxelized((hi[0] - lo[0]) / 100.0) + _ = mesh.outline() + _ = mesh.area + _ = mesh.area_faces + _ = mesh.mass_properties + _ = mesh.scene() + _ = mesh.identifier + _ = mesh.identifier_hash + _ = mesh.to_dict() + _ = mesh.face_angles + _ = mesh.face_angles_sparse + _ = mesh.vertex_defects + _ = mesh.face_adjacency_tree + _ = mesh.copy() # ray.intersects_id centre = mesh.vertices.mean(axis=0) diff --git a/tests/test_paths.py b/tests/test_paths.py index 80a27541b..0c511f382 100644 --- a/tests/test_paths.py +++ b/tests/test_paths.py @@ -88,7 +88,7 @@ def test_discrete(self): d.metadata['file_name'], len(split)) for body in split: - body.identifier + _ = body.identifier if len(d.root) == 1: d.apply_obb() diff --git a/trimesh/version.py b/trimesh/version.py index 85b1ed9b8..5a80da940 100644 --- a/trimesh/version.py +++ b/trimesh/version.py @@ -1,4 +1,7 @@ -__version__ = '3.23.3' +from importlib.metadata import version + +# will get the version the package was installed with +__version__ = version('trimesh') if __name__ == '__main__': # print version if run directly i.e. in a CI script diff --git a/trimesh/viewer/notebook.py b/trimesh/viewer/notebook.py index 451471b1e..ddda0e69a 100644 --- a/trimesh/viewer/notebook.py +++ b/trimesh/viewer/notebook.py @@ -32,7 +32,8 @@ def scene_to_html(scene): base = util.decompress( resources.get('templates/viewer.zip', decode=False), file_type='zip')['viewer.html.template'].read().decode('utf-8') - scene.camera + # make sure scene has camera populated before export + _ = scene.camera # get export as bytes data = scene.export(file_type='glb') # encode as base64 string diff --git a/trimesh/visual/color.py b/trimesh/visual/color.py index 7ac684569..114e00210 100644 --- a/trimesh/visual/color.py +++ b/trimesh/visual/color.py @@ -146,9 +146,9 @@ def copy(self): Contains the same information as self """ copied = ColorVisuals() - # call the literally insane generators - self.face_colors - self.vertex_colors + # call the literally insane generators to validate + self.face_colors # noqa + self.vertex_colors # noqa # copy anything that's actually data copied._data.data = copy.deepcopy(self._data.data) From 37039c081e5c64aec30e26bafd0d9e7b206a670d Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 24 Aug 2023 21:13:02 -0400 Subject: [PATCH 03/84] use manual setuptools discovery --- pyproject.toml | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5e2d7c1a4..9fcab084b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,18 +33,28 @@ file = "README.md" content-type = "text/markdown" [tool.setuptools] -include-package-data = true - -[tool.setuptools.packages.find] -where = ["trimesh"] +packages = [ + "trimesh", + "trimesh.ray", + "trimesh.path", + "trimesh.path.exchange", + "trimesh.scene", + "trimesh.voxel", + "trimesh.visual", 
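    # one trade-off of switching to a manual package list: setuptools
    # will only ship the subpackages spelled out here, so a subpackage
    # added later has to be appended to this list by hand, where the
    # `packages.find` table being removed would have discovered it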
+ "trimesh.viewer", + "trimesh.exchange", + "trimesh.resources", + "trimesh.interfaces", +] +include-package-data = false [tool.setuptools.package-data] -"trimesh.resources" = [ - "templates/*", - "*.json", - "schema/*", - "schema/primitive/*.json", - "*.zip", +trimesh = [ + "resources/templates/*", + "resources/*.json", + "resources/schema/*", + "resources/schema/primitive/*.json", + "resources/*.zip", ] [project.optional-dependencies] From 831d769f58d84b1d0d55159303749097fbc9ce92 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 24 Aug 2023 21:19:44 -0400 Subject: [PATCH 04/84] better error message for test_minimal --- tests/test_minimal.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/test_minimal.py b/tests/test_minimal.py index a3c38ddc5..8aa1347a1 100644 --- a/tests/test_minimal.py +++ b/tests/test_minimal.py @@ -108,13 +108,14 @@ def test_load_wrap(self): try: get_mesh('cycloidal.3DXML') except BaseException as E: - exc = str(E) + exc = str(E).lower() # should have raised assert exc is not None # error message should have been useful - assert 'lxml' in exc + if 'lxml' not in exc: + raise ValueError(exc) if __name__ == '__main__': From 42815ec56c47408e357a5c861a471e3f1e6b8779 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 24 Aug 2023 21:28:06 -0400 Subject: [PATCH 05/84] edit test_minimal --- tests/test_minimal.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/test_minimal.py b/tests/test_minimal.py index 8aa1347a1..4cd4941df 100644 --- a/tests/test_minimal.py +++ b/tests/test_minimal.py @@ -114,7 +114,8 @@ def test_load_wrap(self): assert exc is not None # error message should have been useful - if 'lxml' not in exc: + # containing which module the user was missing + if not any(m in exc for m in ('lxml', 'networkx')): raise ValueError(exc) From 2e077ff7a42b9f3a7ffd4667c584cb672332bde1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=96=B9=E6=AD=A6=E5=8D=93?= Date: Mon, 28 Aug 2023 09:59:10 +0800 Subject: [PATCH 06/84] prevent division by zero --- trimesh/ray/ray_triangle.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/trimesh/ray/ray_triangle.py b/trimesh/ray/ray_triangle.py index b502ad6f3..2743ba024 100644 --- a/trimesh/ray/ray_triangle.py +++ b/trimesh/ray/ray_triangle.py @@ -391,6 +391,9 @@ def ray_bounds(ray_origins, axis_dir = np.array([ray_directions[i][a] for i, a in enumerate(axis)]).reshape((-1, 1)) + # prevent division by zero + axis_dir[axis_dir == 0] = tol.zero + # parametric equation of a line # point = direction*t + origin # p = dt + o From e4d51e5ba2b4f4a42816e5e9d188c40994fe0450 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 28 Aug 2023 12:52:37 -0400 Subject: [PATCH 07/84] add prerelease warning to readme and test on 3.6 --- .github/workflows/test.yml | 2 +- README.md | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 56f58e3bb..f33a280d7 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -24,7 +24,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - python-version: ["3.11"] + python-version: ["3.6", "3.11"] os: [ubuntu-latest, windows-latest, macos-latest] steps: - uses: actions/checkout@v3 diff --git a/README.md b/README.md index afc64497c..021cf995a 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,12 @@ -Trimesh is a pure Python (2.7-3.5+) library for loading and using [triangular meshes](https://en.wikipedia.org/wiki/Triangle_mesh) 
with an emphasis on watertight surfaces. The goal of the library is to provide a full featured and well tested Trimesh object which allows for easy manipulation and analysis, in the style of the Polygon object in the [Shapely library](https://github.com/Toblerity/Shapely).
+| :warning: WARNING |
+|:---------------------------|
+| trimesh 4.0.0, which raises the minimum Python version to 3.7, is in pre-release and will be released soon; you may want to test your stack with `pip install --pre trimesh` |
+
+
+Trimesh is a pure Python 3.7+ library for loading and using [triangular meshes](https://en.wikipedia.org/wiki/Triangle_mesh) with an emphasis on watertight surfaces. The goal of the library is to provide a full featured and well tested Trimesh object which allows for easy manipulation and analysis, in the style of the Polygon object in the [Shapely library](https://github.com/Toblerity/Shapely).

The API is mostly stable, but this should not be relied on and is not guaranteed: install a specific version if you plan on deploying something using trimesh.

From b3acf7c6534ce38c5c239494ea7f4b3959bc57e0 Mon Sep 17 00:00:00 2001
From: Michael Dawson-Haggerty
Date: Mon, 28 Aug 2023 12:58:33 -0400
Subject: [PATCH 08/84] try old python on 20.04

---
 .github/workflows/test.yml | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index f33a280d7..740f7a390 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -26,6 +26,12 @@ jobs: matrix: python-version: ["3.6", "3.11"] os: [ubuntu-latest, windows-latest, macos-latest] + exclude: + - os: ubuntu-latest + python-version: 3.6 + include: + - os: ubuntu-20.04 + python-version: 3.6 steps: - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }}

From 2cd4d8fecc62dc80a6aa4421eb469bc96c0f58ce Mon Sep 17 00:00:00 2001
From: Michael Dawson-Haggerty
Date: Mon, 28 Aug 2023 12:59:49 -0400
Subject: [PATCH 09/84] try last 3.6 version of setuptools

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 9fcab084b..405be8b1b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@ [build-system] build-backend = "setuptools.build_meta" -requires = ["setuptools >= 60", "wheel"] +requires = ["setuptools >= 59.6", "wheel"] [project] name = "trimesh"

From d3fc18ec7cb3f59d4506cc4feb8d3eaa407d373b Mon Sep 17 00:00:00 2001
From: Michael Dawson-Haggerty
Date: Mon, 28 Aug 2023 13:00:08 -0400
Subject: [PATCH 10/84] see if 3.6 can work

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 405be8b1b..c036b45f4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ requires = ["setuptools >= 59.6", "wheel"] [project] name = "trimesh" -requires-python = ">=3.7" +requires-python = ">=3.6" version = "4.0.0.rc0" authors = [{name = "Michael Dawson-Haggerty", email = "mikedh@kerfed.com"}] license = {text = "MIT"} description = "Import, export, process, analyze and view triangular meshes."

From 88a688c24de4354dab558c72961e8a76505a081b Mon Sep 17 00:00:00 2001
From: Michael Dawson-Haggerty
Date: Mon, 28 Aug 2023 13:11:17 -0400
Subject: [PATCH 11/84] setuptools version

---
 .github/workflows/test.yml | 8 +-------
 pyproject.toml | 4 ++--
 2 files changed, 3 insertions(+), 9 deletions(-)

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 740f7a390..07a1d6308 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -24,14 +24,8 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: -
python-version: ["3.6", "3.11"] + python-version: ["3.7", "3.11"] os: [ubuntu-latest, windows-latest, macos-latest] - exclude: - - os: ubuntu-latest - python-version: 3.6 - include: - - os: ubuntu-20.04 - python-version: 3.6 steps: - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} diff --git a/pyproject.toml b/pyproject.toml index c036b45f4..504ca9aae 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,10 +1,10 @@ [build-system] build-backend = "setuptools.build_meta" -requires = ["setuptools >= 59.6", "wheel"] +requires = ["setuptools >= 61.0", "wheel"] [project] name = "trimesh" -requires-python = ">=3.6" +requires-python = ">=3.7" version = "4.0.0.rc0" authors = [{name = "Michael Dawson-Haggerty", email = "mikedh@kerfed.com"}] license = {text = "MIT"} From a3c4edf0924639d8ed2fc8ae07be048a059e3722 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 28 Aug 2023 13:20:46 -0400 Subject: [PATCH 12/84] use pkg_resources on older python --- trimesh/__init__.py | 6 +----- trimesh/version.py | 13 +++++++++---- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/trimesh/__init__.py b/trimesh/__init__.py index 591a78225..09aca216c 100644 --- a/trimesh/__init__.py +++ b/trimesh/__init__.py @@ -7,8 +7,6 @@ provide a fully featured Trimesh object which allows for easy manipulation and analysis, in the style of the Polygon object in the Shapely library. """ - -# current version # avoid a circular import in trimesh.base from . import bounds, collision, nsphere, primitives, smoothing, voxel @@ -36,9 +34,7 @@ from .exceptions import ExceptionWrapper path = ExceptionWrapper(E) -# explicitly list imports in __all__ -# as otherwise flake8 gets mad -__all__ = [__version__, +__all__ = ["__version__", 'Trimesh', 'PointCloud', 'Scene', diff --git a/trimesh/version.py b/trimesh/version.py index 5a80da940..032e7c9d5 100644 --- a/trimesh/version.py +++ b/trimesh/version.py @@ -1,7 +1,12 @@ -from importlib.metadata import version - -# will get the version the package was installed with -__version__ = version('trimesh') +# get the version trimesh was installed with from metadata +try: + # Python >= 3.8 + from importlib.metadata import version + __version__ = version('trimesh') +except BaseException: + # Python < 3.8 + from pkg_resources import get_distribution + __version__ = get_distribution('trimesh').version if __name__ == '__main__': # print version if run directly i.e. in a CI script From 6675fee485ee309807bb4a3fd885ae1afff2bb05 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 28 Aug 2023 14:00:39 -0400 Subject: [PATCH 13/84] use target-version for ruff --- pyproject.toml | 1 + trimesh/exchange/binvox.py | 4 ++-- trimesh/exchange/dae.py | 2 +- trimesh/exchange/export.py | 2 +- trimesh/exchange/gltf.py | 8 ++++---- trimesh/exchange/load.py | 7 ++++--- trimesh/exchange/ply.py | 2 +- trimesh/interfaces/generic.py | 4 ++-- trimesh/interfaces/scad.py | 2 +- trimesh/path/exchange/svg_io.py | 12 ++++++------ trimesh/resolvers.py | 2 +- trimesh/scene/scene.py | 4 ++-- trimesh/util.py | 8 ++++---- trimesh/viewer/windowed.py | 32 ++++++++++++++++---------------- trimesh/voxel/encoding.py | 3 ++- 15 files changed, 48 insertions(+), 45 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 504ca9aae..956b15621 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -100,6 +100,7 @@ recommend = [ [tool.ruff] +target-version = "py37" # See https://github.com/charliermarsh/ruff#rules for error code definitions. 
select = [ # "ANN", # annotations diff --git a/trimesh/exchange/binvox.py b/trimesh/exchange/binvox.py index 54b9342fa..df5924113 100644 --- a/trimesh/exchange/binvox.py +++ b/trimesh/exchange/binvox.py @@ -430,7 +430,7 @@ def __init__( 'Maximum dimension using exact is 1024, got %d' % dimension) if file_type not in Binvoxer.SUPPORTED_OUTPUT_TYPES: raise ValueError( - 'file_type {} not in set of supported output types {}'.format(file_type, str(Binvoxer.SUPPORTED_OUTPUT_TYPES))) + f'file_type {file_type} not in set of supported output types {str(Binvoxer.SUPPORTED_OUTPUT_TYPES)}') args = [encoder, '-d', str(dimension), '-t', file_type] if exact: args.append('-e') @@ -515,7 +515,7 @@ def __call__(self, path, overwrite=False): ext = ext[1:].lower() if ext not in Binvoxer.SUPPORTED_INPUT_TYPES: raise ValueError( - 'file_type {} not in set of supported input types {}'.format(ext, str(Binvoxer.SUPPORTED_INPUT_TYPES))) + f'file_type {ext} not in set of supported input types {str(Binvoxer.SUPPORTED_INPUT_TYPES)}') out_path = f'{head}.{self._file_type}' if os.path.isfile(out_path) and not overwrite: raise OSError('Attempted to voxelize object at existing path') diff --git a/trimesh/exchange/dae.py b/trimesh/exchange/dae.py index 95356bdb9..dbf10b2fb 100644 --- a/trimesh/exchange/dae.py +++ b/trimesh/exchange/dae.py @@ -99,7 +99,7 @@ def export_collada(mesh, **kwargs): import collada meshes = mesh - if not isinstance(mesh, list | tuple | set | np.ndarray): + if not isinstance(mesh, (list, tuple, set, np.ndarray)): meshes = [mesh] c = collada.Collada() diff --git a/trimesh/exchange/export.py b/trimesh/exchange/export.py index d65bd2da0..7c5fadce7 100644 --- a/trimesh/exchange/export.py +++ b/trimesh/exchange/export.py @@ -67,7 +67,7 @@ def export_mesh(mesh, if file_type not in _mesh_exporters: raise ValueError('%s exporter not available!', file_type) - if isinstance(mesh, list | tuple | set | np.ndarray): + if isinstance(mesh, (list, tuple, set, np.ndarray)): faces = 0 for m in mesh: faces += len(m.faces) diff --git a/trimesh/exchange/gltf.py b/trimesh/exchange/gltf.py index 4c155510b..25748ae43 100644 --- a/trimesh/exchange/gltf.py +++ b/trimesh/exchange/gltf.py @@ -242,9 +242,9 @@ def export_glb( dtype=" 0 and v is not None - and len(v) > 0) + for k, v in bag.items() + if len(k) > 0 and v is not None + and len(v) > 0) def _encode(stuff): @@ -654,7 +654,7 @@ def _deep_same(original, other): # ndarrays will be converted to lists # but otherwise types should be identical if isinstance(original, np.ndarray): - assert isinstance(other, list | np.ndarray) + assert isinstance(other, (list, np.ndarray)) elif util.is_string(original): # handle python 2+3 unicode vs str assert util.is_string(other) @@ -662,11 +662,11 @@ def _deep_same(original, other): # otherwise they should be the same type assert isinstance(original, type(other)) - if isinstance(original, str | bytes): + if isinstance(original, (str, bytes)): # string and bytes should just be identical assert original == other return - elif isinstance(original, float | int | np.ndarray): + elif isinstance(original, (float, int, np.ndarray)): # for numeric classes use numpy magic comparison # which includes an epsilon for floating point assert np.allclose(original, other) diff --git a/trimesh/resolvers.py b/trimesh/resolvers.py index 532c0c67d..982469a59 100644 --- a/trimesh/resolvers.py +++ b/trimesh/resolvers.py @@ -257,7 +257,7 @@ def get(self, name): # get the stored data obj = archive[name] # if the dict is storing data as bytes just return - if 
isinstance(obj, bytes | str): + if isinstance(obj, (bytes, str)): return obj # otherwise get it as a file object # read file object from beginning diff --git a/trimesh/scene/scene.py b/trimesh/scene/scene.py index 392f30e96..28ca909cb 100644 --- a/trimesh/scene/scene.py +++ b/trimesh/scene/scene.py @@ -888,7 +888,7 @@ def subscene(self, node): graph.from_edgelist(edges) geometry_names = {e[2]['geometry'] for e in edges - if 'geometry' in e[2]} + if 'geometry' in e[2]} geometry = {k: self.geometry[k] for k in geometry_names} result = Scene(geometry=geometry, graph=graph) return result @@ -1092,7 +1092,7 @@ def scaled(self, scale): """ # convert 2D geometries to 3D for 3D scaling factors scale_is_3D = isinstance( - scale, list | tuple | np.ndarray) and len(scale) == 3 + scale, (list, tuple, np.ndarray)) and len(scale) == 3 if scale_is_3D and np.all(np.asarray(scale) == scale[0]): # scale is uniform diff --git a/trimesh/util.py b/trimesh/util.py index 872317031..a33127a81 100644 --- a/trimesh/util.py +++ b/trimesh/util.py @@ -2072,10 +2072,10 @@ def triangle_fans_to_faces(fans): """ faces = [np.transpose([ - fan[0]*np.ones(len(fan) - 2, dtype=int), - fan[1:-1], - fan[2:] - ]) for fan in fans] + fan[0] * np.ones(len(fan) - 2, dtype=int), + fan[1:-1], + fan[2:] + ]) for fan in fans] return np.concatenate(faces, axis=1) diff --git a/trimesh/viewer/windowed.py b/trimesh/viewer/windowed.py index b728d53cc..6d7a19dfc 100644 --- a/trimesh/viewer/windowed.py +++ b/trimesh/viewer/windowed.py @@ -170,27 +170,27 @@ def __init__(self, depth_size=24, double_buffer=True) super().__init__(config=conf, - visible=visible, - resizable=True, - width=resolution[0], - height=resolution[1], - caption=caption) + visible=visible, + resizable=True, + width=resolution[0], + height=resolution[1], + caption=caption) except pyglet.window.NoSuchConfigException: conf = gl.Config(double_buffer=True) super().__init__(config=conf, - resizable=True, - visible=visible, - width=resolution[0], - height=resolution[1], - caption=caption) + resizable=True, + visible=visible, + width=resolution[0], + height=resolution[1], + caption=caption) else: # window config was manually passed super().__init__(config=window_conf, - resizable=True, - visible=visible, - width=resolution[0], - height=resolution[1], - caption=caption) + resizable=True, + visible=visible, + width=resolution[0], + height=resolution[1], + caption=caption) # add scene geometry to viewer geometry self._update_vertex_list() @@ -284,7 +284,7 @@ def cleanup_geometries(self): graph = self.scene.graph # which parts of the graph still have geometry geom_keep = {graph[node][1] for - node in graph.nodes_geometry} + node in graph.nodes_geometry} # which geometries no longer need to be kept geom_delete = [geom for geom in self.vertex_list if geom not in geom_keep] diff --git a/trimesh/voxel/encoding.py b/trimesh/voxel/encoding.py index 723ea8642..8ec3d7981 100644 --- a/trimesh/voxel/encoding.py +++ b/trimesh/voxel/encoding.py @@ -301,7 +301,8 @@ def __init__(self, indices, values, shape=None): 'indices must be 2D, got shaped %s' % str(indices.shape)) if data['values'].shape != (indices.shape[0],): raise ValueError( - 'values and indices shapes inconsistent: {} and {}'.format(data['values'], data['indices'])) + 'values and indices shapes inconsistent: {} and {}'.format( + data['values'], data['indices'])) if shape is None: self._shape = tuple(data['indices'].max(axis=0) + 1) else: From 917c8b59791e487d0af51f05309c9d4642ba535f Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty 
Date: Mon, 28 Aug 2023 14:16:03 -0400 Subject: [PATCH 14/84] deprecate hash entry points --- tests/test_creation.py | 2 +- trimesh/caching.py | 66 ------------------------------------------ trimesh/parent.py | 55 ----------------------------------- 3 files changed, 1 insertion(+), 122 deletions(-) diff --git a/tests/test_creation.py b/tests/test_creation.py index 2781f4d23..5599c9810 100644 --- a/tests/test_creation.py +++ b/tests/test_creation.py @@ -126,7 +126,7 @@ def test_camera_marker(self): assert isinstance(meshes, list) # all meshes should be viewable type for mesh in meshes: - assert isinstance(mesh, g.trimesh.Trimesh | g.trimesh.path.Path3D) + assert isinstance(mesh, (g.trimesh.Trimesh, g.trimesh.path.Path3D)) def test_axis(self): # specify the size of the origin radius diff --git a/trimesh/caching.py b/trimesh/caching.py index ce429507f..9e71eebeb 100644 --- a/trimesh/caching.py +++ b/trimesh/caching.py @@ -21,7 +21,6 @@ """ import os import time -import warnings from functools import wraps import numpy as np @@ -203,30 +202,6 @@ def mutable(self): def mutable(self, value): self.flags.writeable = value - def hash(self): - warnings.warn( - '`array.hash()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `array.__hash__()` or `hash(array)`', - category=DeprecationWarning, stacklevel=2) - return self.__hash__() - - def crc(self): - warnings.warn( - '`array.crc()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `array.__hash__()` or `hash(array)`', - category=DeprecationWarning, stacklevel=2) - return self.__hash__() - - def md5(self): - warnings.warn( - '`array.md5()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `array.__hash__()` or `hash(array)`', - category=DeprecationWarning, stacklevel=2) - return self.__hash__() - def __hash__(self): """ Return a fast hash of the contents of the array. @@ -723,44 +698,3 @@ def __hash__(self): if v is not None and (not hasattr(v, '__len__') or len(v) > 0)], dtype=np.int64).tobytes()) - - def crc(self): - """ - Get a CRC reflecting everything in the DataStore. - - Returns - ---------- - crc : int - CRC of data - """ - warnings.warn( - '`array.crc()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `array.__hash__()` or `hash(array)`', - category=DeprecationWarning, stacklevel=2) - return self.__hash__() - - def fast_hash(self): - """ - Get a CRC32 or xxhash.xxh64 reflecting the DataStore. - - Returns - ------------ - hashed : int - Checksum of data - """ - warnings.warn( - '`array.fast_hash()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `array.__hash__()` or `hash(array)`', - category=DeprecationWarning, stacklevel=2) - return self.__hash__() - - def hash(self): - warnings.warn( - '`array.hash()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `array.__hash__()` or `hash(array)`', - category=DeprecationWarning, stacklevel=2) - - return self.__hash__() diff --git a/trimesh/parent.py b/trimesh/parent.py index 5f63b7eb9..e8a70b7d3 100644 --- a/trimesh/parent.py +++ b/trimesh/parent.py @@ -5,7 +5,6 @@ The base class for Trimesh, PointCloud, and Scene objects """ import abc -import warnings import numpy as np @@ -40,60 +39,6 @@ def apply_transform(self, matrix): def is_empty(self): pass - def crc(self): - """ - DEPRECATED OCTOBER 2023 : Use `hash(geometry)` - - Get a hash of the current geometry. 
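A minimal sketch of the migration this patch enforces, assuming a geometry built with `trimesh.creation.box` and the `__hash__` implementations that are kept:

    import trimesh

    mesh = trimesh.creation.box()
    # the removed entry points mesh.md5(), mesh.crc() and mesh.hash()
    # all forwarded to __hash__, so the builtin is the replacement
    digest = hash(mesh)
    # copies of identical geometry should still hash the same
    assert hash(mesh.copy()) == digest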
- - Returns - --------- - hash : int - Hash of current graph and geometry. - """ - warnings.warn( - '`geometry.crc()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `geometry.__hash__()` or `hash(geometry)`', - category=DeprecationWarning, stacklevel=2) - return self.__hash__() - - def hash(self): - """ - DEPRECATED OCTOBER 2023 : Use `hash(geometry)` - - Get a hash of the current geometry. - - Returns - --------- - hash : int - Hash of current graph and geometry. - """ - warnings.warn( - '`geometry.hash()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `geometry.__hash__()` or `hash(geometry)`', - category=DeprecationWarning, stacklevel=2) - return self.__hash__() - - def md5(self): - """ - DEPRECATED OCTOBER 2023 : Use `hash(geometry)` - - Get a hash of the current geometry. - - Returns - --------- - hash : int - Hash of current graph and geometry. - """ - warnings.warn( - '`geometry.md5()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `geometry.__hash__()` or `hash(geometry)`', - category=DeprecationWarning, stacklevel=2) - return self.__hash__() - def __hash__(self): """ Get a hash of the current geometry. From 098ebd46568bd0d8397930c3c0782c560b5b3f79 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 28 Aug 2023 14:53:17 -0400 Subject: [PATCH 15/84] fix test_repair --- tests/test_depr.py | 39 ------------------------ tests/test_repair.py | 71 ++++++++++++++++++++------------------------ 2 files changed, 33 insertions(+), 77 deletions(-) delete mode 100644 tests/test_depr.py diff --git a/tests/test_depr.py b/tests/test_depr.py deleted file mode 100644 index 992cebf8d..000000000 --- a/tests/test_depr.py +++ /dev/null @@ -1,39 +0,0 @@ -try: - from . 
import generic as g -except BaseException: - import generic as g - - -class DepTest(g.unittest.TestCase): - - def test_deprecated(self): - - tests = [g.get_mesh('2D/wrench.dxf'), - g.trimesh.creation.box()] - - # todo : properly hash transform trees - # so that copies of scenes hash the same - # g.get_mesh('cycloidal.3DXML')] - - for m in tests: - copy = m.copy() - # the modern cool way of hashing - assert hash(m) == hash(copy) - assert m.__hash__() == copy.__hash__() - assert m.identifier_hash == copy.identifier_hash - - # october 2023 deprecated ways of hashing - # geometries - assert m.md5() == copy.md5() - assert m.crc() == copy.crc() - assert m.hash() == copy.hash() - assert m.identifier_md5 == copy.identifier_md5 - # trackedarray - assert m.vertices.md5() == copy.vertices.md5() - assert m.vertices.hash() == copy.vertices.hash() - assert m.vertices.crc() == copy.vertices.crc() - - -if __name__ == '__main__': - g.trimesh.util.attach_to_log() - g.unittest.main() diff --git a/tests/test_repair.py b/tests/test_repair.py index f02d300da..a49beda09 100644 --- a/tests/test_repair.py +++ b/tests/test_repair.py @@ -5,18 +5,18 @@ class RepairTests(g.unittest.TestCase): - def test_fill_holes(self): - for mesh_name in ['unit_cube.STL', - 'machinist.XAML', - 'round.stl', - 'sphere.ply', - 'teapot.stl', - 'soup.stl', - 'featuretype.STL', - 'angle_block.STL', - 'quadknot.obj']: - + for mesh_name in [ + "unit_cube.STL", + "machinist.XAML", + "round.stl", + "sphere.ply", + "teapot.stl", + "soup.stl", + "featuretype.STL", + "angle_block.STL", + "quadknot.obj", + ]: mesh = g.get_mesh(mesh_name) if not mesh.is_watertight: # output of fill_holes should match watertight status @@ -24,21 +24,16 @@ def test_fill_holes(self): assert returned == mesh.is_watertight continue - hashes = [{mesh._data.__hash__(), - mesh._data.__hash__(), - mesh._data.fast_hash()}] + hashes = [{mesh._data.__hash__(), hash(mesh)}] mesh.faces = mesh.faces[1:-1] assert not mesh.is_watertight assert not mesh.is_volume # color some faces - g.trimesh.repair.broken_faces( - mesh, color=[255, 0, 0, 255]) + g.trimesh.repair.broken_faces(mesh, color=[255, 0, 0, 255]) - hashes.append({mesh._data.__hash__(), - mesh._data.__hash__(), - mesh._data.fast_hash()}) + hashes.append({mesh._data.__hash__(), hash(mesh)}) assert hashes[0] != hashes[1] @@ -49,14 +44,11 @@ def test_fill_holes(self): assert mesh.is_watertight assert mesh.is_winding_consistent - hashes.append({mesh._data.__hash__(), - mesh._data.__hash__(), - mesh._data.fast_hash()}) + hashes.append({mesh._data.__hash__(), hash(mesh)}) assert hashes[1] != hashes[2] # try broken faces on a watertight mesh - g.trimesh.repair.broken_faces( - mesh, color=[255, 255, 0, 255]) + g.trimesh.repair.broken_faces(mesh, color=[255, 255, 0, 255]) def test_fix_normals(self): for mesh in g.get_meshes(5): @@ -68,16 +60,20 @@ def test_winding(self): them back. 
""" - meshes = [g.get_mesh(i) for i in - ['unit_cube.STL', - 'machinist.XAML', - 'round.stl', - 'quadknot.obj', - 'soup.stl']] + meshes = [ + g.get_mesh(i) + for i in [ + "unit_cube.STL", + "machinist.XAML", + "round.stl", + "quadknot.obj", + "soup.stl", + ] + ] for i, mesh in enumerate(meshes): # turn scenes into multibody meshes - if g.trimesh.util.is_instance_named(mesh, 'Scene'): + if g.trimesh.util.is_instance_named(mesh, "Scene"): meta = mesh.metadata meshes[i] = mesh.dump().sum() meshes[i].metadata = meta @@ -100,7 +96,7 @@ def test_winding(self): assert mesh.is_winding_consistent == winding # save timings - timing[mesh.metadata['file_name']] = g.time.time() - tic + timing[mesh.metadata["file_name"]] = g.time.time() - tic # print timings as a warning g.log.warning(g.json.dumps(timing, indent=4)) @@ -124,7 +120,7 @@ def test_multi(self): Try repairing a multibody geometry """ # create a multibody mesh with two cubes - a = g.get_mesh('unit_cube.STL') + a = g.get_mesh("unit_cube.STL") b = a.copy() b.apply_translation([2, 0, 0]) m = a + b @@ -169,7 +165,6 @@ def test_flip(self): assert g.np.isclose(m.volume, a.volume * 2.0) def test_fan(self): - # start by creating an icosphere and removing # all faces that include a single vertex to make # a nice hole in the mesh @@ -193,16 +188,16 @@ def test_fan(self): # should be an (n, 3) int assert len(stitch.shape) == 2 assert stitch.shape[1] == 3 - assert stitch.dtype.kind == 'i' + assert stitch.dtype.kind == "i" # now check our stitch to see if it handled the hole repair = g.trimesh.Trimesh( - vertices=m.vertices.copy(), - faces=g.np.vstack((m.faces, stitch))) + vertices=m.vertices.copy(), faces=g.np.vstack((m.faces, stitch)) + ) assert repair.is_watertight assert repair.is_winding_consistent -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() From fe7ceec717a9728c42ea04ef50373feb9e6970bf Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 28 Aug 2023 16:44:56 -0400 Subject: [PATCH 16/84] convert trimesh.transformations to be numpy-only --- tests/test_transformations.py | 56 +++--- trimesh/transformations.py | 341 ++++++++++++++++++---------------- 2 files changed, 202 insertions(+), 195 deletions(-) diff --git a/tests/test_transformations.py b/tests/test_transformations.py index 672fc3954..c31bdf9ab 100644 --- a/tests/test_transformations.py +++ b/tests/test_transformations.py @@ -5,7 +5,6 @@ class TransformTest(g.unittest.TestCase): - def test_doctest(self): """ Run doctests on transformations, which checks docstrings @@ -29,10 +28,13 @@ def test_doctest(self): # search for interactive sessions in docstrings and verify they work # they are super unreliable and depend on janky string formatting - results = doctest.testmod(trimesh.transformations, - verbose=False, - raise_on_error=True) - g.log.info(f'transformations {str(results)}') + results = doctest.testmod( + trimesh.transformations, verbose=False, raise_on_error=False + ) + + if results.failed > 0: + raise ValueError(str(results)) + g.log.debug(str(results)) def test_downstream(self): """ @@ -70,9 +72,8 @@ def test_around(self): for i, p in enumerate(points): offset = g.random(2) matrix = g.trimesh.transformations.planar_matrix( - theta=g.random() + .1, - offset=offset, - point=p) + theta=g.random() + 0.1, offset=offset, point=p + ) # apply the matrix check = g.trimesh.transform_points(points, matrix) @@ -103,9 +104,7 @@ def test_rotation(self): rotation_matrix = g.trimesh.transformations.rotation_matrix R = 
rotation_matrix(g.np.pi / 2, [0, 0, 1], [1, 0, 0]) - assert g.np.allclose(g.np.dot(R, - [0, 0, 0, 1]), - [1, -1, 0, 1]) + assert g.np.allclose(g.np.dot(R, [0, 0, 0, 1]), [1, -1, 0, 1]) angle = (g.random() - 0.5) * (2 * g.np.pi) direc = g.random(3) - 0.5 @@ -121,23 +120,16 @@ def test_rotation(self): I = g.np.identity(4, g.np.float64) # NOQA assert g.np.allclose(I, rotation_matrix(g.np.pi * 2, direc)) - assert g.np.allclose( - 2, - g.np.trace(rotation_matrix(g.np.pi / 2, - direc, point))) + assert g.np.allclose(2, g.np.trace(rotation_matrix(g.np.pi / 2, direc, point))) # test symbolic if g.sp is not None: - angle = g.sp.Symbol('angle') + angle = g.sp.Symbol("angle") Rs = rotation_matrix(angle, [0, 0, 1], [1, 0, 0]) - R = g.np.array(Rs.subs( - angle, - g.np.pi / 2.0).evalf()).astype(g.np.float64) + R = g.np.array(Rs.subs(angle, g.np.pi / 2.0).evalf()).astype(g.np.float64) - assert g.np.allclose( - g.np.dot(R, [0, 0, 0, 1]), - [1, -1, 0, 1]) + assert g.np.allclose(g.np.dot(R, [0, 0, 0, 1]), [1, -1, 0, 1]) def test_tiny(self): """ @@ -145,15 +137,13 @@ def test_tiny(self): very small triangles. """ for validate in [False, True]: - m = g.get_mesh('ADIS16480.STL', validate=validate) - m.apply_scale(.001) + m = g.get_mesh("ADIS16480.STL", validate=validate) + m.apply_scale(0.001) m._cache.clear() - g.np.nonzero(g.np.linalg.norm( - m.face_normals, - axis=1) < 1e-3) + g.np.nonzero(g.np.linalg.norm(m.face_normals, axis=1) < 1e-3) m.apply_transform( - g.trimesh.transformations.rotation_matrix( - g.np.pi / 4, [0, 0, 1])) + g.trimesh.transformations.rotation_matrix(g.np.pi / 4, [0, 0, 1]) + ) def test_quat(self): """ @@ -184,11 +174,11 @@ def test_quat(self): # all random matrices should be rigid transforms assert all(is_rigid(T) for T in random_matrix(num=100)) # random quaternions should all be unit vector - assert g.np.allclose(g.np.linalg.norm(random_quat(num=100), - axis=1), - 1.0, atol=1e-6) + assert g.np.allclose( + g.np.linalg.norm(random_quat(num=100), axis=1), 1.0, atol=1e-6 + ) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/trimesh/transformations.py b/trimesh/transformations.py index 0cfc67198..fc8efed44 100644 --- a/trimesh/transformations.py +++ b/trimesh/transformations.py @@ -181,7 +181,7 @@ True >>> np.allclose(trans, [1, 2, 3]) True ->>> np.allclose(shear, [0, math.tan(beta), 0]) +>>> np.allclose(shear, [0, np.tan(beta), 0]) True >>> is_same_transform(R, euler_matrix(axes='sxyz', *angles)) True @@ -196,17 +196,10 @@ """ - -import math - import numpy as np -__version__ = '2017.02.17' -__docformat__ = 'restructuredtext en' -__all__ = () - _IDENTITY = np.eye(4) -_IDENTITY.flags['WRITEABLE'] = False +_IDENTITY.flags["WRITEABLE"] = False def identity_matrix(): @@ -330,14 +323,14 @@ def rotation_matrix(angle, direction, point=None): Examples ------------- - >>> R = rotation_matrix(math.pi/2, [0, 0, 1], [1, 0, 0]) + >>> R = rotation_matrix(np.pi/2, [0, 0, 1], [1, 0, 0]) >>> np.allclose(np.dot(R, [0, 0, 0, 1]), [1, -1, 0, 1]) True - >>> angle = (random.random() - 0.5) * (2*math.pi) + >>> angle = (random.random() - 0.5) * (2*np.pi) >>> direc = np.random.random(3) - 0.5 >>> point = np.random.random(3) - 0.5 >>> R0 = rotation_matrix(angle, direc, point) - >>> R1 = rotation_matrix(angle-2*math.pi, direc, point) + >>> R1 = rotation_matrix(angle-2*np.pi, direc, point) >>> is_same_transform(R0, R1) True >>> R0 = rotation_matrix(angle, direc, point) @@ -345,22 +338,23 @@ def rotation_matrix(angle, direction, point=None): >>> 
is_same_transform(R0, R1) True >>> I = np.identity(4, np.float64) - >>> np.allclose(I, rotation_matrix(math.pi*2, direc)) + >>> np.allclose(I, rotation_matrix(np.pi*2, direc)) True - >>> np.allclose(2, np.trace(rotation_matrix(math.pi/2,direc,point))) + >>> np.allclose(2, np.trace(rotation_matrix(np.pi/2,direc,point))) True """ - if type(angle).__name__ == 'Symbol': + if type(angle).__name__ == "Symbol": # special case sympy symbolic angles import sympy as sp + symbolic = True sina = sp.sin(angle) cosa = sp.cos(angle) else: symbolic = False - sina = math.sin(angle) - cosa = math.cos(angle) + sina = np.sin(angle) + cosa = np.cos(angle) direction = unit_vector(direction[:3]) # rotation matrix around unit vector @@ -368,9 +362,13 @@ def rotation_matrix(angle, direction, point=None): M[:3, :3] += np.outer(direction, direction) * (1.0 - cosa) direction = direction * sina - M[:3, :3] += np.array([[0.0, -direction[2], direction[1]], - [direction[2], 0.0, -direction[0]], - [-direction[1], direction[0], 0.0]]) + M[:3, :3] += np.array( + [ + [0.0, -direction[2], direction[1]], + [direction[2], 0.0, -direction[0]], + [-direction[1], direction[0], 0.0], + ] + ) # if point is specified, rotation is not around origin if point is not None: @@ -387,7 +385,7 @@ def rotation_matrix(angle, direction, point=None): def rotation_from_matrix(matrix): """Return rotation angle and axis from rotation matrix. - >>> angle = (random.random() - 0.5) * (2*math.pi) + >>> angle = (random.random() - 0.5) * (2*np.pi) >>> direc = np.random.random(3) - 0.5 >>> point = np.random.random(3) - 0.5 >>> R0 = rotation_matrix(angle, direc, point) @@ -415,15 +413,12 @@ def rotation_from_matrix(matrix): # rotation angle depending on direction cosa = (np.trace(R33) - 1.0) / 2.0 if abs(direction[2]) > 1e-8: - sina = (R[1, 0] + (cosa - 1.0) * direction[0] - * direction[1]) / direction[2] + sina = (R[1, 0] + (cosa - 1.0) * direction[0] * direction[1]) / direction[2] elif abs(direction[1]) > 1e-8: - sina = (R[0, 2] + (cosa - 1.0) * direction[0] - * direction[2]) / direction[1] + sina = (R[0, 2] + (cosa - 1.0) * direction[0] * direction[2]) / direction[1] else: - sina = (R[2, 1] + (cosa - 1.0) * direction[1] - * direction[2]) / direction[0] - angle = math.atan2(sina, cosa) + sina = (R[2, 1] + (cosa - 1.0) * direction[1] * direction[2]) / direction[0] + angle = np.arctan2(sina, cosa) return angle, direction, point @@ -502,8 +497,7 @@ def scale_from_matrix(matrix): return factor, origin, direction -def projection_matrix(point, normal, direction=None, - perspective=None, pseudo=False): +def projection_matrix(point, normal, direction=None, perspective=None, pseudo=False): """Return matrix to project onto plane defined by point and normal. Using either perspective point, projection direction, or none of both. 
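A hedged orientation example with hand-picked values: when neither a direction nor a perspective point is given the result is an orthogonal projection, so a point should simply lose its out-of-plane component.

>>> P = projection_matrix([0, 0, 0], [0, 0, 1])
>>> np.allclose(np.dot(P, [1, 2, 3, 1])[:3], [1, 2, 0])
True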
@@ -539,8 +533,7 @@ def projection_matrix(point, normal, direction=None, normal = unit_vector(normal[:3]) if perspective is not None: # perspective projection - perspective = np.array(perspective[:3], dtype=np.float64, - copy=False) + perspective = np.array(perspective[:3], dtype=np.float64, copy=False) M[0, 0] = M[1, 1] = M[2, 2] = np.dot(perspective - point, normal) M[:3, :3] -= np.outer(perspective, normal) if pseudo: @@ -626,11 +619,10 @@ def projection_from_matrix(matrix, pseudo=False): # perspective projection i = np.where(abs(np.real(w)) > 1e-8)[0] if not len(i): - raise ValueError( - "no eigenvector not corresponding to eigenvalue 0") + raise ValueError("no eigenvector not corresponding to eigenvalue 0") point = np.real(V[:, i[-1]]).squeeze() point /= point[3] - normal = - M[3, :3] + normal = -M[3, :3] perspective = M[:3, 3] / np.dot(point[:3], normal) if pseudo: perspective -= normal @@ -681,15 +673,19 @@ def clip_matrix(left, right, bottom, top, near, far, perspective=False): if near <= _EPS: raise ValueError("invalid frustum: near <= 0") t = 2.0 * near - M = [[t / (left - right), 0.0, (right + left) / (right - left), 0.0], - [0.0, t / (bottom - top), (top + bottom) / (top - bottom), 0.0], - [0.0, 0.0, (far + near) / (near - far), t * far / (far - near)], - [0.0, 0.0, -1.0, 0.0]] + M = [ + [t / (left - right), 0.0, (right + left) / (right - left), 0.0], + [0.0, t / (bottom - top), (top + bottom) / (top - bottom), 0.0], + [0.0, 0.0, (far + near) / (near - far), t * far / (far - near)], + [0.0, 0.0, -1.0, 0.0], + ] else: - M = [[2.0 / (right - left), 0.0, 0.0, (right + left) / (left - right)], - [0.0, 2.0 / (top - bottom), 0.0, (top + bottom) / (bottom - top)], - [0.0, 0.0, 2.0 / (far - near), (far + near) / (near - far)], - [0.0, 0.0, 0.0, 1.0]] + M = [ + [2.0 / (right - left), 0.0, 0.0, (right + left) / (left - right)], + [0.0, 2.0 / (top - bottom), 0.0, (top + bottom) / (bottom - top)], + [0.0, 0.0, 2.0 / (far - near), (far + near) / (near - far)], + [0.0, 0.0, 0.0, 1.0], + ] return np.array(M) @@ -704,7 +700,7 @@ def shear_matrix(angle, direction, point, normal): given by the angle of P-P'-P", where P' is the orthogonal projection of P onto the shear plane. 
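A concrete hedged sketch to complement the randomized round-trip below: a 45 degree shear along +X over the XY plane should displace a point by exactly its height above that plane.

>>> S = shear_matrix(np.pi / 4, [1, 0, 0], [0, 0, 0], [0, 0, 1])
>>> np.allclose(np.dot(S, [0, 0, 1, 1])[:3], [1, 0, 1])
True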
- >>> angle = (random.random() - 0.5) * 4*math.pi + >>> angle = (random.random() - 0.5) * 4*np.pi >>> direct = np.random.random(3) - 0.5 >>> point = np.random.random(3) - 0.5 >>> normal = np.cross(direct, np.random.random(3)) @@ -717,7 +713,7 @@ def shear_matrix(angle, direction, point, normal): direction = unit_vector(direction[:3]) if abs(np.dot(normal, direction)) > 1e-6: raise ValueError("direction and normal vectors are not orthogonal") - angle = math.tan(angle) + angle = np.tan(angle) M = np.identity(4) M[:3, :3] += angle * np.outer(direction, normal) M[:3, 3] = -angle * np.dot(point[:3], normal) * direction @@ -759,7 +755,7 @@ def shear_from_matrix(matrix): direction = np.dot(M33 - np.identity(3), normal) angle = vector_norm(direction) direction /= angle - angle = math.atan(angle) + angle = np.arctan(angle) # point: eigenvector corresponding to eigenvalue 1 w, V = np.linalg.eig(M) @@ -811,7 +807,7 @@ def decompose_matrix(matrix): if not np.linalg.det(P): raise ValueError("matrix is singular") - scale = np.zeros((3, )) + scale = np.zeros((3,)) shear = [0.0, 0.0, 0.0] angles = [0.0, 0.0, 0.0] @@ -844,19 +840,20 @@ def decompose_matrix(matrix): np.negative(scale, scale) np.negative(row, row) - angles[1] = math.asin(-row[0, 2]) - if math.cos(angles[1]): - angles[0] = math.atan2(row[1, 2], row[2, 2]) - angles[2] = math.atan2(row[0, 1], row[0, 0]) + angles[1] = np.arcsin(-row[0, 2]) + if np.cos(angles[1]): + angles[0] = np.arctan2(row[1, 2], row[2, 2]) + angles[2] = np.arctan2(row[0, 1], row[0, 0]) else: - angles[0] = math.atan2(-row[2, 1], row[1, 1]) + angles[0] = np.arctan2(-row[2, 1], row[1, 1]) angles[2] = 0.0 return scale, shear, angles, translate, perspective -def compose_matrix(scale=None, shear=None, angles=None, translate=None, - perspective=None): +def compose_matrix( + scale=None, shear=None, angles=None, translate=None, perspective=None +): """Return transformation matrix from sequence of transformations. This is the inverse of the decompose_matrix function. 
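A hedged single-component sketch with hand-picked values: composing from only a translation should place that vector in the last column of the homogeneous matrix.

>>> M = compose_matrix(translate=[1, 2, 3])
>>> np.allclose(M[:3, 3], [1, 2, 3])
True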
@@ -870,7 +867,7 @@ def compose_matrix(scale=None, shear=None, angles=None, translate=None, >>> scale = np.random.random(3) - 0.5 >>> shear = np.random.random(3) - 0.5 - >>> angles = (np.random.random(3) - 0.5) * (2*math.pi) + >>> angles = (np.random.random(3) - 0.5) * (2*np.pi) >>> trans = np.random.random(3) - 0.5 >>> persp = np.random.random(4) - 0.5 >>> M0 = compose_matrix(scale, shear, angles, trans, persp) @@ -890,7 +887,7 @@ def compose_matrix(scale=None, shear=None, angles=None, translate=None, T[:3, 3] = translate[:3] M = np.dot(M, T) if angles is not None: - R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz') + R = euler_matrix(angles[0], angles[1], angles[2], "sxyz") M = np.dot(M, R) if shear is not None: Z = np.identity(4) @@ -928,11 +925,14 @@ def orthogonalization_matrix(lengths, angles): sina, sinb, _ = np.sin(angles) cosa, cosb, cosg = np.cos(angles) co = (cosa * cosb - cosg) / (sina * sinb) - return np.array([ - [a * sinb * math.sqrt(1.0 - co * co), 0.0, 0.0, 0.0], - [-a * sinb * co, b * sina, 0.0, 0.0], - [a * cosb, b * cosa, c, 0.0], - [0.0, 0.0, 0.0, 1.0]]) + return np.array( + [ + [a * sinb * np.sqrt(1.0 - co * co), 0.0, 0.0, 0.0], + [-a * sinb * co, b * sina, 0.0, 0.0], + [a * cosb, b * cosa, c, 0.0], + [0.0, 0.0, 0.0, 1.0], + ] + ) def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True): @@ -995,7 +995,7 @@ def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True): u, s, vh = np.linalg.svd(A.T) vh = vh[:ndims].T B = vh[:ndims] - C = vh[ndims:2 * ndims] + C = vh[ndims : 2 * ndims] t = np.dot(C, np.linalg.pinv(B)) t = np.concatenate((t, np.zeros((ndims, 1))), axis=1) M = np.vstack((t, ((0.0,) * ndims) + (1.0,))) @@ -1017,10 +1017,12 @@ def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True): xx, yy, zz = np.sum(v0 * v1, axis=1) xy, yz, zx = np.sum(v0 * np.roll(v1, -1, axis=0), axis=1) xz, yx, zy = np.sum(v0 * np.roll(v1, -2, axis=0), axis=1) - N = [[xx + yy + zz, 0.0, 0.0, 0.0], - [yz - zy, xx - yy - zz, 0.0, 0.0], - [zx - xz, xy + yx, yy - xx - zz, 0.0], - [xy - yx, zx + xz, yz + zy, zz - xx - yy]] + N = [ + [xx + yy + zz, 0.0, 0.0, 0.0], + [yz - zy, xx - yy - zz, 0.0, 0.0], + [zx - xz, xy + yx, yy - xx - zz, 0.0], + [xy - yx, zx + xz, yz + zy, zz - xx - yy], + ] # quaternion: eigenvector corresponding to most positive eigenvalue w, V = np.linalg.eigh(N) q = V[:, np.argmax(w)] @@ -1032,7 +1034,7 @@ def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True): # Affine transformation; scale is ratio of RMS deviations from centroid v0 *= v0 v1 *= v1 - M[:ndims, :ndims] *= math.sqrt(np.sum(v1) / np.sum(v0)) + M[:ndims, :ndims] *= np.sqrt(np.sum(v1) / np.sum(v0)) # move centroids back M = np.dot(np.linalg.inv(M1), np.dot(M, M0)) @@ -1087,11 +1089,10 @@ def superimposition_matrix(v0, v1, scale=False, usesvd=True): """ v0 = np.array(v0, dtype=np.float64, copy=False)[:3] v1 = np.array(v1, dtype=np.float64, copy=False)[:3] - return affine_matrix_from_points(v0, v1, shear=False, - scale=scale, usesvd=usesvd) + return affine_matrix_from_points(v0, v1, shear=False, scale=scale, usesvd=usesvd) -def euler_matrix(ai, aj, ak, axes='sxyz'): +def euler_matrix(ai, aj, ak, axes="sxyz"): """Return homogeneous rotation matrix from Euler angles and axis sequence. 
ai, aj, ak : Euler's roll, pitch and yaw angles @@ -1103,7 +1104,7 @@ def euler_matrix(ai, aj, ak, axes='sxyz'): >>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1)) >>> np.allclose(np.sum(R[0]), -0.383436184) True - >>> ai, aj, ak = (4*math.pi) * (np.random.random(3) - 0.5) + >>> ai, aj, ak = (4*np.pi) * (np.random.random(3) - 0.5) >>> for axes in _AXES2TUPLE.keys(): ... R = euler_matrix(ai, aj, ak, axes) >>> for axes in _TUPLE2AXES.keys(): @@ -1125,8 +1126,8 @@ def euler_matrix(ai, aj, ak, axes='sxyz'): if parity: ai, aj, ak = -ai, -aj, -ak - si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak) - ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak) + si, sj, sk = np.sin(ai), np.sin(aj), np.sin(ak) + ci, cj, ck = np.cos(ai), np.cos(aj), np.cos(ak) cc, cs = ci * ck, ci * sk sc, ss = si * ck, si * sk @@ -1154,7 +1155,7 @@ def euler_matrix(ai, aj, ak, axes='sxyz'): return M -def euler_from_matrix(matrix, axes='sxyz'): +def euler_from_matrix(matrix, axes="sxyz"): """Return Euler angles from rotation matrix for specified axis sequence. axes : One of 24 axis sequences as string or encoded tuple @@ -1166,7 +1167,7 @@ def euler_from_matrix(matrix, axes='sxyz'): >>> R1 = euler_matrix(al, be, ga, 'syxz') >>> np.allclose(R0, R1) True - >>> angles = (4*math.pi) * (np.random.random(3) - 0.5) + >>> angles = (4*np.pi) * (np.random.random(3) - 0.5) >>> for axes in _AXES2TUPLE.keys(): ... R0 = euler_matrix(axes=axes, *angles) ... R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes)) @@ -1185,24 +1186,24 @@ def euler_from_matrix(matrix, axes='sxyz'): M = np.array(matrix, dtype=np.float64, copy=False)[:3, :3] if repetition: - sy = math.sqrt(M[i, j] * M[i, j] + M[i, k] * M[i, k]) + sy = np.sqrt(M[i, j] * M[i, j] + M[i, k] * M[i, k]) if sy > _EPS: - ax = math.atan2(M[i, j], M[i, k]) - ay = math.atan2(sy, M[i, i]) - az = math.atan2(M[j, i], -M[k, i]) + ax = np.arctan2(M[i, j], M[i, k]) + ay = np.arctan2(sy, M[i, i]) + az = np.arctan2(M[j, i], -M[k, i]) else: - ax = math.atan2(-M[j, k], M[j, j]) - ay = math.atan2(sy, M[i, i]) + ax = np.arctan2(-M[j, k], M[j, j]) + ay = np.arctan2(sy, M[i, i]) az = 0.0 else: - cy = math.sqrt(M[i, i] * M[i, i] + M[j, i] * M[j, i]) + cy = np.sqrt(M[i, i] * M[i, i] + M[j, i] * M[j, i]) if cy > _EPS: - ax = math.atan2(M[k, j], M[k, k]) - ay = math.atan2(-M[k, i], cy) - az = math.atan2(M[j, i], M[i, i]) + ax = np.arctan2(M[k, j], M[k, k]) + ay = np.arctan2(-M[k, i], cy) + az = np.arctan2(M[j, i], M[i, i]) else: - ax = math.atan2(-M[j, k], M[j, j]) - ay = math.atan2(-M[k, i], cy) + ax = np.arctan2(-M[j, k], M[j, j]) + ay = np.arctan2(-M[k, i], cy) az = 0.0 if parity: @@ -1212,7 +1213,7 @@ def euler_from_matrix(matrix, axes='sxyz'): return ax, ay, az -def euler_from_quaternion(quaternion, axes='sxyz'): +def euler_from_quaternion(quaternion, axes="sxyz"): """Return Euler angles from quaternion for specified axis sequence. >>> angles = euler_from_quaternion([0.99810947, 0.06146124, 0, 0]) @@ -1223,7 +1224,7 @@ def euler_from_quaternion(quaternion, axes='sxyz'): return euler_from_matrix(quaternion_matrix(quaternion), axes) -def quaternion_from_euler(ai, aj, ak, axes='sxyz'): +def quaternion_from_euler(ai, aj, ak, axes="sxyz"): """Return quaternion from Euler angles and axis sequence. 
ai, aj, ak : Euler's roll, pitch and yaw angles @@ -1252,18 +1253,18 @@ def quaternion_from_euler(ai, aj, ak, axes='sxyz'): ai /= 2.0 aj /= 2.0 ak /= 2.0 - ci = math.cos(ai) - si = math.sin(ai) - cj = math.cos(aj) - sj = math.sin(aj) - ck = math.cos(ak) - sk = math.sin(ak) + ci = np.cos(ai) + si = np.sin(ai) + cj = np.cos(aj) + sj = np.sin(aj) + ck = np.cos(ak) + sk = np.sin(ak) cc = ci * ck cs = ci * sk sc = si * ck ss = si * sk - q = np.empty((4, )) + q = np.empty((4,)) if repetition: q[0] = cj * (cc - ss) q[i] = cj * (cs + sc) @@ -1291,8 +1292,8 @@ def quaternion_about_axis(angle, axis): q = np.array([0.0, axis[0], axis[1], axis[2]]) qlen = vector_norm(q) if qlen > _EPS: - q *= math.sin(angle / 2.0) / qlen - q[0] = math.cos(angle / 2.0) + q *= np.sin(angle / 2.0) / qlen + q[0] = np.cos(angle / 2.0) return q @@ -1315,15 +1316,13 @@ def quaternion_matrix(quaternion): """ - q = np.array(quaternion, - dtype=np.float64, - copy=True).reshape((-1, 4)) - n = np.einsum('ij,ij->i', q, q) + q = np.array(quaternion, dtype=np.float64, copy=True).reshape((-1, 4)) + n = np.einsum("ij,ij->i", q, q) # how many entries do we have num_qs = len(n) identities = n < _EPS q[~identities, :] *= np.sqrt(2.0 / n[~identities, None]) - q = np.einsum('ij,ik->ikj', q, q) + q = np.einsum("ij,ik->ikj", q, q) # store the result ret = np.zeros((num_qs, 4, 4)) @@ -1386,7 +1385,7 @@ def quaternion_from_matrix(matrix, isprecise=False): """ M = np.array(matrix, dtype=np.float64, copy=False)[:4, :4] if isprecise: - q = np.empty((4, )) + q = np.empty((4,)) t = np.trace(M) if t > M[3, 3]: q[0] = t @@ -1405,7 +1404,7 @@ def quaternion_from_matrix(matrix, isprecise=False): q[k] = M[k, i] + M[i, k] q[3] = M[k, j] - M[j, k] q = q[[3, 0, 1, 2]] - q *= 0.5 / math.sqrt(t * M[3, 3]) + q *= 0.5 / np.sqrt(t * M[3, 3]) else: m00 = M[0, 0] m01 = M[0, 1] @@ -1417,10 +1416,14 @@ def quaternion_from_matrix(matrix, isprecise=False): m21 = M[2, 1] m22 = M[2, 2] # symmetric matrix K - K = np.array([[m00 - m11 - m22, 0.0, 0.0, 0.0], - [m01 + m10, m11 - m00 - m22, 0.0, 0.0], - [m02 + m20, m12 + m21, m22 - m00 - m11, 0.0], - [m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22]]) + K = np.array( + [ + [m00 - m11 - m22, 0.0, 0.0, 0.0], + [m01 + m10, m11 - m00 - m22, 0.0, 0.0], + [m02 + m20, m12 + m21, m22 - m00 - m11, 0.0], + [m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22], + ] + ) K /= 3.0 # quaternion is eigenvector of K that corresponds to largest eigenvalue w, V = np.linalg.eigh(K) @@ -1440,10 +1443,15 @@ def quaternion_multiply(quaternion1, quaternion0): """ w0, x0, y0, z0 = quaternion0 w1, x1, y1, z1 = quaternion1 - return np.array([-x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0, - x1 * w0 + y1 * z0 - z1 * y0 + w1 * x0, - -x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0, - x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0], dtype=np.float64) + return np.array( + [ + -x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0, + x1 * w0 + y1 * z0 - z1 * y0 + w1 * x0, + -x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0, + x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0, + ], + dtype=np.float64, + ) def quaternion_conjugate(quaternion): @@ -1506,9 +1514,9 @@ def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True): >>> np.allclose(q, q1) True >>> q = quaternion_slerp(q0, q1, 0.5) - >>> angle = math.acos(np.dot(q0, q)) - >>> np.allclose(2, math.acos(np.dot(q0, q1)) / angle) or \ - np.allclose(2, math.acos(-np.dot(q0, q1)) / angle) + >>> angle = np.arccos(np.dot(q0, q)) + >>> np.allclose(2, np.arccos(np.dot(q0, q1)) / angle) or \ + np.allclose(2, np.arccos(-np.dot(q0, q1)) / angle) True """ @@ -1525,12 
+1533,12 @@ def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True): # invert rotation d = -d np.negative(q1, q1) - angle = math.acos(d) + spin * math.pi + angle = np.arccos(d) + spin * np.pi if abs(angle) < _EPS: return q0 - isin = 1.0 / math.sin(angle) - q0 *= math.sin((1.0 - fraction) * angle) * isin - q1 *= math.sin(fraction * angle) * isin + isin = 1.0 / np.sin(angle) + q0 *= np.sin((1.0 - fraction) * angle) * isin + q1 *= np.sin(fraction * angle) * isin q0 += q1 return q0 @@ -1559,11 +1567,12 @@ def random_quaternion(rand=None, num=1): assert rand.shape[0] == 3 r1 = np.sqrt(1.0 - rand[0]) r2 = np.sqrt(rand[0]) - pi2 = math.pi * 2.0 + pi2 = np.pi * 2.0 t1 = pi2 * rand[1] t2 = pi2 * rand[2] - return np.array([np.cos(t2) * r2, np.sin(t1) * r1, - np.cos(t1) * r1, np.sin(t2) * r2]).T.squeeze() + return np.array( + [np.cos(t2) * r2, np.sin(t1) * r1, np.cos(t1) * r1, np.sin(t2) * r2] + ).T.squeeze() def random_rotation_matrix(rand=None, num=1, translate=False): @@ -1581,8 +1590,7 @@ def random_rotation_matrix(rand=None, num=1, translate=False): True """ - matrix = quaternion_matrix( - random_quaternion(rand=rand, num=num)) + matrix = quaternion_matrix(random_quaternion(rand=rand, num=num)) if translate: scale = float(translate) matrix[:3, 3] = (np.random.random(3) - 0.5) * scale @@ -1631,7 +1639,7 @@ def __init__(self, initial=None): initial = np.array(initial, dtype=np.float64) if initial.shape == (4, 4): self._qdown = quaternion_from_matrix(initial) - elif initial.shape == (4, ): + elif initial.shape == (4,): initial /= vector_norm(initial) self._qdown = initial else: @@ -1708,10 +1716,10 @@ def arcball_map_to_sphere(point, center, radius): n = v0 * v0 + v1 * v1 if n > 1.0: # position outside of sphere - n = math.sqrt(n) + n = np.sqrt(n) return np.array([v0 / n, v1 / n, 0.0]) else: - return np.array([v0, v1, math.sqrt(1.0 - n)]) + return np.array([v0, v1, np.sqrt(1.0 - n)]) def arcball_constrain_to_axis(point, axis): @@ -1751,14 +1759,31 @@ def arcball_nearest_axis(point, axes): # map axes strings to/from tuples of inner axis, parity, repetition, frame _AXES2TUPLE = { - 'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0), - 'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0), - 'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0), - 'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0), - 'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1), - 'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1), - 'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1), - 'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)} + "sxyz": (0, 0, 0, 0), + "sxyx": (0, 0, 1, 0), + "sxzy": (0, 1, 0, 0), + "sxzx": (0, 1, 1, 0), + "syzx": (1, 0, 0, 0), + "syzy": (1, 0, 1, 0), + "syxz": (1, 1, 0, 0), + "syxy": (1, 1, 1, 0), + "szxy": (2, 0, 0, 0), + "szxz": (2, 0, 1, 0), + "szyx": (2, 1, 0, 0), + "szyz": (2, 1, 1, 0), + "rzyx": (0, 0, 0, 1), + "rxyx": (0, 0, 1, 1), + "ryzx": (0, 1, 0, 1), + "rxzx": (0, 1, 1, 1), + "rxzy": (1, 0, 0, 1), + "ryzy": (1, 0, 1, 1), + "rzxy": (1, 1, 0, 1), + "ryxy": (1, 1, 1, 1), + "ryxz": (2, 0, 0, 1), + "rzxz": (2, 0, 1, 1), + "rxyz": (2, 1, 0, 1), + "rzyz": (2, 1, 1, 1), +} _TUPLE2AXES = {v: k for k, v in _AXES2TUPLE.items()} @@ -1791,7 +1816,7 @@ def vector_norm(data, axis=None, out=None): data = np.array(data, dtype=np.float64, copy=True) if out is None: if data.ndim == 1: - return math.sqrt(np.dot(data, data)) + return np.sqrt(np.dot(data, data)) data *= data out = 
np.atleast_1d(np.sum(data, axis=axis)) np.sqrt(out, out) @@ -1831,7 +1856,7 @@ def unit_vector(data, axis=None, out=None): if out is None: data = np.array(data, dtype=np.float64, copy=True) if data.ndim == 1: - data /= math.sqrt(np.dot(data, data)) + data /= np.sqrt(np.dot(data, data)) return data else: if out is not data: @@ -1889,7 +1914,7 @@ def angle_between_vectors(v0, v1, directed=True, axis=0): i.e. the maximum angle is pi/2. >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3]) - >>> np.allclose(a, math.pi) + >>> np.allclose(a, np.pi) True >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3], directed=False) >>> np.allclose(a, 0) @@ -1985,9 +2010,8 @@ def transform_around(matrix, point): point = np.asanyarray(point) matrix = np.asanyarray(matrix) dim = len(point) - if matrix.shape != (dim + 1, - dim + 1): - raise ValueError('matrix must be (d+1, d+1)') + if matrix.shape != (dim + 1, dim + 1): + raise ValueError("matrix must be (d+1, d+1)") translate = np.eye(dim + 1) translate[:dim, dim] = -point @@ -1998,10 +2022,7 @@ def transform_around(matrix, point): return result -def planar_matrix(offset=None, - theta=None, - point=None, - scale=None): +def planar_matrix(offset=None, theta=None, point=None, scale=None): """ 2D homogeonous transformation matrix. @@ -2028,9 +2049,9 @@ def planar_matrix(offset=None, offset = np.asanyarray(offset, dtype=np.float64) theta = float(theta) if not np.isfinite(theta): - raise ValueError('theta must be finite angle!') + raise ValueError("theta must be finite angle!") if offset.shape != (2,): - raise ValueError('offset must be length 2!') + raise ValueError("offset must be length 2!") T = np.eye(3, dtype=np.float64) s = np.sin(theta) @@ -2067,7 +2088,7 @@ def planar_matrix_to_3D(matrix_2D): matrix_2D = np.asanyarray(matrix_2D, dtype=np.float64) if matrix_2D.shape != (3, 3): - raise ValueError('Homogenous 2D transformation matrix required!') + raise ValueError("Homogenous 2D transformation matrix required!") matrix_3D = np.eye(4) # translation @@ -2078,7 +2099,7 @@ def planar_matrix_to_3D(matrix_2D): return matrix_3D -def spherical_matrix(theta, phi, axes='sxyz'): +def spherical_matrix(theta, phi, axes="sxyz"): """ Give a spherical coordinate vector, find the rotation that will transform a [0,0,1] vector to those coordinates @@ -2100,9 +2121,7 @@ def spherical_matrix(theta, phi, axes='sxyz'): return result -def transform_points(points, - matrix, - translate=True): +def transform_points(points, matrix, translate=True): """ Returns points rotated by a homogeneous transformation matrix. 
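A hedged doctest-style sketch of the common case, a pure translation with hand-picked values:

>>> points = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
>>> matrix = np.eye(4)
>>> matrix[:3, 3] = [1.0, 0.0, 0.0]
>>> np.allclose(transform_points(points, matrix), [[1, 0, 0], [2, 1, 1]])
True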
@@ -2134,7 +2153,7 @@ def transform_points(points, count, dim = points.shape # quickly check to see if we've been passed an identity matrix - if np.abs(matrix - _IDENTITY[:dim + 1, :dim + 1]).max() < 1e-8: + if np.abs(matrix - _IDENTITY[: dim + 1, : dim + 1]).max() < 1e-8: return np.ascontiguousarray(points.copy()) if translate: @@ -2166,8 +2185,9 @@ def fix_rigid(matrix, max_deviance=1e-5): Repaired homogeneous transformation matrix """ dim = matrix.shape[0] - 1 - check = np.abs(np.dot(matrix[:dim, :dim], matrix[:dim, :dim].T) - - _IDENTITY[:dim, :dim]).max() + check = np.abs( + np.dot(matrix[:dim, :dim], matrix[:dim, :dim].T) - _IDENTITY[:dim, :dim] + ).max() # if the matrix differs by more than float-zero and less # than the threshold try to repair the matrix with SVD if check > 1e-13 and check < max_deviance: @@ -2211,8 +2231,7 @@ def is_rigid(matrix, epsilon=1e-8): return False # check dot product of rotation against transpose - check = np.dot(matrix[:3, :3], - matrix[:3, :3].T) - _IDENTITY[:3, :3] + check = np.dot(matrix[:3, :3], matrix[:3, :3].T) - _IDENTITY[:3, :3] return check.ptp() < epsilon @@ -2267,14 +2286,12 @@ def flips_winding(matrix): vectors = np.diff(triangles, axis=1) cross = np.cross(vectors[:, 0], vectors[:, 1]) # rotate the original normals to match - cross[:count] = np.dot(matrix[:3, :3], - cross[:count].T).T + cross[:count] = np.dot(matrix[:3, :3], cross[:count].T).T # unitize normals norm = np.sqrt(np.dot(cross * cross, [1, 1, 1])).reshape((-1, 1)) cross = cross / norm # find the projection of the two normals - projection = np.dot(cross[:count] * cross[count:], - [1.0] * 3) + projection = np.dot(cross[:count] * cross[count:], [1.0] * 3) # if the winding was flipped but not the normal # the projection will be negative, and since we're # checking a few triangles check against the mean From 76be3a9988673b5f52f38edc6f2fe9129f2d3d2b Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 28 Aug 2023 17:11:45 -0400 Subject: [PATCH 17/84] fix docs --- README.md | 7 ++-- docs/_static/custom.css | 19 ----------- docs/conf.py | 73 +++++++++++++++++++---------------------- docs/requirements.txt | 15 +++++---- 4 files changed, 46 insertions(+), 68 deletions(-) diff --git a/README.md b/README.md index 021cf995a..02bcd3ec4 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,14 @@ [![trimesh](https://trimsh.org/images/logotype-a.svg)](http://trimsh.org) ----------- -[![Github Actions](https://github.com/mikedh/trimesh/workflows/Release%20Trimesh/badge.svg)](https://github.com/mikedh/trimesh/actions) [![PyPI version](https://badge.fury.io/py/trimesh.svg)](https://badge.fury.io/py/trimesh) [![codecov](https://codecov.io/gh/mikedh/trimesh/branch/main/graph/badge.svg?token=4PVRQXyl2h)](https://codecov.io/gh/mikedh/trimesh) [![Docker Image Version (latest by date)](https://img.shields.io/docker/v/trimesh/trimesh?label=docker&sort=semver)](https://hub.docker.com/r/trimesh/trimesh/tags) +[![Github Actions](https://github.com/mikedh/trimesh/workflows/Release%20Trimesh/badge.svg)](https://github.com/mikedh/trimesh/actions) [![codecov](https://codecov.io/gh/mikedh/trimesh/branch/main/graph/badge.svg?token=4PVRQXyl2h)](https://codecov.io/gh/mikedh/trimesh) [![Docker Image Version (latest by date)](https://img.shields.io/docker/v/trimesh/trimesh?label=docker&sort=semver)](https://hub.docker.com/r/trimesh/trimesh/tags) [![PyPI version](https://badge.fury.io/py/trimesh.svg)](https://badge.fury.io/py/trimesh) | :warning: WARNING | -|:---------------------------| -| trimesh 4.0.0 which 
makes the minimum Python version 3.7 is in pre-release and will be released soon, you may want to test your stack with: `pip install --pre trimesh` | +|---------------------------| +| `trimesh >= 4.0.0` makes the minimum Python 3.7 and is in pre-release and will be released soon | +| You can test your stack with: `pip install --pre trimesh` or if you are on older Python you should lock `trimesh<4`| Trimesh is a pure Python 3.7+ library for loading and using [triangular meshes](https://en.wikipedia.org/wiki/Triangle_mesh) with an emphasis on watertight surfaces. The goal of the library is to provide a full featured and well tested Trimesh object which allows for easy manipulation and analysis, in the style of the Polygon object in the [Shapely library](https://github.com/Toblerity/Shapely). diff --git a/docs/_static/custom.css b/docs/_static/custom.css index 82ec91f36..e69de29bb 100644 --- a/docs/_static/custom.css +++ b/docs/_static/custom.css @@ -1,19 +0,0 @@ -/* override table width restrictions */ - .wy-table-responsive table td { - /* !important prevents the common CSS stylesheets from overriding - this as on RTD they are loaded after this stylesheet */ - white-space: normal !important; - } - - .wy-table-responsive { - overflow: visible !important; - } - - -dl.py.property { - display: unset; -} - -.wy-nav-content { - max-width: 70em; -} diff --git a/docs/conf.py b/docs/conf.py index 5f13e6801..4c3174b3b 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -20,46 +20,43 @@ def abspath(rel): """ # current working directory - cwd = os.path.dirname(os.path.abspath( - inspect.getfile(inspect.currentframe()))) + cwd = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) return os.path.abspath(os.path.join(cwd, rel)) -extensions = ['sphinx.ext.napoleon', # numpy-style docstring - 'myst_parser'] # allows markdown +extensions = [ + "sphinx.ext.napoleon", # numpy-style docstring + "myst_parser", +] # allows markdown myst_all_links_external = True # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. source_suffix = { - '.rst': 'restructuredtext', - '.txt': 'markdown', - '.md': 'markdown', + ".rst": "restructuredtext", + ".txt": "markdown", + ".md": "markdown", } # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = 'trimesh' -copyright = '2022, Michael Dawson-Haggerty' -author = 'Michael Dawson-Haggerty' +project = "trimesh" +copyright = "2022, Michael Dawson-Haggerty" +author = "Michael Dawson-Haggerty" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # get version from trimesh without installing -with open(abspath('../trimesh/version.py')) as f: - _version_raw = f.read() -version = eval(next( - line.strip().split('=')[-1] - for line in str.splitlines(_version_raw) - if '_version_' in line)) +import trimesh + # The full version, including alpha/beta/rc tags. -release = version +release = trimesh.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -70,10 +67,10 @@ def abspath(rel): # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. 
# This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False @@ -81,44 +78,42 @@ def abspath(rel): # -- Options for HTML output -------------------------------------- # The theme to use for HTML and HTML Help pages -html_theme = 'furo' +html_theme = "furo" # options for rtd-theme html_theme_options = { - 'analytics_id': 'UA-161434837-1', - 'display_version': True, - 'prev_next_buttons_location': 'bottom', - 'style_external_links': False, + "display_version": True, + "prev_next_buttons_location": "bottom", + "style_external_links": False, # toc options - 'collapse_navigation': True, - 'sticky_navigation': True, - 'navigation_depth': 4, - 'includehidden': True, - 'titles_only': False, - + "collapse_navigation": True, + "sticky_navigation": True, + "navigation_depth": 4, + "includehidden": True, + "titles_only": False, } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] +html_logo = "images/trimesh-logo.png" # custom css -html_css_files = ['custom.css'] +html_css_files = ["custom.css"] html_context = { "display_github": True, "github_user": "mikedh", "github_repo": "trimesh", "github_version": "main", - "conf_py_path": "/docs/" + "conf_py_path": "/docs/", } # Output file base name for HTML help builder. -htmlhelp_basename = 'trimeshdoc' +htmlhelp_basename = "trimeshdoc" -# -- Extensions configuration ---------------------------------- autodoc_default_options = { - 'autosummary': True, - 'special-members': '__init__', + "autosummary": True, + "special-members": "__init__", } diff --git a/docs/requirements.txt b/docs/requirements.txt index 3bbfa4165..2121f623c 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,12 +1,13 @@ pypandoc==1.11 recommonmark==0.7.1 -sphinx==6.1.3 jupyter==1.0.0 -sphinx_rtd_theme==1.2.0 -myst-parser==1.0.0 -pyopenssl==23.1.1 -autodocsumm==0.2.10 + +# get sphinx version range from furo install +furo==2023.8.19 +myst-parser==2.0.0 +pyopenssl==23.2.0 +autodocsumm==0.2.11 jinja2==3.1.2 -matplotlib==3.7.1 -nbconvert==7.3.1 +matplotlib==3.7.2 +nbconvert==7.7.4 From 473a2f7992d2fe5061434a06c81269e145486357 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 28 Aug 2023 17:48:18 -0400 Subject: [PATCH 18/84] ruff --- docs/conf.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 4c3174b3b..62aa3a568 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -2,6 +2,9 @@ import inspect import os +# get version from trimesh without installing +import trimesh + def abspath(rel): """ @@ -51,10 +54,6 @@ def abspath(rel): # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. - -# get version from trimesh without installing -import trimesh - # The full version, including alpha/beta/rc tags. 
release = trimesh.__version__ From ac9d093ccc26554bc4cc5944e72d682ea65ae924 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=96=B9=E6=AD=A6=E5=8D=93?= Date: Wed, 30 Aug 2023 10:24:21 +0800 Subject: [PATCH 19/84] force np.inf in t to zero --- trimesh/ray/ray_triangle.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/trimesh/ray/ray_triangle.py b/trimesh/ray/ray_triangle.py index 2743ba024..74280def8 100644 --- a/trimesh/ray/ray_triangle.py +++ b/trimesh/ray/ray_triangle.py @@ -391,14 +391,13 @@ def ray_bounds(ray_origins, axis_dir = np.array([ray_directions[i][a] for i, a in enumerate(axis)]).reshape((-1, 1)) - # prevent division by zero - axis_dir[axis_dir == 0] = tol.zero - # parametric equation of a line # point = direction*t + origin # p = dt + o # t = (p-o)/d t = (axis_bound - axis_ori) / axis_dir + # prevent np.inf by division by zero + t[axis_dir == 0.0] = 0 # prevent the bounding box from including triangles # behind the ray origin From 2b8548c78eba5c0a8d6dcd26578701a8244e2bb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=96=B9=E6=AD=A6=E5=8D=93?= Date: Wed, 30 Aug 2023 17:22:39 +0800 Subject: [PATCH 20/84] nonzero mask --- trimesh/ray/ray_triangle.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/trimesh/ray/ray_triangle.py b/trimesh/ray/ray_triangle.py index 74280def8..16a64306d 100644 --- a/trimesh/ray/ray_triangle.py +++ b/trimesh/ray/ray_triangle.py @@ -395,9 +395,9 @@ def ray_bounds(ray_origins, # point = direction*t + origin # p = dt + o # t = (p-o)/d - t = (axis_bound - axis_ori) / axis_dir - # prevent np.inf by division by zero - t[axis_dir == 0.0] = 0 + nonzero = (axis_dir != 0.0).reshape(-1) + t = np.zeros_like(axis_bound) + t[nonzero] = (axis_bound[nonzero] - axis_ori[nonzero]) / axis_dir[nonzero] # prevent the bounding box from including triangles # behind the ray origin From d52b6dab01954e252ac2030aded9d32cc0f5c75e Mon Sep 17 00:00:00 2001 From: Mathias Parger Date: Wed, 30 Aug 2023 17:40:22 +0200 Subject: [PATCH 21/84] fixed perceived brightness calculation --- trimesh/visual/gloss.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trimesh/visual/gloss.py b/trimesh/visual/gloss.py index 7ab55d0d3..771453522 100644 --- a/trimesh/visual/gloss.py +++ b/trimesh/visual/gloss.py @@ -83,7 +83,7 @@ def solve_metallic(diffuse, specular, one_minus_specular_strength): return metallic def get_perceived_brightness(rgb): - return np.dot(rgb[..., :3], [0.299, 0.587, 0.114]) + return np.sqrt(np.dot(rgb[..., :3]**2, [0.299, 0.587, 0.114])) def toPIL(img): if isinstance(img, Image): From f27e8ed2c36240f59aaf9dac91a8bc1edf57f5c0 Mon Sep 17 00:00:00 2001 From: Mathias Parger Date: Wed, 30 Aug 2023 17:48:17 +0200 Subject: [PATCH 22/84] keep vertex normals when fusing meshes --- trimesh/util.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/trimesh/util.py b/trimesh/util.py index 0be3003fd..a143c53ef 100644 --- a/trimesh/util.py +++ b/trimesh/util.py @@ -1503,6 +1503,10 @@ def concatenate(a, b=None): if all('face_normals' in m._cache for m in is_mesh): face_normals = np.vstack( [m.face_normals for m in is_mesh]) + + # always save vertex normals + vertex_normals = vstack_empty( + [m.vertex_normals.copy() for m in is_mesh]) try: # concatenate visuals @@ -1516,6 +1520,7 @@ def concatenate(a, b=None): return trimesh_type(vertices=vertices, faces=faces, face_normals=face_normals, + vertex_normals=vertex_normals, visual=visual, process=False) From b15b2162e295316f922fc319f56e6181b6df9003 Mon Sep 17 00:00:00 2001 From: Michael 
Dawson-Haggerty Date: Wed, 30 Aug 2023 14:16:46 -0400 Subject: [PATCH 23/84] ci release changes --- .github/workflows/release.yml | 11 ++++------- .github/workflows/test.yml | 9 ++++++--- LICENSE.md | 2 +- codecov.yml | 8 -------- pyproject.toml | 3 +++ 5 files changed, 14 insertions(+), 19 deletions(-) delete mode 100644 codecov.yml diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index a3096fe9c..76251f2f8 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -26,13 +26,10 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - python-version: [3.6, 3.7, 3.8, 3.9, "3.10", "3.11"] - os: [ubuntu-20.04, macos-latest, windows-latest] + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] + os: [ubuntu-latest, macos-latest, windows-latest] exclude: - # windows runners have gotten very flaky - # exclude all windows test runs except for one - - os: windows-latest - python-version: 3.6 + # windows runners have gotten flaky - os: windows-latest python-version: 3.8 - os: windows-latest @@ -63,7 +60,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.x' + python-version: '3.11' - name: Install publishing dependencies run: | python -m pip install --upgrade pip diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 07a1d6308..992b7f9d1 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -15,10 +15,13 @@ jobs: uses: actions/setup-python@v4 with: python-version: "3.11" - - name: Install Formatting - run: pip install ruff - - name: Check Formatting + - name: Install + run: pip install ruff black + - name: Run Ruff run: ruff . +# - name: Run Black +# run: black --check . + tests: name: Run Unit Tests runs-on: ${{ matrix.os }} diff --git a/LICENSE.md b/LICENSE.md index d80b18bf3..d0571124d 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2019 Michael Dawson-Haggerty +Copyright (c) 2023 Michael Dawson-Haggerty Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/codecov.yml b/codecov.yml deleted file mode 100644 index cf9c95b91..000000000 --- a/codecov.yml +++ /dev/null @@ -1,8 +0,0 @@ -coverage: - status: - project: - default: - # basic - target: 75% - threshold: 10% - patch: off diff --git a/pyproject.toml b/pyproject.toml index 956b15621..ad709dab6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -121,6 +121,9 @@ ignore = [ "E501", # Line too long ({width} > {limit} characters) "B904", # raise ... 
from err "B905", # zip() without an explicit strict= parameter + "ANN101", # type hint for `self` + "ANN002", # type hint for *args + "ANN003", # type hint for **kwargs ] line-length = 90 From d57c974c251633619eaf9d4b705c2919391c9613 Mon Sep 17 00:00:00 2001 From: Clemens Eppner Date: Wed, 30 Aug 2023 17:24:37 -0700 Subject: [PATCH 24/84] Add torus --- trimesh/creation.py | 64 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) diff --git a/trimesh/creation.py b/trimesh/creation.py index 4d7897912..1c9b0fc6c 100644 --- a/trimesh/creation.py +++ b/trimesh/creation.py @@ -1272,3 +1272,67 @@ def truncated_prisms(tris, origin=None, normal=None): mesh = Trimesh(vertices=vertices, faces=faces, process=False) return mesh + + +def torus(major_radius, + minor_radius, + major_sections=32, + minor_sections=32, + transform=None, + **kwargs): + """Create a mesh of a torus around Z centered at the origin. + + Parameters + ------------ + major_radius: (float) + Radius from the center of the torus to the center of the tube. + minor_radius: (float) + Radius of the tube. + major_sections: int + Number of sections around major radius result should have + If not specified default is 32 per revolution + minor_sections: int + Number of sections around minor radius result should have + If not specified default is 32 per revolution + transform: (4, 4) float + Transformation matrix + **kwargs: + passed to Trimesh to create torus + + Returns + ------------ + geometry : trimesh.Trimesh + Mesh of a torus + """ + vertices = [] + faces = [] + + for i in range(major_sections): + theta = 2 * np.pi * i / major_sections + for j in range(minor_sections): + phi = 2 * np.pi * j / minor_sections + + x = (major_radius + minor_radius * np.cos(phi)) * np.cos(theta) + y = (major_radius + minor_radius * np.cos(phi)) * np.sin(theta) + z = minor_radius * np.sin(phi) + + vertices.append([x, y, z]) + + # Create faces + a = i * minor_sections + j + b = ((i + 1) % major_sections) * minor_sections + j + c = ((i + 1) % major_sections) * minor_sections + (j + 1) % minor_sections + d = i * minor_sections + (j + 1) % minor_sections + + faces.append([a, b, c]) + faces.append([a, c, d]) + + torus = Trimesh(vertices=vertices, + faces=faces, + process=False, + **kwargs) + + if transform is not None: + torus.apply_transform(transform) + + return torus From f21d3adba6406b95d30ab03e20f91cd742eaa833 Mon Sep 17 00:00:00 2001 From: Clemens Eppner Date: Wed, 30 Aug 2023 17:28:11 -0700 Subject: [PATCH 25/84] Add test for torus --- tests/test_creation.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/test_creation.py b/tests/test_creation.py index f6511fa5f..1736939b2 100644 --- a/tests/test_creation.py +++ b/tests/test_creation.py @@ -326,6 +326,15 @@ def check_triangulation(v, f, true_area): assert g.np.isclose(area, true_area) +def test_torus(self): + torus = g.trimesh.creation.torus + + m = torus(major_radius=1.0, minor_radius=0.2) + + extents = g.np.array([1.4, 1.4, 0.4]) + assert g.np.allclose(m.extents, extents) + assert g.np.allclose(m.bounds, [-extents / 2.0, extents / 2.0]) + if __name__ == '__main__': g.trimesh.util.attach_to_log() g.unittest.main() From 2b549f0ac3c660595c907dfa2396f168f48b1c0c Mon Sep 17 00:00:00 2001 From: Clemens Eppner Date: Thu, 31 Aug 2023 19:38:46 -0700 Subject: [PATCH 26/84] Fix test --- tests/test_creation.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/test_creation.py b/tests/test_creation.py index 1736939b2..4f654f0a1 100644 --- 
a/tests/test_creation.py +++ b/tests/test_creation.py @@ -326,12 +326,16 @@ def check_triangulation(v, f, true_area): assert g.np.isclose(area, true_area) -def test_torus(self): +def test_torus(): torus = g.trimesh.creation.torus - m = torus(major_radius=1.0, minor_radius=0.2) + major_radius = 1.0 + minor_radius = 0.2 + m = torus(major_radius=major_radius, minor_radius=minor_radius) - extents = g.np.array([1.4, 1.4, 0.4]) + extents = g.np.array([2 * major_radius + 2 * minor_radius, + 2 * major_radius + 2 * minor_radius, + 2 * minor_radius]) assert g.np.allclose(m.extents, extents) assert g.np.allclose(m.bounds, [-extents / 2.0, extents / 2.0]) From f8652b5174d911252732bbce6806f893646f2580 Mon Sep 17 00:00:00 2001 From: Clemens Eppner Date: Thu, 31 Aug 2023 19:39:46 -0700 Subject: [PATCH 27/84] Vectorize code --- trimesh/creation.py | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/trimesh/creation.py b/trimesh/creation.py index 1c9b0fc6c..409f26fa1 100644 --- a/trimesh/creation.py +++ b/trimesh/creation.py @@ -1287,7 +1287,7 @@ def torus(major_radius, major_radius: (float) Radius from the center of the torus to the center of the tube. minor_radius: (float) - Radius of the tube. + Radius of the tube. major_sections: int Number of sections around major radius result should have If not specified default is 32 per revolution @@ -1298,41 +1298,41 @@ def torus(major_radius, Transformation matrix **kwargs: passed to Trimesh to create torus - + Returns ------------ geometry : trimesh.Trimesh Mesh of a torus """ - vertices = [] - faces = [] + # Calculate vertex coordinates + theta = np.linspace(0, 2 * np.pi, major_sections, endpoint=False).repeat(minor_sections) + phi = np.tile(np.linspace(0, 2 * np.pi, minor_sections, endpoint=False), major_sections) - for i in range(major_sections): - theta = 2 * np.pi * i / major_sections - for j in range(minor_sections): - phi = 2 * np.pi * j / minor_sections + x = (major_radius + minor_radius * np.cos(phi)) * np.cos(theta) + y = (major_radius + minor_radius * np.cos(phi)) * np.sin(theta) + z = minor_radius * np.sin(phi) - x = (major_radius + minor_radius * np.cos(phi)) * np.cos(theta) - y = (major_radius + minor_radius * np.cos(phi)) * np.sin(theta) - z = minor_radius * np.sin(phi) + vertices = np.stack((x, y, z), axis=-1).reshape(-1, 3) - vertices.append([x, y, z]) + # Calculate faces + i_range = np.arange(minor_sections) + j_range = np.arange(major_sections) - # Create faces - a = i * minor_sections + j - b = ((i + 1) % major_sections) * minor_sections + j - c = ((i + 1) % major_sections) * minor_sections + (j + 1) % minor_sections - d = i * minor_sections + (j + 1) % minor_sections + i_grid, j_grid = np.meshgrid(i_range, j_range, indexing='ij') - faces.append([a, b, c]) - faces.append([a, c, d]) + a = (i_grid * minor_sections + j_grid).ravel() + b = (((i_grid + 1) % major_sections) * minor_sections + j_grid).ravel() + c = (((i_grid + 1) % major_sections) * minor_sections + (j_grid + 1) % minor_sections).ravel() + d = (i_grid * minor_sections + (j_grid + 1) % minor_sections).ravel() + + faces = np.column_stack((a, b, c, a, c, d)).reshape(-1, 3) torus = Trimesh(vertices=vertices, faces=faces, process=False, **kwargs) - + if transform is not None: torus.apply_transform(transform) - + return torus From e4f4db3f6a695534e5a9a14856067fa14d23d82e Mon Sep 17 00:00:00 2001 From: Clemens Eppner Date: Thu, 31 Aug 2023 19:43:56 -0700 Subject: [PATCH 28/84] Fix formatting issues --- trimesh/creation.py | 11 
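One hedged observation on the vectorized hunk above: the `meshgrid` ranges appear swapped relative to the loop they replace, since `i` should run over `major_sections` and `j` over `minor_sections`. With the equal 32/32 defaults the two agree (which is why the test still passes), but with unequal section counts the patched indexing would produce out-of-range vertex indices. A minimal sketch keeping the loop's convention:

```python
import numpy as np

major_sections, minor_sections = 2, 3  # deliberately unequal

# i indexes the major ring, j the minor ring, matching the loop version
i_grid, j_grid = np.meshgrid(np.arange(major_sections),
                             np.arange(minor_sections),
                             indexing='ij')
a = (i_grid * minor_sections + j_grid).ravel()

# every face corner must reference one of the major * minor vertices
assert a.max() < major_sections * minor_sections
```

This becomes moot once a later patch rewrites `torus` on top of `revolve`.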
+++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/trimesh/creation.py b/trimesh/creation.py index 409f26fa1..1b97b8c36 100644 --- a/trimesh/creation.py +++ b/trimesh/creation.py @@ -1305,9 +1305,11 @@ def torus(major_radius, Mesh of a torus """ # Calculate vertex coordinates - theta = np.linspace(0, 2 * np.pi, major_sections, endpoint=False).repeat(minor_sections) - phi = np.tile(np.linspace(0, 2 * np.pi, minor_sections, endpoint=False), major_sections) - + theta = np.linspace(0, 2 * np.pi, major_sections, + endpoint=False).repeat(minor_sections) + phi = np.tile(np.linspace(0, 2 * np.pi, minor_sections, + endpoint=False), major_sections) + x = (major_radius + minor_radius * np.cos(phi)) * np.cos(theta) y = (major_radius + minor_radius * np.cos(phi)) * np.sin(theta) z = minor_radius * np.sin(phi) @@ -1322,7 +1324,8 @@ def torus(major_radius, a = (i_grid * minor_sections + j_grid).ravel() b = (((i_grid + 1) % major_sections) * minor_sections + j_grid).ravel() - c = (((i_grid + 1) % major_sections) * minor_sections + (j_grid + 1) % minor_sections).ravel() + c = (((i_grid + 1) % major_sections) * minor_sections + (j_grid + 1) + % minor_sections).ravel() d = (i_grid * minor_sections + (j_grid + 1) % minor_sections).ravel() faces = np.column_stack((a, b, c, a, c, d)).reshape(-1, 3) From aa99d92bb190d19fac58a2b05e4b05aa3b00c215 Mon Sep 17 00:00:00 2001 From: munahaf Date: Fri, 1 Sep 2023 18:46:23 +0000 Subject: [PATCH 29/84] Comment: Updated a module name to allow import without type errors. --- trimesh/interfaces/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trimesh/interfaces/__init__.py b/trimesh/interfaces/__init__.py index d9b3aa506..4c2bb091e 100644 --- a/trimesh/interfaces/__init__.py +++ b/trimesh/interfaces/__init__.py @@ -6,4 +6,4 @@ from . 
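The hunk that continues below replaces module objects with their names: `__all__` has to hold strings for star imports and introspection tools to work. A small check, runnable once the fix is applied:

```python
# `from trimesh.interfaces import *` consults __all__, which must
# contain attribute *names* (strings), not the module objects themselves
import trimesh.interfaces as interfaces

assert all(isinstance(name, str) for name in interfaces.__all__)
assert all(hasattr(interfaces, name) for name in interfaces.__all__)
```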
import vhacd # add to __all__ as per pep8 -__all__ = [scad, blender, vhacd] +__all__ = ['scad', 'blender', 'vhacd'] From 0c1afc95aae326800686ea5e941c350830fe441f Mon Sep 17 00:00:00 2001 From: Clemens Eppner Date: Fri, 1 Sep 2023 12:26:22 -0700 Subject: [PATCH 30/84] Create torus using revolve() --- trimesh/creation.py | 48 ++++++++++++++------------------------------- 1 file changed, 15 insertions(+), 33 deletions(-) diff --git a/trimesh/creation.py b/trimesh/creation.py index 1b97b8c36..1a8983245 100644 --- a/trimesh/creation.py +++ b/trimesh/creation.py @@ -1304,38 +1304,20 @@ def torus(major_radius, geometry : trimesh.Trimesh Mesh of a torus """ - # Calculate vertex coordinates - theta = np.linspace(0, 2 * np.pi, major_sections, - endpoint=False).repeat(minor_sections) - phi = np.tile(np.linspace(0, 2 * np.pi, minor_sections, - endpoint=False), major_sections) + phi = np.linspace(0, 2 * np.pi, minor_sections, endpoint=False) + linestring = np.column_stack((minor_radius * np.cos(phi), + minor_radius * np.sin(phi))) \ + + [major_radius, 0] - x = (major_radius + minor_radius * np.cos(phi)) * np.cos(theta) - y = (major_radius + minor_radius * np.cos(phi)) * np.sin(theta) - z = minor_radius * np.sin(phi) - - vertices = np.stack((x, y, z), axis=-1).reshape(-1, 3) - - # Calculate faces - i_range = np.arange(minor_sections) - j_range = np.arange(major_sections) - - i_grid, j_grid = np.meshgrid(i_range, j_range, indexing='ij') - - a = (i_grid * minor_sections + j_grid).ravel() - b = (((i_grid + 1) % major_sections) * minor_sections + j_grid).ravel() - c = (((i_grid + 1) % major_sections) * minor_sections + (j_grid + 1) - % minor_sections).ravel() - d = (i_grid * minor_sections + (j_grid + 1) % minor_sections).ravel() - - faces = np.column_stack((a, b, c, a, c, d)).reshape(-1, 3) - - torus = Trimesh(vertices=vertices, - faces=faces, - process=False, - **kwargs) - - if transform is not None: - torus.apply_transform(transform) + if 'metadata' not in kwargs: + kwargs['metadata'] = dict() + kwargs['metadata'].update( + {'shape': 'torus', + 'major_radius': major_radius, + 'minor_radius': minor_radius}) - return torus + # generate torus through simple revolution + return revolve(linestring=linestring, + sections=major_sections, + transform=transform, + **kwargs) From 69c2d9517f93b1127f9cc37b6c1300c97946bde1 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Sat, 2 Sep 2023 01:21:23 -0400 Subject: [PATCH 31/84] ruff --- tests/test_creation.py | 2 +- trimesh/creation.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_creation.py b/tests/test_creation.py index 7777e27e5..fa60f9293 100644 --- a/tests/test_creation.py +++ b/tests/test_creation.py @@ -330,7 +330,7 @@ def test_torus(): major_radius = 1.0 minor_radius = 0.2 m = torus(major_radius=major_radius, minor_radius=minor_radius) - + extents = g.np.array([2 * major_radius + 2 * minor_radius, 2 * major_radius + 2 * minor_radius, 2 * minor_radius]) diff --git a/trimesh/creation.py b/trimesh/creation.py index e3c435bcc..ba49f378d 100644 --- a/trimesh/creation.py +++ b/trimesh/creation.py @@ -1304,7 +1304,7 @@ def torus(major_radius, + [major_radius, 0] if 'metadata' not in kwargs: - kwargs['metadata'] = dict() + kwargs['metadata'] = {} kwargs['metadata'].update( {'shape': 'torus', 'major_radius': major_radius, From 9a6e5f980ae5ea94d24aa64e8dbcc0dbad4e8dba Mon Sep 17 00:00:00 2001 From: Oliver Lengwinat Date: Mon, 4 Sep 2023 14:50:53 +0200 Subject: [PATCH 32/84] Set gmsh options before opening the file Ensure gmsh 
options are set before opening the file. This allows hiding Terminal output during loading (e.g. "Info: Label...", "Info: Color...") etc. --- trimesh/interfaces/gmsh.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/trimesh/interfaces/gmsh.py b/trimesh/interfaces/gmsh.py index ebabcee0c..e94569819 100644 --- a/trimesh/interfaces/gmsh.py +++ b/trimesh/interfaces/gmsh.py @@ -67,6 +67,10 @@ def load_gmsh(file_name, gmsh_args=None): gmsh.initialize() gmsh.option.setNumber("General.Terminal", 1) gmsh.model.add('Surface_Mesh_Generation') + # loop through our numbered args which do things, stuff + for arg in args: + gmsh.option.setNumber(*arg) + gmsh.open(file_name) # create a temporary file for the results @@ -78,9 +82,6 @@ def load_gmsh(file_name, gmsh_args=None): if any(file_name.lower().endswith(e) for e in ['.brep', '.stp', '.step', '.igs', '.iges']): gmsh.model.geo.synchronize() - # loop through our numbered args which do things, stuff - for arg in args: - gmsh.option.setNumber(*arg) # generate the mesh gmsh.model.mesh.generate(2) # write to the temporary file From 729dfbf2a6321ded463beda6b1dbfcd3b899f260 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 5 Sep 2023 15:00:23 -0400 Subject: [PATCH 33/84] type hints --- README.md | 5 +- trimesh/base.py | 948 +++++++++++++++++++------------ trimesh/interfaces/gmsh.py | 2 +- trimesh/primitives.py | 28 +- 4 files changed, 494 insertions(+), 489 deletions(-) diff --git a/README.md b/README.md index 02bcd3ec4..032d58588 100644 --- a/README.md +++ b/README.md @@ -7,8 +7,9 @@ | :warning: WARNING | |---------------------------| -| `trimesh >= 4.0.0` makes the minimum Python 3.7 and is in pre-release and will be released soon | -| You can test your stack with: `pip install --pre trimesh` or if you are on older Python you should lock `trimesh<4`| +| `trimesh >= 4.0.0` which is now on `main` makes the minimum Python 3.7 and is in pre-release | +| Testing the prerelease with `pip install --pre trimesh` would be much appreciated! | +| For projects that support Python < 3.7 you should update your dependency to `trimesh<4` | Trimesh is a pure Python 3.7+ library for loading and using [triangular meshes](https://en.wikipedia.org/wiki/Triangle_mesh) with an emphasis on watertight surfaces. The goal of the library is to provide a full featured and well tested Trimesh object which allows for easy manipulation and analysis, in the style of the Polygon object in the [Shapely library](https://github.com/Toblerity/Shapely). diff --git a/trimesh/base.py b/trimesh/base.py index 41b25ce2c..c699870a2 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -7,8 +7,19 @@ import copy import warnings +from io import BufferedRandom +from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np +import scipy.spatial._ckdtree +from networkx.classes.graph import Graph +from numpy import float64, int64, ndarray +from numpy.typing import ArrayLike +from rtree.index import Index +from scipy.sparse._coo import coo_matrix + +from trimesh.caching import TrackedArray +from trimesh.path.path import Path3D from .
import ( boolean, @@ -40,29 +51,30 @@ from .exchange.export import export_mesh from .parent import Geometry3D from .scene import Scene -from .visual import TextureVisuals, create_visual +from .visual import ColorVisuals, TextureVisuals, create_visual +from numpy.typing import NDArray class Trimesh(Geometry3D): - - def __init__(self, - vertices=None, - faces=None, - face_normals=None, - vertex_normals=None, - face_colors=None, - vertex_colors=None, - face_attributes=None, - vertex_attributes=None, - metadata=None, - process=True, - validate=False, - merge_tex=None, - merge_norm=None, - use_embree=True, - initial_cache=None, - visual=None, - **kwargs): + def __init__( + self, + vertices: Optional[NDArray[float64]] = None, + faces: Optional[NDArray[int64]] = None, + face_normals: Optional[NDArray[float64]] = None, + vertex_normals: Optional[NDArray[float64]] = None, + face_colors: Optional[NDArray[float64]] = None, + vertex_colors: Optional[NDArray[float64]] = None, + face_attributes: Optional[Dict[str, NDArray]] = None, + vertex_attributes: Optional[Dict[str, NDArray]] = None, + metadata: Optional[Dict[str, Any]] = None, + process: bool = True, + validate: bool = False, + merge_tex: Optional[bool] = None, + merge_norm: Optional[bool] = None, + use_embree: bool = True, + initial_cache: Optional[Dict[str, ndarray]] = None, + visual: Optional[Union[ColorVisuals, TextureVisuals]] = None, + ) -> None: """ A Trimesh object contains a triangular 3D mesh. @@ -113,8 +125,8 @@ def __init__(self, # In order to maintain consistency # the cache is cleared when self._data.__hash__() changes self._cache = caching.Cache( - id_function=self._data.__hash__, - force_immutable=True) + id_function=self._data.__hash__, force_immutable=True + ) self._cache.update(initial_cache) # check for None only to avoid warning messages in subclasses @@ -128,9 +140,8 @@ def __init__(self, # hold visual information about the mesh (vertex and face colors) if visual is None: self.visual = create_visual( - face_colors=face_colors, - vertex_colors=vertex_colors, - mesh=self) + face_colors=face_colors, vertex_colors=vertex_colors, mesh=self + ) else: self.visual = visual @@ -169,7 +180,8 @@ def __init__(self, self.metadata.update(metadata) elif metadata is not None: raise ValueError( - 'metadata should be a dict or None, got %s' % str(metadata)) + "metadata should be a dict or None, got %s" % str(metadata) + ) # store per-face and per-vertex attributes which will # be updated when an update_faces call is made @@ -184,17 +196,14 @@ def __init__(self, # process will remove NaN and Inf values and merge vertices # if validate, will remove degenerate and duplicate faces if process or validate: - self.process(validate=validate, - merge_tex=merge_tex, - merge_norm=merge_norm) + self.process(validate=validate, merge_tex=merge_tex, merge_norm=merge_norm) - # save reference to kwargs - self._kwargs = kwargs - - def process(self, - validate=False, - merge_tex=None, - merge_norm=None): + def process( + self, + validate: bool = False, + merge_tex: Optional[bool] = None, + merge_norm: Optional[bool] = None, + ) -> "Trimesh": """ Do processing to make a mesh useful. 
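The annotations above don't change runtime behavior; construction stays the same. A minimal sketch using the annotated array arguments (a two-triangle unit square):

```python
import numpy as np
import trimesh

vertices = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]],
                    dtype=np.float64)
faces = np.array([[0, 1, 2], [0, 2, 3]], dtype=np.int64)

# process=False skips vertex merging and validation, as documented above
m = trimesh.Trimesh(vertices=vertices, faces=faces, process=False)
assert len(m.faces) == 2 and np.isclose(m.area, 1.0)
```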
@@ -238,12 +247,10 @@ def process(self, # if faces or vertices have been removed, normals are validated before # being returned so there is no danger of inconsistent dimensions self.remove_infinite_values() - self.merge_vertices(merge_tex=merge_tex, - merge_norm=merge_norm) - self._cache.clear(exclude={'face_normals', - 'vertex_normals'}) + self.merge_vertices(merge_tex=merge_tex, merge_norm=merge_norm) + self._cache.clear(exclude={"face_normals", "vertex_normals"}) - self.metadata['processed'] = True + self.metadata["processed"] = True return self @property @@ -261,8 +268,7 @@ def faces(self): faces : (n, 3) int64 References for `self.vertices` for triangles. """ - return self._data.get( - 'faces', np.empty(shape=(0, 3), dtype=np.int64)) + return self._data.get("faces", np.empty(shape=(0, 3), dtype=np.int64)) @faces.setter def faces(self, values): @@ -275,18 +281,18 @@ def faces(self, values): Indexes of self.vertices """ if values is None or len(values) == 0: - return self._data.data.pop('faces', None) + return self._data.data.pop("faces", None) if not (isinstance(values, np.ndarray) and values.dtype == np.int64): values = np.asanyarray(values, dtype=np.int64) # automatically triangulate quad faces if len(values.shape) == 2 and values.shape[1] != 3: - log.info('triangulating faces') + log.info("triangulating faces") values = geometry.triangulate_quads(values) - self._data['faces'] = values + self._data["faces"] = values @caching.cache_decorator - def faces_sparse(self): + def faces_sparse(self) -> coo_matrix: """ A sparse matrix representation of the faces. @@ -297,9 +303,7 @@ def faces_sparse(self): dtype : bool shape : (len(self.vertices), len(self.faces)) """ - sparse = geometry.index_sparse( - columns=len(self.vertices), - indices=self.faces) + sparse = geometry.index_sparse(columns=len(self.vertices), indices=self.faces) return sparse @property @@ -316,10 +320,10 @@ def face_normals(self): Normal vectors of each face """ # check shape of cached normals - cached = self._cache['face_normals'] + cached = self._cache["face_normals"] # get faces from datastore - if 'faces' in self._data: - faces = self._data.data['faces'] + if "faces" in self._data: + faces = self._data.data["faces"] else: faces = None @@ -336,22 +340,21 @@ def face_normals(self): # will be zero or an arbitrary vector if the inputs had # a cross product below machine epsilon normals, valid = triangles.normals( - triangles=self.triangles, - crosses=self.triangles_cross) + triangles=self.triangles, crosses=self.triangles_cross + ) # if all triangles are valid shape is correct if valid.all(): # put calculated face normals into cache manually - self._cache['face_normals'] = normals + self._cache["face_normals"] = normals return normals # make a padded list of normals for correct shape - padded = np.zeros((len(self.triangles), 3), - dtype=np.float64) + padded = np.zeros((len(self.triangles), 3), dtype=np.float64) padded[valid] = normals # put calculated face normals into cache manually - self._cache['face_normals'] = padded + self._cache["face_normals"] = padded return padded @@ -369,32 +372,32 @@ def face_normals(self, values): if values is None: return # make sure candidate face normals are C-contiguous float - values = np.asanyarray( - values, order='C', dtype=np.float64) + values = np.asanyarray(values, order="C", dtype=np.float64) # face normals need to correspond to faces if len(values) == 0 or values.shape != self.faces.shape: - log.debug('face_normals incorrect shape, ignoring!') + log.debug("face_normals incorrect 
shape, ignoring!") return # check if any values are larger than tol.merge # don't set the normals if they are all zero ptp = values.ptp() if not np.isfinite(ptp): - log.debug('face_normals contain NaN, ignoring!') + log.debug("face_normals contain NaN, ignoring!") return if ptp < tol.merge: - log.debug('face_normals all zero, ignoring!') + log.debug("face_normals all zero, ignoring!") return # make sure the first few normals match the first few triangles check, valid = triangles.normals( - self.vertices.view(np.ndarray)[self.faces[:20]]) + self.vertices.view(np.ndarray)[self.faces[:20]] + ) compare = np.zeros((len(valid), 3)) compare[valid] = check if not np.allclose(compare, values[:20]): log.debug("face_normals didn't match triangles, ignoring!") return # otherwise store face normals - self._cache['face_normals'] = values + self._cache["face_normals"] = values @property def vertices(self): @@ -411,8 +414,7 @@ def vertices(self): vertices : (n, 3) float Points in cartesian space referenced by self.faces """ - return self._data.get('vertices', np.empty( - shape=(0, 3), dtype=np.float64)) + return self._data.get("vertices", np.empty(shape=(0, 3), dtype=np.float64)) @vertices.setter def vertices(self, values): @@ -424,8 +426,7 @@ def vertices(self, values): values : (n, 3) float Points in space """ - self._data['vertices'] = np.asanyarray( - values, order='C', dtype=np.float64) + self._data["vertices"] = np.asanyarray(values, order="C", dtype=np.float64) @caching.cache_decorator def vertex_normals(self): @@ -444,12 +445,13 @@ def vertex_normals(self): Where n == len(self.vertices) """ # make sure we have faces_sparse - assert hasattr(self.faces_sparse, 'dot') + assert hasattr(self.faces_sparse, "dot") vertex_normals = geometry.weighted_vertex_normals( vertex_count=len(self.vertices), faces=self.faces, face_normals=self.face_normals, - face_angles=self.face_angles) + face_angles=self.face_angles, + ) return vertex_normals @vertex_normals.setter @@ -463,17 +465,15 @@ def vertex_normals(self, values): Unit normal vectors for each vertex """ if values is not None: - values = np.asanyarray(values, - order='C', - dtype=np.float64) + values = np.asanyarray(values, order="C", dtype=np.float64) if values.shape == self.vertices.shape: # check to see if they assigned all zeros if values.ptp() < tol.merge: - log.debug('vertex_normals are all zero!') - self._cache['vertex_normals'] = values + log.debug("vertex_normals are all zero!") + self._cache["vertex_normals"] = values @caching.cache_decorator - def vertex_faces(self): + def vertex_faces(self) -> ndarray: """ A representation of the face indices that correspond to each vertex. @@ -487,11 +487,12 @@ def vertex_faces(self): vertex_faces = geometry.vertex_face_indices( vertex_count=len(self.vertices), faces=self.faces, - faces_sparse=self.faces_sparse) + faces_sparse=self.faces_sparse, + ) return vertex_faces @caching.cache_decorator - def bounds(self): + def bounds(self) -> ndarray: """ The axis aligned bounds of the faces of the mesh. @@ -507,11 +508,10 @@ def bounds(self): if len(in_mesh) == 0: return None # get mesh bounds with min and max - return np.array([in_mesh.min(axis=0), - in_mesh.max(axis=0)]) + return np.array([in_mesh.min(axis=0), in_mesh.max(axis=0)]) @caching.cache_decorator - def extents(self): + def extents(self) -> ndarray: """ The length, width, and height of the axis aligned bounding box of the mesh. 
@@ -530,7 +530,7 @@ def extents(self): return extents @caching.cache_decorator - def scale(self): + def scale(self) -> float: """ A metric for the overall scale of the mesh, the length of the diagonal of the axis aligned bounding box of the mesh. @@ -544,11 +544,11 @@ def scale(self): if self.extents is None: return 1.0 # make sure we are returning python floats - scale = float((self.extents ** 2).sum() ** .5) + scale = float((self.extents**2).sum() ** 0.5) return scale @caching.cache_decorator - def centroid(self): + def centroid(self) -> ndarray: """ The point in space which is the average of the triangle centroids weighted by the area of each triangle. @@ -565,9 +565,9 @@ def centroid(self): # use the centroid of each triangle weighted by # the area of the triangle to find the overall centroid try: - centroid = np.average(self.triangles_center, - weights=self.area_faces, - axis=0) + centroid = np.average( + self.triangles_center, weights=self.area_faces, axis=0 + ) except BaseException: # if all triangles are zero-area weights will not work centroid = self.triangles_center.mean(axis=0) @@ -597,7 +597,7 @@ def center_mass(self, value): """ value = np.array(value, dtype=np.float64) if value.shape != (3,): - raise ValueError('shape must be (3,) float!') + raise ValueError("shape must be (3,) float!") self._data["center_mass"] = value self._cache.delete("mass_properties") @@ -629,7 +629,7 @@ def density(self, value): self._cache.delete("mass_properties") @property - def volume(self): + def volume(self) -> float64: """ Volume of the current mesh calculated using a surface integral. If the current mesh isn't watertight this is @@ -640,11 +640,11 @@ def volume(self): volume : float Volume of the current mesh """ - volume = self.mass_properties['volume'] + volume = self.mass_properties["volume"] return volume @property - def mass(self): + def mass(self) -> float64: """ Mass of the current mesh, based on specified density and volume. If the current mesh isn't watertight this is garbage. @@ -654,11 +654,11 @@ def mass(self): mass : float Mass of the current mesh """ - mass = self.mass_properties['mass'] + mass = self.mass_properties["mass"] return mass @property - def moment_inertia(self): + def moment_inertia(self) -> ndarray: """ Return the moment of inertia matrix of the current mesh. If mesh isn't watertight this is garbage. The returned @@ -673,10 +673,10 @@ def moment_inertia(self): Moment of inertia of the current mesh at the center of mass and aligned with the cartesian axis. 
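The mass properties above tie together as `mass = density * volume`; a numeric sketch on a box whose volume is known in closed form:

```python
import numpy as np
import trimesh

m = trimesh.creation.box(extents=(1.0, 2.0, 3.0))
m.density = 2.0
assert np.isclose(m.volume, 6.0)  # 1 * 2 * 3
assert np.isclose(m.mass, 2.0 * m.volume)
```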
""" - inertia = self.mass_properties['inertia'] + inertia = self.mass_properties["inertia"] return inertia - def moment_inertia_frame(self, transform): + def moment_inertia_frame(self, transform: ndarray) -> ndarray: """ Get the moment of inertia of this mesh with respect to an arbitrary frame, versus with respect to the center @@ -704,17 +704,18 @@ def moment_inertia_frame(self, transform): # so we want to offset our requested translation by that # center of mass offset = np.eye(4) - offset[:3, 3] = -props['center_mass'] + offset[:3, 3] = -props["center_mass"] # apply the parallel axis theorum to get the new inertia return inertia.transform_inertia( - inertia_tensor=props['inertia'], + inertia_tensor=props["inertia"], transform=np.dot(offset, transform), - mass=props['mass'], - parallel_axis=True) + mass=props["mass"], + parallel_axis=True, + ) @caching.cache_decorator - def principal_inertia_components(self): + def principal_inertia_components(self) -> ndarray: """ Return the principal components of inertia @@ -728,12 +729,12 @@ def principal_inertia_components(self): # both components and vectors from inertia matrix components, vectors = inertia.principal_axis(self.moment_inertia) # store vectors in cache for later - self._cache['principal_inertia_vectors'] = vectors + self._cache["principal_inertia_vectors"] = vectors return components @property - def principal_inertia_vectors(self): + def principal_inertia_vectors(self) -> ndarray: """ Return the principal axis of inertia as unit vectors. The order corresponds to `mesh.principal_inertia_components`. @@ -745,10 +746,10 @@ def principal_inertia_vectors(self): principal axis of inertia directions """ _ = self.principal_inertia_components - return self._cache['principal_inertia_vectors'] + return self._cache["principal_inertia_vectors"] @caching.cache_decorator - def principal_inertia_transform(self): + def principal_inertia_transform(self) -> ndarray: """ A transform which moves the current mesh so the principal inertia vectors are on the X,Y, and Z axis, and the centroid is @@ -766,14 +767,14 @@ def principal_inertia_transform(self): transform = np.eye(4) transform[:3, :3] = vectors transform = transformations.transform_around( - matrix=transform, - point=self.centroid) + matrix=transform, point=self.centroid + ) transform[:3, 3] -= self.centroid return transform @caching.cache_decorator - def symmetry(self): + def symmetry(self) -> Optional[str]: """ Check whether a mesh has rotational symmetry around an axis (radial) or point (spherical). @@ -784,12 +785,12 @@ def symmetry(self): What kind of symmetry does the mesh have. """ symmetry, axis, section = inertia.radial_symmetry(self) - self._cache['symmetry_axis'] = axis - self._cache['symmetry_section'] = section + self._cache["symmetry_axis"] = axis + self._cache["symmetry_section"] = section return symmetry @property - def symmetry_axis(self): + def symmetry_axis(self) -> ndarray: """ If a mesh has rotational symmetry, return the axis. @@ -799,10 +800,10 @@ def symmetry_axis(self): Axis around which a 2D profile was revolved to create this mesh. """ if self.symmetry is not None: - return self._cache['symmetry_axis'] + return self._cache["symmetry_axis"] @property - def symmetry_section(self): + def symmetry_section(self) -> ndarray: """ If a mesh has rotational symmetry return the two vectors which make up a section coordinate frame. 
@@ -813,10 +814,10 @@ def symmetry_section(self): Vectors to take a section along """ if self.symmetry is not None: - return self._cache['symmetry_section'] + return self._cache["symmetry_section"] @caching.cache_decorator - def triangles(self): + def triangles(self) -> ndarray: """ Actual triangles of the mesh (points, not indexes) @@ -833,7 +834,7 @@ def triangles(self): return triangles @caching.cache_decorator - def triangles_tree(self): + def triangles_tree(self) -> Index: """ An R-tree containing each face of the mesh. @@ -846,7 +847,7 @@ def triangles_tree(self): return tree @caching.cache_decorator - def triangles_center(self): + def triangles_center(self) -> ndarray: """ The center of each triangle (barycentric [1/3, 1/3, 1/3]) @@ -859,7 +860,7 @@ def triangles_center(self): return triangles_center @caching.cache_decorator - def triangles_cross(self): + def triangles_cross(self) -> ndarray: """ The cross product of two edges of each triangle. @@ -872,7 +873,7 @@ def triangles_cross(self): return crosses @caching.cache_decorator - def edges(self): + def edges(self) -> ndarray: """ Edges of the mesh (derived from faces). @@ -881,9 +882,10 @@ def edges(self): edges : (n, 2) int List of vertex indices making up edges """ - edges, index = geometry.faces_to_edges(self.faces.view(np.ndarray), - return_index=True) - self._cache['edges_face'] = index + edges, index = geometry.faces_to_edges( + self.faces.view(np.ndarray), return_index=True + ) + self._cache["edges_face"] = index return edges @caching.cache_decorator @@ -897,10 +899,10 @@ def edges_face(self): Index of self.faces """ _ = self.edges - return self._cache['edges_face'] + return self._cache["edges_face"] @caching.cache_decorator - def edges_unique(self): + def edges_unique(self) -> ndarray: """ The unique edges of the mesh. @@ -913,12 +915,12 @@ def edges_unique(self): edges_unique = self.edges_sorted[unique] # edges_unique will be added automatically by the decorator # additional terms generated need to be added to the cache manually - self._cache['edges_unique_idx'] = unique - self._cache['edges_unique_inverse'] = inverse + self._cache["edges_unique_idx"] = unique + self._cache["edges_unique_inverse"] = inverse return edges_unique @caching.cache_decorator - def edges_unique_length(self): + def edges_unique_length(self) -> TrackedArray: """ How long is each unique edge. @@ -946,10 +948,10 @@ def edges_unique_inverse(self): Indexes of self.edges_unique """ _ = self.edges_unique - return self._cache['edges_unique_inverse'] + return self._cache["edges_unique_inverse"] @caching.cache_decorator - def edges_sorted(self): + def edges_sorted(self) -> ndarray: """ Edges sorted along axis 1 @@ -962,7 +964,7 @@ def edges_sorted(self): return edges_sorted @caching.cache_decorator - def edges_sorted_tree(self): + def edges_sorted_tree(self) -> scipy.spatial._ckdtree.cKDTree: """ A KDTree for mapping edges back to edge index. @@ -973,10 +975,11 @@ def edges_sorted_tree(self): their index in mesh.edges_sorted """ from scipy.spatial import cKDTree + return cKDTree(self.edges_sorted) @caching.cache_decorator - def edges_sparse(self): + def edges_sparse(self) -> coo_matrix: """ Edges in sparse bool COO graph format where connected vertices are True. 
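A counting check for the edge properties above, on a triangulated cube: every face contributes three directed edges and, on a watertight mesh, each undirected edge is shared by exactly two faces:

```python
import trimesh

m = trimesh.creation.box()
assert len(m.edges) == 3 * len(m.faces)          # 36 directed edges
assert len(m.edges_unique) == len(m.edges) // 2  # 18 undirected edges
# Euler characteristic V - E + F = 2 for a sphere-like surface
assert len(m.vertices) - len(m.edges_unique) + len(m.faces) == 2
```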
@@ -986,12 +989,11 @@ def edges_sparse(self): sparse: (len(self.vertices), len(self.vertices)) bool Sparse graph in COO format """ - sparse = graph.edges_to_coo(self.edges, - count=len(self.vertices)) + sparse = graph.edges_to_coo(self.edges, count=len(self.vertices)) return sparse @caching.cache_decorator - def body_count(self): + def body_count(self) -> int: """ How many connected groups of vertices exist in this mesh. Note that this number may differ from result in mesh.split, @@ -1004,14 +1006,13 @@ def body_count(self): """ # labels are (len(vertices), int) OB count, labels = graph.csgraph.connected_components( - self.edges_sparse, - directed=False, - return_labels=True) - self._cache['vertices_component_label'] = labels + self.edges_sparse, directed=False, return_labels=True + ) + self._cache["vertices_component_label"] = labels return count @caching.cache_decorator - def faces_unique_edges(self): + def faces_unique_edges(self) -> ndarray: """ For each face return which indexes in mesh.unique_edges constructs that face. @@ -1041,11 +1042,11 @@ def faces_unique_edges(self): # make sure we have populated unique edges _ = self.edges_unique # we are relying on the fact that edges are stacked in triplets - result = self._cache['edges_unique_inverse'].reshape((-1, 3)) + result = self._cache["edges_unique_inverse"].reshape((-1, 3)) return result @caching.cache_decorator - def euler_number(self): + def euler_number(self) -> int: """ Return the Euler characteristic (a topological invariant) for the mesh In order to guarantee correctness, this should be called after @@ -1056,13 +1057,13 @@ def euler_number(self): euler_number : int Topological invariant """ - euler = int(self.referenced_vertices.sum() - - len(self.edges_unique) + - len(self.faces)) + euler = int( + self.referenced_vertices.sum() - len(self.edges_unique) + len(self.faces) + ) return euler @caching.cache_decorator - def referenced_vertices(self): + def referenced_vertices(self) -> ndarray: """ Which vertices in the current mesh are referenced by a face. @@ -1085,15 +1086,15 @@ def units(self): units : str Unit system mesh is in, or None if not defined """ - if 'units' in self.metadata: - return self.metadata['units'] + if "units" in self.metadata: + return self.metadata["units"] else: return None @units.setter def units(self, value): value = str(value).lower() - self.metadata['units'] = value + self.metadata["units"] = value def convert_units(self, desired, guess=False): """ @@ -1111,12 +1112,13 @@ def convert_units(self, desired, guess=False): return self def merge_vertices( - self, - merge_tex=None, - merge_norm=None, - digits_vertex=None, - digits_norm=None, - digits_uv=None): + self, + merge_tex: Optional[bool] = None, + merge_norm: Optional[bool] = None, + digits_vertex: None = None, + digits_norm: None = None, + digits_uv: None = None, + ) -> None: """ Removes duplicate vertices grouped by position and optionally texture coordinate and normal. @@ -1144,9 +1146,14 @@ def merge_vertices( merge_norm=merge_norm, digits_vertex=digits_vertex, digits_norm=digits_norm, - digits_uv=digits_uv) + digits_uv=digits_uv, + ) - def update_vertices(self, mask, inverse=None): + def update_vertices( + self, + mask: NDArray, + inverse: Optional[NDArray] = None, + ) -> None: """ Update vertices with a mask. 
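`body_count` above is connected components on the vertex graph, so concatenating two disjoint meshes should report two bodies; a sketch:

```python
import trimesh

a = trimesh.creation.box()
b = trimesh.creation.box()
b.apply_translation([5.0, 0.0, 0.0])

m = a + b  # concatenate into a single Trimesh
assert m.body_count == 2
assert len(m.split()) == 2  # mesh.split separates the bodies
```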
@@ -1165,17 +1172,20 @@ def update_vertices(self, mask, inverse=None): # make sure mask is a numpy array mask = np.asanyarray(mask) - if ((mask.dtype.name == 'bool' and mask.all()) or - len(mask) == 0 or self.is_empty): + if ( + (mask.dtype.name == "bool" and mask.all()) + or len(mask) == 0 + or self.is_empty + ): # mask doesn't remove any vertices so exit early return # create the inverse mask if not passed if inverse is None: inverse = np.zeros(len(self.vertices), dtype=np.int64) - if mask.dtype.kind == 'b': + if mask.dtype.kind == "b": inverse[mask] = np.arange(mask.sum()) - elif mask.dtype.kind == 'i': + elif mask.dtype.kind == "i": inverse[mask] = np.arange(len(mask)) else: inverse = None @@ -1187,7 +1197,7 @@ def update_vertices(self, mask, inverse=None): # update the visual object with our mask self.visual.update_vertices(mask) # get the normals from cache before dumping - cached_normals = self._cache['vertex_normals'] + cached_normals = self._cache["vertex_normals"] # apply to face_attributes count = len(self.vertices) @@ -1211,7 +1221,7 @@ def update_vertices(self, mask, inverse=None): except BaseException: pass - def update_faces(self, mask): + def update_faces(self, mask: NDArray) -> None: """ In many cases, we will want to remove specific faces. However, there is additional bookkeeping to do this cleanly. @@ -1228,18 +1238,18 @@ def update_faces(self, mask): return mask = np.asanyarray(mask) - if mask.dtype.name == 'bool' and mask.all(): + if mask.dtype.name == "bool" and mask.all(): # mask removes no faces so exit early return # try to save face normals before dumping cache - cached_normals = self._cache['face_normals'] + cached_normals = self._cache["face_normals"] - faces = self._data['faces'] + faces = self._data["faces"] # if Trimesh has been subclassed and faces have been moved # from data to cache, get faces from cache. if not util.is_shape(faces, (-1, 3)): - faces = self._cache['faces'] + faces = self._cache["faces"] # apply to face_attributes count = len(self.faces) @@ -1249,7 +1259,6 @@ def update_faces(self, mask): if len(value) != count: raise TypeError() except TypeError: - continue # apply the mask to the attribute self.face_attributes[key] = value[mask] @@ -1264,7 +1273,7 @@ def update_faces(self, mask): if util.is_shape(cached_normals, (-1, 3)): self.face_normals = cached_normals[mask] - def remove_infinite_values(self): + def remove_infinite_values(self) -> None: """ Ensure that every vertex and face consists of finite numbers. This will remove vertices or faces containing np.nan and np.inf @@ -1294,16 +1303,18 @@ def unique_faces(self): mask[grouping.unique_rows(np.sort(self.faces, axis=1))[0]] = True return mask - def remove_duplicate_faces(self): + def remove_duplicate_faces(self) -> None: """ DERECATED MARCH 2024 REPLACE WITH: `mesh.update_faces(mesh.unique_faces())` """ warnings.warn( - '`remove_duplicate_faces` is deprecated ' + - 'and will be removed in March 2024: ' + - 'replace with `mesh.update_faces(mesh.unique_faces())`', - category=DeprecationWarning, stacklevel=2) + "`remove_duplicate_faces` is deprecated " + + "and will be removed in March 2024: " + + "replace with `mesh.update_faces(mesh.unique_faces())`", + category=DeprecationWarning, + stacklevel=2, + ) self.update_faces(self.unique_faces()) def rezero(self): @@ -1335,7 +1346,7 @@ def split(self, **kwargs): return graph.split(self, **kwargs) @caching.cache_decorator - def face_adjacency(self): + def face_adjacency(self) -> NDArray[int64]: """ Find faces that share an edge i.e. 
'adjacent' faces. @@ -1370,13 +1381,12 @@ def face_adjacency(self): In [6]: groups = nx.connected_components(graph) """ - adjacency, edges = graph.face_adjacency( - mesh=self, return_edges=True) - self._cache['face_adjacency_edges'] = edges + adjacency, edges = graph.face_adjacency(mesh=self, return_edges=True) + self._cache["face_adjacency_edges"] = edges return adjacency @caching.cache_decorator - def face_neighborhood(self): + def face_neighborhood(self) -> NDArray[int64]: """ Find faces that share a vertex i.e. 'neighbors' faces. @@ -1388,7 +1398,7 @@ def face_neighborhood(self): return graph.face_neighborhood(self) @caching.cache_decorator - def face_adjacency_edges(self): + def face_adjacency_edges(self) -> NDArray[int64]: """ Returns the edges that are shared by the adjacent faces. @@ -1399,10 +1409,10 @@ def face_adjacency_edges(self): """ # this value is calculated as a byproduct of the face adjacency _ = self.face_adjacency - return self._cache['face_adjacency_edges'] + return self._cache["face_adjacency_edges"] @caching.cache_decorator - def face_adjacency_edges_tree(self): + def face_adjacency_edges_tree(self) -> scipy.spatial._ckdtree.cKDTree: """ A KDTree for mapping edges back face adjacency index. @@ -1413,10 +1423,11 @@ def face_adjacency_edges_tree(self): their index in mesh.face_adjacency """ from scipy.spatial import cKDTree + return cKDTree(self.face_adjacency_edges) @caching.cache_decorator - def face_adjacency_angles(self): + def face_adjacency_angles(self) -> NDArray[float64]: """ Return the angle between adjacent faces @@ -1433,7 +1444,7 @@ def face_adjacency_angles(self): return angles @caching.cache_decorator - def face_adjacency_projections(self): + def face_adjacency_projections(self) -> NDArray[float64]: """ The projection of the non-shared vertex of a triangle onto its adjacent face @@ -1448,7 +1459,7 @@ def face_adjacency_projections(self): return projections @caching.cache_decorator - def face_adjacency_convex(self): + def face_adjacency_convex(self) -> NDArray[bool]: """ Return faces which are adjacent and locally convex. @@ -1461,11 +1472,10 @@ def face_adjacency_convex(self): are_convex : (len(self.face_adjacency), ) bool Face pairs that are locally convex """ - are_convex = self.face_adjacency_projections < tol.merge - return are_convex + return self.face_adjacency_projections < tol.merge @caching.cache_decorator - def face_adjacency_unshared(self): + def face_adjacency_unshared(self) -> NDArray[int64]: """ Return the vertex index of the two vertices not in the shared edge between two adjacent faces @@ -1475,11 +1485,10 @@ def face_adjacency_unshared(self): vid_unshared : (len(mesh.face_adjacency), 2) int Indexes of mesh.vertices """ - vid_unshared = graph.face_adjacency_unshared(self) - return vid_unshared + return graph.face_adjacency_unshared(self) @caching.cache_decorator - def face_adjacency_radius(self): + def face_adjacency_radius(self) -> NDArray[float64]: """ The approximate radius of a cylinder that fits inside adjacent faces. 
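The docstring above sketches this already: connected components on the face adjacency graph reproduce `mesh.split`. Spelled out with networkx:

```python
import networkx as nx
import trimesh

mesh = trimesh.creation.box()
graph = nx.from_edgelist(mesh.face_adjacency.tolist())
groups = list(nx.connected_components(graph))
# a single box is one connected shell
assert len(groups) == len(mesh.split(only_watertight=False)) == 1
```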
@@ -1488,12 +1497,11 @@ def face_adjacency_radius(self): radii : (len(self.face_adjacency), ) float Approximate radius formed by triangle pair """ - radii, span = graph.face_adjacency_radius(mesh=self) - self._cache['face_adjacency_span'] = span + radii, self._cache["face_adjacency_span"] = graph.face_adjacency_radius(mesh=self) return radii @caching.cache_decorator - def face_adjacency_span(self): + def face_adjacency_span(self) -> NDArray[float64]: """ The approximate perpendicular projection of the non-shared vertices in a pair of adjacent faces onto the shared edge of @@ -1505,10 +1513,10 @@ def face_adjacency_span(self): Approximate span between the non-shared vertices """ _ = self.face_adjacency_radius - return self._cache['face_adjacency_span'] + return self._cache["face_adjacency_span"] @caching.cache_decorator - def integral_mean_curvature(self): + def integral_mean_curvature(self) -> float64: """ The integral mean curvature, or the surface integral of the mean curvature. @@ -1517,13 +1525,13 @@ def integral_mean_curvature(self): area : float Integral mean curvature of mesh """ - edges_length = np.linalg.norm(np.subtract( - *self.vertices[self.face_adjacency_edges.T]), axis=1) - imc = (self.face_adjacency_angles * edges_length).sum() * 0.5 - return imc + edges_length = np.linalg.norm( + np.subtract(*self.vertices[self.face_adjacency_edges.T]), axis=1 + ) + return (self.face_adjacency_angles * edges_length).sum() * 0.5 @caching.cache_decorator - def vertex_adjacency_graph(self): + def vertex_adjacency_graph(self) -> Graph: """ Returns a networkx graph representing the vertices and their connections in the mesh. @@ -1549,7 +1557,7 @@ def vertex_adjacency_graph(self): return adjacency_g @caching.cache_decorator - def vertex_neighbors(self): + def vertex_neighbors(self) -> List[List[int64]]: """ The vertex neighbors of each vertex of the mesh, determined from the cached vertex_adjacency_graph, if already existent. @@ -1569,12 +1577,10 @@ def vertex_neighbors(self): >>> mesh.vertex_neighbors[0] [1, 2, 3, 4] """ - return graph.neighbors( - edges=self.edges_unique, - max_index=len(self.vertices)) + return graph.neighbors(edges=self.edges_unique, max_index=len(self.vertices)) @caching.cache_decorator - def is_winding_consistent(self): + def is_winding_consistent(self) -> bool: """ Does the mesh have consistent winding or not. A mesh with consistent winding has each shared edge @@ -1589,10 +1595,10 @@ def is_winding_consistent(self): return False # consistent winding check is populated into the cache by is_watertight _ = self.is_watertight - return self._cache['is_winding_consistent'] + return self._cache["is_winding_consistent"] @caching.cache_decorator - def is_watertight(self): + def is_watertight(self) -> bool: """ Check if a mesh is watertight by making sure every edge is included in two faces. @@ -1605,12 +1611,13 @@ def is_watertight(self): if self.is_empty: return False watertight, winding = graph.is_watertight( - edges=self.edges, edges_sorted=self.edges_sorted) - self._cache['is_winding_consistent'] = winding + edges=self.edges, edges_sorted=self.edges_sorted + ) + self._cache["is_winding_consistent"] = winding return watertight @caching.cache_decorator - def is_volume(self): + def is_volume(self) -> bool: """ Check if a mesh has all the properties required to represent a valid volume, rather than just a surface. 
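A sanity sketch for `integral_mean_curvature` above: for a sphere of radius r the surface integral of mean curvature is 4*pi*r, so a reasonably fine icosphere should land close (the 5% tolerance is a guess at discretization error, not a guarantee):

```python
import numpy as np
import trimesh

r = 2.0
m = trimesh.creation.icosphere(subdivisions=4, radius=r)
assert np.isclose(m.integral_mean_curvature, 4 * np.pi * r, rtol=0.05)
```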
@@ -1623,14 +1630,16 @@ def is_volume(self): valid : bool Does the mesh represent a volume """ - valid = bool(self.is_watertight and - self.is_winding_consistent and - np.isfinite(self.center_mass).all() and - self.volume > 0.0) + valid = bool( + self.is_watertight + and self.is_winding_consistent + and np.isfinite(self.center_mass).all() + and self.volume > 0.0 + ) return valid @property - def is_empty(self): + def is_empty(self) -> bool: """ Does the current mesh have data defined. @@ -1642,7 +1651,7 @@ def is_empty(self): return self._data.is_empty() @caching.cache_decorator - def is_convex(self): + def is_convex(self) -> bool: """ Check if a mesh is convex or not. @@ -1658,7 +1667,7 @@ def is_convex(self): return is_convex @caching.cache_decorator - def kdtree(self): + def kdtree(self) -> scipy.spatial._ckdtree.cKDTree: """ Return a scipy.spatial.cKDTree of the vertices of the mesh. Not cached as this lead to observed memory issues and segfaults. @@ -1670,22 +1679,25 @@ def kdtree(self): """ from scipy.spatial import cKDTree + tree = cKDTree(self.vertices.view(np.ndarray)) return tree - def remove_degenerate_faces(self, height=tol.merge): + def remove_degenerate_faces(self, height: float = tol.merge) -> None: """ DERECATED MARCH 2024 REPLACE WITH: `self.update_faces(self.nondegenerate_faces(height=height))` """ warnings.warn( - '`remove_degenerate_faces` is deprecated ' + - 'and will be removed in March 2024 replace with ' + - '`self.update_faces(self.nondegenerate_faces(height=height))`', - category=DeprecationWarning, stacklevel=2) + "`remove_degenerate_faces` is deprecated " + + "and will be removed in March 2024 replace with " + + "`self.update_faces(self.nondegenerate_faces(height=height))`", + category=DeprecationWarning, + stacklevel=2, + ) self.update_faces(self.nondegenerate_faces(height=height)) - def nondegenerate_faces(self, height=tol.merge): + def nondegenerate_faces(self, height=tol.merge) -> NDArray[bool]: """ Remove degenerate faces (faces without 3 unique vertex indices) from the current mesh. @@ -1707,12 +1719,11 @@ def nondegenerate_faces(self, height=tol.merge): Mask used to remove faces """ return triangles.nondegenerate( - self.triangles, - areas=self.area_faces, - height=height) + self.triangles, areas=self.area_faces, height=height + ) @caching.cache_decorator - def facets(self): + def facets(self) -> List[NDArray[int64]]: """ Return a list of face indices for coplanar adjacent faces. @@ -1725,7 +1736,7 @@ def facets(self): return facets @caching.cache_decorator - def facets_area(self): + def facets_area(self) -> NDArray[float64]: """ Return an array containing the area of each facet. 
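Facets group coplanar adjacent faces, so a unit cube should report six facets of two triangles each, with per-facet areas of 1.0:

```python
import numpy as np
import trimesh

m = trimesh.creation.box(extents=(1.0, 1.0, 1.0))
assert len(m.facets) == 6
assert all(len(f) == 2 for f in m.facets)
assert np.allclose(m.facets_area, 1.0)
```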
@@ -1740,13 +1751,11 @@ def facets_area(self): # use native python sum in tight loop as opposed to array.sum() # as in this case the lower function call overhead of # native sum provides roughly a 50% speedup - areas = np.array([sum(area_faces[i]) - for i in self.facets], - dtype=np.float64) + areas = np.array([sum(area_faces[i]) for i in self.facets], dtype=np.float64) return areas @caching.cache_decorator - def facets_normal(self): + def facets_normal(self) -> NDArray[float64]: """ Return the normal of each facet @@ -1761,19 +1770,18 @@ def facets_normal(self): area_faces = self.area_faces # the face index of the largest face in each facet - index = np.array([i[area_faces[i].argmax()] - for i in self.facets]) + index = np.array([i[area_faces[i].argmax()] for i in self.facets]) # (n, 3) float, unit normal vectors of facet plane normals = self.face_normals[index] # (n, 3) float, points on facet plane origins = self.vertices[self.faces[:, 0][index]] # save origins in cache - self._cache['facets_origin'] = origins + self._cache["facets_origin"] = origins return normals @caching.cache_decorator - def facets_origin(self): + def facets_origin(self) -> NDArray[float64]: """ Return a point on the facet plane. @@ -1783,10 +1791,10 @@ def facets_origin(self): A point on each facet plane """ _ = self.facets_normal - return self._cache['facets_origin'] + return self._cache["facets_origin"] @caching.cache_decorator - def facets_boundary(self): + def facets_boundary(self) -> List[NDArray[int64]]: """ Return the edges which represent the boundary of each facet @@ -1799,12 +1807,13 @@ def facets_boundary(self): edges = self.edges_sorted.reshape((-1, 6)) # get the edges for each facet edges_facet = [edges[i].reshape((-1, 2)) for i in self.facets] - edges_boundary = [i[grouping.group_rows(i, require_count=1)] - for i in edges_facet] + edges_boundary = [ + i[grouping.group_rows(i, require_count=1)] for i in edges_facet + ] return edges_boundary @caching.cache_decorator - def facets_on_hull(self): + def facets_on_hull(self) -> ndarray: """ Find which facets of the mesh are on the convex hull. @@ -1856,7 +1865,7 @@ def fix_normals(self, multibody=None): multibody = self.body_count > 1 repair.fix_normals(self, multibody=multibody) - def fill_holes(self): + def fill_holes(self) -> bool: """ Fill single triangle and single quad holes in the current mesh. @@ -1895,17 +1904,12 @@ def register(self, other, **kwargs): cost : float Average square distance per point """ - mesh_to_other, cost = registration.mesh_other( - mesh=self, - other=other, - **kwargs) + mesh_to_other, cost = registration.mesh_other(mesh=self, other=other, **kwargs) return mesh_to_other, cost - def compute_stable_poses(self, - center_mass=None, - sigma=0.0, - n_samples=1, - threshold=0.0): + def compute_stable_poses( + self, center_mass=None, sigma=0.0, n_samples=1, threshold=0.0 + ): """ Computes stable orientations of a mesh and their quasi-static probabilities. @@ -1947,13 +1951,15 @@ def compute_stable_poses(self, probs : (n, ) float A probability ranging from 0.0 to 1.0 for each pose """ - return poses.compute_stable_poses(mesh=self, - center_mass=center_mass, - sigma=sigma, - n_samples=n_samples, - threshold=threshold) + return poses.compute_stable_poses( + mesh=self, + center_mass=center_mass, + sigma=sigma, + n_samples=n_samples, + threshold=threshold, + ) - def subdivide(self, face_index=None): + def subdivide(self, face_index: None = None) -> "Trimesh": """ Subdivide a mesh, with each subdivided face replaced with four smaller faces. 
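Since each subdivided face is replaced with four smaller ones, the face count quadruples while the surface itself is unchanged; a quick check:

```python
import numpy as np
import trimesh

m = trimesh.creation.box()
s = m.subdivide()
assert len(s.faces) == 4 * len(m.faces)
assert np.isclose(s.area, m.area)  # midpoint subdivision preserves area
```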
@@ -1972,15 +1978,17 @@ def subdivide(self, face_index=None): # subdivide vertex attributes vertex_attributes = {} visual = None - if (hasattr(self.visual, 'uv') and - np.shape(self.visual.uv) == (len(self.vertices), 2)): - + if hasattr(self.visual, "uv") and np.shape(self.visual.uv) == ( + len(self.vertices), + 2, + ): # uv coords divided along with vertices vertices, faces, attr = remesh.subdivide( vertices=np.hstack((self.vertices, self.visual.uv)), faces=self.faces, face_index=face_index, - vertex_attributes=vertex_attributes) + vertex_attributes=vertex_attributes, + ) # get a copy of the current visuals visual = self.visual.copy() @@ -1994,7 +2002,8 @@ def subdivide(self, face_index=None): vertices=self.vertices, faces=self.faces, face_index=face_index, - vertex_attributes=vertex_attributes) + vertex_attributes=vertex_attributes, + ) # create a new mesh result = Trimesh( @@ -2002,7 +2011,8 @@ def subdivide(self, face_index=None): faces=faces, visual=visual, vertex_attributes=attr, - process=False) + process=False, + ) return result def subdivide_to_size(self, max_edge, max_iter=10, return_index=False): @@ -2023,16 +2033,18 @@ def subdivide_to_size(self, max_edge, max_iter=10, return_index=False): """ # subdivide vertex attributes visual = None - if (hasattr(self.visual, 'uv') and - np.shape(self.visual.uv) == (len(self.vertices), 2)): - + if hasattr(self.visual, "uv") and np.shape(self.visual.uv) == ( + len(self.vertices), + 2, + ): # uv coords divided along with vertices vertices_faces = remesh.subdivide_to_size( vertices=np.hstack((self.vertices, self.visual.uv)), faces=self.faces, max_edge=max_edge, max_iter=max_iter, - return_index=return_index) + return_index=return_index, + ) # unpack result if return_index: vertices, faces, final_index = vertices_faces @@ -2052,7 +2064,8 @@ def subdivide_to_size(self, max_edge, max_iter=10, return_index=False): faces=self.faces, max_edge=max_edge, max_iter=max_iter, - return_index=return_index) + return_index=return_index, + ) # unpack result if return_index: vertices, faces, final_index = vertices_faces @@ -2060,11 +2073,7 @@ def subdivide_to_size(self, max_edge, max_iter=10, return_index=False): vertices, faces = vertices_faces # create a new mesh - result = Trimesh( - vertices=vertices, - faces=faces, - visual=visual, - process=False) + result = Trimesh(vertices=vertices, faces=faces, visual=visual, process=False) if return_index: return result, final_index @@ -2088,14 +2097,10 @@ def subdivide_loop(self, iterations=None): """ # perform subdivision for one mesh new_vertices, new_faces = remesh.subdivide_loop( - vertices=self.vertices, - faces=self.faces, - iterations=iterations) + vertices=self.vertices, faces=self.faces, iterations=iterations + ) # create new mesh - result = Trimesh( - vertices=new_vertices, - faces=new_faces, - process=False) + result = Trimesh(vertices=new_vertices, faces=new_faces, process=False) return result @log_time @@ -2123,13 +2128,12 @@ def smoothed(self, **kwargs): # smooth should be recomputed if visuals change self.visual._verify_hash() - cached = self.visual._cache['smoothed'] + cached = self.visual._cache["smoothed"] if cached is not None: return cached # run smoothing - smoothed = graph.smoothed( - self, **kwargs) - self.visual._cache['smoothed'] = smoothed + smoothed = graph.smoothed(self, **kwargs) + self.visual._cache["smoothed"] = smoothed return smoothed @property @@ -2142,7 +2146,7 @@ def visual(self): visual : ColorVisuals or TextureVisuals Contains visual information about the mesh """ - if 
hasattr(self, '_visual'): + if hasattr(self, "_visual"): return self._visual return None @@ -2160,10 +2164,9 @@ def visual(self, value): value.mesh = self self._visual = value - def section(self, - plane_normal, - plane_origin, - **kwargs): + def section( + self, plane_normal: List[int], plane_origin: List[int], **kwargs + ) -> Path3D: """ Returns a 3D cross section of the current mesh and a plane defined by origin and normal. @@ -2189,7 +2192,8 @@ def section(self, plane_normal=plane_normal, plane_origin=plane_origin, return_faces=True, - **kwargs) + **kwargs, + ) # if the section didn't hit the mesh return None if len(lines) == 0: @@ -2199,14 +2203,11 @@ def section(self, path = load_path(lines) # add the face index info into metadata - path.metadata['face_index'] = face_index + path.metadata["face_index"] = face_index return path - def section_multiplane(self, - plane_origin, - plane_normal, - heights): + def section_multiplane(self, plane_origin, plane_normal, heights): """ Return multiple parallel cross sections of the current mesh in 2D. @@ -2230,32 +2231,31 @@ def section_multiplane(self, """ # turn line segments into Path2D/Path3D objects from .exchange.load import load_path + # do a multiplane intersection lines, transforms, faces = intersections.mesh_multiplane( mesh=self, plane_normal=plane_normal, plane_origin=plane_origin, - heights=heights) + heights=heights, + ) # turn the line segments into Path2D objects paths = [None] * len(lines) - for i, f, segments, T in zip(range(len(lines)), - faces, - lines, - transforms): + for i, f, segments, T in zip(range(len(lines)), faces, lines, transforms): if len(segments) > 0: - paths[i] = load_path( - segments, - metadata={'to_3D': T, 'face_index': f}) + paths[i] = load_path(segments, metadata={"to_3D": T, "face_index": f}) return paths - def slice_plane(self, - plane_origin, - plane_normal, - cap=False, - face_index=None, - cached_dots=None, - **kwargs): + def slice_plane( + self, + plane_origin, + plane_normal, + cap=False, + face_index=None, + cached_dots=None, + **kwargs, + ): """ Slice the mesh with a plane, returning a new mesh that is the portion of the original mesh to the positive normal side of the plane @@ -2288,7 +2288,8 @@ def slice_plane(self, cap=cap, face_index=face_index, cached_dots=cached_dots, - **kwargs) + **kwargs, + ) return new_mesh @@ -2313,13 +2314,14 @@ def unwrap(self, image=None): """ import xatlas - vmap, faces, uv = xatlas.parametrize( - self.vertices, self.faces) + vmap, faces, uv = xatlas.parametrize(self.vertices, self.faces) - result = Trimesh(vertices=self.vertices[vmap], - faces=faces, - visual=TextureVisuals(uv=uv, image=image), - process=False) + result = Trimesh( + vertices=self.vertices[vmap], + faces=faces, + visual=TextureVisuals(uv=uv, image=image), + process=False, + ) # run additional checks for unwrapping if tol.strict: @@ -2330,22 +2332,22 @@ def unwrap(self, image=None): assert np.allclose(result.vertices, self.vertices[vmap]) # check to make sure indices are still the # same order after we've exported to OBJ - export = result.export(file_type='obj') - uv_recon = np.array([L[3:].split() for L in - str.splitlines(export) if - L.startswith('vt ')], - dtype=np.float64) + export = result.export(file_type="obj") + uv_recon = np.array( + [L[3:].split() for L in str.splitlines(export) if L.startswith("vt ")], + dtype=np.float64, + ) assert np.allclose(uv_recon, uv) - v_recon = np.array([L[2:].split() for L in - str.splitlines(export) if - L.startswith('v ')], - dtype=np.float64) + v_recon = 
np.array( + [L[2:].split() for L in str.splitlines(export) if L.startswith("v ")], + dtype=np.float64, + ) assert np.allclose(v_recon, self.vertices[vmap]) return result @caching.cache_decorator - def convex_hull(self): + def convex_hull(self) -> "Trimesh": """ Returns a Trimesh object representing the convex hull of the current mesh. @@ -2382,12 +2384,13 @@ def sample(self, count, return_index=False, face_weight=None): Index of self.faces """ samples, index = sample.sample_surface( - mesh=self, count=count, face_weight=face_weight) + mesh=self, count=count, face_weight=face_weight + ) if return_index: return samples, index return samples - def remove_unreferenced_vertices(self): + def remove_unreferenced_vertices(self) -> None: """ Remove all vertices in the current mesh which are not referenced by a face. @@ -2400,14 +2403,13 @@ def remove_unreferenced_vertices(self): self.update_vertices(mask=referenced, inverse=inverse) - def unmerge_vertices(self): + def unmerge_vertices(self) -> None: """ Removes all face references so that every face contains three unique vertex indices and no faces are adjacent. """ # new faces are incrementing so every vertex is unique - faces = np.arange(len(self.faces) * 3, - dtype=np.int64).reshape((-1, 3)) + faces = np.arange(len(self.faces) * 3, dtype=np.int64).reshape((-1, 3)) # use update_vertices to apply mask to # all properties that are per-vertex @@ -2415,9 +2417,9 @@ def unmerge_vertices(self): # set faces to incrementing indexes self.faces = faces # keep face normals as they haven't changed - self._cache.clear(exclude=['face_normals']) + self._cache.clear(exclude=["face_normals"]) - def apply_transform(self, matrix): + def apply_transform(self, matrix: NDArray[float64]) -> "Trimesh": """ Transform mesh by a homogeneous transformation matrix.
@@ -2431,12 +2433,11 @@ def apply_transform(self, matrix): Homogeneous transformation matrix """ # get c-order float64 matrix - matrix = np.asanyarray( - matrix, order='C', dtype=np.float64) + matrix = np.asanyarray(matrix, order="C", dtype=np.float64) # only support homogeneous transformations if matrix.shape != (4, 4): - raise ValueError('Transformation matrix must be (4, 4)!') + raise ValueError("Transformation matrix must be (4, 4)!") # exit early if we've been passed an identity matrix # np.allclose is surprisingly slow so do this test @@ -2444,74 +2445,78 @@ def apply_transform(self, matrix): return self # new vertex positions - new_vertices = transformations.transform_points( - self.vertices, - matrix=matrix) + new_vertices = transformations.transform_points(self.vertices, matrix=matrix) # check to see if the matrix has rotation # rather than just translation - has_rotation = not util.allclose( - matrix[:3, :3], np.eye(3), atol=1e-6) + has_rotation = not util.allclose(matrix[:3, :3], np.eye(3), atol=1e-6) # transform overridden center of mass - if 'center_mass' in self._data: - center_mass = self._data['center_mass'] + if "center_mass" in self._data: + center_mass = self._data["center_mass"] self.center_mass = transformations.transform_points( - np.array([center_mass,]), - matrix)[0] + np.array( + [ + center_mass, + ] + ), + matrix, + )[0] # preserve face normals if we have them stored - if has_rotation and 'face_normals' in self._cache: + if has_rotation and "face_normals" in self._cache: # transform face normals by rotation component - self._cache.cache['face_normals'] = util.unitize( + self._cache.cache["face_normals"] = util.unitize( transformations.transform_points( - self.face_normals, - matrix=matrix, - translate=False)) + self.face_normals, matrix=matrix, translate=False + ) + ) # preserve vertex normals if we have them stored - if has_rotation and 'vertex_normals' in self._cache: - self._cache.cache['vertex_normals'] = util.unitize( + if has_rotation and "vertex_normals" in self._cache: + self._cache.cache["vertex_normals"] = util.unitize( transformations.transform_points( - self.vertex_normals, - matrix=matrix, - translate=False)) + self.vertex_normals, matrix=matrix, translate=False + ) + ) # if transformation flips winding of triangles if has_rotation and transformations.flips_winding(matrix): - log.debug('transform flips winding') + log.debug("transform flips winding") # fliplr will make array non C contiguous # which will cause hashes to be more # expensive than necessary so wrap - self.faces = np.ascontiguousarray( - np.fliplr(self.faces)) + self.faces = np.ascontiguousarray(np.fliplr(self.faces)) # assign the new values self.vertices = new_vertices # preserve normals and topology in cache # while dumping everything else - self._cache.clear(exclude={ - 'face_normals', # transformed by us - 'vertex_normals', # also transformed by us - 'face_adjacency', # topological - 'face_adjacency_edges', - 'face_adjacency_unshared', - 'edges', - 'edges_face', - 'edges_sorted', - 'edges_unique', - 'edges_unique_idx', - 'edges_unique_inverse', - 'edges_sparse', - 'body_count', - 'faces_unique_edges', - 'euler_number'}) + self._cache.clear( + exclude={ + "face_normals", # transformed by us + "vertex_normals", # also transformed by us + "face_adjacency", # topological + "face_adjacency_edges", + "face_adjacency_unshared", + "edges", + "edges_face", + "edges_sorted", + "edges_unique", + "edges_unique_idx", + "edges_unique_inverse", + "edges_sparse", + "body_count", + 
"faces_unique_edges", + "euler_number", + } + ) # set the cache ID with the current hash value self._cache.id_set() return self - def voxelized(self, pitch, method='subdivide', **kwargs): + def voxelized(self, pitch, method="subdivide", **kwargs): """ Return a VoxelGrid object representing the current mesh discretized into voxels at the specified pitch @@ -2529,8 +2534,8 @@ def voxelized(self, pitch, method='subdivide', **kwargs): Representing the current mesh """ from .voxel import creation - return creation.voxelize( - mesh=self, pitch=pitch, method=method, **kwargs) + + return creation.voxelize(mesh=self, pitch=pitch, method=method, **kwargs) @caching.cache_decorator def as_open3d(self): @@ -2544,12 +2549,12 @@ def as_open3d(self): Current mesh as an open3d object. """ import open3d + # create from numpy arrays return open3d.geometry.TriangleMesh( - vertices=open3d.utility.Vector3dVector( - self.vertices.copy()), - triangles=open3d.utility.Vector3iVector( - self.faces.copy())) + vertices=open3d.utility.Vector3dVector(self.vertices.copy()), + triangles=open3d.utility.Vector3iVector(self.faces.copy()), + ) def simplify_quadratic_decimation(self, *args, **kwargs): """ @@ -2557,13 +2562,15 @@ def simplify_quadratic_decimation(self, *args, **kwargs): `mesh.simplify_quadric_decimation` """ warnings.warn( - '`simplify_quadratic_decimation` is deprecated ' + - 'as it was a typo and will be removed in March 2024: ' + - 'replace with `simplify_quadric_decimation`', - category=DeprecationWarning, stacklevel=2) + "`simplify_quadratic_decimation` is deprecated " + + "as it was a typo and will be removed in March 2024: " + + "replace with `simplify_quadric_decimation`", + category=DeprecationWarning, + stacklevel=2, + ) return self.simplify_quadric_decimation(*args, **kwargs) - def simplify_quadric_decimation(self, face_count): + def simplify_quadric_decimation(self, face_count: int): """ A thin wrapper around the `open3d` implementation of this: `open3d.geometry.TriangleMesh.simplify_quadric_decimation` @@ -2578,11 +2585,10 @@ def simplify_quadric_decimation(self, face_count): simple : trimesh.Trimesh Simplified version of mesh. """ - simple = self.as_open3d.simplify_quadric_decimation( - int(face_count)) + simple = self.as_open3d.simplify_quadric_decimation(int(face_count)) return Trimesh(vertices=simple.vertices, faces=simple.triangles) - def outline(self, face_ids=None, **kwargs): + def outline(self, face_ids: Optional[NDArray[int64]]=None, **kwargs): """ Given a list of face indexes find the outline of those faces and return it as a Path3D. @@ -2607,12 +2613,10 @@ def outline(self, face_ids=None, **kwargs): """ from .path import Path3D from .path.exchange.misc import faces_to_path - return Path3D(**faces_to_path( - self, face_ids, **kwargs)) - def projected(self, - normal, - **kwargs): + return Path3D(**faces_to_path(self, face_ids, **kwargs)) + + def projected(self, normal, **kwargs): """ Project a mesh onto a plane and then extract the polygon that outlines the mesh projection on that @@ -2646,14 +2650,13 @@ def projected(self, from .path import Path2D from .path.polygons import projected - projection = projected( - mesh=self, normal=normal, **kwargs) + projection = projected(mesh=self, normal=normal, **kwargs) if projection is None: return Path2D() return load_path(projection) @caching.cache_decorator - def area(self): + def area(self) -> float64: """ Summed area of all triangles in the current mesh. 
@@ -2666,7 +2669,7 @@ def area(self): return area @caching.cache_decorator - def area_faces(self): + def area_faces(self) -> ndarray: """ The area of each face in the mesh. @@ -2675,13 +2678,11 @@ def area_faces(self): area_faces : (n, ) float Area of each face """ - area_faces = triangles.area( - crosses=self.triangles_cross, - sum=False) + area_faces = triangles.area(crosses=self.triangles_cross, sum=False) return area_faces @caching.cache_decorator - def mass_properties(self): + def mass_properties(self) -> MassProperties: """ Returns the mass properties of the current mesh. @@ -2700,18 +2701,19 @@ def mass_properties(self): 'center_mass' : Center of mass location, in global coordinate system """ # if the density or center of mass was overridden they will be put into data - density = self._data.data.get('density', [None])[0] - center_mass = self._data.data.get('center_mass', None) + density = self._data.data.get("density", [None])[0] + center_mass = self._data.data.get("center_mass", None) mass = triangles.mass_properties( triangles=self.triangles, crosses=self.triangles_cross, density=density, center_mass=center_mass, - skip_inertia=False) + skip_inertia=False, + ) return mass - def invert(self): + def invert(self) -> None: """ Invert the mesh in-place by reversing the winding of every face and negating normals without dumping the cache. @@ -2720,18 +2722,16 @@ def invert(self): `self.face_normals` and `self.vertex_normals`. """ with self._cache: - if 'face_normals' in self._cache: - self.face_normals = self._cache['face_normals'] * -1.0 - if 'vertex_normals' in self._cache: - self.vertex_normals = self._cache['vertex_normals'] * -1.0 + if "face_normals" in self._cache: + self.face_normals = self._cache["face_normals"] * -1.0 + if "vertex_normals" in self._cache: + self.vertex_normals = self._cache["vertex_normals"] * -1.0 # fliplr makes array non-contiguous so cache checks slow - self.faces = np.ascontiguousarray( - np.fliplr(self.faces)) + self.faces = np.ascontiguousarray(np.fliplr(self.faces)) # save our normals - self._cache.clear(exclude=['face_normals', - 'vertex_normals']) + self._cache.clear(exclude=["face_normals", "vertex_normals"]) - def scene(self, **kwargs): + def scene(self, **kwargs) -> Scene: """ Returns a Scene object containing the current mesh. @@ -2760,7 +2760,9 @@ def show(self, **kwargs): scene = self.scene() return scene.show(**kwargs) - def submesh(self, faces_sequence, **kwargs): + def submesh( + self, faces_sequence: List[NDArray[int64]], **kwargs + ) -> Union["Trimesh", List["Trimesh"]: """ Return a subset of the mesh. @@ -2779,13 +2781,10 @@ def submesh(self, faces_sequence, **kwargs): submesh : Trimesh or (n,) Trimesh Single mesh if `append` or list of submeshes """ - return util.submesh( - mesh=self, - faces_sequence=faces_sequence, - **kwargs) + return util.submesh(mesh=self, faces_sequence=faces_sequence, **kwargs) @caching.cache_decorator - def identifier(self): + def identifier(self) -> ndarray: """ Return a float vector which is unique to the mesh and is robust to rotation and translation. @@ -2798,7 +2797,7 @@ def identifier(self): return comparison.identifier_simple(self) @caching.cache_decorator - def identifier_hash(self): + def identifier_hash(self) -> str: """ A hash of the rotation invariant identifier vector. 
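As a quick illustration of the invariance the `identifier_hash` docstring describes, a sketch using standard trimesh calls:

```python
import trimesh

a = trimesh.creation.icosphere()
b = a.copy()
b.apply_translation([5.0, 0.0, 0.0])
# the identifier hash should survive rigid transforms
assert a.identifier_hash == b.identifier_hash
```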
@@ -2810,16 +2809,17 @@ def identifier_hash(self): """ return comparison.identifier_hash(self.identifier) - @property - def identifier_md5(self): - warnings.warn( - '`geom.identifier_md5` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `geom.identifier_hash`', - category=DeprecationWarning, stacklevel=2) - return self.identifier_hash - - def export(self, file_obj=None, file_type=None, **kwargs): + def export( + self, + file_obj: Optional[Union[str, BufferedRandom]] = None, + file_type: Optional[str] = None, + **kwargs, + ) -> Union[ + Dict[str, Union[Dict[str, str], List[List[int]], List[List[float]]]], + str, + bytes, + Dict[str, Union[Dict[str, str], Dict[str, Union[str, Tuple[int, int]]]]], + ]: """ Export the current mesh to a file object. If file_obj is a filename, file will be written there. @@ -2836,13 +2836,9 @@ def export(self, file_obj=None, file_type=None, **kwargs): Which file type to export as, if `file_name` is passed this is not required. """ - return export_mesh( - mesh=self, - file_obj=file_obj, - file_type=file_type, - **kwargs) + return export_mesh(mesh=self, file_obj=file_obj, file_type=file_type, **kwargs) - def to_dict(self): + def to_dict(self) -> Dict[str, Union[str, List[List[float]], List[List[int]]]]: """ Return a dictionary representation of the current mesh with keys that can be used as the kwargs for the @@ -2854,9 +2850,11 @@ def to_dict(self): result : dict Matches schema and Trimesh constructor. """ - return {'kind': 'trimesh', - 'vertices': self.vertices.tolist(), - 'faces': self.faces.tolist()} + return { + "kind": "trimesh", + "vertices": self.vertices.tolist(), + "faces": self.faces.tolist(), + } def convex_decomposition(self, maxhulls=20, **kwargs): """ @@ -2896,12 +2894,12 @@ def convex_decomposition(self, maxhulls=20, **kwargs): meshes : list of trimesh.Trimesh List of convex meshes that approximate the original """ - result = decomposition.convex_decomposition(self, - maxhulls=maxhulls, - **kwargs) + result = decomposition.convex_decomposition(self, maxhulls=maxhulls, **kwargs) return result - def union(self, other, engine=None, **kwargs): + def union( + self, other: "Trimesh", engine: Optional[str] = None, **kwargs + ) -> "Trimesh": """ Boolean union between this mesh and n other meshes @@ -2917,13 +2915,12 @@ def union(self, other, engine=None, **kwargs): union : trimesh.Trimesh Union of self and other Trimesh objects """ - result = boolean.union( - meshes=np.append(self, other), - engine=engine, - **kwargs) + result = boolean.union(meshes=np.append(self, other), engine=engine, **kwargs) return result - def difference(self, other, engine=None, **kwargs): + def difference( + self, other: "Trimesh", engine: Optional[str] = None, **kwargs + ) -> "Trimesh": """ Boolean difference between this mesh and n other meshes @@ -2937,11 +2934,14 @@ def difference(self, other, engine=None, **kwargs): difference : trimesh.Trimesh Difference between self and other Trimesh objects """ - result = boolean.difference(meshes=np.append(self, other), - engine=engine, **kwargs) + result = boolean.difference( + meshes=np.append(self, other), engine=engine, **kwargs + ) return result - def intersection(self, other, engine=None, **kwargs): + def intersection( + self, other: "Trimesh", engine: Optional[str] = None, **kwargs + ) -> "Trimesh": """ Boolean intersection between this mesh and n other meshes @@ -2955,11 +2955,12 @@ def intersection(self, other, engine=None, **kwargs): intersection : trimesh.Trimesh Mesh of the volume contained by all 
passed meshes """ - result = boolean.intersection(meshes=np.append(self, other), - engine=engine, **kwargs) + result = boolean.intersection( + meshes=np.append(self, other), engine=engine, **kwargs + ) return result - def contains(self, points): + def contains(self, points: TrackedArray) -> ndarray: """ Given an array of points determine whether or not they are inside the mesh. This raises an error if called on a @@ -2978,7 +2979,7 @@ def contains(self, points): return self.ray.contains_points(points) @caching.cache_decorator - def face_angles(self): + def face_angles(self) -> ndarray: """ Returns the angle at each vertex of a face. @@ -2991,7 +2992,7 @@ def face_angles(self): return angles @caching.cache_decorator - def face_angles_sparse(self): + def face_angles_sparse(self) -> coo_matrix: """ A sparse matrix representation of the face angles. @@ -3005,7 +3006,7 @@ def face_angles_sparse(self): return angles @caching.cache_decorator - def vertex_defects(self): + def vertex_defects(self) -> ndarray: """ Return the vertex defects, or (2*pi) minus the sum of the angles of every face that includes that vertex. @@ -3023,7 +3024,7 @@ def vertex_defects(self): return defects @caching.cache_decorator - def vertex_degree(self): + def vertex_degree(self) -> ndarray: """ Return the number of faces each vertex is included in. @@ -3037,7 +3038,7 @@ def vertex_degree(self): return degree @caching.cache_decorator - def face_adjacency_tree(self): + def face_adjacency_tree(self) -> Index: """ An R-tree of face adjacencies. @@ -3048,13 +3049,16 @@ def face_adjacency_tree(self): rectangular cell """ # the (n,6) interleaved bounding box for every line segment - segment_bounds = np.column_stack(( - self.vertices[self.face_adjacency_edges].min(axis=1), - self.vertices[self.face_adjacency_edges].max(axis=1))) + segment_bounds = np.column_stack( + ( + self.vertices[self.face_adjacency_edges].min(axis=1), + self.vertices[self.face_adjacency_edges].max(axis=1), + ) + ) tree = util.bounds_tree(segment_bounds) return tree - def copy(self, include_cache=False): + def copy(self, include_cache: bool = False) -> "Trimesh": """ Safely return a copy of the current mesh. @@ -3096,11 +3100,11 @@ def copy(self, include_cache=False): return copied - def __deepcopy__(self, *args): + def __deepcopy__(self, *args) -> "Trimesh": # interpret deep copy as "get rid of cached data" return self.copy(include_cache=False) - def __copy__(self, *args): + def __copy__(self, *args) -> "Trimesh": # interpret shallow copy as "keep cached data" return self.copy(include_cache=True) @@ -3127,8 +3131,8 @@ def eval_cached(self, statement, *args): """ statement = str(statement) - key = 'eval_cached_' + statement - key += '_'.join(str(i) for i in args) + key = "eval_cached_" + statement + key += "_".join(str(i) for i in args) if key in self._cache: return self._cache[key] @@ -3137,7 +3141,7 @@ def eval_cached(self, statement, *args): self._cache[key] = result return result - def __add__(self, other): + def __add__(self, other: "Trimesh") -> "Trimesh": """ Concatenate the mesh with another mesh. 
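For reference, `__add__` here is a concatenation rather than a boolean union, roughly equivalent to `trimesh.util.concatenate`; a minimal sketch:

```python
import trimesh

a = trimesh.creation.box()
b = trimesh.creation.icosphere()
# stacks vertices and faces into a single Trimesh
combined = a + b
```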
diff --git a/trimesh/interfaces/gmsh.py b/trimesh/interfaces/gmsh.py index c3ecffd8e..0a2f23f10 100644 --- a/trimesh/interfaces/gmsh.py +++ b/trimesh/interfaces/gmsh.py @@ -71,7 +71,7 @@ def load_gmsh(file_name, gmsh_args=None): # loop through our numbered args which do things, stuff for arg in args: gmsh.option.setNumber(*arg) - + gmsh.open(file_name) # create a temporary file for the results diff --git a/trimesh/primitives.py b/trimesh/primitives.py index 33768741e..70828b9d9 100644 --- a/trimesh/primitives.py +++ b/trimesh/primitives.py @@ -21,9 +21,9 @@ _IDENTITY.flags.writeable = False -class _Primitive(Trimesh): +class Primitive(Trimesh): """ - Geometric _Primitives which are a subclass of Trimesh. + Geometric Primitives which are a subclass of Trimesh. Mesh is generated lazily when vertices or faces are requested. """ @@ -219,7 +219,7 @@ def _create_mesh(self): raise ValueError('Primitive doesn\'t define mesh creation!') -class _PrimitiveAttributes: +class PrimitiveAttributes: """ Hold the mutable data which defines a primitive. """ @@ -230,7 +230,7 @@ def __init__(self, parent, defaults, kwargs, mutable=True): Parameters ------------ - parent : _Primitive + parent : Primitive Parent object reference. defaults : dict The default values for this primitive type. @@ -318,7 +318,7 @@ def __dir__(self): return result -class Cylinder(_Primitive): +class Cylinder(Primitive): def __init__(self, radius=1.0, @@ -348,7 +348,7 @@ def __init__(self, 'radius': 1.0, 'transform': np.eye(4), 'sections': 32} - self.primitive = _PrimitiveAttributes( + self.primitive = PrimitiveAttributes( self, defaults=defaults, kwargs={'height': height, @@ -472,7 +472,7 @@ def _create_mesh(self): self._cache['face_normals'] = mesh.face_normals -class Capsule(_Primitive): +class Capsule(Primitive): def __init__(self, radius=1.0, @@ -502,7 +502,7 @@ def __init__(self, 'radius': 1.0, 'transform': np.eye(4), 'sections': 32} - self.primitive = _PrimitiveAttributes( + self.primitive = PrimitiveAttributes( self, defaults=defaults, kwargs={'height': height, @@ -557,7 +557,7 @@ def _create_mesh(self): self._cache['face_normals'] = mesh.face_normals -class Sphere(_Primitive): +class Sphere(Primitive): def __init__(self, radius=1.0, @@ -602,7 +602,7 @@ def __init__(self, constructor['transform'] = transform # create the attributes object - self.primitive = _PrimitiveAttributes( + self.primitive = PrimitiveAttributes( self, defaults=defaults, kwargs=constructor, mutable=mutable) @property @@ -697,7 +697,7 @@ def _create_mesh(self): self._cache['face_normals'] = unit.face_normals -class Box(_Primitive): +class Box(Primitive): def __init__(self, extents=None, transform=None, @@ -736,7 +736,7 @@ def __init__(self, transform = np.eye(4) transform[:3, 3] = bounds[0] + extents / 2.0 - self.primitive = _PrimitiveAttributes( + self.primitive = PrimitiveAttributes( self, defaults=defaults, kwargs={'extents': extents, @@ -871,7 +871,7 @@ def as_outline(self): transform=self.primitive.transform) -class Extrusion(_Primitive): +class Extrusion(Primitive): def __init__(self, polygon=None, transform=None, @@ -902,7 +902,7 @@ def __init__(self, 'transform': np.eye(4), 'height': 1.0} - self.primitive = _PrimitiveAttributes( + self.primitive = PrimitiveAttributes( self, defaults=defaults, kwargs={'transform': transform, From 1a19958cff25217aba0a9dc513a9a60a868b5464 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 5 Sep 2023 17:25:16 -0400 Subject: [PATCH 34/84] base importing with type hints --- trimesh/base.py | 25 
++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/trimesh/base.py b/trimesh/base.py index c699870a2..c12de3464 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -55,6 +55,7 @@ from numpy.typing import NDArray + class Trimesh(Geometry3D): def __init__( self, @@ -1497,7 +1498,9 @@ def face_adjacency_radius(self) -> NDArray[float64]: radii : (len(self.face_adjacency), ) float Approximate radius formed by triangle pair """ - radii, self._cache["face_adjacency_span"] = graph.face_adjacency_radius(mesh=self) + radii, self._cache["face_adjacency_span"] = graph.face_adjacency_radius( + mesh=self + ) return radii @caching.cache_decorator @@ -2570,7 +2573,7 @@ def simplify_quadratic_decimation(self, *args, **kwargs): ) return self.simplify_quadric_decimation(*args, **kwargs) - def simplify_quadric_decimation(self, face_count: int): + def simplify_quadric_decimation(self, face_count: int) -> "Trimesh": """ A thin wrapper around the `open3d` implementation of this: `open3d.geometry.TriangleMesh.simplify_quadric_decimation` @@ -2588,7 +2591,9 @@ def simplify_quadric_decimation(self, face_count: int): simple = self.as_open3d.simplify_quadric_decimation(int(face_count)) return Trimesh(vertices=simple.vertices, faces=simple.triangles) - def outline(self, face_ids: Optional[NDArray[int64]]=None, **kwargs): + def outline( + self, face_ids: Optional[NDArray[int64]] = None, **kwargs + ) -> "trimesh.path.Path3D": """ Given a list of face indexes find the outline of those faces and return it as a Path3D. @@ -2616,7 +2621,7 @@ def outline(self, face_ids: Optional[NDArray[int64]]=None, **kwargs): return Path3D(**faces_to_path(self, face_ids, **kwargs)) - def projected(self, normal, **kwargs): + def projected(self, normal, **kwargs) -> "trimesh.path.Path2D": """ Project a mesh onto a plane and then extract the polygon that outlines the mesh projection on that @@ -2669,7 +2674,7 @@ def area(self) -> float64: return area @caching.cache_decorator - def area_faces(self) -> ndarray: + def area_faces(self) -> NDArray[float64]: """ The area of each face in the mesh. @@ -2682,7 +2687,7 @@ def area_faces(self) -> ndarray: return area_faces @caching.cache_decorator - def mass_properties(self) -> MassProperties: + def mass_properties(self) -> Dict: """ Returns the mass properties of the current mesh. @@ -2760,9 +2765,7 @@ def show(self, **kwargs): scene = self.scene() return scene.show(**kwargs) - def submesh( - self, faces_sequence: List[NDArray[int64]], **kwargs - ) -> Union["Trimesh", List["Trimesh"]: + def submesh(self, faces_sequence: List[NDArray[int64]], **kwargs): """ Return a subset of the mesh. @@ -2784,7 +2787,7 @@ def submesh( return util.submesh(mesh=self, faces_sequence=faces_sequence, **kwargs) @caching.cache_decorator - def identifier(self) -> ndarray: + def identifier(self) -> NDArray[float64]: """ Return a float vector which is unique to the mesh and is robust to rotation and translation. 
@@ -2811,7 +2814,7 @@ def identifier_hash(self) -> str: def export( self, - file_obj: Optional[Union[str, BufferedRandom]] = None, + file_obj=None, file_type: Optional[str] = None, **kwargs, ) -> Union[ From 5c3d0b1376567052d8b67fc30d1ae8b57f88ea53 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 5 Sep 2023 17:34:36 -0400 Subject: [PATCH 35/84] sigh take kwargs again --- trimesh/base.py | 1 + 1 file changed, 1 insertion(+) diff --git a/trimesh/base.py b/trimesh/base.py index c12de3464..a83247d73 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -75,6 +75,7 @@ def __init__( use_embree: bool = True, initial_cache: Optional[Dict[str, ndarray]] = None, visual: Optional[Union[ColorVisuals, TextureVisuals]] = None, + **kwargs, ) -> None: """ A Trimesh object contains a triangular 3D mesh. From bd33e2e52f2c5ba273d3df6db538b7bc719463ee Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 5 Sep 2023 20:11:05 -0400 Subject: [PATCH 36/84] use string annotations --- trimesh/base.py | 70 +++++++++++++++++++++---------------------------- 1 file changed, 30 insertions(+), 40 deletions(-) diff --git a/trimesh/base.py b/trimesh/base.py index a83247d73..b241dd273 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -7,19 +7,13 @@ import copy import warnings -from io import BufferedRandom from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np -import scipy.spatial._ckdtree -from networkx.classes.graph import Graph from numpy import float64, int64, ndarray -from numpy.typing import ArrayLike -from rtree.index import Index -from scipy.sparse._coo import coo_matrix +from numpy.typing import NDArray from trimesh.caching import TrackedArray -from trimesh.path.path import Path3D from . import ( boolean, @@ -53,8 +47,6 @@ from .scene import Scene from .visual import ColorVisuals, TextureVisuals, create_visual -from numpy.typing import NDArray - class Trimesh(Geometry3D): def __init__( @@ -792,7 +784,7 @@ def symmetry(self) -> Optional[str]: return symmetry @property - def symmetry_axis(self) -> ndarray: + def symmetry_axis(self) -> NDArray[float64]: """ If a mesh has rotational symmetry, return the axis. @@ -805,7 +797,7 @@ def symmetry_axis(self) -> ndarray: return self._cache["symmetry_axis"] @property - def symmetry_section(self) -> ndarray: + def symmetry_section(self) -> NDArray[float64]: """ If a mesh has rotational symmetry return the two vectors which make up a section coordinate frame. @@ -836,7 +828,7 @@ def triangles(self) -> ndarray: return triangles @caching.cache_decorator - def triangles_tree(self) -> Index: + def triangles_tree(self) -> "rtree.Index": """ An R-tree containing each face of the mesh. @@ -849,7 +841,7 @@ def triangles_tree(self) -> Index: return tree @caching.cache_decorator - def triangles_center(self) -> ndarray: + def triangles_center(self) -> NDArray[float64]: """ The center of each triangle (barycentric [1/3, 1/3, 1/3]) @@ -862,7 +854,7 @@ def triangles_center(self) -> ndarray: return triangles_center @caching.cache_decorator - def triangles_cross(self) -> ndarray: + def triangles_cross(self) -> NDArray[float64]: """ The cross product of two edges of each triangle. @@ -875,7 +867,7 @@ def triangles_cross(self) -> ndarray: return crosses @caching.cache_decorator - def edges(self) -> ndarray: + def edges(self) -> NDArray[int64]: """ Edges of the mesh (derived from faces). 
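A small sketch of what the `edges` family returns (each triangle contributes three edges and `edges_unique` deduplicates them; values assume the default constructor processing):

```python
import trimesh

m = trimesh.Trimesh(
    vertices=[[0, 0, 0], [1, 0, 0], [0, 1, 0]],
    faces=[[0, 1, 2]],
)
# a single triangle yields three edges, all of them unique
assert len(m.edges) == 3
assert len(m.edges_unique) == 3
```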
@@ -891,7 +883,7 @@ def edges(self) -> ndarray: return edges @caching.cache_decorator - def edges_face(self): + def edges_face(self) -> NDArray[int64]: """ Which face does each edge belong to. @@ -904,7 +896,7 @@ def edges_face(self): return self._cache["edges_face"] @caching.cache_decorator - def edges_unique(self) -> ndarray: + def edges_unique(self) -> NDArray[int64]: """ The unique edges of the mesh. @@ -922,7 +914,7 @@ def edges_unique(self) -> ndarray: return edges_unique @caching.cache_decorator - def edges_unique_length(self) -> TrackedArray: + def edges_unique_length(self) -> NDArray[float64]: """ How long is each unique edge. @@ -936,7 +928,7 @@ def edges_unique_length(self) -> TrackedArray: return length @caching.cache_decorator - def edges_unique_inverse(self): + def edges_unique_inverse(self) -> NDArray[int64]: """ Return the inverse required to reproduce self.edges_sorted from self.edges_unique. @@ -953,7 +945,7 @@ def edges_unique_inverse(self): return self._cache["edges_unique_inverse"] @caching.cache_decorator - def edges_sorted(self) -> ndarray: + def edges_sorted(self) -> NDArray[int64]: """ Edges sorted along axis 1 @@ -966,7 +958,7 @@ def edges_sorted(self) -> ndarray: return edges_sorted @caching.cache_decorator - def edges_sorted_tree(self) -> scipy.spatial._ckdtree.cKDTree: + def edges_sorted_tree(self) -> "scipy.spatial.cKDTree": """ A KDTree for mapping edges back to edge index. @@ -981,7 +973,7 @@ def edges_sorted_tree(self) -> scipy.spatial._ckdtree.cKDTree: return cKDTree(self.edges_sorted) @caching.cache_decorator - def edges_sparse(self) -> coo_matrix: + def edges_sparse(self) -> "scipy.sparse.coo_matrix": """ Edges in sparse bool COO graph format where connected vertices are True. @@ -1014,7 +1006,7 @@ def body_count(self) -> int: return count @caching.cache_decorator - def faces_unique_edges(self) -> ndarray: + def faces_unique_edges(self) -> NDArray[int64]: """ For each face return which indexes in mesh.unique_edges constructs that face. @@ -1065,7 +1057,7 @@ def euler_number(self) -> int: return euler @caching.cache_decorator - def referenced_vertices(self) -> ndarray: + def referenced_vertices(self) -> NDArray[bool]: """ Which vertices in the current mesh are referenced by a face. @@ -1079,7 +1071,7 @@ def referenced_vertices(self) -> ndarray: return referenced @property - def units(self): + def units(self) -> Optional[str]: """ Definition of units for the mesh. @@ -1094,11 +1086,11 @@ def units(self): return None @units.setter - def units(self, value): + def units(self, value: str) -> None: value = str(value).lower() self.metadata["units"] = value - def convert_units(self, desired, guess=False): + def convert_units(self, desired: str, guess: bool=False) -> "Trimesh": """ Convert the units of the mesh into a specified unit. @@ -1153,7 +1145,7 @@ def merge_vertices( def update_vertices( self, - mask: NDArray, + mask: NDArray[bool], inverse: Optional[NDArray] = None, ) -> None: """ @@ -1223,7 +1215,7 @@ def update_vertices( except BaseException: pass - def update_faces(self, mask: NDArray) -> None: + def update_faces(self, mask: NDArray[bool]) -> None: """ In many cases, we will want to remove specific faces. However, there is additional bookkeeping to do this cleanly. @@ -1292,7 +1284,7 @@ def remove_infinite_values(self) -> None: vertex_mask = np.isfinite(self.vertices).all(axis=1) self.update_vertices(vertex_mask) - def unique_faces(self): + def unique_faces(self) -> NDArray[bool]: """ On the current mesh find which faces are unique. 
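Since `convert_units` now carries type hints, a usage sketch (unit strings follow `trimesh.units`; behavior as documented above):

```python
import trimesh

m = trimesh.creation.box()
m.units = "inches"
# rescale the mesh into the requested unit system;
# guess=True would instead infer units when none are set
m = m.convert_units("meters")
```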
@@ -1327,8 +1319,7 @@ def rezero(self): """ self.apply_translation(self.bounds[0] * -1.0) - @log_time - def split(self, **kwargs): + def split(self, **kwargs) -> List["Trimesh"]: """ Returns a list of Trimesh objects, based on face connectivity. Splits into individual components, sometimes referred to as 'bodies' @@ -1414,7 +1405,7 @@ def face_adjacency_edges(self) -> NDArray[int64]: return self._cache["face_adjacency_edges"] @caching.cache_decorator - def face_adjacency_edges_tree(self) -> scipy.spatial._ckdtree.cKDTree: + def face_adjacency_edges_tree(self) -> "scipy.spatial.cKDTree": """ A KDTree for mapping edges back face adjacency index. @@ -1535,7 +1526,7 @@ def integral_mean_curvature(self) -> float64: return (self.face_adjacency_angles * edges_length).sum() * 0.5 @caching.cache_decorator - def vertex_adjacency_graph(self) -> Graph: + def vertex_adjacency_graph(self) -> "networkx.Graph": """ Returns a networkx graph representing the vertices and their connections in the mesh. @@ -1671,7 +1662,7 @@ def is_convex(self) -> bool: return is_convex @caching.cache_decorator - def kdtree(self) -> scipy.spatial._ckdtree.cKDTree: + def kdtree(self) -> "scipy.spatial.cKDTree": """ Return a scipy.spatial.cKDTree of the vertices of the mesh. Not cached as this lead to observed memory issues and segfaults. @@ -1817,7 +1808,7 @@ def facets_boundary(self) -> List[NDArray[int64]]: return edges_boundary @caching.cache_decorator - def facets_on_hull(self) -> ndarray: + def facets_on_hull(self) -> NDArray[bool]: """ Find which facets of the mesh are on the convex hull. @@ -1849,8 +1840,7 @@ def facets_on_hull(self) -> ndarray: return on_hull - @log_time - def fix_normals(self, multibody=None): + def fix_normals(self, multibody: Optional[bool]=None): """ Find and fix problems with self.face_normals and self.faces winding direction. @@ -2170,7 +2160,7 @@ def visual(self, value): def section( self, plane_normal: List[int], plane_origin: List[int], **kwargs - ) -> Path3D: + ) -> "trimesh.path.Path3D": """ Returns a 3D cross section of the current mesh and a plane defined by origin and normal. @@ -3042,7 +3032,7 @@ def vertex_degree(self) -> ndarray: return degree @caching.cache_decorator - def face_adjacency_tree(self) -> Index: + def face_adjacency_tree(self) -> "rtree.Index": """ An R-tree of face adjacencies. 
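The quoted return annotations above (for example `"scipy.spatial.cKDTree"`) keep heavy optional dependencies out of import time; a generic sketch of the string-annotation pattern (illustrative names, not trimesh code):

```python
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # evaluated only by static type checkers, never at runtime
    from scipy.spatial import cKDTree


def vertex_tree(points) -> "cKDTree":
    # the runtime import happens only when the function is called
    from scipy.spatial import cKDTree

    return cKDTree(points)
```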
From 0796b7ac16c4bff689a04a704844edd92d39c571 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 5 Sep 2023 22:56:26 -0400 Subject: [PATCH 37/84] import in base --- trimesh/base.py | 102 +++++++++++++++++++++++++--------------------- trimesh/parent.py | 2 +- trimesh/py.typed | 0 3 files changed, 57 insertions(+), 47 deletions(-) create mode 100644 trimesh/py.typed diff --git a/trimesh/base.py b/trimesh/base.py index b241dd273..ee1080518 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -42,11 +42,34 @@ util, ) from .constants import log, log_time, tol +from .exceptions import ExceptionWrapper from .exchange.export import export_mesh from .parent import Geometry3D from .scene import Scene from .visual import ColorVisuals, TextureVisuals, create_visual +try: + from scipy.sparse import coo_matrix + from scipy.spatial import cKDTree +except BaseException as E: + cKDTree = ExceptionWrapper(E) + coo_matrix = ExceptionWrapper(E) +try: + from networkx import Graph +except BaseException as E: + Graph = ExceptionWrapper(E) + +try: + from rtree import Index +except BaseException as E: + Index = ExceptionWrapper(E) + +try: + from .path import Path2D, Path3D +except BaseException as E: + Path2D = ExceptionWrapper(E) + Path3D = ExceptionWrapper(E) + class Trimesh(Geometry3D): def __init__( @@ -248,7 +271,7 @@ def process( return self @property - def faces(self): + def faces(self) -> NDArray[int64]: """ The faces of the mesh. @@ -265,7 +288,7 @@ def faces(self): return self._data.get("faces", np.empty(shape=(0, 3), dtype=np.int64)) @faces.setter - def faces(self, values): + def faces(self, values: Union[List[List[int]], NDArray[int64]]): """ Set the vertex indexes that make up triangular faces. @@ -449,7 +472,7 @@ def vertex_normals(self): return vertex_normals @vertex_normals.setter - def vertex_normals(self, values): + def vertex_normals(self, values: NDArray[float64]): """ Assign values to vertex normals. @@ -467,7 +490,7 @@ def vertex_normals(self, values): self._cache["vertex_normals"] = values @caching.cache_decorator - def vertex_faces(self) -> ndarray: + def vertex_faces(self) -> NDArray[int64]: """ A representation of the face indices that correspond to each vertex. @@ -486,7 +509,7 @@ def vertex_faces(self) -> ndarray: return vertex_faces @caching.cache_decorator - def bounds(self) -> ndarray: + def bounds(self) -> NDArray[float64]: """ The axis aligned bounds of the faces of the mesh. @@ -505,7 +528,7 @@ def bounds(self) -> ndarray: return np.array([in_mesh.min(axis=0), in_mesh.max(axis=0)]) @caching.cache_decorator - def extents(self) -> ndarray: + def extents(self) -> NDArray[float64]: """ The length, width, and height of the axis aligned bounding box of the mesh. @@ -542,7 +565,7 @@ def scale(self) -> float: return scale @caching.cache_decorator - def centroid(self) -> ndarray: + def centroid(self) -> NDArray[float64]: """ The point in space which is the average of the triangle centroids weighted by the area of each triangle. @@ -652,7 +675,7 @@ def mass(self) -> float64: return mass @property - def moment_inertia(self) -> ndarray: + def moment_inertia(self) -> NDArray[float64]: """ Return the moment of inertia matrix of the current mesh. If mesh isn't watertight this is garbage. 
The returned @@ -670,7 +693,7 @@ def moment_inertia(self) -> ndarray: inertia = self.mass_properties["inertia"] return inertia - def moment_inertia_frame(self, transform: ndarray) -> ndarray: + def moment_inertia_frame(self, transform: NDArray[float64]) -> NDArray[float64]: """ Get the moment of inertia of this mesh with respect to an arbitrary frame, versus with respect to the center @@ -709,7 +732,7 @@ def moment_inertia_frame(self, transform: ndarray) -> ndarray: ) @caching.cache_decorator - def principal_inertia_components(self) -> ndarray: + def principal_inertia_components(self) -> NDArray[float64]: """ Return the principal components of inertia @@ -728,7 +751,7 @@ def principal_inertia_components(self) -> ndarray: return components @property - def principal_inertia_vectors(self) -> ndarray: + def principal_inertia_vectors(self) -> NDArray[float64]: """ Return the principal axis of inertia as unit vectors. The order corresponds to `mesh.principal_inertia_components`. @@ -743,7 +766,7 @@ def principal_inertia_vectors(self) -> ndarray: return self._cache["principal_inertia_vectors"] @caching.cache_decorator - def principal_inertia_transform(self) -> ndarray: + def principal_inertia_transform(self) -> NDArray[float64]: """ A transform which moves the current mesh so the principal inertia vectors are on the X,Y, and Z axis, and the centroid is @@ -811,7 +834,7 @@ def symmetry_section(self) -> NDArray[float64]: return self._cache["symmetry_section"] @caching.cache_decorator - def triangles(self) -> ndarray: + def triangles(self) -> NDArray[float64]: """ Actual triangles of the mesh (points, not indexes) @@ -828,7 +851,7 @@ def triangles(self) -> ndarray: return triangles @caching.cache_decorator - def triangles_tree(self) -> "rtree.Index": + def triangles_tree(self) -> Index: """ An R-tree containing each face of the mesh. @@ -958,7 +981,7 @@ def edges_sorted(self) -> NDArray[int64]: return edges_sorted @caching.cache_decorator - def edges_sorted_tree(self) -> "scipy.spatial.cKDTree": + def edges_sorted_tree(self) -> cKDTree: """ A KDTree for mapping edges back to edge index. @@ -968,12 +991,10 @@ def edges_sorted_tree(self) -> "scipy.spatial.cKDTree": Tree when queried with edges will return their index in mesh.edges_sorted """ - from scipy.spatial import cKDTree - return cKDTree(self.edges_sorted) @caching.cache_decorator - def edges_sparse(self) -> "scipy.sparse.coo_matrix": + def edges_sparse(self) -> coo_matrix: """ Edges in sparse bool COO graph format where connected vertices are True. @@ -1090,7 +1111,7 @@ def units(self, value: str) -> None: value = str(value).lower() self.metadata["units"] = value - def convert_units(self, desired: str, guess: bool=False) -> "Trimesh": + def convert_units(self, desired: str, guess: bool = False) -> "Trimesh": """ Convert the units of the mesh into a specified unit. @@ -1405,7 +1426,7 @@ def face_adjacency_edges(self) -> NDArray[int64]: return self._cache["face_adjacency_edges"] @caching.cache_decorator - def face_adjacency_edges_tree(self) -> "scipy.spatial.cKDTree": + def face_adjacency_edges_tree(self) -> cKDTree: """ A KDTree for mapping edges back face adjacency index. 
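A quick sketch of the `bounds`/`extents` relationship documented in the hunks above:

```python
import numpy as np
import trimesh

m = trimesh.creation.box(extents=[1, 2, 3])
# bounds is [[min_x, min_y, min_z], [max_x, max_y, max_z]]
assert m.bounds.shape == (2, 3)
# extents is the per-axis size of the axis-aligned bounding box
assert np.allclose(m.extents, m.bounds[1] - m.bounds[0])
```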
@@ -1415,8 +1436,6 @@ def face_adjacency_edges_tree(self) -> "scipy.spatial.cKDTree": Tree when queried with SORTED edges will return their index in mesh.face_adjacency """ - from scipy.spatial import cKDTree - return cKDTree(self.face_adjacency_edges) @caching.cache_decorator @@ -1526,7 +1545,7 @@ def integral_mean_curvature(self) -> float64: return (self.face_adjacency_angles * edges_length).sum() * 0.5 @caching.cache_decorator - def vertex_adjacency_graph(self) -> "networkx.Graph": + def vertex_adjacency_graph(self) -> Graph: """ Returns a networkx graph representing the vertices and their connections in the mesh. @@ -1662,7 +1681,7 @@ def is_convex(self) -> bool: return is_convex @caching.cache_decorator - def kdtree(self) -> "scipy.spatial.cKDTree": + def kdtree(self) -> cKDTree: """ Return a scipy.spatial.cKDTree of the vertices of the mesh. Not cached as this lead to observed memory issues and segfaults. @@ -1672,11 +1691,7 @@ def kdtree(self) -> "scipy.spatial.cKDTree": tree : scipy.spatial.cKDTree Contains mesh.vertices """ - - from scipy.spatial import cKDTree - - tree = cKDTree(self.vertices.view(np.ndarray)) - return tree + return cKDTree(self.vertices.view(np.ndarray)) def remove_degenerate_faces(self, height: float = tol.merge) -> None: """ @@ -1840,7 +1855,7 @@ def facets_on_hull(self) -> NDArray[bool]: return on_hull - def fix_normals(self, multibody: Optional[bool]=None): + def fix_normals(self, multibody: Optional[bool] = None): """ Find and fix problems with self.face_normals and self.faces winding direction. @@ -2160,7 +2175,7 @@ def visual(self, value): def section( self, plane_normal: List[int], plane_origin: List[int], **kwargs - ) -> "trimesh.path.Path3D": + ) -> Path3D: """ Returns a 3D cross section of the current mesh and a plane defined by origin and normal. @@ -2582,9 +2597,7 @@ def simplify_quadric_decimation(self, face_count: int) -> "Trimesh": simple = self.as_open3d.simplify_quadric_decimation(int(face_count)) return Trimesh(vertices=simple.vertices, faces=simple.triangles) - def outline( - self, face_ids: Optional[NDArray[int64]] = None, **kwargs - ) -> "trimesh.path.Path3D": + def outline(self, face_ids: Optional[NDArray[int64]] = None, **kwargs) -> Path3D: """ Given a list of face indexes find the outline of those faces and return it as a Path3D. 
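For context on these Path3D-returning helpers, a short usage sketch:

```python
import trimesh

m = trimesh.creation.box()
# Path3D cross-section through the centroid, normal along +Z
cross = m.section(plane_origin=m.centroid, plane_normal=[0, 0, 1])
# boundary of a subset of faces, also returned as a Path3D
boundary = m.outline(face_ids=[0, 1])
```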
@@ -2607,12 +2620,11 @@ def outline( path : Path3D Curve in 3D of the outline """ - from .path import Path3D from .path.exchange.misc import faces_to_path return Path3D(**faces_to_path(self, face_ids, **kwargs)) - def projected(self, normal, **kwargs) -> "trimesh.path.Path2D": + def projected(self, normal, **kwargs) -> Path2D: """ Project a mesh onto a plane and then extract the polygon that outlines the mesh projection on that @@ -2891,8 +2903,7 @@ def convex_decomposition(self, maxhulls=20, **kwargs): result = decomposition.convex_decomposition(self, maxhulls=maxhulls, **kwargs) return result - def union( - self, other: "Trimesh", engine: Optional[str] = None, **kwargs + def union(self, other: "Trimesh", engine: Optional[str] = None, **kwargs ) -> "Trimesh": """ Boolean union between this mesh and n other meshes @@ -2929,7 +2940,7 @@ def difference( Difference between self and other Trimesh objects """ result = boolean.difference( - meshes=np.append(self, other), engine=engine, **kwargs + meshes=[self, other], engine=engine, **kwargs ) return result @@ -2954,7 +2965,7 @@ def intersection( ) return result - def contains(self, points: TrackedArray) -> ndarray: + def contains(self, points: TrackedArray) -> NDArray[bool]: """ Given an array of points determine whether or not they are inside the mesh. This raises an error if called on a @@ -2973,7 +2984,7 @@ def contains(self, points: TrackedArray) -> ndarray: return self.ray.contains_points(points) @caching.cache_decorator - def face_angles(self) -> ndarray: + def face_angles(self) -> NDArray[float64]: """ Returns the angle at each vertex of a face. @@ -2982,8 +2993,7 @@ def face_angles(self) -> ndarray: angles : (len(self.faces), 3) float Angle at each vertex of a face """ - angles = triangles.angles(self.triangles) - return angles + return triangles.angles(self.triangles) @caching.cache_decorator def face_angles_sparse(self) -> coo_matrix: @@ -3000,7 +3010,7 @@ def face_angles_sparse(self) -> coo_matrix: return angles @caching.cache_decorator - def vertex_defects(self) -> ndarray: + def vertex_defects(self) -> NDArray[float64]: """ Return the vertex defects, or (2*pi) minus the sum of the angles of every face that includes that vertex. @@ -3018,7 +3028,7 @@ def vertex_defects(self) -> ndarray: return defects @caching.cache_decorator - def vertex_degree(self) -> ndarray: + def vertex_degree(self) -> NDArray[int64]: """ Return the number of faces each vertex is included in. @@ -3032,7 +3042,7 @@ def vertex_degree(self) -> ndarray: return degree @caching.cache_decorator - def face_adjacency_tree(self) -> "rtree.Index": + def face_adjacency_tree(self) -> Index: """ An R-tree of face adjacencies. 
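The import reshuffle in this patch leans on trimesh's soft-dependency helper, so annotations can reference `cKDTree`, `Graph`, `Index`, and the path types directly; a sketch of the pattern as used at the top of `base.py`:

```python
try:
    from scipy.spatial import cKDTree
except BaseException as E:
    from trimesh.exceptions import ExceptionWrapper

    # defers the original ImportError until cKDTree is actually used
    cKDTree = ExceptionWrapper(E)
```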
diff --git a/trimesh/parent.py b/trimesh/parent.py index e8a70b7d3..b0b0b4c40 100644 --- a/trimesh/parent.py +++ b/trimesh/parent.py @@ -36,7 +36,7 @@ def apply_transform(self, matrix): pass @abc.abstractmethod - def is_empty(self): + def is_empty(self) -> bool: pass def __hash__(self): diff --git a/trimesh/py.typed b/trimesh/py.typed new file mode 100644 index 000000000..e69de29bb From d99d1afcba1354c3c52156d8b4a5a3e22056f5bd Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 7 Sep 2023 14:54:53 -0400 Subject: [PATCH 38/84] play with generics --- trimesh/base.py | 6 ++---- trimesh/typed.py | 14 ++++++++++++++ 2 files changed, 16 insertions(+), 4 deletions(-) create mode 100644 trimesh/typed.py diff --git a/trimesh/base.py b/trimesh/base.py index ee1080518..5c692f7ba 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -11,9 +11,6 @@ import numpy as np from numpy import float64, int64, ndarray -from numpy.typing import NDArray - -from trimesh.caching import TrackedArray from . import ( boolean, @@ -41,6 +38,7 @@ units, util, ) +from .typed import NDArray, ArrayLike from .constants import log, log_time, tol from .exceptions import ExceptionWrapper from .exchange.export import export_mesh @@ -2965,7 +2963,7 @@ def intersection( ) return result - def contains(self, points: TrackedArray) -> NDArray[bool]: + def contains(self, points: ArrayLike[float64]) -> NDArray[bool]: """ Given an array of points determine whether or not they are inside the mesh. This raises an error if called on a diff --git a/trimesh/typed.py b/trimesh/typed.py new file mode 100644 index 000000000..8ae83e7ab --- /dev/null +++ b/trimesh/typed.py @@ -0,0 +1,14 @@ +from typing import Any, Union, List, TypeAlias, Sequence +from numpy import ndarray, float64, int64 + +import numpy as np + + +#NDArray: TypeAlias = ndarray +ArrayLike: TypeAlias = Union[Sequence, ndarray] + +from numpy.typing import NDArray + +def _check(values: ArrayLike[float64]) -> NDArray[int64]: + return (np.array(values, dtype=float64) * 100).astype(int64) + From 8e7eacf92774e52191246bbf6d25cb08613f6a33 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 7 Sep 2023 15:11:55 -0400 Subject: [PATCH 39/84] add base typed --- trimesh/base.py | 31 +++++++++++++++++++++++++++---- trimesh/typed.py | 11 +++++++---- 2 files changed, 34 insertions(+), 8 deletions(-) diff --git a/trimesh/base.py b/trimesh/base.py index 5c692f7ba..f62fa583d 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -268,6 +268,30 @@ def process( self.metadata["processed"] = True return self + @property + def mutable(self) -> bool: + """ + Is the current mesh allowed to be altered in-place? + + Returns + ------------- + mutable + If data is allowed to be set for the mesh. + """ + return self._data.mutable + + @mutable.setter + def mutable(self, value: bool): + """ + Set the mutability of the current mesh. + + Parameters + ---------- + value + Change whether the current mesh is allowed to be altered in-place. 
+ """ + self._data.mutable = value + @property def faces(self) -> NDArray[int64]: """ @@ -2901,7 +2925,8 @@ def convex_decomposition(self, maxhulls=20, **kwargs): result = decomposition.convex_decomposition(self, maxhulls=maxhulls, **kwargs) return result - def union(self, other: "Trimesh", engine: Optional[str] = None, **kwargs + def union( + self, other: "Trimesh", engine: Optional[str] = None, **kwargs ) -> "Trimesh": """ Boolean union between this mesh and n other meshes @@ -2937,9 +2962,7 @@ def difference( difference : trimesh.Trimesh Difference between self and other Trimesh objects """ - result = boolean.difference( - meshes=[self, other], engine=engine, **kwargs - ) + result = boolean.difference(meshes=[self, other], engine=engine, **kwargs) return result def intersection( diff --git a/trimesh/typed.py b/trimesh/typed.py index 8ae83e7ab..89822d880 100644 --- a/trimesh/typed.py +++ b/trimesh/typed.py @@ -3,12 +3,15 @@ import numpy as np - -#NDArray: TypeAlias = ndarray -ArrayLike: TypeAlias = Union[Sequence, ndarray] +# NDArray: TypeAlias = ndarray +# ArrayLike: TypeAlias = Union[Sequence, ndarray] from numpy.typing import NDArray +# todo make this a generic List|ndarray +ArrayLike = NDArray + + +# this should pass mypy def _check(values: ArrayLike[float64]) -> NDArray[int64]: return (np.array(values, dtype=float64) * 100).astype(int64) - From 2e0c841d34ae68c6c8bff50d199793ffe4d9d679 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Fri, 8 Sep 2023 14:22:37 -0400 Subject: [PATCH 40/84] ruff --- trimesh/base.py | 2 +- trimesh/exchange/gltf.py | 3 ++- trimesh/typed.py | 14 +++++++++----- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/trimesh/base.py b/trimesh/base.py index f62fa583d..32fb01078 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -38,12 +38,12 @@ units, util, ) -from .typed import NDArray, ArrayLike from .constants import log, log_time, tol from .exceptions import ExceptionWrapper from .exchange.export import export_mesh from .parent import Geometry3D from .scene import Scene +from .typed import ArrayLike, NDArray from .visual import ColorVisuals, TextureVisuals, create_visual try: diff --git a/trimesh/exchange/gltf.py b/trimesh/exchange/gltf.py index 25748ae43..1cc737aea 100644 --- a/trimesh/exchange/gltf.py +++ b/trimesh/exchange/gltf.py @@ -1764,6 +1764,8 @@ def _append_image(img, tree, buffer_items): # for everything else just use PNG save_as = 'png' + from IPython import embed + embed() # get the image data into a bytes object with util.BytesIO() as f: img.save(f, format=save_as) @@ -1779,7 +1781,6 @@ def _append_image(img, tree, buffer_items): # index is length minus one return len(tree['images']) - 1 - def _append_material(mat, tree, buffer_items, mat_hashes): """ Add passed PBRMaterial as GLTF 2.0 specification JSON diff --git a/trimesh/typed.py b/trimesh/typed.py index 89822d880..bb573470d 100644 --- a/trimesh/typed.py +++ b/trimesh/typed.py @@ -1,17 +1,21 @@ -from typing import Any, Union, List, TypeAlias, Sequence -from numpy import ndarray, float64, int64 +from typing import Any import numpy as np +from numpy import float64, int64 # NDArray: TypeAlias = ndarray # ArrayLike: TypeAlias = Union[Sequence, ndarray] -from numpy.typing import NDArray +try: + from numpy.typing import NDArray +except BaseException: + NDArray = Any # todo make this a generic List|ndarray ArrayLike = NDArray - -# this should pass mypy +# this should pass mypy eventually def _check(values: ArrayLike[float64]) -> NDArray[int64]: return (np.array(values, 
dtype=float64) * 100).astype(int64) + +__all__ = ['NDArray', 'ArrayLike'] From 891dbcdb943b169cd6c004b2f7cb5dfde9a74b28 Mon Sep 17 00:00:00 2001 From: xiaoxiae Date: Sat, 9 Sep 2023 16:01:16 +0200 Subject: [PATCH 41/84] support for glTF WebP extension --- trimesh/exchange/gltf.py | 76 +++++++++++++++++++++++++++++++--------- 1 file changed, 59 insertions(+), 17 deletions(-) diff --git a/trimesh/exchange/gltf.py b/trimesh/exchange/gltf.py index 822989db8..cc1f7c3b1 100644 --- a/trimesh/exchange/gltf.py +++ b/trimesh/exchange/gltf.py @@ -77,7 +77,8 @@ def export_gltf(scene, merge_buffers=False, unitize_normals=False, tree_postprocessor=None, - embed_buffers=False): + embed_buffers=False, + extension_webp=False): """ Export a scene object as a GLTF directory. @@ -98,6 +99,8 @@ def export_gltf(scene, Run this on the header tree before exiting. embed_buffers : bool Embed the buffer into JSON file as a base64 string in the URI + extension_webp : bool + Export textures to WebP (using glTF's EXT_texture_webp extension). Returns ---------- @@ -113,7 +116,8 @@ def export_gltf(scene, tree, buffer_items = _create_gltf_structure( scene=scene, unitize_normals=unitize_normals, - include_normals=include_normals) + include_normals=include_normals, + extension_webp=extension_webp) # allow custom postprocessing if tree_postprocessor is not None: @@ -171,7 +175,8 @@ def export_glb( include_normals=None, unitize_normals=False, tree_postprocessor=None, - buffer_postprocessor=None): + buffer_postprocessor=None, + extension_webp=False): """ Export a scene as a binary GLTF (GLB) file. @@ -186,6 +191,8 @@ def export_glb( tree_postprocessor : func Custom function to (in-place) post-process the tree before exporting. + extension_webp : bool + Export textures to WebP (using glTF's EXT_texture_webp extension). Returns ---------- @@ -201,7 +208,9 @@ def export_glb( tree, buffer_items = _create_gltf_structure( scene=scene, unitize_normals=unitize_normals, - include_normals=include_normals, buffer_postprocessor=buffer_postprocessor) + include_normals=include_normals, + buffer_postprocessor=buffer_postprocessor, + extension_webp=extension_webp) # allow custom postprocessing if tree_postprocessor is not None: @@ -604,7 +613,8 @@ def _create_gltf_structure(scene, include_normals=None, include_metadata=True, unitize_normals=None, - buffer_postprocessor=None): + buffer_postprocessor=None, + extension_webp=False): """ Generate a GLTF header. @@ -618,6 +628,8 @@ def _create_gltf_structure(scene, Include vertex normals in output file? unitize_normals : bool Unitize all exported normals so as to pass GLTF validation + extension_webp : bool + Export textures to WebP (using glTF's EXT_texture_webp extension). Returns --------------- @@ -676,7 +688,8 @@ def _create_gltf_structure(scene, buffer_items=buffer_items, include_normals=include_normals, unitize_normals=unitize_normals, - mat_hashes=mat_hashes) + mat_hashes=mat_hashes, + extension_webp=extension_webp) elif util.is_instance_named(geometry, "Path"): # add Path2D and Path3D objects _append_path( @@ -739,7 +752,8 @@ def _append_mesh(mesh, buffer_items, include_normals, unitize_normals, - mat_hashes): + mat_hashes, + extension_webp): """ Append a mesh to the scene structure and put the data into buffer_items. @@ -762,6 +776,8 @@ def _append_mesh(mesh, mat_hashes : dict Which materials have already been added + extension_webp : bool + Export textures to WebP (using glTF's EXT_texture_webp extension). 
""" # return early from empty meshes to avoid crashing later if len(mesh.faces) == 0 or len(mesh.vertices) == 0: @@ -844,7 +860,8 @@ def _append_mesh(mesh, mat=mesh.visual.material, tree=tree, buffer_items=buffer_items, - mat_hashes=mat_hashes) + mat_hashes=mat_hashes, + extension_webp=extension_webp) # if mesh has UV coordinates defined export them has_uv = (hasattr(mesh.visual, 'uv') and @@ -1233,8 +1250,21 @@ def parse_values_and_textures(input_dict): result[k] = v elif "index" in v: # get the index of image for texture + try: - idx = header["textures"][v["index"]]["source"] + texture = header["textures"][v["index"]] + + # extensions + if "extensions" in texture: + if "EXT_texture_webp" in texture["extensions"]: + idx = texture["extensions"]["EXT_texture_webp"]["source"] + else: + raise ValueError("unsupported texture extension" + "in {texture['extensions']}!") + else: + # fallback (or primary, if extensions are not present) + idx = texture["source"] + # store the actual image as the value result[k] = images[idx] except BaseException: @@ -1743,7 +1773,7 @@ def _convert_camera(camera): return result -def _append_image(img, tree, buffer_items): +def _append_image(img, tree, buffer_items, extension_webp): """ Append a PIL image to a GLTF2.0 tree. @@ -1755,6 +1785,8 @@ def _append_image(img, tree, buffer_items): GLTF 2.0 format tree buffer_items : (n,) bytes Binary blobs containing data + extension_webp : bool + Export textures to WebP (using glTF's EXT_texture_webp extension). Returns ----------- @@ -1766,9 +1798,11 @@ def _append_image(img, tree, buffer_items): if not hasattr(img, 'format'): return None - # don't re-encode JPEGs - if img.format == 'JPEG': - # no need to mangle JPEGs + if extension_webp: + # support WebP if extension is specified + save_as = 'WEBP' + elif img.format == 'JPEG': + # don't re-encode JPEGs save_as = 'JPEG' else: # for everything else just use PNG @@ -1790,7 +1824,7 @@ def _append_image(img, tree, buffer_items): return len(tree['images']) - 1 -def _append_material(mat, tree, buffer_items, mat_hashes): +def _append_material(mat, tree, buffer_items, mat_hashes, extension_webp): """ Add passed PBRMaterial as GLTF 2.0 specification JSON serializable data: @@ -1809,6 +1843,8 @@ def _append_material(mat, tree, buffer_items, mat_hashes): mat_hashes : dict Which materials have already been added Stored as { hashed : material index } + extension_webp : bool + Export textures to WebP (using glTF's EXT_texture_webp extension). 
    Returns
    -------------
@@ -1879,14 +1915,20 @@ def _append_material(mat, tree, buffer_items, mat_hashes):
             index = _append_image(
                 img=img,
                 tree=tree,
-                buffer_items=buffer_items)
+                buffer_items=buffer_items,
+                extension_webp=extension_webp)
             # if the image was added successfully it will return index
             # if it failed for any reason, it will return None
             if index is not None:
                 # add a reference to the base color texture
                 result[key] = {'index': len(tree['textures'])}
-                # add an object for the texture
-                tree['textures'].append({'source': index})
+
+                # add an object for the texture (possibly according to the WebP extension)
+                if extension_webp:
+                    tree['textures'].append({'extensions': {'EXT_texture_webp':
+                                                            {'source': index}}})
+                else:
+                    tree['textures'].append({'source': index})
 
     # for our PBRMaterial object we flatten all keys
     # however GLTF would like some of them under the

From 548a9e091811f20a3060c1d7ddbd175ce84696da Mon Sep 17 00:00:00 2001
From: Michael Dawson-Haggerty
Date: Sun, 10 Sep 2023 17:39:02 -0400
Subject: [PATCH 42/84] change polygon_hash to identifier

---
 README.md | 6 +-
 trimesh/path/path.py | 2 +-
 trimesh/path/polygons.py | 218 ++++++++++++++++++---------------
 trimesh/typed.py | 19 +++-
 4 files changed, 117 insertions(+), 128 deletions(-)

diff --git a/README.md b/README.md
index 032d58588..6aaa40602 100644
--- a/README.md
+++ b/README.md
@@ -7,9 +7,9 @@

 | :warning: WARNING |
 |---------------------------|
-| `trimesh >= 4.0.0` which is now on `main` makes the minimum Python 3.7 and is in pre-release |
-| Testing the prerelease with `pip install --pre trimesh` would be much appriciated! |
-| For projects that support Python < 3.7 you should update your dependency to `trimesh<4` |
+| `trimesh >= 4.0.0` on `main` makes the minimum Python 3.7 and is in pre-release |
+| Testing with `pip install --pre trimesh` would be much appreciated! |
+| Projects that support Python < 3.7 should update requirement to `trimesh<4` |

 Trimesh is a pure Python 3.7+ library for loading and using [triangular meshes](https://en.wikipedia.org/wiki/Triangle_mesh) with an emphasis on watertight surfaces. The goal of the library is to provide a full featured and well tested Trimesh object which allows for easy manipulation and analysis, in the style of the Polygon object in the [Shapely library](https://github.com/Toblerity/Shapely).
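A minimal usage sketch of the `extension_webp` flag introduced in the previous patch, mirroring the `test_webp` case added later in this series (the `models/fuze.obj` path is only an assumed example asset; any mesh or scene whose visuals carry PIL-backed texture images works):

    import trimesh

    # load a mesh that has a texture image attached
    mesh = trimesh.load('models/fuze.obj')

    # encode textures using the `EXT_texture_webp` glTF extension;
    # a later patch in this series also lists the extension as
    # required, since no fallback PNG/JPEG image is written, so
    # consumers without WebP support will reject the file
    glb = mesh.export(file_type='glb', extension_webp=True)

    with open('fuze_webp.glb', 'wb') as f:
        f.write(glb)
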
diff --git a/trimesh/path/path.py b/trimesh/path/path.py index 4ba418698..20c15b488 100644 --- a/trimesh/path/path.py +++ b/trimesh/path/path.py @@ -1438,7 +1438,7 @@ def identifier(self): identifier : (5,) float Unique identifier """ - hasher = polygons.polygon_hash + hasher = polygons.identifier target = self.polygons_full if len(target) == 1: return hasher(self.polygons_full[0]) diff --git a/trimesh/path/polygons.py b/trimesh/path/polygons.py index 96315502a..03174e46d 100644 --- a/trimesh/path/polygons.py +++ b/trimesh/path/polygons.py @@ -9,18 +9,22 @@ from .simplify import fit_circle_check from .traversal import resample_path +from ..typed import NDArray, float64 + try: import networkx as nx except BaseException as E: # create a dummy module which will raise the ImportError # or other exception only when someone tries to use networkx from ..exceptions import ExceptionWrapper + nx = ExceptionWrapper(E) try: from rtree import Rtree except BaseException as E: # create a dummy module which will raise the ImportError from ..exceptions import ExceptionWrapper + Rtree = ExceptionWrapper(E) @@ -65,7 +69,7 @@ def enclosure_tree(polygons): # we first query for bounding box intersections from the R-tree for j in tree.intersection(polygon.bounds): # if we are checking a polygon against itself continue - if (i == j): + if i == j: continue # do a more accurate polygon in polygon test # for the enclosure tree information @@ -93,8 +97,7 @@ def enclosure_tree(polygons): # find edges of subgraph for each root and children for root in roots: children = indexes[degrees == degree[root] + 1] - edges.extend(contains.subgraph( - np.append(children, root)).edges()) + edges.extend(contains.subgraph(np.append(children, root)).edges()) # stack edges into new directed graph contains = nx.from_edgelist(edges, nx.DiGraph()) # if roots have no children add them anyway @@ -126,12 +129,12 @@ def edges_to_polygons(edges, vertices): # create closed polygon objects polygons = [] # loop through a sequence of ordered traversals - for dfs in graph.traversals(edges, mode='dfs'): + for dfs in graph.traversals(edges, mode="dfs"): try: # try to recover polygons before they are more complicated repaired = repair_invalid(Polygon(vertices[dfs])) # if it returned a multipolygon extend into a flat list - if hasattr(repaired, 'geoms'): + if hasattr(repaired, "geoms"): polygons.extend(repaired.geoms) else: polygons.append(repaired) @@ -151,8 +154,7 @@ def edges_to_polygons(edges, vertices): interior = list(tree[root].keys()) shell = polygons[root].exterior.coords holes = [polygons[i].exterior.coords for i in interior] - complete.append(Polygon(shell=shell, - holes=holes)) + complete.append(Polygon(shell=shell, holes=holes)) return complete @@ -187,12 +189,12 @@ def polygon_obb(polygon): extents : (2,) float Extents of transformed polygon """ - if hasattr(polygon, 'exterior'): + if hasattr(polygon, "exterior"): points = np.asanyarray(polygon.exterior.coords) elif isinstance(polygon, np.ndarray): points = polygon else: - raise ValueError('polygon or points must be provided') + raise ValueError("polygon or points must be provided") transform, extents = bounds.oriented_bounds_2D(points) @@ -222,17 +224,15 @@ def transform_polygon(polygon, matrix): """ matrix = np.asanyarray(matrix, dtype=np.float64) - if hasattr(polygon, 'geoms'): - result = [transform_polygon(p, t) - for p, t in zip(polygon, matrix)] + if hasattr(polygon, "geoms"): + result = [transform_polygon(p, t) for p, t in zip(polygon, matrix)] return result # transform the outer 
shell - shell = transform_points(np.array(polygon.exterior.coords), - matrix)[:, :2] + shell = transform_points(np.array(polygon.exterior.coords), matrix)[:, :2] # transform the interiors - holes = [transform_points(np.array(i.coords), - matrix)[:, :2] - for i in polygon.interiors] + holes = [ + transform_points(np.array(i.coords), matrix)[:, :2] for i in polygon.interiors + ] # create a new polygon with the result result = Polygon(shell=shell, holes=holes) return result @@ -258,13 +258,12 @@ def polygon_bounds(polygon, matrix=None): if matrix is not None: assert matrix.shape == (3, 3) points = transform_points( - points=np.array(polygon.exterior.coords), - matrix=matrix) + points=np.array(polygon.exterior.coords), matrix=matrix + ) else: points = np.array(polygon.exterior.coords) - bounds = np.array([points.min(axis=0), - points.max(axis=0)]) + bounds = np.array([points.min(axis=0), points.max(axis=0)]) assert bounds.shape == (2, 2) return bounds @@ -288,14 +287,15 @@ def plot_single(single): axes.plot(*single.exterior.xy, **kwargs) for interior in single.interiors: axes.plot(*interior.xy, **kwargs) + # make aspect ratio non-stupid if axes is None: axes = plt.axes() - axes.set_aspect('equal', 'datalim') + axes.set_aspect("equal", "datalim") - if polygon.__class__.__name__ == 'MultiPolygon': + if polygon.__class__.__name__ == "MultiPolygon": [plot_single(i) for i in polygon.geoms] - elif hasattr(polygon, '__iter__'): + elif hasattr(polygon, "__iter__"): [plot_single(i) for i in polygon] elif polygon is not None: plot_single(polygon) @@ -308,7 +308,7 @@ def plot_single(single): def resample_boundaries(polygon, resolution, clip=None): """ - Return a version of a polygon with boundaries resampled + Return a version of a polygon with boundaries re-sampled to a specified resolution. 
Parameters @@ -326,19 +326,20 @@ def resample_boundaries(polygon, resolution, clip=None): kwargs : dict Keyword args for a Polygon constructor `Polygon(**kwargs)` """ + def resample_boundary(boundary): # add a polygon.exterior or polygon.interior to # the deque after resampling based on our resolution count = boundary.length / resolution count = int(np.clip(count, *clip)) return resample_path(boundary.coords, count=count) + if clip is None: clip = [8, 200] # create a sequence of [(n,2)] points - kwargs = {'shell': resample_boundary(polygon.exterior), - 'holes': []} + kwargs = {"shell": resample_boundary(polygon.exterior), "holes": []} for interior in polygon.interiors: - kwargs['holes'].append(resample_boundary(interior)) + kwargs["holes"].append(resample_boundary(interior)) return kwargs @@ -358,16 +359,13 @@ def stack_boundaries(boundaries): stacked : (n, 2) float Stacked vertices """ - if len(boundaries['holes']) == 0: - return boundaries['shell'] - result = np.vstack((boundaries['shell'], - np.vstack(boundaries['holes']))) + if len(boundaries["holes"]) == 0: + return boundaries["shell"] + result = np.vstack((boundaries["shell"], np.vstack(boundaries["holes"]))) return result -def medial_axis(polygon, - resolution=None, - clip=None): +def medial_axis(polygon, resolution=None, clip=None): """ Given a shapely polygon, find the approximate medial axis using a voronoi diagram of evenly spaced points on the @@ -395,17 +393,15 @@ def medial_axis(polygon, # what is the approximate scale of the polygon scale = np.reshape(polygon.bounds, (2, 2)).ptp(axis=0).max() # a (center, radius, error) tuple - fit = fit_circle_check( - polygon.exterior.coords, scale=scale) + fit = fit_circle_check(polygon.exterior.coords, scale=scale) # is this polygon in fact a circle if fit is not None: # return an edge that has the center as the midpoint - epsilon = np.clip( - fit['radius'] / 500, 1e-5, np.inf) + epsilon = np.clip(fit["radius"] / 500, 1e-5, np.inf) vertices = np.array( - [fit['center'] + [0, epsilon], - fit['center'] - [0, epsilon]], - dtype=np.float64) + [fit["center"] + [0, epsilon], fit["center"] - [0, epsilon]], + dtype=np.float64, + ) # return a single edge to avoid consumers needing to special case edges = np.array([[0, 1]], dtype=np.int64) return edges, vertices @@ -414,13 +410,10 @@ def medial_axis(polygon, from shapely import vectorized if resolution is None: - resolution = np.reshape( - polygon.bounds, (2, 2)).ptp(axis=0).max() / 100 + resolution = np.reshape(polygon.bounds, (2, 2)).ptp(axis=0).max() / 100 # get evenly spaced points on the polygons boundaries - samples = resample_boundaries(polygon=polygon, - resolution=resolution, - clip=clip) + samples = resample_boundaries(polygon=polygon, resolution=resolution, clip=clip) # stack the boundary into a (m,2) float array samples = stack_boundaries(samples) # create the voronoi diagram on 2D points @@ -446,15 +439,14 @@ def medial_axis(polygon, if tol.strict: # make sure we didn't screw up indexes - assert (vertices[edges_final] - - voronoi.vertices[edges]).ptp() < 1e-5 + assert (vertices[edges_final] - voronoi.vertices[edges]).ptp() < 1e-5 return edges_final, vertices -def polygon_hash(polygon): +def identifier(polygon: Polygon) -> NDArray[float64]: """ - Return a vector containing values representitive of + Return a vector containing values representative of a particular polygon. 
Parameters @@ -464,18 +456,19 @@ def polygon_hash(polygon): Returns --------- - hashed: (6), float - Representitive values representing input polygon - """ - result = np.array( - [len(polygon.interiors), - polygon.convex_hull.area, - polygon.convex_hull.length, - polygon.area, - polygon.length, - polygon.exterior.length], - dtype=np.float64) - return result + hashed : (10), + Some values that should be unique for this polygon. + """ + result = [ + len(polygon.interiors), + polygon.convex_hull.area, + polygon.convex_hull.length, + polygon.area, + polygon.length, + polygon.exterior.length, + ] + result.extend(polygon.bounds) + return np.array(result, dtype=np.float64) def random_polygon(segments=8, radius=1.0): @@ -494,14 +487,12 @@ def random_polygon(segments=8, radius=1.0): polygon : shapely.geometry.Polygon Geometry object with random exterior and no interiors. """ - angles = np.sort(np.cumsum(np.random.random( - segments) * np.pi * 2) % (np.pi * 2)) + angles = np.sort(np.cumsum(np.random.random(segments) * np.pi * 2) % (np.pi * 2)) radii = np.random.random(segments) * radius - points = np.column_stack( - (np.cos(angles), np.sin(angles))) * radii.reshape((-1, 1)) + points = np.column_stack((np.cos(angles), np.sin(angles))) * radii.reshape((-1, 1)) points = np.vstack((points, points[0])) polygon = Polygon(points).buffer(0.0) - if hasattr(polygon, 'geoms'): + if hasattr(polygon, "geoms"): return polygon.geoms[0] return polygon @@ -521,7 +512,7 @@ def polygon_scale(polygon): Length of AABB diagonal """ extents = np.reshape(polygon.bounds, (2, 2)).ptp(axis=0) - scale = (extents ** 2).sum() ** .5 + scale = (extents**2).sum() ** 0.5 return scale @@ -535,7 +526,7 @@ def paths_to_polygons(paths, scale=None): ----------- paths : (n,) sequence Of (m, 2) float closed paths - scale: float + scale : float Approximate scale of drawing for precision Returns @@ -557,7 +548,7 @@ def paths_to_polygons(paths, scale=None): # raised if a polygon is unrecoverable continue except BaseException: - log.error('unrecoverable polygon', exc_info=True) + log.error("unrecoverable polygon", exc_info=True) polygons = np.array(polygons) return polygons @@ -625,7 +616,7 @@ def sample(polygon, count, factor=1.5, max_iter=10): return hit -def repair_invalid(polygon, scale=None, rtol=.5): +def repair_invalid(polygon, scale=None, rtol=0.5): """ Given a shapely.geometry.Polygon, attempt to return a valid version of the polygon through buffering tricks. @@ -649,25 +640,22 @@ def repair_invalid(polygon, scale=None, rtol=.5): ValueError If polygon can't be repaired """ - if hasattr(polygon, 'is_valid') and polygon.is_valid: + if hasattr(polygon, "is_valid") and polygon.is_valid: return polygon # basic repair involves buffering the polygon outwards # this will fix a subset of problems. 
basic = polygon.buffer(tol.zero) # if it returned multiple polygons check the largest - if hasattr(basic, 'geoms'): + if hasattr(basic, "geoms"): basic = basic.geoms[np.argmax([i.area for i in basic.geoms])] # check perimeter of result against original perimeter - if basic.is_valid and np.isclose(basic.length, - polygon.length, - rtol=rtol): + if basic.is_valid and np.isclose(basic.length, polygon.length, rtol=rtol): return basic if scale is None: - distance = 0.002 * np.reshape( - polygon.bounds, (2, 2)).ptp(axis=0).mean() + distance = 0.002 * np.reshape(polygon.bounds, (2, 2)).ptp(axis=0).mean() else: distance = 0.002 * scale @@ -681,9 +669,7 @@ def repair_invalid(polygon, scale=None, rtol=.5): # reconstruct a single polygon from the interior ring recon = Polygon(shell=rings[0]).buffer(distance) # check perimeter of result against original perimeter - if recon.is_valid and np.isclose(recon.length, - polygon.length, - rtol=rtol): + if recon.is_valid and np.isclose(recon.length, polygon.length, rtol=rtol): return recon # try de-deuplicating the outside ring @@ -691,41 +677,40 @@ def repair_invalid(polygon, scale=None, rtol=.5): # remove any segments shorter than tol.merge # this is a little risky as if it was discretized more # finely than 1-e8 it may remove detail - unique = np.append(True, (np.diff(points, axis=0)**2).sum( - axis=1)**.5 > 1e-8) + unique = np.append( + True, (np.diff(points, axis=0) ** 2).sum(axis=1) ** 0.5 > 1e-8 + ) # make a new polygon with result dedupe = Polygon(shell=points[unique]) # check result - if dedupe.is_valid and np.isclose(dedupe.length, - polygon.length, - rtol=rtol): + if dedupe.is_valid and np.isclose(dedupe.length, polygon.length, rtol=rtol): return dedupe # buffer and unbuffer the whole polygon buffered = polygon.buffer(distance).buffer(-distance) # if it returned multiple polygons check the largest - if hasattr(buffered, 'geoms'): + if hasattr(buffered, "geoms"): areas = np.array([b.area for b in buffered.geoms]) return buffered.geoms[areas.argmax()] # check perimeter of result against original perimeter - if buffered.is_valid and np.isclose(buffered.length, - polygon.length, - rtol=rtol): - log.debug('Recovered invalid polygon through double buffering') + if buffered.is_valid and np.isclose(buffered.length, polygon.length, rtol=rtol): + log.debug("Recovered invalid polygon through double buffering") return buffered - raise ValueError('unable to recover polygon!') + raise ValueError("unable to recover polygon!") -def projected(mesh, - normal, - origin=None, - ignore_sign=True, - rpad=1e-5, - apad=None, - tol_dot=0.01, - max_regions=200): +def projected( + mesh, + normal, + origin=None, + ignore_sign=True, + rpad=1e-5, + apad=None, + tol_dot=0.01, + max_regions=200, +): """ Project a mesh onto a plane and then extract the polygon that outlines the mesh projection on that plane. 
@@ -814,22 +799,19 @@ def projected(mesh, adjacency = mesh.face_adjacency[adjacency_check] # a sequence of face indexes that are connected - face_groups = graph.connected_components( - adjacency, nodes=np.nonzero(side)[0]) + face_groups = graph.connected_components(adjacency, nodes=np.nonzero(side)[0]) # if something is goofy we may end up with thousands of # regions that do nothing except hang for an hour then segfault if len(face_groups) > max_regions: - raise ValueError('too many disconnected groups!') + raise ValueError("too many disconnected groups!") # reshape edges into shape length of faces for indexing edges = mesh.edges_sorted.reshape((-1, 6)) # transform from the mesh frame in 3D to the XY plane - to_2D = geometry.plane_transform( - origin=origin, normal=normal) + to_2D = geometry.plane_transform(origin=origin, normal=normal) # transform mesh vertices to 2D and clip the zero Z - vertices_2D = transform_points( - mesh.vertices, to_2D)[:, :2] + vertices_2D = transform_points(mesh.vertices, to_2D)[:, :2] polygons = [] for faces in face_groups: @@ -838,8 +820,7 @@ def projected(mesh, # edges that occur only once are on the boundary group = grouping.group_rows(edge, require_count=1) # turn each region into polygons - polygons.extend(edges_to_polygons( - edges=edge[group], vertices=vertices_2D)) + polygons.extend(edges_to_polygons(edges=edge[group], vertices=vertices_2D)) padding = 0.0 if apad is not None: @@ -855,7 +836,7 @@ def projected(mesh, # regions and the union will take forever to fail # so exit here early if len(polygons) > max_regions: - raise ValueError('too many disconnected groups!') + raise ValueError("too many disconnected groups!") # if there is only one region we don't need to run a union elif len(polygons) == 1: @@ -873,9 +854,9 @@ def projected(mesh, # join_style=2, # mitre_limit=1.5) # for p in polygons]).buffer(-padding) - polygon = ops.unary_union( - [p.buffer(padding) - for p in polygons]).buffer(-padding) + polygon = ops.unary_union([p.buffer(padding) for p in polygons]).buffer( + -padding + ) return polygon @@ -911,7 +892,7 @@ def second_moments(polygon, return_centered=False): transform = np.eye(3) if return_centered: # calculate centroid and move polygon - transform[:2, 2] = - np.array(polygon.centroid.coords) + transform[:2, 2] = -np.array(polygon.centroid.coords) polygon = transform_polygon(polygon, transform) # start with the exterior @@ -934,8 +915,7 @@ def second_moments(polygon, return_centered=False): v = x1 * y2 - x2 * y1 Ixx -= np.sum(v * (y1 * y1 + y1 * y2 + y2 * y2)) / 12.0 Iyy -= np.sum(v * (x1 * x1 + x1 * x2 + x2 * x2)) / 12.0 - Ixy -= np.sum(v * (x1 * y2 + 2 * x1 * y1 + - 2 * x2 * y2 + x2 * y1)) / 24.0 + Ixy -= np.sum(v * (x1 * y2 + 2 * x1 * y1 + 2 * x2 * y2 + x2 * y1)) / 24.0 moments = [Ixx, Iyy, Ixy] @@ -943,7 +923,7 @@ def second_moments(polygon, return_centered=False): return moments # get the principal moments - root = np.sqrt(((Iyy - Ixx) / 2.0)**2 + Ixy**2) + root = np.sqrt(((Iyy - Ixx) / 2.0) ** 2 + Ixy**2) Imax = (Ixx + Iyy) / 2.0 + root Imin = (Ixx + Iyy) / 2.0 - root principal_moments = [Imax, Imin] @@ -963,7 +943,7 @@ def second_moments(polygon, return_centered=False): transform[0, 0] = cos_alpha transform[1, 1] = cos_alpha - transform[0, 1] = - sin_alpha + transform[0, 1] = -sin_alpha transform[1, 0] = sin_alpha return moments, principal_moments, alpha, transform diff --git a/trimesh/typed.py b/trimesh/typed.py index bb573470d..7c82181da 100644 --- a/trimesh/typed.py +++ b/trimesh/typed.py @@ -1,4 +1,4 @@ -from typing import Any 
+from typing import Any, Sequence, Union import numpy as np from numpy import float64, int64 @@ -9,13 +9,22 @@ try: from numpy.typing import NDArray except BaseException: - NDArray = Any + # NDArray = ndarray + pass -# todo make this a generic List|ndarray -ArrayLike = NDArray +# for input arrays we want to say "list[int], ndarray[int64], etc" +IntLike = Union[int, np.int64] +FloatLike = Union[float, np.float64] +BoolLike = Union[bool, np.bool_] +ArrayLike = Sequence # this should pass mypy eventually -def _check(values: ArrayLike[float64]) -> NDArray[int64]: + +def _check(values: ArrayLike[FloatLike]) -> NDArray[int64]: return (np.array(values, dtype=float64) * 100).astype(int64) +def _run() -> NDArray[int64]: + return _check(values=[1, 2]) + + __all__ = ['NDArray', 'ArrayLike'] From 3fe80c5fd8d8b604177cbeebb6efb38e30f4f47f Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 11 Sep 2023 14:37:50 -0400 Subject: [PATCH 43/84] apply deprecation --- trimesh/caching.py | 2 +- trimesh/creation.py | 15 ++++++++++++--- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/trimesh/caching.py b/trimesh/caching.py index 9e71eebeb..a41396a9c 100644 --- a/trimesh/caching.py +++ b/trimesh/caching.py @@ -369,7 +369,7 @@ def __init__(self, id_function, force_immutable=False): # for stored numpy arrays set `flags.writable = False` self.force_immutable = bool(force_immutable) # call the id function for initial value - self.id_current = self._id_function() + self.id_current = None # a counter for locks self._lock = 0 # actual store for data diff --git a/trimesh/creation.py b/trimesh/creation.py index ba49f378d..73d53c9a1 100644 --- a/trimesh/creation.py +++ b/trimesh/creation.py @@ -85,6 +85,7 @@ def revolve(linestring, if sections is None: # default to 32 sections for a full revolution sections = int(angle / (np.pi * 2) * 32) + # change to face count sections += 1 # create equally spaced angles @@ -107,8 +108,11 @@ def revolve(linestring, if closed: # should be a duplicate set of vertices - assert np.allclose(vertices[:per], - vertices[-per:]) + if tol.strict: + assert util.allclose(vertices[:per], + vertices[-per:], + atol=1e-8) + # chop off duplicate vertices vertices = vertices[:-per] @@ -130,7 +134,7 @@ def revolve(linestring, # remove any zero-area triangle # this covers many cases without having to think too much single = single[triangles.area(vertices[single]) > tol.merge] - + # how much to offset each slice # note arange multiplied by vertex stride # but tiled by the number of faces we actually have @@ -148,6 +152,11 @@ def revolve(linestring, # offset stacked and wrap vertices faces = (stacked + offset) % len(vertices) + + + #if 'process' not in kwargs: + # kwargs['process'] = False + # create the mesh from our vertices and faces mesh = Trimesh(vertices=vertices, faces=faces, From 3dd27b09e11d34af2b8ea628af7c6ea9a61319a5 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 11 Sep 2023 16:12:55 -0400 Subject: [PATCH 44/84] ruff --- pyproject.toml | 5 +++-- tests/test_typed.py | 11 +++++++++++ trimesh/creation.py | 8 ++++---- trimesh/path/polygons.py | 3 +-- trimesh/typed.py | 30 ++++++++++++++++++------------ 5 files changed, 37 insertions(+), 20 deletions(-) create mode 100644 tests/test_typed.py diff --git a/pyproject.toml b/pyproject.toml index ad709dab6..1b7d87e6e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -61,12 +61,13 @@ trimesh = [ test = [ "pytest-cov", "coveralls", + "mypy", + "ezdxf", "pytest", "pymeshlab", "pyinstrument", "ruff", - "ezdxf", - 
"autopep8<2", + "black", ] easy = [ "colorlog", diff --git a/tests/test_typed.py b/tests/test_typed.py new file mode 100644 index 000000000..677bb7869 --- /dev/null +++ b/tests/test_typed.py @@ -0,0 +1,11 @@ +import numpy as np + +from trimesh.typed import ArrayLike, FloatLike, NDArray, float64, int64 + + +# see if we pass mypy +def _check(values: ArrayLike[FloatLike]) -> NDArray[int64]: + return (np.array(values, dtype=float64) * 100).astype(int64) + +def _run() -> NDArray[int64]: + return _check(values=[1, 2]) diff --git a/trimesh/creation.py b/trimesh/creation.py index 73d53c9a1..26f059aaf 100644 --- a/trimesh/creation.py +++ b/trimesh/creation.py @@ -112,7 +112,7 @@ def revolve(linestring, assert util.allclose(vertices[:per], vertices[-per:], atol=1e-8) - + # chop off duplicate vertices vertices = vertices[:-per] @@ -134,7 +134,7 @@ def revolve(linestring, # remove any zero-area triangle # this covers many cases without having to think too much single = single[triangles.area(vertices[single]) > tol.merge] - + # how much to offset each slice # note arange multiplied by vertex stride # but tiled by the number of faces we actually have @@ -152,8 +152,8 @@ def revolve(linestring, # offset stacked and wrap vertices faces = (stacked + offset) % len(vertices) - - + + #if 'process' not in kwargs: # kwargs['process'] = False diff --git a/trimesh/path/polygons.py b/trimesh/path/polygons.py index 03174e46d..bf54c339b 100644 --- a/trimesh/path/polygons.py +++ b/trimesh/path/polygons.py @@ -6,11 +6,10 @@ from ..constants import log from ..constants import tol_path as tol from ..transformations import transform_points +from ..typed import NDArray, float64 from .simplify import fit_circle_check from .traversal import resample_path -from ..typed import NDArray, float64 - try: import networkx as nx except BaseException as E: diff --git a/trimesh/typed.py b/trimesh/typed.py index 7c82181da..de46c34fb 100644 --- a/trimesh/typed.py +++ b/trimesh/typed.py @@ -1,7 +1,6 @@ -from typing import Any, Sequence, Union +from typing import Sequence, Union import numpy as np -from numpy import float64, int64 # NDArray: TypeAlias = ndarray # ArrayLike: TypeAlias = Union[Sequence, ndarray] @@ -13,18 +12,25 @@ pass # for input arrays we want to say "list[int], ndarray[int64], etc" -IntLike = Union[int, np.int64] -FloatLike = Union[float, np.float64] +# all the integer types +IntLike = Union[ + int, + np.int8, + np.int16, + np.int32, + np.int64, + np.intc, + np.intp, + np.uint8, + np.uint16, + np.uint32, + np.uint64, +] + +FloatLike = Union[float, np.float16, np.float32, np.float64, np.float128, np.float_] BoolLike = Union[bool, np.bool_] ArrayLike = Sequence -# this should pass mypy eventually -def _check(values: ArrayLike[FloatLike]) -> NDArray[int64]: - return (np.array(values, dtype=float64) * 100).astype(int64) -def _run() -> NDArray[int64]: - return _check(values=[1, 2]) - - -__all__ = ['NDArray', 'ArrayLike'] +__all__ = ["NDArray", "ArrayLike"] From fb1aa096e9ef731bb92059bfc9fe2b3ddb8391f4 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 11 Sep 2023 16:17:17 -0400 Subject: [PATCH 45/84] add back int64 --- trimesh/typed.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/trimesh/typed.py b/trimesh/typed.py index de46c34fb..2da19c524 100644 --- a/trimesh/typed.py +++ b/trimesh/typed.py @@ -2,14 +2,13 @@ import numpy as np -# NDArray: TypeAlias = ndarray -# ArrayLike: TypeAlias = Union[Sequence, ndarray] +# our default integer and floating point types +from numpy import 
float64, int64 try: from numpy.typing import NDArray except BaseException: - # NDArray = ndarray - pass + NDArray = Sequence # for input arrays we want to say "list[int], ndarray[int64], etc" # all the integer types @@ -18,7 +17,7 @@ np.int8, np.int16, np.int32, - np.int64, + int64, np.intc, np.intp, np.uint8, @@ -27,7 +26,7 @@ np.uint64, ] -FloatLike = Union[float, np.float16, np.float32, np.float64, np.float128, np.float_] +FloatLike = Union[float, np.float16, np.float32, float64, np.float128, np.float_] BoolLike = Union[bool, np.bool_] ArrayLike = Sequence From 3f93c0916944bff18fa0d5b7f763c088469302bf Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 11 Sep 2023 16:19:36 -0400 Subject: [PATCH 46/84] not everyone has float128 --- trimesh/typed.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trimesh/typed.py b/trimesh/typed.py index 2da19c524..2f2aa0e97 100644 --- a/trimesh/typed.py +++ b/trimesh/typed.py @@ -26,7 +26,7 @@ np.uint64, ] -FloatLike = Union[float, np.float16, np.float32, float64, np.float128, np.float_] +FloatLike = Union[float, np.float16, np.float32, float64, np.float_] BoolLike = Union[bool, np.bool_] ArrayLike = Sequence From 628741864c08d5dd83405d8e7077c632acf10937 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 11 Sep 2023 16:22:43 -0400 Subject: [PATCH 47/84] remove missed embed --- trimesh/exchange/gltf.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/trimesh/exchange/gltf.py b/trimesh/exchange/gltf.py index 1cc737aea..4feca33e4 100644 --- a/trimesh/exchange/gltf.py +++ b/trimesh/exchange/gltf.py @@ -1764,8 +1764,6 @@ def _append_image(img, tree, buffer_items): # for everything else just use PNG save_as = 'png' - from IPython import embed - embed() # get the image data into a bytes object with util.BytesIO() as f: img.save(f, format=save_as) From f26043916bf5dd9c8e4ad9838de5d9eea2ba226b Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 12 Sep 2023 13:14:07 -0400 Subject: [PATCH 48/84] update renamed field --- tests/test_primitives.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_primitives.py b/tests/test_primitives.py index 9ed1001b5..c6b4cc060 100644 --- a/tests/test_primitives.py +++ b/tests/test_primitives.py @@ -137,7 +137,7 @@ def test_scaling(self): m = p.to_mesh() # make sure we have the types we think we do - assert isinstance(p, g.trimesh.primitives._Primitive) + assert isinstance(p, g.trimesh.primitives.Primitive) assert isinstance(m, g.trimesh.Trimesh) assert g.np.allclose(p.extents, m.extents) From c36c586433ae4e276d92004f1ca56e571a420ddd Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 12 Sep 2023 13:17:16 -0400 Subject: [PATCH 49/84] release candidates --- .github/workflows/release.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 76251f2f8..46ab54f14 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -4,6 +4,7 @@ on: push: branches: - main + - release-candidate jobs: formatting: From 8846c650e529b975aaacbe31adf8aa4f28bd838c Mon Sep 17 00:00:00 2001 From: xiaoxiae Date: Wed, 13 Sep 2023 17:58:13 +0200 Subject: [PATCH 50/84] glTF WebP test --- tests/test_gltf.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tests/test_gltf.py b/tests/test_gltf.py index 767fefe53..2f3ef2668 100644 --- a/tests/test_gltf.py +++ b/tests/test_gltf.py @@ -1003,6 +1003,22 @@ def test_embed_buffer(self): reloaded = 
g.trimesh.load(path) assert set(reloaded.geometry.keys()) == set(scene.geometry.keys()) + def test_webp(self): + # load textured file + mesh = g.get_mesh('fuze.ply') + assert hasattr(mesh.visual, 'uv') + + for extension in ["glb"]: + export = mesh.export(file_type=extension, extension_webp=True) + validate_glb(export) + + # roundtrip + reloaded = g.trimesh.load( + g.trimesh.util.wrap_as_stream(export), + file_type=extension) + + g.scene_equal(g.trimesh.Scene(mesh), reloaded) + if __name__ == '__main__': g.trimesh.util.attach_to_log() From 0b9b77eb235e43f6cd3463472d7a5aed7f70f3b9 Mon Sep 17 00:00:00 2001 From: xiaoxiae Date: Wed, 13 Sep 2023 18:16:41 +0200 Subject: [PATCH 51/84] add WebP to used/required glTF extensions --- trimesh/exchange/gltf.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/trimesh/exchange/gltf.py b/trimesh/exchange/gltf.py index cc1f7c3b1..6003338f7 100644 --- a/trimesh/exchange/gltf.py +++ b/trimesh/exchange/gltf.py @@ -728,9 +728,17 @@ def _create_gltf_structure(scene, # Add any extensions already in the tree (e.g. node extensions) if 'extensionsUsed' in tree: extensions_used = extensions_used.union(set(tree['extensionsUsed'])) + # Add WebP if used + if extension_webp: + extensions_used.add("EXT_texture_webp") if len(extensions_used) > 0: tree['extensionsUsed'] = list(extensions_used) + # Also add WebP to required (no fallback currently implemented) + # 'extensionsRequired' aren't currently used so this doesn't overwrite + if extension_webp: + tree['extensionsRequired'] = ["EXT_texture_webp"] + if buffer_postprocessor is not None: buffer_postprocessor(buffer_items, tree) From b7338a0b31e1ec83d37959d6a6cd74bbda8978c3 Mon Sep 17 00:00:00 2001 From: Mathias Parger Date: Thu, 14 Sep 2023 16:02:48 +0200 Subject: [PATCH 52/84] fixed gltf export when using paths instead of only file name --- trimesh/exchange/export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trimesh/exchange/export.py b/trimesh/exchange/export.py index b4fd127fd..edfef4884 100644 --- a/trimesh/exchange/export.py +++ b/trimesh/exchange/export.py @@ -90,7 +90,7 @@ def export_mesh(mesh, if isinstance(export, dict): # if we have a filename rename the default GLTF if file_name is not None and 'model.gltf' in export: - export[file_name] = export.pop('model.gltf') + export[os.path.basename(file_name)] = export.pop('model.gltf') # write the files if a resolver has been passed if resolver is not None: From 97b7b2088a107f96c7186782f0327445de4f21c4 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 14 Sep 2023 13:52:23 -0400 Subject: [PATCH 53/84] fix and test #2033 --- tests/test_voxel.py | 23 +++++ trimesh/ray/ray_triangle.py | 41 +++++---- trimesh/voxel/__init__.py | 2 +- trimesh/voxel/base.py | 102 ++++++++------------- trimesh/voxel/creation.py | 91 ++++++++---------- trimesh/voxel/encoding.py | 178 ++++++++++++++++++------------------ trimesh/voxel/morphology.py | 33 ++++--- trimesh/voxel/ops.py | 80 ++++++---------- trimesh/voxel/runlength.py | 54 ++++++----- trimesh/voxel/transforms.py | 84 ++++++++--------- 10 files changed, 323 insertions(+), 365 deletions(-) diff --git a/tests/test_voxel.py b/tests/test_voxel.py index 9ea20dbf4..685625db1 100644 --- a/tests/test_voxel.py +++ b/tests/test_voxel.py @@ -384,6 +384,29 @@ def test_binvox_with_dimension(self): exact=True) assert octant.shape == (dim, dim, dim) + def test_transform_cache(self): + encoding = [ + [[0, 0, 0], [0, 1, 0], [0, 0, 0]], + [[0, 1, 1], [0, 1, 0], [1, 1, 0]], + [[0, 0, 0], [0, 1, 0], [0, 0, 
0]]]
+        vg = g.trimesh.voxel.VoxelGrid(g.np.asarray(encoding))
+
+        scale = g.np.asarray([12, 23, 24])
+        s_matrix = g.np.eye(4)
+        s_matrix[:3, :3] *= scale
+
+        # original scale should be identity
+        assert g.np.allclose(vg.scale, 1.0)
+
+        # save the hash
+        hash_ori = hash(vg._data)
+        # modify the voxelgrid
+        vg.apply_transform(s_matrix)
+
+        # hash should have changed
+        assert hash_ori != hash(vg._data)
+        assert g.np.allclose(vg.scale, scale)
+

 if __name__ == '__main__':
     g.trimesh.util.attach_to_log()
diff --git a/trimesh/ray/ray_triangle.py b/trimesh/ray/ray_triangle.py
index a254c7f70..1f0cc58de 100644
--- a/trimesh/ray/ray_triangle.py
+++ b/trimesh/ray/ray_triangle.py
@@ -324,30 +324,31 @@ def ray_triangle_candidates(ray_origins,

     Parameters
     ------------
-    ray_origins: (m,3) float, ray origin points
-    ray_directions: (m,3) float, ray direction vectors
-    tree: rtree object, contains AABB of each triangle
+    ray_origins : (m, 3) float
+      Ray origin points.
+    ray_directions : (m, 3) float
+      Ray direction vectors
+    tree : rtree object
+      Contains AABB of each triangle

     Returns
     ----------
-    ray_candidates: (n,) int, triangle indexes
-    ray_id: (n,) int, corresponding ray index for a triangle candidate
+    ray_candidates : (n,) int
+      Triangle indexes
+    ray_id : (n,) int
+      Corresponding ray index for a triangle candidate
     """
-    ray_bounding = ray_bounds(ray_origins=ray_origins,
-                              ray_directions=ray_directions,
-                              bounds=tree.bounds)
-    ray_candidates = [[]] * len(ray_origins)
-    ray_id = [[]] * len(ray_origins)
-
-    for i, bounds in enumerate(ray_bounding):
-        ray_candidates[i] = np.array(list(tree.intersection(bounds)),
-                                     dtype=np.int64)
-        ray_id[i] = np.ones(len(ray_candidates[i]), dtype=np.int64) * i
-
-    ray_id = np.hstack(ray_id)
-    ray_candidates = np.hstack(ray_candidates)
-
-    return ray_candidates, ray_id
+    bounding = ray_bounds(ray_origins=ray_origins,
+                          ray_directions=ray_directions,
+                          bounds=tree.bounds)
+
+    index = []
+    candidates = []
+    for i, bounds in enumerate(bounding):
+        cand = list(tree.intersection(bounds))
+        candidates.extend(cand)
+        index.extend([i] * len(cand))
+    return np.array(candidates, dtype=np.int64), np.array(index, dtype=np.int64)


 def ray_bounds(ray_origins,
diff --git a/trimesh/voxel/__init__.py b/trimesh/voxel/__init__.py
index d2e5f88e1..83599cd2b 100644
--- a/trimesh/voxel/__init__.py
+++ b/trimesh/voxel/__init__.py
@@ -1,3 +1,3 @@
 from .base import VoxelGrid

-__all__ = ['VoxelGrid']
+__all__ = ["VoxelGrid"]
diff --git a/trimesh/voxel/base.py b/trimesh/voxel/base.py
index 58ec95422..3d4fd476d 100644
--- a/trimesh/voxel/base.py
+++ b/trimesh/voxel/base.py
@@ -17,44 +17,33 @@


 class VoxelGrid(Geometry):
-    """
-    Store 3D voxels.
-    """
-
     def __init__(self, encoding, transform=None, metadata=None):
+        """
+        Store 3D voxels.
+ + Parameters + -------------- + encoding + A numpy array of voxels, or an encoding object + """ if transform is None: transform = np.eye(4) if isinstance(encoding, np.ndarray): encoding = DenseEncoding(encoding.astype(bool)) if encoding.dtype != bool: - raise ValueError('encoding must have dtype bool') + raise ValueError("encoding must have dtype bool") self._data = caching.DataStore() - self.encoding = encoding - self._data['transform'] = transforms.Transform(transform) - self._cache = caching.Cache( - id_function=self._data.__hash__) + self._cache = caching.Cache(id_function=self._data.__hash__) + self._transform = transforms.Transform(transform, datastore=self._data) + self.encoding = encoding self.metadata = {} + # update the mesh metadata with passed metadata if isinstance(metadata, dict): self.metadata.update(metadata) elif metadata is not None: - raise ValueError( - 'metadata should be a dict or None, got %s' % str(metadata)) - - def crc(self): - util.log.warning( - '`geometry.crc()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `geometry.__hash__()` or `hash(geometry)`') - return self.__hash__() - - def hash(self): - util.log.warning( - '`geometry.hash()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `geometry.__hash__()` or `hash(geometry)`') - return self.__hash__() + raise ValueError(f"metadata should be a dict or None, not {type(metadata)}") def __hash__(self): """ @@ -74,29 +63,24 @@ def encoding(self): See `trimesh.voxel.encoding` for implementations. """ - return self._data['encoding'] + return self._data["encoding"] @encoding.setter def encoding(self, encoding): if isinstance(encoding, np.ndarray): encoding = DenseEncoding(encoding) elif not isinstance(encoding, Encoding): - raise ValueError( - 'encoding must be an Encoding, got %s' % str(encoding)) + raise ValueError("encoding must be an Encoding, got %s" % str(encoding)) if len(encoding.shape) != 3: raise ValueError( - 'encoding must be rank 3, got shape %s' % str(encoding.shape)) + "encoding must be rank 3, got shape %s" % str(encoding.shape) + ) if encoding.dtype != bool: - raise ValueError( - 'encoding must be binary, got %s' % encoding.dtype) - self._data['encoding'] = encoding - - @property - def _transform(self): - return self._data['transform'] + raise ValueError("encoding must be binary, got %s" % encoding.dtype) + self._data["encoding"] = encoding @property - def transform(self): + def transform(self) -> NDArray[float64]: """4x4 homogeneous transformation matrix.""" return self._transform.matrix @@ -171,8 +155,8 @@ def bounds(self): indices = self.sparse_indices # get all 8 corners of the AABB corners = bounds_module.corners( - [indices.min(axis=0) - 0.5, - indices.max(axis=0) + 0.5]) + [indices.min(axis=0) - 0.5, indices.max(axis=0) + 0.5] + ) # transform these corners to a new frame corners = self._transform.transform_points(corners) # get the AABB of corners in-frame @@ -220,13 +204,14 @@ def is_filled(self, point): indices = self.points_to_indices(point) in_range = np.logical_and( np.all(indices < np.array(self.shape), axis=-1), - np.all(indices >= 0, axis=-1)) + np.all(indices >= 0, axis=-1), + ) is_filled = np.zeros_like(in_range) is_filled[in_range] = self.encoding.gather_nd(indices[in_range]) return is_filled - def fill(self, method='holes', **kwargs): + def fill(self, method="holes", **kwargs): """ Mutates self by filling in the encoding according to `morphology.fill`. 
@@ -245,8 +230,7 @@ def fill(self, method='holes', **kwargs): self : VoxelGrid After replacing encoding with a filled version. """ - self.encoding = morphology.fill( - self.encoding, method=method, **kwargs) + self.encoding = morphology.fill(self.encoding, method=method, **kwargs) return self def hollow(self): @@ -319,8 +303,7 @@ def points(self): points : (self.filled, 3) float Points in space. """ - return self._transform.transform_points( - self.sparse_indices.astype(float)) + return self._transform.transform_points(self.sparse_indices.astype(float)) @property def sparse_indices(self): @@ -353,14 +336,13 @@ def as_boxes(self, colors=None, **kwargs): # encoding.as_mask? colors = colors[encoding.dense] else: - log.warning('colors incorrect shape!') + log.warning("colors incorrect shape!") colors = None elif colors.shape not in ((3,), (4,)): - log.warning('colors incorrect shape!') + log.warning("colors incorrect shape!") colors = None - mesh = ops.multibox( - centers=self.sparse_indices.astype(float), colors=colors) + mesh = ops.multibox(centers=self.sparse_indices.astype(float), colors=colors) mesh = mesh.apply_transform(self.transform) return mesh @@ -388,13 +370,10 @@ def show(self, *args, **kwargs): Convert the current set of voxels into a trimesh for visualization and show that via its built- in preview method. """ - return self.as_boxes(kwargs.pop( - 'colors', None)).show(*args, **kwargs) + return self.as_boxes(kwargs.pop("colors", None)).show(*args, **kwargs) def copy(self): - return VoxelGrid( - self.encoding.copy(), - self._transform.matrix.copy()) + return VoxelGrid(self.encoding.copy(), self._transform.matrix.copy()) def export(self, file_obj=None, file_type=None, **kwargs): """ @@ -415,14 +394,14 @@ def export(self, file_obj=None, file_type=None, **kwargs): if isinstance(file_obj, str) and file_type is None: file_type = util.split_extension(file_obj).lower() - if file_type != 'binvox': - raise ValueError('only binvox exports supported!') + if file_type != "binvox": + raise ValueError("only binvox exports supported!") exported = export_binvox(self, **kwargs) - if hasattr(file_obj, 'write'): + if hasattr(file_obj, "write"): file_obj.write(exported) elif isinstance(file_obj, str): - with open(file_obj, 'wb') as f: + with open(file_obj, "wb") as f: f.write(exported) return exported @@ -445,14 +424,11 @@ def revoxelized(self, shape): shape = tuple(shape) bounds = self.bounds.copy() extents = self.extents - points = util.grid_linspace( - bounds, shape).reshape(shape + (3,)) + points = util.grid_linspace(bounds, shape).reshape(shape + (3,)) dense = self.is_filled(points) scale = extents / np.asanyarray(shape) translate = bounds[0] - return VoxelGrid( - dense, - transform=tr.scale_and_translate(scale, translate)) + return VoxelGrid(dense, transform=tr.scale_and_translate(scale, translate)) def __add__(self, other): raise NotImplementedError("TODO : implement voxel concatenation") diff --git a/trimesh/voxel/creation.py b/trimesh/voxel/creation.py index 55a67e8ee..85ee37507 100644 --- a/trimesh/voxel/creation.py +++ b/trimesh/voxel/creation.py @@ -8,10 +8,7 @@ @log_time -def voxelize_subdivide(mesh, - pitch, - max_iter=10, - edge_factor=2.0): +def voxelize_subdivide(mesh, pitch, max_iter=10, edge_factor=2.0): """ Voxelize a surface by subdividing a mesh until every edge is shorter than: (pitch / edge_factor) @@ -35,19 +32,19 @@ def voxelize_subdivide(mesh, if max_iter is None: longest_edge = np.linalg.norm( - mesh.vertices[mesh.edges[:, 0]] - - mesh.vertices[mesh.edges[:, 1]], - 
axis=1).max() - max_iter = max(int(np.ceil(np.log2( - longest_edge / max_edge))), 0) + mesh.vertices[mesh.edges[:, 0]] - mesh.vertices[mesh.edges[:, 1]], axis=1 + ).max() + max_iter = max(int(np.ceil(np.log2(longest_edge / max_edge))), 0) # get the same mesh sudivided so every edge is shorter # than a factor of our pitch - v, f, idx = remesh.subdivide_to_size(mesh.vertices, - mesh.faces, - max_edge=max_edge, - max_iter=max_iter, - return_index=True) + v, f, idx = remesh.subdivide_to_size( + mesh.vertices, + mesh.faces, + max_edge=max_edge, + max_iter=max_iter, + return_index=True, + ) # convert the vertices to their voxel grid position hit = v / pitch @@ -67,16 +64,11 @@ def voxelize_subdivide(mesh, return base.VoxelGrid( enc.SparseBinaryEncoding(occupied_index - origin_index), - transform=tr.scale_and_translate( - scale=pitch, translate=origin_position)) + transform=tr.scale_and_translate(scale=pitch, translate=origin_position), + ) -def local_voxelize(mesh, - point, - pitch, - radius, - fill=True, - **kwargs): +def local_voxelize(mesh, point, pitch, radius, fill=True, **kwargs): """ Voxelize a mesh in the region of a cube around a point. When fill=True, uses proximity.contains to fill the resulting voxels so may be meaningless @@ -107,11 +99,12 @@ def local_voxelize(mesh, # this is a gotcha- radius sounds a lot like it should be in # float model space, not int voxel space so check if not isinstance(radius, int): - raise ValueError('radius needs to be an integer number of cubes!') + raise ValueError("radius needs to be an integer number of cubes!") # Bounds of region - bounds = np.concatenate((point - (radius + 0.5) * pitch, - point + (radius + 0.5) * pitch)) + bounds = np.concatenate( + (point - (radius + 0.5) * pitch, point + (radius + 0.5) * pitch) + ) # faces that intersect axis aligned bounding box faces = list(mesh.triangles_tree.intersection(bounds)) @@ -137,14 +130,15 @@ def local_voxelize(mesh, prepad = np.maximum(radius - center, 0) postpad = np.maximum(center + radius + 1 - matrix.shape, 0) - matrix = np.pad(matrix, np.stack((prepad, postpad), axis=-1), - mode='constant') + matrix = np.pad(matrix, np.stack((prepad, postpad), axis=-1), mode="constant") center += prepad # Extract voxels within the bounding box - voxels = matrix[center[0] - radius:center[0] + radius + 1, - center[1] - radius:center[1] + radius + 1, - center[2] - radius:center[2] + radius + 1] + voxels = matrix[ + center[0] - radius : center[0] + radius + 1, + center[1] - radius : center[1] + radius + 1, + center[2] - radius : center[2] + radius + 1, + ] local_origin = point - radius * pitch # origin of local voxels # Fill internal regions @@ -152,12 +146,10 @@ def local_voxelize(mesh, regions, n = ndimage.label(~voxels) distance = ndimage.distance_transform_cdt(~voxels) representatives = [ - np.unravel_index((distance * (regions == i)).argmax(), - distance.shape) for i in range(1, n + 1)] - contains = mesh.contains( - np.asarray(representatives) * - pitch + - local_origin) + np.unravel_index((distance * (regions == i)).argmax(), distance.shape) + for i in range(1, n + 1) + ] + contains = mesh.contains(np.asarray(representatives) * pitch + local_origin) where = np.where(contains)[0] + 1 # use in1d vs isin for older numpy versions @@ -169,9 +161,7 @@ def local_voxelize(mesh, @log_time -def voxelize_ray(mesh, - pitch, - per_cell=None): +def voxelize_ray(mesh, pitch, per_cell=None): """ Voxelize a mesh using ray queries. 
@@ -225,16 +215,12 @@ def voxelize_ray(mesh, encoding = enc.SparseBinaryEncoding(voxels) origin_position = origin_index * pitch return base.VoxelGrid( - encoding, - tr.scale_and_translate(scale=pitch, translate=origin_position)) + encoding, tr.scale_and_translate(scale=pitch, translate=origin_position) + ) @log_time -def voxelize_binvox(mesh, - pitch=None, - dimension=None, - bounds=None, - **binvoxer_kwargs): +def voxelize_binvox(mesh, pitch=None, dimension=None, bounds=None, **binvoxer_kwargs): """ Voxelize via binvox tool. @@ -273,21 +259,20 @@ def voxelize_binvox(mesh, extents = maxs - mins dimension = int(np.ceil(np.max(extents) / pitch)) if bounds is not None: - if 'bounding_box' in binvoxer_kwargs: - raise ValueError('Cannot provide both bounds and bounding_box') - binvoxer_kwargs['bounding_box'] = np.asanyarray(bounds).flatten() + if "bounding_box" in binvoxer_kwargs: + raise ValueError("Cannot provide both bounds and bounding_box") + binvoxer_kwargs["bounding_box"] = np.asanyarray(bounds).flatten() binvoxer = binvox.Binvoxer(dimension=dimension, **binvoxer_kwargs) return binvox.voxelize_mesh(mesh, binvoxer) voxelizers = util.FunctionRegistry( - ray=voxelize_ray, - subdivide=voxelize_subdivide, - binvox=voxelize_binvox) + ray=voxelize_ray, subdivide=voxelize_subdivide, binvox=voxelize_binvox +) -def voxelize(mesh, pitch, method='subdivide', **kwargs): +def voxelize(mesh, pitch, method="subdivide", **kwargs): """ Voxelize the given mesh using the specified implementation. diff --git a/trimesh/voxel/encoding.py b/trimesh/voxel/encoding.py index 8ec3d7981..ade24e022 100644 --- a/trimesh/voxel/encoding.py +++ b/trimesh/voxel/encoding.py @@ -11,13 +11,13 @@ from scipy import sparse as sp except BaseException as E: from ..exceptions import ExceptionWrapper + sp = ExceptionWrapper(E) def _empty_stripped(shape): num_dims = len(shape) - encoding = DenseEncoding( - np.empty(shape=(0,) * num_dims, dtype=bool)) + encoding = DenseEncoding(np.empty(shape=(0,) * num_dims, dtype=bool)) padding = np.zeros(shape=(num_dims, 2), dtype=int) padding[:, 1] = shape return encoding, padding @@ -36,8 +36,7 @@ class Encoding(ABC): def __init__(self, data): self._data = data - self._cache = caching.Cache( - id_function=self._data.__hash__) + self._cache = caching.Cache(id_function=self._data.__hash__) @abc.abstractproperty def dtype(self): @@ -109,7 +108,7 @@ def stripped(self): for dim, size in enumerate(shape): axis = tuple(range(dim)) + tuple(range(dim + 1, ndims)) filled = np.any(dense, axis=axis) - indices, = np.nonzero(filled) + (indices,) = np.nonzero(filled) lower = indices.min() upper = indices.max() + 1 padding.append([lower, size - upper]) @@ -121,16 +120,18 @@ def _flip(self, axes): def crc(self): log.warning( - '`geometry.crc()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `geometry.__hash__()` or `hash(geometry)`') + "`geometry.crc()` is deprecated and will " + + "be removed in October 2023: replace " + + "with `geometry.__hash__()` or `hash(geometry)`" + ) return self.__hash__() def hash(self): log.warning( - '`geometry.hash()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `geometry.__hash__()` or `hash(geometry)`') + "`geometry.hash()` is deprecated and will " + + "be removed in October 2023: replace " + + "with `geometry.__hash__()` or `hash(geometry)`" + ) return self.__hash__() def __hash__(self): @@ -168,14 +169,12 @@ def data(self): def run_length_data(self, dtype=np.int64): if self.ndims != 1: - raise ValueError( - 
'`run_length_data` only valid for flat encodings') + raise ValueError("`run_length_data` only valid for flat encodings") return runlength.dense_to_rle(self.dense, dtype=dtype) def binary_run_length_data(self, dtype=np.int64): if self.ndims != 1: - raise ValueError( - '`run_length_data` only valid for flat encodings') + raise ValueError("`run_length_data` only valid for flat encodings") return runlength.dense_to_brle(self.dense, dtype=dtype) def transpose(self, perm): @@ -199,7 +198,7 @@ class DenseEncoding(Encoding): def __init__(self, data): if not isinstance(data, caching.TrackedArray): if not isinstance(data, np.ndarray): - raise ValueError('DenseEncoding data must be a numpy array') + raise ValueError("DenseEncoding data must be a numpy array") data = caching.tracked_array(data) super().__init__(data=data) @@ -293,45 +292,48 @@ def __init__(self, indices, values, shape=None): """ data = caching.DataStore() super().__init__(data) - data['indices'] = indices - data['values'] = values - indices = data['indices'] + data["indices"] = indices + data["values"] = values + indices = data["indices"] if len(indices.shape) != 2: + raise ValueError("indices must be 2D, got shaped %s" % str(indices.shape)) + if data["values"].shape != (indices.shape[0],): raise ValueError( - 'indices must be 2D, got shaped %s' % str(indices.shape)) - if data['values'].shape != (indices.shape[0],): - raise ValueError( - 'values and indices shapes inconsistent: {} and {}'.format( - data['values'], data['indices'])) + "values and indices shapes inconsistent: {} and {}".format( + data["values"], data["indices"] + ) + ) if shape is None: - self._shape = tuple(data['indices'].max(axis=0) + 1) + self._shape = tuple(data["indices"].max(axis=0) + 1) else: self._shape = tuple(shape) if not np.all(indices < self._shape): - raise ValueError('all indices must be less than shape') + raise ValueError("all indices must be less than shape") if not np.all(indices >= 0): - raise ValueError('all indices must be non-negative') + raise ValueError("all indices must be non-negative") @staticmethod def from_dense(dense_data): sparse_indices = np.where(dense_data) values = dense_data[sparse_indices] return SparseEncoding( - np.stack(sparse_indices, axis=-1), values, shape=dense_data.shape) + np.stack(sparse_indices, axis=-1), values, shape=dense_data.shape + ) def copy(self): return SparseEncoding( indices=self.sparse_indices.copy(), values=self.sparse_values.copy(), - shape=self.shape) + shape=self.shape, + ) @property def sparse_indices(self): - return self._data['indices'] + return self._data["indices"] @property def sparse_values(self): - return self._data['values'] + return self._data["values"] @property def dtype(self): @@ -429,7 +431,8 @@ def SparseBinaryEncoding(indices, shape=None): rank n bool `SparseEncoding` with True values at each index. """ return SparseEncoding( - indices, np.ones(shape=(indices.shape[0],), dtype=bool), shape) + indices, np.ones(shape=(indices.shape[0],), dtype=bool), shape + ) class RunLengthEncoding(Encoding): @@ -446,18 +449,16 @@ def __init__(self, data, dtype=None): dtype: dtype of encoded data. Each second value of data is cast will be cast to this dtype if provided. 
""" - super().__init__( - data=caching.tracked_array(data)) + super().__init__(data=caching.tracked_array(data)) if dtype is None: dtype = self._data.dtype if len(self._data.shape) != 1: - raise ValueError('data must be 1D numpy array') + raise ValueError("data must be 1D numpy array") self._dtype = dtype @caching.cache_decorator def is_empty(self): - return not np.any( - np.logical_and(self._data[::2], self._data[1::2])) + return not np.any(np.logical_and(self._data[::2], self._data[1::2])) @property def ndims(self): @@ -473,16 +474,18 @@ def dtype(self): def crc(self): log.warning( - '`geometry.crc()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `geometry.__hash__()` or `hash(geometry)`') + "`geometry.crc()` is deprecated and will " + + "be removed in October 2023: replace " + + "with `geometry.__hash__()` or `hash(geometry)`" + ) return self.__hash__() def hash(self): log.warning( - '`geometry.hash()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `geometry.__hash__()` or `hash(geometry)`') + "`geometry.hash()` is deprecated and will " + + "be removed in October 2023: replace " + + "with `geometry.__hash__()` or `hash(geometry)`" + ) return self.__hash__() def __hash__(self): @@ -499,7 +502,8 @@ def __hash__(self): @staticmethod def from_dense(dense_data, dtype=np.int64, encoding_dtype=np.int64): return RunLengthEncoding( - runlength.dense_to_rle(dense_data, dtype=encoding_dtype), dtype=dtype) + runlength.dense_to_rle(dense_data, dtype=encoding_dtype), dtype=dtype + ) @staticmethod def from_rle(rle_data, dtype=None): @@ -533,8 +537,7 @@ def size(self): def _flip(self, axes): if axes != (0,): - raise ValueError( - 'encoding is 1D - cannot flip on axis %s' % str(axes)) + raise ValueError("encoding is 1D - cannot flip on axis %s" % str(axes)) return RunLengthEncoding(runlength.rle_reverse(self._data)) @caching.cache_decorator @@ -563,11 +566,11 @@ def gather_nd(self, indices): def sorted_gather(self, ordered_indices): return np.array( tuple(runlength.sorted_rle_gather_1d(self._data, ordered_indices)), - dtype=self._dtype) + dtype=self._dtype, + ) def mask(self, mask): - return np.array( - tuple(runlength.rle_mask(self._data, mask)), dtype=self._dtype) + return np.array(tuple(runlength.rle_mask(self._data, mask)), dtype=self._dtype) def get_value(self, index): for value in self.sorted_gather((index,)): @@ -604,12 +607,12 @@ def is_empty(self): @staticmethod def from_dense(dense_data, encoding_dtype=np.int64): return BinaryRunLengthEncoding( - runlength.dense_to_brle(dense_data, dtype=encoding_dtype)) + runlength.dense_to_brle(dense_data, dtype=encoding_dtype) + ) @staticmethod def from_rle(rle_data, dtype=None): - return BinaryRunLengthEncoding( - runlength.rle_to_brle(rle_data, dtype=dtype)) + return BinaryRunLengthEncoding(runlength.rle_to_brle(rle_data, dtype=dtype)) @staticmethod def from_brle(brle_data, dtype=None): @@ -639,8 +642,7 @@ def size(self): def _flip(self, axes): if axes != (0,): - raise ValueError( - 'encoding is 1D - cannot flip on axis %s' % str(axes)) + raise ValueError("encoding is 1D - cannot flip on axis %s" % str(axes)) return BinaryRunLengthEncoding(runlength.brle_reverse(self._data)) @property @@ -749,11 +751,12 @@ def _to_base_indices(self, indices): def _from_base_indices(self, base_indices): return np.expand_dims( - np.ravel_multi_index(base_indices.T, self._data.shape), axis=-1) + np.ravel_multi_index(base_indices.T, self._data.shape), axis=-1 + ) @property def shape(self): - return self.size, + 
return (self.size,) @property def dense(self): @@ -782,7 +785,7 @@ def __init__(self, encoding, shape): if encoding.ndims != 1: encoding = encoding.flat else: - raise ValueError('encoding must be an Encoding') + raise ValueError("encoding must be an Encoding") super().__init__(data=encoding) self._shape = tuple(shape) nn = self._shape.count(-1) @@ -791,23 +794,24 @@ def __init__(self, encoding, shape): size = np.abs(size) if self._data.size % size != 0: raise ValueError( - 'cannot reshape encoding of size %d into shape %s' % - (self._data.size, str(self._shape))) + "cannot reshape encoding of size %d into shape %s" + % (self._data.size, str(self._shape)) + ) rem = self._data.size // size self._shape = tuple(rem if s == -1 else s for s in self._shape) elif nn > 2: - raise ValueError('shape cannot have more than one -1 value') + raise ValueError("shape cannot have more than one -1 value") elif np.prod(self._shape) != self._data.size: raise ValueError( - 'cannot reshape encoding of size %d into shape %s' % - (self._data.size, str(self._shape))) + "cannot reshape encoding of size %d into shape %s" + % (self._data.size, str(self._shape)) + ) def _from_base_indices(self, base_indices): return np.column_stack(np.unravel_index(base_indices, self.shape)) def _to_base_indices(self, indices): - return np.expand_dims( - np.ravel_multi_index(indices.T, self.shape), axis=-1) + return np.expand_dims(np.ravel_multi_index(indices.T, self.shape), axis=-1) @property def flat(self): @@ -838,16 +842,17 @@ class TransposedEncoding(LazyIndexMap): def __init__(self, base_encoding, perm): if not isinstance(base_encoding, Encoding): raise ValueError( - 'base_encoding must be an Encoding, got %s' - % str(base_encoding)) + "base_encoding must be an Encoding, got %s" % str(base_encoding) + ) if len(base_encoding.shape) != len(perm): raise ValueError( - 'base_encoding has %d ndims - cannot transpose with perm %s' - % (base_encoding.ndims, str(perm))) + "base_encoding has %d ndims - cannot transpose with perm %s" + % (base_encoding.ndims, str(perm)) + ) super().__init__(base_encoding) perm = np.array(perm, dtype=np.int64) if not all(i in perm for i in range(base_encoding.ndims)): - raise ValueError('perm %s is not a valid permutation' % str(perm)) + raise ValueError("perm %s is not a valid permutation" % str(perm)) inv_perm = np.empty_like(perm) inv_perm[perm] = np.arange(base_encoding.ndims) self._perm = perm @@ -857,7 +862,7 @@ def transpose(self, perm): return _transposed(self._data, [self._perm[p] for p in perm]) def _transpose(self, perm): - raise RuntimeError('Should not be here') + raise RuntimeError("Should not be here") @property def perm(self): @@ -876,9 +881,9 @@ def _from_base_indices(self, base_indices): return np.take(base_indices, self._inv_perm, axis=-1) except TypeError: # windows sometimes tries to use wrong dtypes - return np.take(base_indices.astype(np.int64), - self._inv_perm.astype(np.int64), - axis=-1) + return np.take( + base_indices.astype(np.int64), self._inv_perm.astype(np.int64), axis=-1 + ) @property def dense(self): @@ -888,8 +893,7 @@ def gather(self, indices): return self._data.gather(self._base_indices(indices)) def mask(self, mask): - return self._data.mask( - mask.transpose(self._inv_perm)).transpose(self._perm) + return self._data.mask(mask.transpose(self._inv_perm)).transpose(self._perm) def get_value(self, index): return self._data[tuple(self._base_indices(index))] @@ -899,8 +903,7 @@ def data(self): return self._data def copy(self): - return TransposedEncoding( - 
base_encoding=self._data.copy(), perm=self._perm) + return TransposedEncoding(base_encoding=self._data.copy(), perm=self._perm) class FlippedEncoding(LazyIndexMap): @@ -913,19 +916,18 @@ class FlippedEncoding(LazyIndexMap): def __init__(self, encoding, axes): ndims = encoding.ndims if isinstance(axes, np.ndarray) and axes.size == 1: - axes = axes.item(), + axes = (axes.item(),) elif isinstance(axes, int): - axes = axes, + axes = (axes,) axes = tuple(a + ndims if a < 0 else a for a in axes) self._axes = tuple(sorted(axes)) if len(set(self._axes)) != len(self._axes): - raise ValueError( - "Axes cannot contain duplicates, got %s" % str(self._axes)) + raise ValueError("Axes cannot contain duplicates, got %s" % str(self._axes)) super().__init__(encoding) if not all(0 <= a < self._data.ndims for a in axes): raise ValueError( - 'Invalid axes %s for %d-d encoding' - % (str(axes), self._data.ndims)) + "Invalid axes %s for %d-d encoding" % (str(axes), self._data.ndims) + ) def _to_base_indices(self, indices): indices = indices.copy() @@ -961,22 +963,22 @@ def copy(self): def flip(self, axis=0): if isinstance(axis, np.ndarray): if axis.size == 1: - axis = axis.item(), + axis = (axis.item(),) else: axis = tuple(axis) elif isinstance(axis, int): - axes = axis, + axes = (axis,) else: axes = tuple(axis) return _flipped(self, self._axes + axes) def _flip(self, axes): - raise RuntimeError('Should not be here') + raise RuntimeError("Should not be here") def _flipped(encoding, axes): - if not hasattr(axes, '__iter__'): - axes = axes, + if not hasattr(axes, "__iter__"): + axes = (axes,) unique_ax = set() ndims = encoding.ndims axes = tuple(a + ndims if a < 0 else a for a in axes) diff --git a/trimesh/voxel/morphology.py b/trimesh/voxel/morphology.py index fc3cf41c5..c4a72b335 100644 --- a/trimesh/voxel/morphology.py +++ b/trimesh/voxel/morphology.py @@ -11,6 +11,7 @@ except BaseException as E: # scipy is a soft dependency from ..exceptions import ExceptionWrapper + ndimage = ExceptionWrapper(E) @@ -21,7 +22,8 @@ def _dense(encoding, rank=None): dense = encoding.dense else: raise ValueError( - 'encoding must be np.ndarray or Encoding, got %s' % str(encoding)) + "encoding must be np.ndarray or Encoding, got %s" % str(encoding) + ) if rank: _assert_rank(dense, rank) return dense @@ -34,7 +36,8 @@ def _sparse_indices(encoding, rank=None): sparse_indices = encoding.sparse_indices else: raise ValueError( - 'encoding must be np.ndarray or Encoding, got %s' % str(encoding)) + "encoding must be np.ndarray or Encoding, got %s" % str(encoding) + ) _assert_sparse_rank(sparse_indices, 3) return sparse_indices @@ -42,19 +45,19 @@ def _sparse_indices(encoding, rank=None): def _assert_rank(value, rank): if len(value.shape) != rank: - raise ValueError( - 'Expected rank %d, got shape %s' % (rank, str(value.shape))) + raise ValueError("Expected rank %d, got shape %s" % (rank, str(value.shape))) def _assert_sparse_rank(value, rank=None): if len(value.shape) != 2: raise ValueError( - 'sparse_indices must be rank 2, got shape %s' % str(value.shape)) + "sparse_indices must be rank 2, got shape %s" % str(value.shape) + ) if rank is not None: if value.shape[-1] != rank: raise ValueError( - 'sparse_indices.shape[1] must be %d, got %d' - % (rank, value.shape[-1])) + "sparse_indices.shape[1] must be %d, got %d" % (rank, value.shape[-1]) + ) @log_time @@ -70,8 +73,7 @@ def fill_base(encoding): -------------- A new filled encoding object. 
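    A usage sketch for the fill machinery, here using the scipy-backed
    `fill_holes` defined below (assumes scipy is installed):

        import numpy as np
        from trimesh.voxel import encoding as enc
        from trimesh.voxel import morphology

        # a hollow 5x5x5 shell enclosing a 3x3x3 cavity
        dense = np.zeros((5, 5, 5), dtype=bool)
        dense[[0, -1], :, :] = True
        dense[:, [0, -1], :] = True
        dense[:, :, [0, -1]] = True

        filled = morphology.fill_holes(enc.DenseEncoding(dense))
        # the 27 enclosed voxels are now occupied as well
        assert filled.dense.sum() == dense.sum() + 27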
""" - return enc.SparseBinaryEncoding( - ops.fill_base(_sparse_indices(encoding, rank=3))) + return enc.SparseBinaryEncoding(ops.fill_base(_sparse_indices(encoding, rank=3))) @log_time @@ -111,7 +113,8 @@ def fill_holes(encoding, **kwargs): A new filled in encoding object. """ return enc.DenseEncoding( - ndimage.binary_fill_holes(_dense(encoding, rank=3), **kwargs)) + ndimage.binary_fill_holes(_dense(encoding, rank=3), **kwargs) + ) fillers = util.FunctionRegistry( @@ -121,7 +124,7 @@ def fill_holes(encoding, **kwargs): ) -def fill(encoding, method='base', **kwargs): +def fill(encoding, method="base", **kwargs): """ Fill the given encoding using the specified implementation. @@ -151,7 +154,8 @@ def binary_dilation(encoding, **kwargs): https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.ndimage.morphology.binary_dilation.html#scipy.ndimage.morphology.binary_dilation """ return enc.DenseEncoding( - ndimage.binary_dilation(_dense(encoding, rank=3), **kwargs)) + ndimage.binary_dilation(_dense(encoding, rank=3), **kwargs) + ) def binary_closing(encoding, **kwargs): @@ -160,8 +164,7 @@ def binary_closing(encoding, **kwargs): https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.ndimage.morphology.binary_closing.html#scipy.ndimage.morphology.binary_closing """ - return enc.DenseEncoding( - ndimage.binary_closing(_dense(encoding, rank=3), **kwargs)) + return enc.DenseEncoding(ndimage.binary_closing(_dense(encoding, rank=3), **kwargs)) def surface(encoding, structure=None): @@ -182,7 +185,7 @@ def surface(encoding, structure=None): """ dense = _dense(encoding, rank=3) # padding/unpadding resolves issues with occupied voxels on the boundary - dense = np.pad(dense, np.ones((3, 2), dtype=int), mode='constant') + dense = np.pad(dense, np.ones((3, 2), dtype=int), mode="constant") empty = np.logical_not(dense) dilated = ndimage.binary_dilation(empty, structure=structure) surface = np.logical_and(dense, dilated)[1:-1, 1:-1, 1:-1] diff --git a/trimesh/voxel/ops.py b/trimesh/voxel/ops.py index af69ec453..ac0123b85 100644 --- a/trimesh/voxel/ops.py +++ b/trimesh/voxel/ops.py @@ -7,8 +7,8 @@ def fill_orthographic(dense): shape = dense.shape indices = np.stack( - np.meshgrid(*(np.arange(s) for s in shape), indexing='ij'), - axis=-1) + np.meshgrid(*(np.arange(s) for s in shape), indexing="ij"), axis=-1 + ) empty = np.logical_not(dense) def fill_axis(axis): @@ -46,15 +46,12 @@ def fill_base(sparse_indices): # validate inputs sparse_indices = np.asanyarray(sparse_indices, dtype=np.int64) if not util.is_shape(sparse_indices, (-1, 3)): - raise ValueError('incorrect shape') + raise ValueError("incorrect shape") # create grid and mark inner voxels max_value = sparse_indices.max() + 3 - grid = np.zeros((max_value, - max_value, - max_value), - bool) + grid = np.zeros((max_value, max_value, max_value), bool) voxels_sparse = np.add(sparse_indices, 1) grid[tuple(voxels_sparse.T)] = 1 @@ -72,7 +69,7 @@ def fill_base(sparse_indices): if c < 4: continue for s in range(0, c - c % 4, 4): - grid[i, j, idx[s]:idx[s + 3]] = 1 + grid[i, j, idx[s] : idx[s + 3]] = 1 if not check_dir2: continue @@ -86,7 +83,7 @@ def fill_base(sparse_indices): if c < 4: continue for s in range(0, c - c % 4, 4): - grid[i, idx[s]:idx[s + 3], k] = 1 + grid[i, idx[s] : idx[s + 3], k] = 1 # generate new voxels filled = np.column_stack(np.where(grid)) @@ -123,13 +120,12 @@ def matrix_to_marching_cubes(matrix, pitch=1.0): # Add in padding so marching cubes can function properly with # voxels on edge of AABB pad_width = 1 - 
rev_matrix = np.pad(rev_matrix, - pad_width=(pad_width), - mode='constant', - constant_values=(1)) + rev_matrix = np.pad( + rev_matrix, pad_width=(pad_width), mode="constant", constant_values=(1) + ) # pick between old and new API - if hasattr(measure, 'marching_cubes_lewiner'): + if hasattr(measure, "marching_cubes_lewiner"): func = measure.marching_cubes_lewiner else: func = measure.marching_cubes @@ -138,15 +134,15 @@ def matrix_to_marching_cubes(matrix, pitch=1.0): pitch = np.asanyarray(pitch) if pitch.size == 1: pitch = (pitch,) * 3 - meshed = func(volume=rev_matrix, - level=.5, # it is a boolean voxel grid - spacing=pitch) + meshed = func( + volume=rev_matrix, level=0.5, spacing=pitch # it is a boolean voxel grid + ) # allow results from either marching cubes function in skimage # binaries available for python 3.3 and 3.4 appear to use the classic # method if len(meshed) == 2: - log.warning('using old marching cubes, may not be watertight!') + log.warning("using old marching cubes, may not be watertight!") vertices, faces = meshed normals = None elif len(meshed) == 4: @@ -155,9 +151,7 @@ def matrix_to_marching_cubes(matrix, pitch=1.0): # Return to the origin, add in the pad_width vertices = np.subtract(vertices, pad_width * pitch) # create the mesh - mesh = Trimesh(vertices=vertices, - faces=faces, - vertex_normals=normals) + mesh = Trimesh(vertices=vertices, faces=faces, vertex_normals=normals) return mesh @@ -179,7 +173,7 @@ def sparse_to_matrix(sparse): sparse = np.asanyarray(sparse, dtype=np.int64) if not util.is_shape(sparse, (-1, 3)): - raise ValueError('sparse must be (n,3)!') + raise ValueError("sparse must be (n,3)!") shape = sparse.max(axis=0) + 1 matrix = np.zeros(np.prod(shape), dtype=bool) @@ -250,24 +244,21 @@ def multibox(centers, pitch=1.0, colors=None): from ..base import Trimesh # get centers as numpy array - centers = np.asanyarray( - centers, dtype=np.float64) + centers = np.asanyarray(centers, dtype=np.float64) # get a basic box b = primitives.Box() # apply the pitch b.apply_scale(float(pitch)) # tile into one box vertex per center - v = np.tile( - centers, - (1, len(b.vertices))).reshape((-1, 3)) + v = np.tile(centers, (1, len(b.vertices))).reshape((-1, 3)) # offset to centers v += np.tile(b.vertices, (len(centers), 1)) f = np.tile(b.faces, (len(centers), 1)) f += np.tile( - np.arange(len(centers)) * len(b.vertices), - (len(b.faces), 1)).T.reshape((-1, 1)) + np.arange(len(centers)) * len(b.vertices), (len(b.faces), 1) + ).T.reshape((-1, 1)) face_colors = None if colors is not None: @@ -277,9 +268,7 @@ def multibox(centers, pitch=1.0, colors=None): if colors.ndim == 2 and len(colors) == len(centers): face_colors = colors.repeat(12, axis=0) - mesh = Trimesh(vertices=v, - faces=f, - face_colors=face_colors) + mesh = Trimesh(vertices=v, faces=f, face_colors=face_colors) return mesh @@ -306,20 +295,13 @@ def boolean_sparse(a, b, operation=np.logical_and): import sparse # find the bounding box of both arrays - extrema = np.array([a.min(axis=0), - a.max(axis=0), - b.min(axis=0), - b.max(axis=0)]) + extrema = np.array([a.min(axis=0), a.max(axis=0), b.min(axis=0), b.max(axis=0)]) origin = extrema.min(axis=0) - 1 size = tuple(extrema.ptp(axis=0) + 2) # put nearby voxel arrays into same shape sparse array - sp_a = sparse.COO((a - origin).T, - data=np.ones(len(a), dtype=bool), - shape=size) - sp_b = sparse.COO((b - origin).T, - data=np.ones(len(b), dtype=bool), - shape=size) + sp_a = sparse.COO((a - origin).T, data=np.ones(len(a), dtype=bool), shape=size) + sp_b = 
sparse.COO((b - origin).T, data=np.ones(len(b), dtype=bool), shape=size) # apply the logical operation # get a sparse matrix out @@ -338,7 +320,7 @@ def strip_array(data): for dim in range(len(shape)): axis = tuple(range(dim)) + tuple(range(dim + 1, ndims)) filled = np.any(data, axis=axis) - indices, = np.nonzero(filled) + (indices,) = np.nonzero(filled) pad_left = indices[0] pad_right = indices[-1] padding.append([pad_left, pad_right]) @@ -362,7 +344,7 @@ def indices_to_points(indices, pitch=None, origin=None): """ indices = np.asanyarray(indices) if indices.shape[1:] != (3,): - raise ValueError('shape of indices must be (q, 3)') + raise ValueError("shape of indices must be (q, 3)") points = np.array(indices, dtype=np.float64) if pitch is not None: @@ -370,7 +352,7 @@ def indices_to_points(indices, pitch=None, origin=None): if origin is not None: origin = np.asanyarray(origin) if origin.shape != (3,): - raise ValueError('shape of origin must be (3,)') + raise ValueError("shape of origin must be (3,)") points += origin return points @@ -391,9 +373,7 @@ def matrix_to_points(matrix, pitch=None, origin=None): points: (q, 3) list of points """ indices = np.column_stack(np.nonzero(matrix)) - points = indices_to_points(indices=indices, - pitch=pitch, - origin=origin) + points = indices_to_points(indices=indices, pitch=pitch, origin=origin) return points @@ -417,12 +397,12 @@ def points_to_indices(points, pitch=None, origin=None): """ points = np.array(points, dtype=np.float64) if points.shape != (points.shape[0], 3): - raise ValueError('shape of points must be (q, 3)') + raise ValueError("shape of points must be (q, 3)") if origin is not None: origin = np.asanyarray(origin) if origin.shape != (3,): - raise ValueError('shape of origin must be (3,)') + raise ValueError("shape of origin must be (3,)") points -= origin if pitch is not None: points /= pitch diff --git a/trimesh/voxel/runlength.py b/trimesh/voxel/runlength.py index ae8a0abd6..a466cc7fa 100644 --- a/trimesh/voxel/runlength.py +++ b/trimesh/voxel/runlength.py @@ -85,8 +85,7 @@ def rle_to_brle(rle, dtype=None): for value, count in np.reshape(rle, (-1, 2)): acc += count if value not in (0, 1): - raise ValueError( - "Invalid run length encoding for conversion to BRLE") + raise ValueError("Invalid run length encoding for conversion to BRLE") if value == curr_val: out[-1] += count else: @@ -117,7 +116,7 @@ def brle_logical_not(brle): element-wise not of the input. 
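    A small sketch of the zero-length-run parity trick implemented here,
    assuming the BRLE layout of alternating run lengths that starts with
    a run of False:

        import numpy as np
        from trimesh.voxel import runlength

        dense = np.array([True, True, False])
        brle = runlength.dense_to_brle(dense)       # [0, 2, 1]
        flipped = runlength.brle_logical_not(brle)  # padded to [0, 0, 2, 1, 0]
        assert np.array_equal(runlength.brle_to_dense(flipped), ~dense)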
""" if brle[0] or brle[-1]: - return np.pad(brle, [1, 1], mode='constant') + return np.pad(brle, [1, 1], mode="constant") else: return brle[1:-1] @@ -163,8 +162,11 @@ def split_long_brle_lengths(lengths, dtype=np.int64): remainders = (lengths % max_val).astype(dtype) lengths = np.concatenate( - [np.array([max_val, 0] * repeat + [remainder], dtype=dtype) - for repeat, remainder in zip(repeats, remainders)]) + [ + np.array([max_val, 0] * repeat + [remainder], dtype=dtype) + for repeat, remainder in zip(repeats, remainders) + ] + ) lengths = lengths.reshape((np.sum(repeats) * 2 + nl,)).astype(dtype) return lengths elif lengths.dtype != dtype: @@ -199,7 +201,7 @@ def dense_to_brle(dense_data, dtype=np.int64): lengths = np.diff(np.r_[starts, n]) lengths = split_long_brle_lengths(lengths, dtype=dtype) if dense_data[0]: - lengths = np.pad(lengths, [1, 0], mode='constant') + lengths = np.pad(lengths, [1, 0], mode="constant") return lengths @@ -229,9 +231,8 @@ def brle_to_dense(brle_data, vals=None): vals = np.asarray(vals) if vals.shape != (2,): raise ValueError("vals.shape must be (2,), got %s" % (vals.shape)) - ft = np.repeat( - _ft[np.newaxis, :], (len(brle_data) + 1) // 2, axis=0).flatten() - return np.repeat(ft[:len(brle_data)], brle_data).flatten() + ft = np.repeat(_ft[np.newaxis, :], (len(brle_data) + 1) // 2, axis=0).flatten() + return np.repeat(ft[: len(brle_data)], brle_data).flatten() def rle_to_dense(rle_data, dtype=np.int64): @@ -240,12 +241,13 @@ def rle_to_dense(rle_data, dtype=np.int64): if dtype is not None: values = np.asanyarray(values, dtype=dtype) try: - result = np.repeat(np.squeeze(values, axis=-1), - np.squeeze(counts, axis=-1)) + result = np.repeat(np.squeeze(values, axis=-1), np.squeeze(counts, axis=-1)) except TypeError: # on windows it sometimes fails to cast data type - result = np.repeat(np.squeeze(values.astype(np.int64), axis=-1), - np.squeeze(counts.astype(np.int64), axis=-1)) + result = np.repeat( + np.squeeze(values.astype(np.int64), axis=-1), + np.squeeze(counts.astype(np.int64), axis=-1), + ) return result @@ -318,8 +320,7 @@ def brle_to_rle(brle, dtype=np.int64): brle = np.concatenate([brle, [0]]) lengths = brle values = np.tile(_ft, len(brle) // 2) - return rle_to_rle( - np.stack((values, lengths), axis=1).flatten(), dtype=dtype) + return rle_to_rle(np.stack((values, lengths), axis=1).flatten(), dtype=dtype) def brle_to_brle(brle, dtype=np.int64): @@ -350,9 +351,7 @@ def _unsorted_gatherer(indices, sorted_gather_fn): ordered_indices = indices[order] def f(data, dtype=None): - result = np.empty( - len(order), dtype=dtype or getattr( - data, 'dtype', None)) + result = np.empty(len(order), dtype=dtype or getattr(data, "dtype", None)) result[order] = tuple(sorted_gather_fn(data, ordered_indices)) return result @@ -390,8 +389,8 @@ def sorted_rle_gather_1d(rle_data, ordered_indices): start += next(data_iter) except StopIteration: raise IndexError( - 'Index %d out of range of raw_values length %d' - % (index, start)) + "Index %d out of range of raw_values length %d" % (index, start) + ) try: while index < start: yield value @@ -533,8 +532,8 @@ def sorted_brle_gather_1d(brle_data, ordered_indices): start += next(data_iter) except StopIteration: raise IndexError( - 'Index %d out of range of raw_values length %d' - % (index, start)) + "Index %d out of range of raw_values length %d" % (index, start) + ) try: while index < start: yield value @@ -564,7 +563,8 @@ def brle_gatherer_1d(indices): or rle_data.dtype if no dtype is provided. 
""" return functools.partial( - _unsorted_gatherer(indices, sorted_brle_gather_1d), dtype=bool) + _unsorted_gatherer(indices, sorted_brle_gather_1d), dtype=bool + ) def brle_gather_1d(brle_data, indices): @@ -634,8 +634,7 @@ def rle_to_sparse(rle_data): def brle_to_sparse(brle_data, dtype=np.int64): ends = np.cumsum(brle_data) - indices = [np.arange(s, e, dtype=dtype) for s, e in - zip(ends[::2], ends[1::2])] + indices = [np.arange(s, e, dtype=dtype) for s, e in zip(ends[::2], ends[1::2])] return np.concatenate(indices) @@ -672,8 +671,7 @@ def rle_strip(rle_data): else: end += count - rle_data = rle_data[ - final_i:None if final_j == 0 else -final_j].reshape((-1,)) + rle_data = rle_data[final_i : None if final_j == 0 else -final_j].reshape((-1,)) return rle_data, (start, end) @@ -712,6 +710,6 @@ def brle_strip(brle_data): else: end += count - brle_data = brle_data[final_i:None if final_j == 0 else -final_j] + brle_data = brle_data[final_i : None if final_j == 0 else -final_j] brle_data = np.concatenate([[0], brle_data]) return brle_data, (start, end) diff --git a/trimesh/voxel/transforms.py b/trimesh/voxel/transforms.py index f2b50a7cb..ac4729465 100644 --- a/trimesh/voxel/transforms.py +++ b/trimesh/voxel/transforms.py @@ -1,5 +1,7 @@ import numpy as np +from typing import Optional + from .. import caching, util from .. import transformations as tr @@ -12,41 +14,33 @@ class Transform: for the voxels, including pitch and origin. """ - def __init__(self, matrix): + def __init__(self, matrix, datastore: Optional[caching.DataStore] = None): """ - Initialize with a transform + Initialize with a transform. Parameters ----------- matrix : (4, 4) float Homogeneous transformation matrix + datastore + If passed store the actual values in a reference to + another datastore. """ matrix = np.asanyarray(matrix, dtype=np.float64) - if matrix.shape != (4, 4): - raise ValueError('matrix must be 4x4!') - - if not np.all(matrix[3, :] == [0, 0, 0, 1]): - raise ValueError('matrix not a valid transformation matrix') + if matrix.shape != (4, 4) or not np.allclose(matrix[3, :], [0, 0, 0, 1]): + raise ValueError("matrix is invalid!") # store matrix as data - self._data = caching.tracked_array(matrix, dtype=np.float64) + if datastore is None: + self._data = caching.DataStore() + elif isinstance(datastore, caching.DataStore): + self._data = datastore + else: + raise ValueError(f"{type(datastore)} != caching.DataStore") + + self._data["transform_matrix"] = matrix # dump cache when matrix changes - self._cache = caching.Cache( - id_function=self._data.__hash__) - - def crc(self): - util.log.warning( - '`geometry.crc()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `geometry.__hash__()` or `hash(geometry)`') - return self.__hash__() - - def hash(self): - util.log.warning( - '`geometry.hash()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `geometry.__hash__()` or `hash(geometry)`') - return self.__hash__() + self._cache = caching.Cache(id_function=self._data.__hash__) def __hash__(self): """ @@ -69,7 +63,7 @@ def translation(self): translation : (3,) float Cartesian translation """ - return self._data[:3, 3] + return self._data["transform_matrix"][:3, 3] @property def matrix(self): @@ -81,10 +75,10 @@ def matrix(self): matrix : (4, 4) float Transformation matrix """ - return self._data + return self._data["transform_matrix"] @matrix.setter - def matrix(self, data): + def matrix(self, values): """ Set the homogeneous transformation matrix. 
@@ -93,10 +87,10 @@ def matrix(self, data): matrix : (4, 4) float Transformation matrix """ - data = np.asanyarray(data, dtype=np.float64) - if data.shape != (4, 4): - raise ValueError('matrix must be (4, 4)!') - self._data = caching.tracked_array(data, dtype=np.float64) + values = np.asanyarray(values, dtype=np.float64) + if values.shape != (4, 4): + raise ValueError("matrix must be (4, 4)!") + self._data["transform_matrix"] = values @caching.cache_decorator def scale(self): @@ -112,26 +106,21 @@ def scale(self): matrix = self.matrix # get the (3,) diagonal of the rotation component scale = np.diag(matrix[:3, :3]) - if not np.allclose( - matrix[:3, :3], - scale * np.eye(3), - scale * 1e-6 + 1e-8): - raise RuntimeError('transform features a shear or rotation') + if not np.allclose(matrix[:3, :3], scale * np.eye(3), scale * 1e-6 + 1e-8): + raise RuntimeError("transform features a shear or rotation") return scale @caching.cache_decorator def pitch(self): scale = self.scale - if not util.allclose( - scale[0], scale[1:], - np.max(np.abs(scale)) * 1e-6 + 1e-8): - raise RuntimeError('transform features non-uniform scaling') + if not util.allclose(scale[0], scale[1:], np.max(np.abs(scale)) * 1e-6 + 1e-8): + raise RuntimeError("transform features non-uniform scaling") return scale @caching.cache_decorator def unit_volume(self): """Volume of a transformed unit cube.""" - return np.linalg.det(self._data[:3, :3]) + return np.linalg.det(self._data["transform_matrix"][:3, :3]) def apply_transform(self, matrix): """Mutate the transform in-place and return self.""" @@ -164,16 +153,17 @@ def transform_points(self, points): """ if self.is_identity: return points.copy() - return tr.transform_points( - points.reshape(-1, 3), self.matrix).reshape(points.shape) + return tr.transform_points(points.reshape(-1, 3), self.matrix).reshape( + points.shape + ) def inverse_transform_points(self, points): """Apply the inverse transformation to points (not in-place).""" if self.is_identity: return points - return tr.transform_points( - points.reshape(-1, 3), - self.inverse_matrix).reshape(points.shape) + return tr.transform_points(points.reshape(-1, 3), self.inverse_matrix).reshape( + points.shape + ) @caching.cache_decorator def inverse_matrix(self): @@ -182,7 +172,7 @@ def inverse_matrix(self): return inv def copy(self): - return Transform(self._data.copy()) + return Transform(matrix=self.matrix) @caching.cache_decorator def is_identity(self): From 5cffea114aa095bfb48f91d625269df2863a2218 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 14 Sep 2023 13:57:45 -0400 Subject: [PATCH 54/84] fix import --- trimesh/voxel/base.py | 1 + trimesh/voxel/transforms.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/trimesh/voxel/base.py b/trimesh/voxel/base.py index 3d4fd476d..c7c3d40e2 100644 --- a/trimesh/voxel/base.py +++ b/trimesh/voxel/base.py @@ -12,6 +12,7 @@ from ..constants import log from ..exchange.binvox import export_binvox from ..parent import Geometry +from ..typed import NDArray, float64 from . import morphology, ops, transforms from .encoding import DenseEncoding, Encoding diff --git a/trimesh/voxel/transforms.py b/trimesh/voxel/transforms.py index ac4729465..0cf7969f5 100644 --- a/trimesh/voxel/transforms.py +++ b/trimesh/voxel/transforms.py @@ -1,7 +1,7 @@ -import numpy as np - from typing import Optional +import numpy as np + from .. import caching, util from .. 
import transformations as tr From 7cac99e1167baddeeda404c0de2b9dca79ef1fb7 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 14 Sep 2023 15:00:39 -0400 Subject: [PATCH 55/84] skip broken extensions --- pyproject.toml | 10 ++++------ trimesh/exchange/gltf.py | 20 +++++++++++--------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 1b7d87e6e..59e791848 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -128,9 +128,7 @@ ignore = [ ] line-length = 90 -[tool.autopep8] -max_line_length = 90 -in-place = true -recursive = true -aggressive = 3 -verbose = true \ No newline at end of file + +[tool.black] +line-length = 90 +target-version = ['py37'] \ No newline at end of file diff --git a/trimesh/exchange/gltf.py b/trimesh/exchange/gltf.py index eb6d66e2a..09418dd44 100644 --- a/trimesh/exchange/gltf.py +++ b/trimesh/exchange/gltf.py @@ -94,7 +94,7 @@ def export_gltf(scene, embed_buffers : bool Embed the buffer into JSON file as a base64 string in the URI extension_webp : bool - Export textures to WebP (using glTF's EXT_texture_webp extension). + Export textures as webP (using glTF's EXT_texture_webp extension). Returns ---------- @@ -186,7 +186,7 @@ def export_glb( Custom function to (in-place) post-process the tree before exporting. extension_webp : bool - Export textures to WebP (using glTF's EXT_texture_webp extension). + Export textures as webP using EXT_texture_webp extension. Returns ---------- @@ -621,7 +621,7 @@ def _create_gltf_structure(scene, unitize_normals : bool Unitize all exported normals so as to pass GLTF validation extension_webp : bool - Export textures to WebP (using glTF's EXT_texture_webp extension). + Export textures as webP using EXT_texture_webp extension. Returns --------------- @@ -777,7 +777,7 @@ def _append_mesh(mesh, mat_hashes : dict Which materials have already been added extension_webp : bool - Export textures to WebP (using glTF's EXT_texture_webp extension). + Export textures as webP (using glTF's EXT_texture_webp extension). """ # return early from empty meshes to avoid crashing later if len(mesh.faces) == 0 or len(mesh.vertices) == 0: @@ -1258,8 +1258,9 @@ def parse_values_and_textures(input_dict): if "EXT_texture_webp" in texture["extensions"]: idx = texture["extensions"]["EXT_texture_webp"]["source"] else: - raise ValueError("unsupported texture extension" - "in {texture['extensions']}!") + broken = list(texture['extensions'].keys()) + log.debug( + f"unsupported texture extension `{broken}`") else: # fallback (or primary, if extensions are not present) idx = texture["source"] @@ -1784,7 +1785,7 @@ def _append_image(img, tree, buffer_items, extension_webp): buffer_items : (n,) bytes Binary blobs containing data extension_webp : bool - Export textures to WebP (using glTF's EXT_texture_webp extension). + Export textures as webP (using glTF's EXT_texture_webp extension). Returns ----------- @@ -1821,6 +1822,7 @@ def _append_image(img, tree, buffer_items, extension_webp): # index is length minus one return len(tree['images']) - 1 + def _append_material(mat, tree, buffer_items, mat_hashes, extension_webp): """ Add passed PBRMaterial as GLTF 2.0 specification JSON @@ -1841,7 +1843,7 @@ def _append_material(mat, tree, buffer_items, mat_hashes, extension_webp): Which materials have already been added Stored as { hashed : material index } extension_webp : bool - Export textures to WebP (using glTF's EXT_texture_webp extension). + Export textures as webP using EXT_texture_webp extension. 
Returns ------------- @@ -1920,7 +1922,7 @@ def _append_material(mat, tree, buffer_items, mat_hashes, extension_webp): # add a reference to the base color texture result[key] = {'index': len(tree['textures'])} - # add an object for the texture (possibly according to the WebP extension) + # add an object for the texture according to the WebP extension if extension_webp: tree['textures'].append({'extensions': {'EXT_texture_webp': {'source': index}}}) From f5cc44b35ff5be20935c0b946d03a8d30b6e2f19 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 14 Sep 2023 15:03:39 -0400 Subject: [PATCH 56/84] make extension optional --- trimesh/exchange/gltf.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/trimesh/exchange/gltf.py b/trimesh/exchange/gltf.py index 09418dd44..209175716 100644 --- a/trimesh/exchange/gltf.py +++ b/trimesh/exchange/gltf.py @@ -1253,14 +1253,13 @@ def parse_values_and_textures(input_dict): try: texture = header["textures"][v["index"]] - # extensions - if "extensions" in texture: - if "EXT_texture_webp" in texture["extensions"]: - idx = texture["extensions"]["EXT_texture_webp"]["source"] - else: - broken = list(texture['extensions'].keys()) - log.debug( - f"unsupported texture extension `{broken}`") + # check to see if this is using a webp extension texture + # should this be case sensitive? + webp = texture.get( + 'extensions', {}).get( + 'EXT_texture_webp', {}).get('source') + if webp is not None: + idx = webp else: # fallback (or primary, if extensions are not present) idx = texture["source"] From 18fbbf4e60079530215e29287a9fbb07974e6a2c Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 14 Sep 2023 15:33:35 -0400 Subject: [PATCH 57/84] add second moments to polygon identifier --- trimesh/path/polygons.py | 47 +++++++++++++++++++++++++++++----------- 1 file changed, 34 insertions(+), 13 deletions(-) diff --git a/trimesh/path/polygons.py b/trimesh/path/polygons.py index bf54c339b..506d4f7cc 100644 --- a/trimesh/path/polygons.py +++ b/trimesh/path/polygons.py @@ -412,7 +412,10 @@ def medial_axis(polygon, resolution=None, clip=None): resolution = np.reshape(polygon.bounds, (2, 2)).ptp(axis=0).max() / 100 # get evenly spaced points on the polygons boundaries - samples = resample_boundaries(polygon=polygon, resolution=resolution, clip=clip) + samples = resample_boundaries( + polygon=polygon, + resolution=resolution, + clip=clip) # stack the boundary into a (m,2) float array samples = stack_boundaries(samples) # create the voronoi diagram on 2D points @@ -455,8 +458,8 @@ def identifier(polygon: Polygon) -> NDArray[float64]: Returns --------- - hashed : (10), - Some values that should be unique for this polygon. + identifier : (8,) float + Values which should be unique for this polygon. """ result = [ len(polygon.interiors), @@ -466,7 +469,11 @@ def identifier(polygon: Polygon) -> NDArray[float64]: polygon.length, polygon.exterior.length, ] - result.extend(polygon.bounds) + # include the principal second moments of inertia of the polygon + # this is invariant to rotation and translation + _, principal, _, _ = second_moments(polygon, return_centered=True) + result.extend(principal) + return np.array(result, dtype=np.float64) @@ -486,9 +493,14 @@ def random_polygon(segments=8, radius=1.0): polygon : shapely.geometry.Polygon Geometry object with random exterior and no interiors. 
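    The principal second moments that `identifier` now includes are
    invariant to rotation and translation, which is what makes them useful
    in a hash-like identifier. A minimal sketch on a square, assuming
    shapely is available:

        import numpy as np
        from shapely.geometry import Polygon
        from trimesh.path.polygons import second_moments

        square = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])
        _, principal, _, _ = second_moments(square, return_centered=True)
        # a 2 x 2 square about its centroid: Ixx = Iyy = b * h**3 / 12 = 4/3
        assert np.allclose(principal, 4.0 / 3.0)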
""" - angles = np.sort(np.cumsum(np.random.random(segments) * np.pi * 2) % (np.pi * 2)) + angles = np.sort( + np.cumsum( + np.random.random(segments) * np.pi * 2) % + (np.pi * 2)) radii = np.random.random(segments) * radius - points = np.column_stack((np.cos(angles), np.sin(angles))) * radii.reshape((-1, 1)) + + points = np.column_stack( + (np.cos(angles), np.sin(angles))) * radii.reshape((-1, 1)) points = np.vstack((points, points[0])) polygon = Polygon(points).buffer(0.0) if hasattr(polygon, "geoms"): @@ -654,7 +666,8 @@ def repair_invalid(polygon, scale=None, rtol=0.5): return basic if scale is None: - distance = 0.002 * np.reshape(polygon.bounds, (2, 2)).ptp(axis=0).mean() + distance = 0.002 * \ + np.reshape(polygon.bounds, (2, 2)).ptp(axis=0).mean() else: distance = 0.002 * scale @@ -668,7 +681,8 @@ def repair_invalid(polygon, scale=None, rtol=0.5): # reconstruct a single polygon from the interior ring recon = Polygon(shell=rings[0]).buffer(distance) # check perimeter of result against original perimeter - if recon.is_valid and np.isclose(recon.length, polygon.length, rtol=rtol): + if recon.is_valid and np.isclose( + recon.length, polygon.length, rtol=rtol): return recon # try de-deuplicating the outside ring @@ -682,7 +696,8 @@ def repair_invalid(polygon, scale=None, rtol=0.5): # make a new polygon with result dedupe = Polygon(shell=points[unique]) # check result - if dedupe.is_valid and np.isclose(dedupe.length, polygon.length, rtol=rtol): + if dedupe.is_valid and np.isclose( + dedupe.length, polygon.length, rtol=rtol): return dedupe # buffer and unbuffer the whole polygon @@ -693,7 +708,8 @@ def repair_invalid(polygon, scale=None, rtol=0.5): return buffered.geoms[areas.argmax()] # check perimeter of result against original perimeter - if buffered.is_valid and np.isclose(buffered.length, polygon.length, rtol=rtol): + if buffered.is_valid and np.isclose( + buffered.length, polygon.length, rtol=rtol): log.debug("Recovered invalid polygon through double buffering") return buffered @@ -798,7 +814,8 @@ def projected( adjacency = mesh.face_adjacency[adjacency_check] # a sequence of face indexes that are connected - face_groups = graph.connected_components(adjacency, nodes=np.nonzero(side)[0]) + face_groups = graph.connected_components( + adjacency, nodes=np.nonzero(side)[0]) # if something is goofy we may end up with thousands of # regions that do nothing except hang for an hour then segfault @@ -819,7 +836,10 @@ def projected( # edges that occur only once are on the boundary group = grouping.group_rows(edge, require_count=1) # turn each region into polygons - polygons.extend(edges_to_polygons(edges=edge[group], vertices=vertices_2D)) + polygons.extend( + edges_to_polygons( + edges=edge[group], + vertices=vertices_2D)) padding = 0.0 if apad is not None: @@ -914,7 +934,8 @@ def second_moments(polygon, return_centered=False): v = x1 * y2 - x2 * y1 Ixx -= np.sum(v * (y1 * y1 + y1 * y2 + y2 * y2)) / 12.0 Iyy -= np.sum(v * (x1 * x1 + x1 * x2 + x2 * x2)) / 12.0 - Ixy -= np.sum(v * (x1 * y2 + 2 * x1 * y1 + 2 * x2 * y2 + x2 * y1)) / 24.0 + Ixy -= np.sum(v * (x1 * y2 + 2 * x1 * y1 + + 2 * x2 * y2 + x2 * y1)) / 24.0 moments = [Ixx, Iyy, Ixy] From 15b9b77eaef78d901f530b71627b70f04b2d108b Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 14 Sep 2023 16:19:22 -0400 Subject: [PATCH 58/84] convert constants to dataclasses --- trimesh/constants.py | 82 ++++++++++++++++++++-------------------- trimesh/path/polygons.py | 62 ++++++++++-------------------- trimesh/typed.py | 4 +- 3 files 
changed, 63 insertions(+), 85 deletions(-) diff --git a/trimesh/constants.py b/trimesh/constants.py index 30bc2a0da..07acc2c36 100644 --- a/trimesh/constants.py +++ b/trimesh/constants.py @@ -1,8 +1,11 @@ +from dataclasses import dataclass + import numpy as np from .util import log, now +@dataclass class ToleranceMesh: """ ToleranceMesh objects hold tolerance information about meshes. @@ -24,23 +27,24 @@ class ToleranceMesh: If True, run additional in- process checks (slower) """ - def __init__(self, **kwargs): - # set our zero for floating point comparison to 100x - # the resolution of float64 which works out to 1e-13 - self.zero = np.finfo(np.float64).resolution * 100 - # vertices closer than this should be merged - self.merge = 1e-8 - # peak to valley flatness to be considered planar - self.planar = 1e-5 - # coplanar threshold: ratio of (radius / span) ** 2 - self.facet_threshold = 5000 - # run additional checks and asserts - self.strict = False + # set our zero for floating point comparison to 100x + # the resolution of float64 which works out to 1e-13 + zero: float = np.finfo(np.float64).resolution * 100 + + # vertices closer than this should be merged + merge: float = 1e-8 + + # peak to valley flatness to be considered planar + planar: float = 1e-5 + + # coplanar threshold: ratio of (radius / span) ** 2 + facet_threshold: int = 5000 - # add any passed kwargs - self.__dict__.update(kwargs) + # should additional slow checks be run inside functions + strict: bool = False +@dataclass class TolerancePath: """ TolerancePath objects contain tolerance information used in @@ -82,26 +86,23 @@ class TolerancePath: acceptable. """ - def __init__(self, **kwargs): - # default values - self.zero = 1e-12 - self.merge = 1e-5 - self.planar = 1e-5 - self.buffer = .05 - self.seg_frac = .125 - self.seg_angle = np.radians(50) - self.seg_angle_min = np.radians(1) - self.seg_angle_frac = .5 - self.aspect_frac = .1 - self.radius_frac = .02 - self.radius_min = 1e-4 - self.radius_max = 50 - self.tangent = np.radians(20) - # run additional checks and asserts - self.strict = False - self.__dict__.update(kwargs) + zero: float = 1e-12 + merge: float = 1e-5 + planar: float = 1e-5 + seg_frac: float = 0.125 + seg_angle: float = np.radians(50) + seg_angle_min: float = np.radians(1) + seg_angle_frac: float = 0.5 + aspect_frac: float = 0.1 + radius_frac: float = 0.02 + radius_min: float = 1e-4 + radius_max: float = 50.0 + tangent: float = np.radians(20) + strict: bool = False + +@dataclass class ResolutionPath: """ res.seg_frac : float @@ -119,12 +120,11 @@ class ResolutionPath: Format string to use when exporting floating point vertices """ - def __init__(self, **kwargs): - self.seg_frac = .05 - self.seg_angle = .08 - self.max_sections = 500 - self.min_sections = 20 - self.export = '0.10f' + seg_frac: float = 0.05 + seg_angle: float = 0.08 + max_sections: float = 500 + min_sections: float = 20 + export: str = "0.10f" # instantiate mesh tolerances with defaults @@ -141,14 +141,14 @@ def log_time(method): and then emit a log.debug message with the method name and how long it took to execute. 
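    A minimal usage sketch of the decorator; the `slow` function here is
    hypothetical, purely for illustration:

        from trimesh.constants import log_time

        @log_time
        def slow():
            return sum(range(10**6))

        # logs e.g. "slow executed in 0.0312 seconds." at debug level
        slow()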
""" + def timed(*args, **kwargs): tic = now() result = method(*args, **kwargs) - log.debug('%s executed in %.4f seconds.', - method.__name__, - now() - tic) + log.debug("%s executed in %.4f seconds.", method.__name__, now() - tic) return result + timed.__name__ = method.__name__ timed.__doc__ = method.__doc__ return timed diff --git a/trimesh/path/polygons.py b/trimesh/path/polygons.py index 506d4f7cc..0ef055455 100644 --- a/trimesh/path/polygons.py +++ b/trimesh/path/polygons.py @@ -6,7 +6,7 @@ from ..constants import log from ..constants import tol_path as tol from ..transformations import transform_points -from ..typed import NDArray, float64 +from ..typed import NDArray, Optional, float64 from .simplify import fit_circle_check from .traversal import resample_path @@ -27,7 +27,7 @@ Rtree = ExceptionWrapper(E) -def enclosure_tree(polygons): +def enclosure_tree(polygons: list[Polygon]): """ Given a list of shapely polygons with only exteriors, find which curves represent the exterior shell or root curve @@ -157,7 +157,7 @@ def edges_to_polygons(edges, vertices): return complete -def polygons_obb(polygons): +def polygons_obb(polygons: list[Polygon]): """ Find the OBBs for a list of shapely.geometry.Polygons """ @@ -168,7 +168,7 @@ def polygons_obb(polygons): return np.array(transforms), np.array(rectangles) -def polygon_obb(polygon): +def polygon_obb(polygon: Polygon): """ Find the oriented bounding box of a Shapely polygon. @@ -256,9 +256,7 @@ def polygon_bounds(polygon, matrix=None): """ if matrix is not None: assert matrix.shape == (3, 3) - points = transform_points( - points=np.array(polygon.exterior.coords), matrix=matrix - ) + points = transform_points(points=np.array(polygon.exterior.coords), matrix=matrix) else: points = np.array(polygon.exterior.coords) @@ -305,7 +303,7 @@ def plot_single(single): return axes -def resample_boundaries(polygon, resolution, clip=None): +def resample_boundaries(polygon: Polygon, resolution: float, clip=None): """ Return a version of a polygon with boundaries re-sampled to a specified resolution. @@ -364,7 +362,7 @@ def stack_boundaries(boundaries): return result -def medial_axis(polygon, resolution=None, clip=None): +def medial_axis(polygon: Polygon, resolution: Optional[float] = None, clip=None): """ Given a shapely polygon, find the approximate medial axis using a voronoi diagram of evenly spaced points on the @@ -412,10 +410,7 @@ def medial_axis(polygon, resolution=None, clip=None): resolution = np.reshape(polygon.bounds, (2, 2)).ptp(axis=0).max() / 100 # get evenly spaced points on the polygons boundaries - samples = resample_boundaries( - polygon=polygon, - resolution=resolution, - clip=clip) + samples = resample_boundaries(polygon=polygon, resolution=resolution, clip=clip) # stack the boundary into a (m,2) float array samples = stack_boundaries(samples) # create the voronoi diagram on 2D points @@ -493,14 +488,10 @@ def random_polygon(segments=8, radius=1.0): polygon : shapely.geometry.Polygon Geometry object with random exterior and no interiors. 
""" - angles = np.sort( - np.cumsum( - np.random.random(segments) * np.pi * 2) % - (np.pi * 2)) + angles = np.sort(np.cumsum(np.random.random(segments) * np.pi * 2) % (np.pi * 2)) radii = np.random.random(segments) * radius - points = np.column_stack( - (np.cos(angles), np.sin(angles))) * radii.reshape((-1, 1)) + points = np.column_stack((np.cos(angles), np.sin(angles))) * radii.reshape((-1, 1)) points = np.vstack((points, points[0])) polygon = Polygon(points).buffer(0.0) if hasattr(polygon, "geoms"): @@ -666,8 +657,7 @@ def repair_invalid(polygon, scale=None, rtol=0.5): return basic if scale is None: - distance = 0.002 * \ - np.reshape(polygon.bounds, (2, 2)).ptp(axis=0).mean() + distance = 0.002 * np.reshape(polygon.bounds, (2, 2)).ptp(axis=0).mean() else: distance = 0.002 * scale @@ -681,8 +671,7 @@ def repair_invalid(polygon, scale=None, rtol=0.5): # reconstruct a single polygon from the interior ring recon = Polygon(shell=rings[0]).buffer(distance) # check perimeter of result against original perimeter - if recon.is_valid and np.isclose( - recon.length, polygon.length, rtol=rtol): + if recon.is_valid and np.isclose(recon.length, polygon.length, rtol=rtol): return recon # try de-deuplicating the outside ring @@ -690,14 +679,11 @@ def repair_invalid(polygon, scale=None, rtol=0.5): # remove any segments shorter than tol.merge # this is a little risky as if it was discretized more # finely than 1-e8 it may remove detail - unique = np.append( - True, (np.diff(points, axis=0) ** 2).sum(axis=1) ** 0.5 > 1e-8 - ) + unique = np.append(True, (np.diff(points, axis=0) ** 2).sum(axis=1) ** 0.5 > 1e-8) # make a new polygon with result dedupe = Polygon(shell=points[unique]) # check result - if dedupe.is_valid and np.isclose( - dedupe.length, polygon.length, rtol=rtol): + if dedupe.is_valid and np.isclose(dedupe.length, polygon.length, rtol=rtol): return dedupe # buffer and unbuffer the whole polygon @@ -708,8 +694,7 @@ def repair_invalid(polygon, scale=None, rtol=0.5): return buffered.geoms[areas.argmax()] # check perimeter of result against original perimeter - if buffered.is_valid and np.isclose( - buffered.length, polygon.length, rtol=rtol): + if buffered.is_valid and np.isclose(buffered.length, polygon.length, rtol=rtol): log.debug("Recovered invalid polygon through double buffering") return buffered @@ -814,8 +799,7 @@ def projected( adjacency = mesh.face_adjacency[adjacency_check] # a sequence of face indexes that are connected - face_groups = graph.connected_components( - adjacency, nodes=np.nonzero(side)[0]) + face_groups = graph.connected_components(adjacency, nodes=np.nonzero(side)[0]) # if something is goofy we may end up with thousands of # regions that do nothing except hang for an hour then segfault @@ -836,10 +820,7 @@ def projected( # edges that occur only once are on the boundary group = grouping.group_rows(edge, require_count=1) # turn each region into polygons - polygons.extend( - edges_to_polygons( - edges=edge[group], - vertices=vertices_2D)) + polygons.extend(edges_to_polygons(edges=edge[group], vertices=vertices_2D)) padding = 0.0 if apad is not None: @@ -873,13 +854,11 @@ def projected( # join_style=2, # mitre_limit=1.5) # for p in polygons]).buffer(-padding) - polygon = ops.unary_union([p.buffer(padding) for p in polygons]).buffer( - -padding - ) + polygon = ops.unary_union([p.buffer(padding) for p in polygons]).buffer(-padding) return polygon -def second_moments(polygon, return_centered=False): +def second_moments(polygon: Polygon, return_centered=False): """ Calculate the 
second moments of area of a polygon from the boundary. @@ -934,8 +913,7 @@ def second_moments(polygon, return_centered=False): v = x1 * y2 - x2 * y1 Ixx -= np.sum(v * (y1 * y1 + y1 * y2 + y2 * y2)) / 12.0 Iyy -= np.sum(v * (x1 * x1 + x1 * x2 + x2 * x2)) / 12.0 - Ixy -= np.sum(v * (x1 * y2 + 2 * x1 * y1 + - 2 * x2 * y2 + x2 * y1)) / 24.0 + Ixy -= np.sum(v * (x1 * y2 + 2 * x1 * y1 + 2 * x2 * y2 + x2 * y1)) / 24.0 moments = [Ixx, Iyy, Ixy] diff --git a/trimesh/typed.py b/trimesh/typed.py index 2f2aa0e97..da6fae33b 100644 --- a/trimesh/typed.py +++ b/trimesh/typed.py @@ -1,4 +1,4 @@ -from typing import Sequence, Union +from typing import Optional, Sequence, Union import numpy as np @@ -32,4 +32,4 @@ ArrayLike = Sequence -__all__ = ["NDArray", "ArrayLike"] +__all__ = ["NDArray", "ArrayLike", "Optional", "FloatLike", "IntLike", "BoolLike"] From a1697ca971b84c6535cbecd3c83dee0036736c06 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 14 Sep 2023 16:37:16 -0400 Subject: [PATCH 59/84] change arc_center to dataclass --- trimesh/path/arc.py | 107 +++++++++++++++++--------------- trimesh/path/entities.py | 8 +-- trimesh/path/exchange/dxf.py | 10 +-- trimesh/path/exchange/svg_io.py | 2 +- trimesh/typed.py | 4 +- 5 files changed, 70 insertions(+), 61 deletions(-) diff --git a/trimesh/path/arc.py b/trimesh/path/arc.py index 936e1c167..204271a51 100644 --- a/trimesh/path/arc.py +++ b/trimesh/path/arc.py @@ -1,15 +1,39 @@ +from dataclasses import dataclass + import numpy as np from .. import util from ..constants import log from ..constants import res_path as res from ..constants import tol_path as tol +from ..typed import ArrayLike, FloatLike, NDArray, Optional, float64 # floating point zero _TOL_ZERO = 1e-12 -def arc_center(points, return_normal=True, return_angle=True): +@dataclass +class ArcInfo: + # What is the radius of the circular arc? + radius: float + + # what is the center of the circular arc + # it is either 2D or 3D depending on input. + center: NDArray[float64] + + # what is the 3D normal vector of the plane the arc lies on + normal: Optional[NDArray[float64]] = None + + # what is the starting and ending angle of the arc. + angles: Optional[NDArray[float64]] = None + + # what is the angular span of this circular arc. + span: Optional[float] = None + + +def arc_center( + points: ArrayLike[FloatLike], return_normal: bool = True, return_angle: bool = True +) -> ArcInfo: """ Given three points on a 2D or 3D arc find the center, radius, normal, and angular span. @@ -25,20 +49,15 @@ def arc_center(points, return_normal=True, return_angle=True): Returns --------- - result : dict - Contains arc center and other keys: - 'center' : (d,) float, cartesian center of the arc - 'radius' : float, radius of the arc - 'normal' : (3,) float, the plane normal. - 'angles' : (2,) float, angle of start and end in radians - 'span' : float, angle swept by the arc in radians + info + Arc center, radius, and other information. 
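    A sketch of the dataclass return value, using three points on the
    unit circle:

        import numpy as np
        from trimesh.path.arc import arc_center

        info = arc_center([[1, 0], [0, 1], [-1, 0]])
        assert np.isclose(info.radius, 1.0)
        assert np.allclose(info.center, [0.0, 0.0])
        # half of a full circle
        assert np.isclose(info.span, np.pi)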
""" points = np.asanyarray(points, dtype=np.float64) # get the non-unit vectors of the three points vectors = points[[2, 0, 1]] - points[[1, 2, 0]] # we need both the squared row sum and the non-squared - abc2 = np.dot(vectors ** 2, [1] * points.shape[1]) + abc2 = np.dot(vectors**2, [1] * points.shape[1]) # same as np.linalg.norm(vectors, axis=1) abc = np.sqrt(abc2) @@ -52,37 +71,32 @@ def arc_center(points, return_normal=True, return_angle=True): # check the denominator for the radius calculation denom = half * np.prod(half - edges) if denom < tol.merge: - raise ValueError('arc is colinear!') + raise ValueError("arc is colinear!") # find the radius and scale back after the operation radius = scale * ((np.prod(edges) / 4.0) / np.sqrt(denom)) # use a barycentric approach to get the center - ba2 = (abc2[[1, 2, 0, 0, 2, 1, 0, 1, 2]] * - [1, 1, -1, 1, 1, -1, 1, 1, -1]).reshape( - (3, 3)).sum(axis=1) * abc2 + ba2 = (abc2[[1, 2, 0, 0, 2, 1, 0, 1, 2]] * [1, 1, -1, 1, 1, -1, 1, 1, -1]).reshape( + (3, 3) + ).sum(axis=1) * abc2 center = points.T.dot(ba2) / ba2.sum() if tol.strict: # all points should be at the calculated radius from center - assert util.allclose( - np.linalg.norm(points - center, axis=1), - radius) + assert util.allclose(np.linalg.norm(points - center, axis=1), radius) # start with initial results - result = {'center': center, - 'radius': radius} - + result = {"center": center, "radius": radius} if return_normal: if points.shape == (3, 2): # for 2D arcs still use the cross product so that # the sign of the normal vector is consistent - result['normal'] = util.unitize( - np.cross(np.append(-vectors[1], 0), - np.append(vectors[2], 0))) + result["normal"] = util.unitize( + np.cross(np.append(-vectors[1], 0), np.append(vectors[2], 0)) + ) else: # otherwise just take the cross product - result['normal'] = util.unitize( - np.cross(-vectors[1], vectors[2])) + result["normal"] = util.unitize(np.cross(-vectors[1], vectors[2])) if return_angle: # vectors from points on arc to center point @@ -104,16 +118,14 @@ def arc_center(points, return_normal=True, return_angle=True): angles = np.arctan2(*vector[:, :2].T[::-1]) + np.pi * 2 angles_sorted = np.sort(angles[[0, 2]]) reverse = angles_sorted[0] < angles[1] < angles_sorted[1] - angles_sorted = angles_sorted[::(1 - int(not reverse) * 2)] - result['angles'] = angles_sorted - result['span'] = angle + angles_sorted = angles_sorted[:: (1 - int(not reverse) * 2)] + result["angles"] = angles_sorted + result["span"] = angle - return result + return ArcInfo(**result) -def discretize_arc(points, - close=False, - scale=1.0): +def discretize_arc(points, close=False, scale=1.0): """ Returns a version of a three point arc consisting of line segments. 
@@ -146,10 +158,12 @@ def discretize_arc(points, return points[:, :2] return points - center, R, N, angle = (center_info['center'], - center_info['radius'], - center_info['normal'], - center_info['span']) + center, R, N, angle = ( + center_info.center, + center_info.radius, + center_info.normal, + center_info.span, + ) # if requested, close arc into a circle if close: @@ -181,11 +195,11 @@ def discretize_arc(points, arc_ok = (arc_dist < tol.merge).all() if not arc_ok: log.warning( - 'failed to discretize arc (endpoint_distance=%s R=%s)', - str(arc_dist), R) - log.warning('Failed arc points: %s', str(points)) - raise ValueError('Arc endpoints diverging!') - discrete = discrete[:, :(3 - is_2D)] + "failed to discretize arc (endpoint_distance=%s R=%s)", str(arc_dist), R + ) + log.warning("Failed arc points: %s", str(points)) + raise ValueError("Arc endpoints diverging!") + discrete = discrete[:, : (3 - is_2D)] return discrete @@ -216,24 +230,19 @@ def to_threepoint(center, radius, angles=None): # force angles to float64 angles = np.asanyarray(angles, dtype=np.float64) if angles.shape != (2,): - raise ValueError('angles must be (2,)!') + raise ValueError("angles must be (2,)!") # provide the wrap around if angles[1] < angles[0]: angles[1] += np.pi * 2 center = np.asanyarray(center, dtype=np.float64) if center.shape != (2,): - raise ValueError('only valid on 2D arcs!') + raise ValueError("only valid on 2D arcs!") # turn the angles of [start, end] # into [start, middle, end] - angles = np.array([angles[0], - angles.mean(), - angles[1]], - dtype=np.float64) + angles = np.array([angles[0], angles.mean(), angles[1]], dtype=np.float64) # turn angles into (3, 2) points - three = (np.column_stack( - (np.cos(angles), - np.sin(angles))) * radius) + center + three = (np.column_stack((np.cos(angles), np.sin(angles))) * radius) + center return three diff --git a/trimesh/path/entities.py b/trimesh/path/entities.py index e8a92dd76..6106c29c5 100644 --- a/trimesh/path/entities.py +++ b/trimesh/path/entities.py @@ -633,11 +633,11 @@ def length(self, vertices): # it's indicated as a closed circle fit = self.center( vertices, return_normal=False, return_angle=False) - return np.pi * fit['radius'] * 4 + return np.pi * fit.radius * 4 # get the angular span of the circular arc fit = self.center( vertices, return_normal=False, return_angle=True) - return fit['span'] * fit['radius'] * 2 + return fit.span * fit.radius * 2 def discrete(self, vertices, scale=1.0): """ @@ -699,8 +699,8 @@ def bounds(self, vertices): vertices, return_normal=False, return_angle=False) - bounds = np.array([info['center'] - info['radius'], - info['center'] + info['radius']], + bounds = np.array([info.center - info.radius, + info.center + info.radius], dtype=np.float64) else: # since the AABB of a partial arc is hard, approximate diff --git a/trimesh/path/exchange/dxf.py b/trimesh/path/exchange/dxf.py index 8768f55d2..0a49fee20 100644 --- a/trimesh/path/exchange/dxf.py +++ b/trimesh/path/exchange/dxf.py @@ -696,11 +696,11 @@ def convert_arc(arc, vertices): info = arc.center( vertices, return_angle=True, return_normal=False) subs = entity_info(arc) - center = info['center'] + center = info.center if len(center) == 2: center = np.append(center, 0.0) data = '10\n{:.12g}\n20\n{:.12g}\n30\n{:.12g}'.format(*center) - data += '\n40\n{:.12g}'.format(info['radius']) + data += f'\n40\n{info.radius:.12g}' if arc.closed: subs['TYPE'] = 'CIRCLE' @@ -710,7 +710,7 @@ def convert_arc(arc, vertices): # and end angle field data += '\n100\nAcDbArc' data += 
'\n50\n{:.12g}\n51\n{:.12g}'.format( - *np.degrees(info['angles'])) + *np.degrees(info.angles)) subs['DATA'] = data result = template['arc'].format(**subs) @@ -945,12 +945,12 @@ def bulge_to_arcs(lines, # have the same magnitude as the input data if tol.strict: from ..arc import arc_center - check_angle = [arc_center(i)['span'] + check_angle = [arc_center(i).span for i in three] assert np.allclose(np.abs(angle), np.abs(check_angle)) - check_radii = [arc_center(i)['radius'] + check_radii = [arc_center(i).radius for i in three] assert np.allclose(check_radii, np.abs(radius)) diff --git a/trimesh/path/exchange/svg_io.py b/trimesh/path/exchange/svg_io.py index f2a05027e..4f04c2877 100644 --- a/trimesh/path/exchange/svg_io.py +++ b/trimesh/path/exchange/svg_io.py @@ -448,7 +448,7 @@ def svg_arc(arc): vertices = points[arc.points] info = arc_center( vertices, return_normal=False, return_angle=True) - C, R, angle = info['center'], info['radius'], info['span'] + C, R, angle = info.center, info.radius, info.span if arc.closed: return temp_circle.format(x=C[0] - R, y=C[1], diff --git a/trimesh/typed.py b/trimesh/typed.py index da6fae33b..435b2049f 100644 --- a/trimesh/typed.py +++ b/trimesh/typed.py @@ -1,4 +1,4 @@ -from typing import Optional, Sequence, Union +from typing import List, Optional, Sequence, Tuple, Union import numpy as np @@ -32,4 +32,4 @@ ArrayLike = Sequence -__all__ = ["NDArray", "ArrayLike", "Optional", "FloatLike", "IntLike", "BoolLike"] +__all__ = ["NDArray", "ArrayLike", "Optional", "FloatLike", "IntLike", "BoolLike", "List", "Tuple"] From 9d766974b072030482ad1f7b5bce5bda521fa255 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 14 Sep 2023 16:39:24 -0400 Subject: [PATCH 60/84] wrap getitem --- trimesh/path/arc.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/trimesh/path/arc.py b/trimesh/path/arc.py index 204271a51..271d0e807 100644 --- a/trimesh/path/arc.py +++ b/trimesh/path/arc.py @@ -30,6 +30,10 @@ class ArcInfo: # what is the angular span of this circular arc. 
span: Optional[float] = None + def __getitem__(self, item): + # add for backwards compatibility + return getattr(self, item) + def arc_center( points: ArrayLike[FloatLike], return_normal: bool = True, return_angle: bool = True From d2d236b11c6164fcd6e272ee9126a5089048fed4 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 14 Sep 2023 20:54:13 -0400 Subject: [PATCH 61/84] use typed List --- tests/test_arc.py | 2 +- trimesh/path/polygons.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/test_arc.py b/tests/test_arc.py index 5aba967b7..7e5420364 100644 --- a/tests/test_arc.py +++ b/tests/test_arc.py @@ -26,7 +26,7 @@ def test_center(self): [[30156.18, 1673.64, -2914.56], [30152.91, 1780.09, -2885.51], [30148.3, 1875.81, -2857.79]]) - assert 'center' in c + assert len(c.center) == 3 def test_center_random(self): from trimesh.path.arc import arc_center diff --git a/trimesh/path/polygons.py b/trimesh/path/polygons.py index 0ef055455..45c192d39 100644 --- a/trimesh/path/polygons.py +++ b/trimesh/path/polygons.py @@ -6,7 +6,7 @@ from ..constants import log from ..constants import tol_path as tol from ..transformations import transform_points -from ..typed import NDArray, Optional, float64 +from ..typed import List, NDArray, Optional, float64 from .simplify import fit_circle_check from .traversal import resample_path @@ -27,7 +27,7 @@ Rtree = ExceptionWrapper(E) -def enclosure_tree(polygons: list[Polygon]): +def enclosure_tree(polygons: List[Polygon]): """ Given a list of shapely polygons with only exteriors, find which curves represent the exterior shell or root curve @@ -157,7 +157,7 @@ def edges_to_polygons(edges, vertices): return complete -def polygons_obb(polygons: list[Polygon]): +def polygons_obb(polygons: List[Polygon]): """ Find the OBBs for a list of shapely.geometry.Polygons """ From 5824483f377df780e3f9e993b9aeff43f8dc91bd Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Fri, 15 Sep 2023 12:54:04 -0400 Subject: [PATCH 62/84] add meta-extra --- pyproject.toml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 59e791848..d7096938d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -69,6 +69,7 @@ test = [ "ruff", "black", ] + easy = [ "colorlog", "mapbox-earcut", @@ -99,6 +100,9 @@ recommend = [ "python-fcl" ] +# requires pip >= 21.2 +# https://hynek.me/articles/python-recursive-optional-dependencies/ +all = ["trimesh[easy,recommend,test]"] [tool.ruff] target-version = "py37" @@ -115,6 +119,7 @@ select = [ "W", # style warnings "YTT", # sys.version ] + ignore = [ "C901", # Comprehension is too complex (11 > 10) "N802", # Function name should be lowercase From aca66174ed8a9d415c104585780e01f35146bc2a Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Fri, 15 Sep 2023 12:58:13 -0400 Subject: [PATCH 63/84] add test for #2035 --- tests/test_gltf.py | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/tests/test_gltf.py b/tests/test_gltf.py index 6ffd2a58d..825c95b1d 100644 --- a/tests/test_gltf.py +++ b/tests/test_gltf.py @@ -431,7 +431,8 @@ def test_material_primary_colors(self): scene = g.trimesh.Scene([sphere]) def to_integer(args): - args['materials'][0]['pbrMetallicRoughness']['baseColorFactor'] = [1, 0, 0, 1] + args['materials'][0]['pbrMetallicRoughness']['baseColorFactor'] = [ + 1, 0, 0, 1] export = scene.export(file_type='glb', tree_postprocessor=to_integer) validate_glb(export) @@ -441,7 +442,9 @@ def to_integer(args): assert 
len(reloaded.geometry) == 1
         # get meshes back
         sphere_b = list(reloaded.geometry.values())[0]
-        assert (sphere_b.visual.material.baseColorFactor == (255, 0, 0, 255)).all()
+        assert (
+            sphere_b.visual.material.baseColorFactor == (
+                255, 0, 0, 255)).all()

     def test_material_hash(self):

@@ -975,7 +978,11 @@ def test_gltf_by_name(self):

         assert isinstance(r, g.trimesh.Scene)
         assert len(r.geometry) == 1
-        assert g.np.isclose(next(iter(r.geometry.values())).volume, m.volume)
+        assert g.np.isclose(
+            next(
+                iter(
+                    r.geometry.values())).volume,
+            m.volume)

     def test_embed_buffer(self):

@@ -1019,6 +1026,24 @@ def test_webp(self):

         g.scene_equal(g.trimesh.Scene(mesh), reloaded)

+    def test_relative_paths(self):
+        # try with a relative path
+        with g.TemporaryDirectory() as d:
+            g.os.makedirs(g.os.path.join(d, 'fused'))
+            g.os.chdir(d)
+            g.trimesh.creation.box().export('fused/hi.gltf')
+            r = g.trimesh.load('fused/hi.gltf')
+            assert g.np.isclose(r.volume, 1.0)
+
+        with g.TemporaryDirectory() as d:
+            # now try it without changing to that directory
+            full = g.os.path.join(d, 'hi', 'there', 'different', 'levels')
+            path = g.os.path.join(full, 'hey.gltf')
+            g.os.makedirs(full)
+            g.trimesh.creation.box().export(path)
+            r = g.trimesh.load(path)
+            assert g.np.isclose(r.volume, 1.0)
+

 if __name__ == '__main__':
     g.trimesh.util.attach_to_log()

From f5c9dc8d7113eabb34f17a78a2ceade3e940770d Mon Sep 17 00:00:00 2001
From: Michael Dawson-Haggerty
Date: Fri, 15 Sep 2023 13:15:10 -0400
Subject: [PATCH 64/84] fix #1970

---
 tests/test_obj.py | 7 +++++++
 trimesh/exchange/obj.py | 5 +++--
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/tests/test_obj.py b/tests/test_obj.py
index 90db07741..a5e8a5238 100644
--- a/tests/test_obj.py
+++ b/tests/test_obj.py
@@ -464,6 +464,13 @@ def test_export_normals(self):
         e = m.export(file_type='obj', include_normals=False)
         assert 'vn ' not in e

+    def test_export_mtl_args():
+        mesh = g.trimesh.creation.box()
+        # check for a crash with no materials defined
+        a, b = g.trimesh.exchange.obj.export_obj(mesh, return_texture=True, mtl_name='hi.mtl')
+
+
+

 def simple_load(text):
     # we're going to load faces in a basic text way
diff --git a/trimesh/exchange/obj.py b/trimesh/exchange/obj.py
index 7fb50f87d..22dae35ca 100644
--- a/trimesh/exchange/obj.py
+++ b/trimesh/exchange/obj.py
@@ -929,12 +929,13 @@ def export_obj(mesh,
         # add this object
         objects.append('\n'.join(export))

+
+    # collect files like images to write
+    mtl_data = {}
     # combine materials
     if len(materials) > 0:
         # collect text for a single mtllib file
         mtl_lib = []
-        # collect files like images to write
-        mtl_data = {}
         # now loop through: keys are garbage hash
         # values are (data, name)
         for data, _ in materials.values():

From ad28bd0d612d038afedef6769025ac11e69bc025 Mon Sep 17 00:00:00 2001
From: Michael Dawson-Haggerty
Date: Fri, 15 Sep 2023 14:42:46 -0400
Subject: [PATCH 65/84] voxel caching still weird

---
 trimesh/voxel/encoding.py | 153 ++++++++++++++++--------------------
 trimesh/voxel/morphology.py | 8 +-
 trimesh/voxel/ops.py | 6 +-
 3 files changed, 73 insertions(+), 94 deletions(-)

diff --git a/trimesh/voxel/encoding.py b/trimesh/voxel/encoding.py
index ade24e022..6254475c1 100644
--- a/trimesh/voxel/encoding.py
+++ b/trimesh/voxel/encoding.py
@@ -4,7 +4,7 @@
 import numpy as np

 from .. import caching
-from ..util import ABC, log
+from ..util import ABC
 from . 
import runlength try: @@ -35,9 +35,17 @@ class Encoding(ABC): """ def __init__(self, data): - self._data = data + # a key-value store of numpy arrays + self._data = caching.DataStore() + + # dumped when cache changes self._cache = caching.Cache(id_function=self._data.__hash__) + if isinstance(data, np.ndarray): + self._data["encoding"] = data + else: + raise TypeError(type(data)) + @abc.abstractproperty def dtype(self): pass @@ -118,22 +126,6 @@ def stripped(self): def _flip(self, axes): return FlippedEncoding(self, axes) - def crc(self): - log.warning( - "`geometry.crc()` is deprecated and will " - + "be removed in October 2023: replace " - + "with `geometry.__hash__()` or `hash(geometry)`" - ) - return self.__hash__() - - def hash(self): - log.warning( - "`geometry.hash()` is deprecated and will " - + "be removed in October 2023: replace " - + "with `geometry.__hash__()` or `hash(geometry)`" - ) - return self.__hash__() - def __hash__(self): """ Get the hash of the current transformation matrix. @@ -196,31 +188,29 @@ class DenseEncoding(Encoding): """Simple `Encoding` implementation based on a numpy ndarray.""" def __init__(self, data): - if not isinstance(data, caching.TrackedArray): - if not isinstance(data, np.ndarray): - raise ValueError("DenseEncoding data must be a numpy array") - data = caching.tracked_array(data) + if not isinstance(data, np.ndarray): + raise ValueError("DenseEncoding data must be a numpy array") super().__init__(data=data) @property def dtype(self): - return self._data.dtype + return self._data["encoding"].dtype @property def shape(self): - return self._data.shape + return self._data["encoding"].shape @caching.cache_decorator def sum(self): - return self._data.sum() + return self._data["encoding"].sum() @caching.cache_decorator def is_empty(self): - return not np.any(self._data) + return not np.any(self._data["encoding"]) @property def size(self): - return self._data.size + return self._data["encoding"].size @property def sparse_components(self): @@ -230,7 +220,7 @@ def sparse_components(self): @caching.cache_decorator def sparse_indices(self): - return np.column_stack(np.where(self._data)) + return np.column_stack(np.where(self._data["encoding"])) @caching.cache_decorator def sparse_values(self): @@ -244,19 +234,21 @@ def _flip(self, axes): @property def dense(self): - return self._data + return self._data["encoding"] def gather(self, indices): - return self._data[indices] + return self._data["encoding"][indices] def gather_nd(self, indices): - return self._data[tuple(indices.T)] + return self._data["encoding"][tuple(indices.T)] def mask(self, mask): - return self._data[mask if isinstance(mask, np.ndarray) else mask.dense] + return self._data["encoding"][ + mask if isinstance(mask, np.ndarray) else mask.dense + ] def get_value(self, index): - return self._data[tuple(index)] + return self._data["encoding"][tuple(index)] def reshape(self, shape): return DenseEncoding(self._data.reshape(shape)) @@ -329,11 +321,11 @@ def copy(self): @property def sparse_indices(self): - return self._data["indices"] + return self._data["encoding"]["indices"] @property def sparse_values(self): - return self._data["values"] + return self._data["encoding"]["values"] @property def dtype(self): @@ -430,9 +422,7 @@ def SparseBinaryEncoding(indices, shape=None): ------------ rank n bool `SparseEncoding` with True values at each index. 
""" - return SparseEncoding( - indices, np.ones(shape=(indices.shape[0],), dtype=bool), shape - ) + return SparseEncoding(indices, np.ones(shape=(indices.shape[0],), dtype=bool), shape) class RunLengthEncoding(Encoding): @@ -452,13 +442,15 @@ def __init__(self, data, dtype=None): super().__init__(data=caching.tracked_array(data)) if dtype is None: dtype = self._data.dtype - if len(self._data.shape) != 1: + if len(self._data["encoding"].shape) != 1: raise ValueError("data must be 1D numpy array") self._dtype = dtype @caching.cache_decorator def is_empty(self): - return not np.any(np.logical_and(self._data[::2], self._data[1::2])) + return not np.any( + np.logical_and(self._data["encoding"][::2], self._data["encoding"][1::2]) + ) @property def ndims(self): @@ -472,22 +464,6 @@ def shape(self): def dtype(self): return self._dtype - def crc(self): - log.warning( - "`geometry.crc()` is deprecated and will " - + "be removed in October 2023: replace " - + "with `geometry.__hash__()` or `hash(geometry)`" - ) - return self.__hash__() - - def hash(self): - log.warning( - "`geometry.hash()` is deprecated and will " - + "be removed in October 2023: replace " - + "with `geometry.__hash__()` or `hash(geometry)`" - ) - return self.__hash__() - def __hash__(self): """ Get the hash of the current transformation matrix. @@ -529,7 +505,7 @@ def stripped(self): @caching.cache_decorator def sum(self): - return (self._data[::2] * self._data[1::2]).sum() + return (self._data["encoding"][::2] * self._data["encoding"][1::2]).sum() @caching.cache_decorator def size(self): @@ -602,7 +578,7 @@ def __init__(self, data): @caching.cache_decorator def is_empty(self): - return not np.any(self._data[1::2]) + return not np.any(self._data["encoding"][1::2]) @staticmethod def from_dense(dense_data, encoding_dtype=np.int64): @@ -634,7 +610,7 @@ def stripped(self): @caching.cache_decorator def sum(self): - return self._data[1::2].sum() + return self._data["encoding"][1::2].sum() @caching.cache_decorator def size(self): @@ -736,7 +712,7 @@ def gather_nd(self, indices): return self._data.gather_nd(self._to_base_indices(indices)) def get_value(self, index): - return self._data[tuple(self._to_base_indices(index))] + return self._data["encoding"][tuple(self._to_base_indices(index))] class FlattenedEncoding(LazyIndexMap): @@ -747,11 +723,11 @@ class FlattenedEncoding(LazyIndexMap): """ def _to_base_indices(self, indices): - return np.column_stack(np.unravel_index(indices, self._data.shape)) + return np.column_stack(np.unravel_index(indices, self._data["encoding"].shape)) def _from_base_indices(self, base_indices): return np.expand_dims( - np.ravel_multi_index(base_indices.T, self._data.shape), axis=-1 + np.ravel_multi_index(base_indices.T, self._data["encoding"].shape), axis=-1 ) @property @@ -760,17 +736,17 @@ def shape(self): @property def dense(self): - return self._data.dense.reshape((-1,)) + return self._data["encoding"].dense.reshape((-1,)) def mask(self, mask): - return self._data.mask(mask.reshape(self._data.shape)) + return self._data["encoding"].mask(mask.reshape(self._data["encoding"].shape)) @property def flat(self): return self def copy(self): - return FlattenedEncoding(self._data.copy()) + return FlattenedEncoding(self._data["encoding"].copy()) class ShapedEncoding(LazyIndexMap): @@ -792,19 +768,19 @@ def __init__(self, encoding, shape): size = np.prod(self._shape) if nn == 1: size = np.abs(size) - if self._data.size % size != 0: + if self._data["encoding"].size % size != 0: raise ValueError( "cannot reshape encoding of 
size %d into shape %s" - % (self._data.size, str(self._shape)) + % (self._data["encoding"].size, str(self._shape)) ) - rem = self._data.size // size + rem = self._data["encoding"].size // size self._shape = tuple(rem if s == -1 else s for s in self._shape) elif nn > 2: raise ValueError("shape cannot have more than one -1 value") - elif np.prod(self._shape) != self._data.size: + elif np.prod(self._shape) != self._data["encoding"].size: raise ValueError( "cannot reshape encoding of size %d into shape %s" - % (self._data.size, str(self._shape)) + % (self._data["encoding"].size, str(self._shape)) ) def _from_base_indices(self, base_indices): @@ -823,13 +799,13 @@ def shape(self): @property def dense(self): - return self._data.dense.reshape(self.shape) + return self._data["encoding"].dense.reshape(self.shape) def mask(self, mask): - return self._data.mask(mask.flat) + return self._data["encoding"].mask(mask.flat) def copy(self): - return ShapedEncoding(encoding=self._data.copy(), shape=self.shape) + return ShapedEncoding(encoding=self._data["encoding"].copy(), shape=self.shape) class TransposedEncoding(LazyIndexMap): @@ -870,7 +846,7 @@ def perm(self): @property def shape(self): - shape = self._data.shape + shape = self._data["encoding"].shape return tuple(shape[p] for p in self._perm) def _to_base_indices(self, indices): @@ -887,23 +863,29 @@ def _from_base_indices(self, base_indices): @property def dense(self): - return self._data.dense.transpose(self._perm) + return self._data["encoding"].dense.transpose(self._perm) def gather(self, indices): - return self._data.gather(self._base_indices(indices)) + return self._data["encoding"].gather(self._base_indices(indices)) def mask(self, mask): - return self._data.mask(mask.transpose(self._inv_perm)).transpose(self._perm) + return ( + self._data["encoding"] + .mask(mask.transpose(self._inv_perm)) + .transpose(self._perm) + ) def get_value(self, index): - return self._data[tuple(self._base_indices(index))] + return self._data["encoding"][tuple(self._base_indices(index))] @property def data(self): return self._data def copy(self): - return TransposedEncoding(base_encoding=self._data.copy(), perm=self._perm) + return TransposedEncoding( + base_encoding=self._data["encoding"].copy(), perm=self._perm + ) class FlippedEncoding(LazyIndexMap): @@ -924,9 +906,10 @@ def __init__(self, encoding, axes): if len(set(self._axes)) != len(self._axes): raise ValueError("Axes cannot contain duplicates, got %s" % str(self._axes)) super().__init__(encoding) - if not all(0 <= a < self._data.ndims for a in axes): + if not all(0 <= a < self._data["encoding"].ndims for a in axes): raise ValueError( - "Invalid axes %s for %d-d encoding" % (str(axes), self._data.ndims) + "Invalid axes %s for %d-d encoding" + % (str(axes), self._data["encoding"].ndims) ) def _to_base_indices(self, indices): @@ -942,11 +925,11 @@ def _from_base_indices(self, base_indices): @property def shape(self): - return self._data.shape + return self._data["encoding"].shape @property def dense(self): - dense = self._data.dense + dense = self._data["encoding"].dense for a in self._axes: dense = np.flip(dense, a) return dense @@ -955,10 +938,10 @@ def mask(self, mask): if not isinstance(mask, Encoding): mask = DenseEncoding(mask) mask = mask.flip(self._axes) - return self._data.mask(mask).flip(self._axes) + return self._data["encoding"].mask(mask).flip(self._axes) def copy(self): - return FlippedEncoding(self._data.copy(), self._axes) + return FlippedEncoding(self._data["encoding"].copy(), self._axes) def 
flip(self, axis=0): if isinstance(axis, np.ndarray): diff --git a/trimesh/voxel/morphology.py b/trimesh/voxel/morphology.py index c4a72b335..afe0e1497 100644 --- a/trimesh/voxel/morphology.py +++ b/trimesh/voxel/morphology.py @@ -50,9 +50,7 @@ def _assert_rank(value, rank): def _assert_sparse_rank(value, rank=None): if len(value.shape) != 2: - raise ValueError( - "sparse_indices must be rank 2, got shape %s" % str(value.shape) - ) + raise ValueError("sparse_indices must be rank 2, got shape %s" % str(value.shape)) if rank is not None: if value.shape[-1] != rank: raise ValueError( @@ -153,9 +151,7 @@ def binary_dilation(encoding, **kwargs): https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.ndimage.morphology.binary_dilation.html#scipy.ndimage.morphology.binary_dilation """ - return enc.DenseEncoding( - ndimage.binary_dilation(_dense(encoding, rank=3), **kwargs) - ) + return enc.DenseEncoding(ndimage.binary_dilation(_dense(encoding, rank=3), **kwargs)) def binary_closing(encoding, **kwargs): diff --git a/trimesh/voxel/ops.py b/trimesh/voxel/ops.py index ac0123b85..afdab885b 100644 --- a/trimesh/voxel/ops.py +++ b/trimesh/voxel/ops.py @@ -256,9 +256,9 @@ def multibox(centers, pitch=1.0, colors=None): v += np.tile(b.vertices, (len(centers), 1)) f = np.tile(b.faces, (len(centers), 1)) - f += np.tile( - np.arange(len(centers)) * len(b.vertices), (len(b.faces), 1) - ).T.reshape((-1, 1)) + f += np.tile(np.arange(len(centers)) * len(b.vertices), (len(b.faces), 1)).T.reshape( + (-1, 1) + ) face_colors = None if colors is not None: From 49518e1701d34efd427b6142890384463d90d367 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Sun, 17 Sep 2023 15:47:38 -0400 Subject: [PATCH 66/84] fix test_obj --- tests/test_cache.py | 239 ++++++++++++++++++++++------------- tests/test_obj.py | 296 ++++++++++++++++++++------------------------ 2 files changed, 287 insertions(+), 248 deletions(-) diff --git a/tests/test_cache.py b/tests/test_cache.py index f4b97cdd4..d74e376f5 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -7,7 +7,6 @@ class CacheTest(g.unittest.TestCase): - def test_track(self): """ Check to make sure our fancy caching system only changes @@ -15,17 +14,18 @@ def test_track(self): """ original = g.trimesh.caching.hash_fast - options = [g.trimesh.caching.hash_fast, - g.trimesh.caching.hash_fallback, - g.trimesh.caching.sha256] + options = [ + g.trimesh.caching.hash_fast, + g.trimesh.caching.hash_fallback, + g.trimesh.caching.sha256, + ] for option in options: - g.log.info(f'testing hash function: {option.__name__}') + g.log.info(f"testing hash function: {option.__name__}") g.trimesh.caching.hash_fast = option # generate test data and perform numpy operations - a = g.trimesh.caching.tracked_array( - g.random(TEST_DIM)) + a = g.trimesh.caching.tracked_array(g.random(TEST_DIM)) modified = [hash(a)] a[0][0] = 10 modified.append(hash(a)) @@ -65,9 +65,7 @@ def test_track(self): a += 10 modified.append(hash(a)) # assign some new data - a = g.trimesh.caching.tracked_array( - [.125, 115.32444, 4], - dtype=g.np.float64) + a = g.trimesh.caching.tracked_array([0.125, 115.32444, 4], dtype=g.np.float64) modified.append(hash(a)) a += [10, 0, 0] @@ -102,12 +100,14 @@ def test_contiguous(self): t = g.trimesh.caching.tracked_array(a) original = g.trimesh.caching.hash_fast - options = [g.trimesh.caching.hash_fast, - g.trimesh.caching.hash_fallback, - g.trimesh.caching.sha256] + options = [ + g.trimesh.caching.hash_fast, + g.trimesh.caching.hash_fallback, + g.trimesh.caching.sha256, 
+ ] for option in options: - g.log.info(f'testing hash function: {option.__name__}') + g.log.info(f"testing hash function: {option.__name__}") g.trimesh.caching.hash_fast = option # hashing will fail on non- contiguous arrays # make sure our utility function has handled this @@ -122,10 +122,10 @@ def test_mutable(self): """ d = g.trimesh.caching.DataStore() - d['hi'] = g.random(100) + d["hi"] = g.random(100) hash_initial = hash(d) # mutate internal data - d['hi'][0] += 1 + d["hi"][0] += 1 assert hash(d) != hash_initial # should be mutable by default @@ -134,18 +134,18 @@ def test_mutable(self): d.mutable = False try: - d['hi'][1] += 1 + d["hi"][1] += 1 except ValueError: # should be raised when array is marked as read only return # we shouldn't make it past the try-except - raise ValueError('mutating data worked when it shouldn\'t!') + raise ValueError("mutating data worked when it shouldn't!") def test_transform(self): """ apply_transform tries to not dump the full cache """ - m = g.get_mesh('featuretype.STL') + m = g.get_mesh("featuretype.STL") # should populate edges_face e_len = len(m.edges) # should maintain required properties @@ -154,18 +154,88 @@ def test_transform(self): assert len(m.edges_face) == e_len def test_simple_collision(self): - faces1 = g.np.array([0, 1, 2, 0, 3, 1, 0, - 2, 4, 0, 4, 5, 5, 6, - 3, 5, 3, 0, 7, 1, 3, - 7, 3, 6, 4, 2, 1, 4, - 1, 7, 5, 4, 7, 6, 5, 7], - dtype=g.np.int64).reshape(-1, 3) - faces2 = g.np.array([0, 1, 2, 0, 3, 1, 2, - 4, 0, 5, 4, 2, 6, 3, - 0, 6, 0, 4, 6, 1, 3, - 6, 7, 1, 2, 7, 5, 2, - 1, 7, 4, 5, 7, 6, 4, 7], - dtype=g.np.int64).reshape(-1, 3) + faces1 = g.np.array( + [ + 0, + 1, + 2, + 0, + 3, + 1, + 0, + 2, + 4, + 0, + 4, + 5, + 5, + 6, + 3, + 5, + 3, + 0, + 7, + 1, + 3, + 7, + 3, + 6, + 4, + 2, + 1, + 4, + 1, + 7, + 5, + 4, + 7, + 6, + 5, + 7, + ], + dtype=g.np.int64, + ).reshape(-1, 3) + faces2 = g.np.array( + [ + 0, + 1, + 2, + 0, + 3, + 1, + 2, + 4, + 0, + 5, + 4, + 2, + 6, + 3, + 0, + 6, + 0, + 4, + 6, + 1, + 3, + 6, + 7, + 1, + 2, + 7, + 5, + 2, + 1, + 7, + 4, + 5, + 7, + 6, + 4, + 7, + ], + dtype=g.np.int64, + ).reshape(-1, 3) hash_fast = g.trimesh.caching.hash_fast assert hash_fast(faces1) != hash_fast(faces2) @@ -193,90 +263,89 @@ def test_method_combinations(self): if not g.PY3: return - import itertools - + import itertools, warnings import numpy as np - from trimesh.caching import tracked_array dim = (100, 3) # generate a bunch of arguments for every function of an `ndarray` so # we can see if the functions mutate - flat = [2.3, - 1, - 10, - 4.2, - [3, -1], - {'shape': 10}, - np.int64, - np.float64, - True, True, - False, False, - g.random(dim), - g.random(dim[::1]), - 'shape'] + flat = [ + 2.3, + 1, + 10, + 4.2, + [3, -1], + {"shape": 10}, + np.int64, + np.float64, + True, + True, + False, + False, + g.random(dim), + g.random(dim[::1]), + "shape", + ] # start with no arguments attempts = [()] # add a single argument from our guesses attempts.extend([(A,) for A in flat]) # add 2 and 3 length permutations of our guesses - attempts.extend([tuple(G) for G in itertools.permutations(flat, 2)]) + attempts.extend([tuple(G) for G in itertools.product(flat, repeat=2)]) # adding 3-length permuations makes this test 10x slower but if you # are suspicious of a method caching you could uncomment this out: # attempts.extend([tuple(G) for G in itertools.permutations(flat, 3)]) - - skip = {'__array_ufunc__', # segfaulting when called with `(2.3, 1)` - 'astype', - } + skip = set() # collect functions which mutate arrays but don't change our hash 
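         # the exact invariant checked below: `hash()` on a TrackedArray
         # must change exactly when the raw buffer from `tobytes()` changes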
broken = [] - for method in list(dir(tracked_array(g.random(dim)))): - - if method in skip: - continue - - failures = [] - g.log.debug(f'hash check: `{method}`') - for A in attempts: - m = g.random((100, 3)) - true_pre = m.tobytes() - m = tracked_array(m) - hash_pre = hash(m) - try: - eval(f'm.{method}(*A)') - except BaseException as J: - failures.append(str(J)) - - hash_post = hash(m) - true_post = m.tobytes() - - # if tobytes disagrees with our hashing logic - # it indicates we have cached incorrectly - if (hash_pre == hash_post) != (true_pre == true_post): - broken.append((method, A)) + with warnings.catch_warnings(): + # ignore all warnings inside this context manager + warnings.filterwarnings("ignore") + + for method in list(dir(tracked_array(g.random(dim)))): + if method in skip: + continue + + failures = [] + g.log.debug(f"hash check: `{method}`") + for A in attempts: + m = g.random((100, 3)) + true_pre = m.tobytes() + m = tracked_array(m) + hash_pre = hash(m) + try: + eval(f"m.{method}(*A)") + except BaseException as J: + failures.append(str(J)) + + hash_post = hash(m) + true_post = m.tobytes() + + # if tobytes disagrees with our hashing logic + # it indicates we have cached incorrectly + if (hash_pre == hash_post) != (true_pre == true_post): + broken.append((method, A)) if len(broken) > 0: method_busted = {method for method, _ in broken} raise ValueError( - f'`TrackedArray` incorrectly hashing methods: {method_busted}') + f"`TrackedArray` incorrectly hashing methods: {method_busted}" + ) def test_validate(self): # create a mesh with two duplicate triangles # and one degenerate triangle m = g.trimesh.Trimesh( - vertices=[[0, 0, 0], - [1, 0, 0], - [0, 1, 0], - [1, 0, 0], - [0, 1, 0], - [1, 1, 0]], + vertices=[[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]], faces=[[3, 4, 4], [0, 1, 2], [0, 1, 2]], - validate=False) + validate=False, + ) # should not have removed any triangles assert m.triangles.shape == (3, 3, 3) @@ -285,6 +354,6 @@ def test_validate(self): assert m.triangles.shape == (1, 3, 3) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_obj.py b/tests/test_obj.py index a5e8a5238..cb5e940b4 100644 --- a/tests/test_obj.py +++ b/tests/test_obj.py @@ -5,43 +5,39 @@ class OBJTest(g.unittest.TestCase): - def test_rabbit(self): # A BSD-licensed test model from pyglet # it has mixed triangles, quads, and 16 element faces -_- # this should test the non-vectorized load path - m = g.get_mesh('rabbit.obj') + m = g.get_mesh("rabbit.obj") assert len(m.faces) == 1252 - rec = g.wrapload( - m.export(file_type='obj'), file_type='obj') + rec = g.wrapload(m.export(file_type="obj"), file_type="obj") assert g.np.isclose(m.area, rec.area) def test_no_img(self): # sometimes people use the `vt` parameter for arbitrary # vertex attributes and thus want UV coordinates even # if there is no texture image - m = g.get_mesh('noimg.obj') + m = g.get_mesh("noimg.obj") assert m.visual.uv.shape == (len(m.vertices), 2) # make sure UV coordinates are in range 0.0 - 1.0 assert m.visual.uv.max() < (1 + 1e-5) assert m.visual.uv.min() > -1e-5 # check to make sure it's not all zeros assert m.visual.uv.ptp() > 0.5 - rec = g.wrapload( - m.export(file_type='obj'), file_type='obj') + rec = g.wrapload(m.export(file_type="obj"), file_type="obj") assert g.np.isclose(m.area, rec.area) def test_trailing(self): # test files with texture and trailing slashes - m = g.get_mesh('jacked.obj') + m = g.get_mesh("jacked.obj") assert 
len(m.visual.uv) == len(m.vertices) - rec = g.wrapload( - m.export(file_type='obj'), file_type='obj') + rec = g.wrapload(m.export(file_type="obj"), file_type="obj") assert g.np.isclose(m.area, rec.area) def test_obj_groups(self): # a wavefront file with groups defined - mesh = g.get_mesh('groups.obj') + mesh = g.get_mesh("groups.obj") # make sure some data got loaded assert g.trimesh.util.is_shape(mesh.faces, (-1, 3)) @@ -56,29 +52,26 @@ def test_obj_groups(self): def test_obj_negative_indices(self): # a wavefront file with negative indices - mesh = g.get_mesh('negative_indices.obj') + mesh = g.get_mesh("negative_indices.obj") # make sure some data got loaded assert g.trimesh.util.is_shape(mesh.faces, (12, 3)) assert g.trimesh.util.is_shape(mesh.vertices, (8, 3)) def test_obj_quad(self): - mesh = g.get_mesh('quadknot.obj') + mesh = g.get_mesh("quadknot.obj") # make sure some data got loaded assert g.trimesh.util.is_shape(mesh.faces, (-1, 3)) assert g.trimesh.util.is_shape(mesh.vertices, (-1, 3)) assert mesh.is_watertight assert mesh.is_winding_consistent - rec = g.wrapload( - mesh.export(file_type='obj'), file_type='obj') + rec = g.wrapload(mesh.export(file_type="obj"), file_type="obj") assert g.np.isclose(mesh.area, rec.area) def test_obj_multiobj(self): # test a wavefront file with multiple objects in the same file - scene = g.get_mesh('two_objects.obj', - split_object=True, - group_material=False) + scene = g.get_mesh("two_objects.obj", split_object=True, group_material=False) assert len(scene.geometry) == 2 for mesh in scene.geometry.values(): @@ -93,10 +86,12 @@ def test_obj_split_attributes(self): # test a wavefront file where pos/uv/norm have different indices # and where multiple objects share vertices # Note 'process=False' to avoid merging vertices - scene = g.get_mesh('joined_tetrahedra.obj', - process=False, - split_object=True, - group_material=False) + scene = g.get_mesh( + "joined_tetrahedra.obj", + process=False, + split_object=True, + group_material=False, + ) assert len(scene.geometry) == 2 @@ -108,10 +103,9 @@ def test_obj_split_attributes(self): assert g.trimesh.util.is_shape(geom[1].vertices, (9, 3)) def test_obj_simple_order(self): - # test a simple wavefront model without split indexes # and make sure we don't reorder vertices unnecessarily - file_name = g.os.path.join(g.dir_models, 'cube.OBJ') + file_name = g.os.path.join(g.dir_models, "cube.OBJ") # load a simple OBJ file without merging vertices m = g.trimesh.load(file_name, process=False) # use trivial loading to compare with fancy performant one @@ -122,15 +116,11 @@ def test_obj_simple_order(self): assert g.np.allclose(v, m.vertices) def test_order_tex(self): - # test a simple wavefront model without split indexes # and make sure we don't reorder vertices unnecessarily - file_name = g.os.path.join(g.dir_models, 'fuze.obj') + file_name = g.os.path.join(g.dir_models, "fuze.obj") # load a simple OBJ file without merging vertices - m = g.trimesh.load( - file_name, - process=False, - maintain_order=True) + m = g.trimesh.load(file_name, process=False, maintain_order=True) # use trivial loading to compare with fancy performant one with open(file_name) as f: f, v, vt = simple_load(f.read()) @@ -139,157 +129,148 @@ def test_order_tex(self): assert g.np.allclose(v, m.vertices) def test_obj_compressed(self): - mesh = g.get_mesh('cube_compressed.obj', process=False) - assert mesh._cache.cache['vertex_normals'].shape == mesh.vertices.shape - assert g.np.allclose( - g.np.abs(mesh.vertex_normals).sum(axis=1), 1.0) + mesh = 
g.get_mesh("cube_compressed.obj", process=False) + assert mesh._cache.cache["vertex_normals"].shape == mesh.vertices.shape + assert g.np.allclose(g.np.abs(mesh.vertex_normals).sum(axis=1), 1.0) def test_vertex_color(self): # get a box mesh mesh = g.trimesh.creation.box() # set each vertex to a unique random color mesh.visual.vertex_colors = [ - g.trimesh.visual.random_color() - for _ in range(len(mesh.vertices))] + g.trimesh.visual.random_color() for _ in range(len(mesh.vertices)) + ] # export and then reload the file as OBJ rec = g.trimesh.load( - g.trimesh.util.wrap_as_stream( - mesh.export(file_type='obj')), - file_type='obj') + g.trimesh.util.wrap_as_stream(mesh.export(file_type="obj")), file_type="obj" + ) # assert colors have survived the export cycle - assert (mesh.visual.vertex_colors == - rec.visual.vertex_colors).all() + assert (mesh.visual.vertex_colors == rec.visual.vertex_colors).all() def test_single_vn(self): """ Make sure files with a single VN load. """ - m = g.get_mesh('singlevn.obj') + m = g.get_mesh("singlevn.obj") assert len(m.vertices) > 0 assert len(m.faces) > 0 def test_polygon_faces(self): - m = g.get_mesh('polygonfaces.obj') + m = g.get_mesh("polygonfaces.obj") assert len(m.vertices) > 0 assert len(m.faces) > 0 def test_faces_not_enough_indices(self): - m = g.get_mesh('notenoughindices.obj') + m = g.get_mesh("notenoughindices.obj") assert len(m.vertices) > 0 assert len(m.faces) == 1 def test_export_path(self): - m = g.get_mesh('fuze.obj') + m = g.get_mesh("fuze.obj") g.check_fuze(m) - assert m._cache.cache['vertex_normals'].shape == m.vertices.shape + assert m._cache.cache["vertex_normals"].shape == m.vertices.shape with g.TemporaryDirectory() as d: - file_path = g.os.path.join(d, 'fz.obj') + file_path = g.os.path.join(d, "fz.obj") m.export(file_path) r = g.trimesh.load(file_path) g.check_fuze(r) def test_mtl(self): # get a mesh with texture - m = g.get_mesh('fuze.obj') + m = g.get_mesh("fuze.obj") # export the mesh including data - obj, data = g.trimesh.exchange.export.export_obj( - m, return_texture=True) + obj, data = g.trimesh.exchange.export.export_obj(m, return_texture=True) with g.trimesh.util.TemporaryDirectory() as path: # where is the OBJ file going to be saved - obj_path = g.os.path.join(path, 'test.obj') - with open(obj_path, 'w') as f: + obj_path = g.os.path.join(path, "test.obj") + with open(obj_path, "w") as f: f.write(obj) # save the MTL and images for k, v in data.items(): - with open(g.os.path.join(path, k), 'wb') as f: + with open(g.os.path.join(path, k), "wb") as f: f.write(v) # reload the mesh from the export rec = g.trimesh.load(obj_path) # make sure loaded image is the same size as the original - assert (rec.visual.material.image.size == - m.visual.material.image.size) + assert rec.visual.material.image.size == m.visual.material.image.size # make sure the faces are the same size assert rec.faces.shape == m.faces.shape def test_scene(self): - s = g.get_mesh('cycloidal.3DXML') + s = g.get_mesh("cycloidal.3DXML") e = g.trimesh.load( - g.io_wrap(s.export(file_type='obj')), - file_type='obj', + g.io_wrap(s.export(file_type="obj")), + file_type="obj", split_object=True, - group_materials=False) + group_materials=False, + ) - assert g.np.isclose(e.area, s.area, rtol=.01) + assert g.np.isclose(e.area, s.area, rtol=0.01) def test_edge_cases(self): # a mesh with some NaN colors - n = g.get_mesh('nancolor.obj') + n = g.get_mesh("nancolor.obj") assert n.faces.shape == (12, 3) - v = g.get_mesh('cubevt.obj') + v = g.get_mesh("cubevt.obj") assert 
v.faces.shape == (12, 3) def test_empty_or_pointcloud(self): # demo files to check - empty_files = ['obj_empty.obj', - 'obj_points.obj', - 'obj_wireframe.obj'] + empty_files = ["obj_empty.obj", "obj_points.obj", "obj_wireframe.obj"] for empty_file in empty_files: - e = g.get_mesh('emptyIO/' + empty_file) + e = g.get_mesh("emptyIO/" + empty_file) # create export - if 'empty' in empty_file: + if "empty" in empty_file: try: - export = e.export(file_type='ply') + export = e.export(file_type="ply") except BaseException: continue - raise ValueError('cannot export empty') - elif 'points' in empty_file: - export = e.export(file_type='ply') - reconstructed = g.wrapload(export, file_type='ply') + raise ValueError("cannot export empty") + elif "points" in empty_file: + export = e.export(file_type="ply") + reconstructed = g.wrapload(export, file_type="ply") # result should be a point cloud instance assert isinstance(e, g.trimesh.PointCloud) - assert hasattr(e, 'vertices') + assert hasattr(e, "vertices") # point cloud export should contain vertices assert isinstance(reconstructed, g.trimesh.PointCloud) - assert hasattr(reconstructed, 'vertices') + assert hasattr(reconstructed, "vertices") def test_backslash_continuation_character(self): # an obj file with \ (backslash) line continuation characters - m = g.get_mesh('wallhole.obj') + m = g.get_mesh("wallhole.obj") assert m.faces.shape == (66, 3) def test_no_uv(self): - mesh = g.get_mesh('box.obj') - rec = g.wrapload( - mesh.export(file_type='obj'), file_type='obj') + mesh = g.get_mesh("box.obj") + rec = g.wrapload(mesh.export(file_type="obj"), file_type="obj") assert g.np.isclose(mesh.area, rec.area) def test_no_uv_but_mtl(self): sphere = g.trimesh.creation.uv_sphere() sphere.visual = g.trimesh.visual.TextureVisuals( - uv=None, - material=g.trimesh.visual.material.empty_material()) - output = sphere.export('sphere.obj') - assert 'usemtl' in output + uv=None, material=g.trimesh.visual.material.empty_material() + ) + output = sphere.export("sphere.obj") + assert "usemtl" in output def test_chair(self): - mesh = next(iter(g.get_mesh('chair.zip').geometry.values())) + mesh = next(iter(g.get_mesh("chair.zip").geometry.values())) # this model comes with vertex normals - assert 'vertex_normals' in mesh._cache - assert g.np.allclose( - 1.0, g.np.linalg.norm(mesh.vertex_normals, axis=1)) + assert "vertex_normals" in mesh._cache + assert g.np.allclose(1.0, g.np.linalg.norm(mesh.vertex_normals, axis=1)) mesh.apply_scale(0.46377314288075433) - assert 'vertex_normals' in mesh._cache - assert g.np.allclose( - 1.0, g.np.linalg.norm(mesh.vertex_normals, axis=1)) - assert 'vertex_normals' in mesh._cache + assert "vertex_normals" in mesh._cache + assert g.np.allclose(1.0, g.np.linalg.norm(mesh.vertex_normals, axis=1)) + assert "vertex_normals" in mesh._cache mesh._cache.clear() - assert 'vertex_normals' not in mesh._cache + assert "vertex_normals" not in mesh._cache # if we recomputed now, the degenerate faces # would lead some of these vertex normals to be zero # assert g.np.allclose( @@ -298,32 +279,30 @@ def test_chair(self): def test_multi_nodupe(self): s = g.get_mesh("forearm.zae") obj, mtl = g.trimesh.exchange.obj.export_obj( - s, include_color=True, - include_texture=True, - return_texture=True) + s, include_color=True, include_texture=True, return_texture=True + ) # should be using one material file - assert obj.count('mtllib') == 1 - assert 'mtllib material.mtl' in obj + assert obj.count("mtllib") == 1 + assert "mtllib material.mtl" in obj # should be specifying 
5 materials - assert obj.count('usemtl') == 5 + assert obj.count("usemtl") == 5 # this file has only the properties (no images) assert len(mtl) == 1 mtl_names = [ - L.strip().split()[-1].strip() for L in - mtl['material.mtl'].decode('utf-8').split('\n') - if 'newmtl' in L] + L.strip().split()[-1].strip() + for L in mtl["material.mtl"].decode("utf-8").split("\n") + if "newmtl" in L + ] # there should be 5 unique material names assert len(set(mtl_names)) == 5 def test_mtl_color_roundtrip(self): - # create a mesh with a simple material m = g.trimesh.creation.box() m.visual = m.visual.to_texture() # set each color component to a unique value - colors = [g.trimesh.visual.color.random_color() - for _ in range(3)] + colors = [g.trimesh.visual.color.random_color() for _ in range(3)] m.visual.material.ambient = colors[0] m.visual.material.specular = colors[1] m.visual.material.diffuse = colors[2] @@ -333,69 +312,59 @@ def test_mtl_color_roundtrip(self): # exporting by filename will automatically # create a FilePathResolver which writes the # `mtl` file to the same directory - file_name = g.os.path.join(d, 'hi.obj') + file_name = g.os.path.join(d, "hi.obj") m.export(file_name) # reload the export by file name r = g.trimesh.load(file_name) # these values should have survived the roundtrip - assert g.np.allclose(m.visual.material.ambient, - r.visual.material.ambient) - assert g.np.allclose(m.visual.material.specular, - r.visual.material.specular) - assert g.np.allclose(m.visual.material.diffuse, - r.visual.material.diffuse) - assert g.np.isclose(m.visual.material.glossiness, - r.visual.material.glossiness) + assert g.np.allclose(m.visual.material.ambient, r.visual.material.ambient) + assert g.np.allclose(m.visual.material.specular, r.visual.material.specular) + assert g.np.allclose(m.visual.material.diffuse, r.visual.material.diffuse) + assert g.np.isclose(m.visual.material.glossiness, r.visual.material.glossiness) def test_scene_export_material_name(self): - s = g.get_mesh('fuze.obj', force='scene') - dummy = 'fuxx' - s.geometry['fuze.obj'].visual.material.name = dummy + s = g.get_mesh("fuze.obj", force="scene") + dummy = "fuxx" + s.geometry["fuze.obj"].visual.material.name = dummy r = g.trimesh.resolvers.ZipResolver() - r['model.obj'] = s.export( - file_type='obj', - mtl_name='mystuff.mtl', - resolver=r) + r["model.obj"] = s.export(file_type="obj", mtl_name="mystuff.mtl", resolver=r) - mtl = r['mystuff.mtl'].decode('utf-8') - assert mtl.count('newmtl') == 1 - assert f'newmtl {dummy}' in mtl - assert f'{dummy}.jpeg' in r + mtl = r["mystuff.mtl"].decode("utf-8") + assert mtl.count("newmtl") == 1 + assert f"newmtl {dummy}" in mtl + assert f"{dummy}.jpeg" in r def test_compound_scene_export(self): - # generate a mesh with multiple textures - a = g.get_mesh('BoxTextured.glb') + a = g.get_mesh("BoxTextured.glb") a = a.scaled(1.0 / a.extents.max()) a.apply_translation(-a.bounds[0]) - b = g.get_mesh('fuze.obj').scene() + b = g.get_mesh("fuze.obj").scene() b = b.scaled(1.0 / b.extents.max()) b.apply_translation(-b.bounds[0] + [2, 0, 0]) d = next(iter(b.copy().geometry.values())) d.apply_translation([-1, 0, 0]) - assert hash(d.visual.material) == hash( - b.geometry['fuze.obj'].visual.material) + assert hash(d.visual.material) == hash(b.geometry["fuze.obj"].visual.material) # should change the material hash d.visual.material.glossiness = 0.1 - assert hash(d.visual.material) != hash( - b.geometry['fuze.obj'].visual.material) + assert hash(d.visual.material) != hash(b.geometry["fuze.obj"].visual.material) # 
generate a compound scene
         c = a + b + d

         for i in c.geometry.values():
             # name all the materials the same thing
-            i.visual.material.name = 'material_0'
+            i.visual.material.name = "material_0"

         # export the compound scene
-        obj, mtl = c.export(file_type='obj', return_texture=True)
+        obj, mtl = c.export(file_type="obj", return_texture=True)

         # there should be exactly one mtllib referenced
-        assert obj.count('mtllib') == 1
-        assert obj.count('usemtl') == 3
+        assert obj.count("mtllib") == 1
+        assert obj.count("usemtl") == 3

         # should be one texture image for each of 3
         # plus the `.mtl` file itself
@@ -406,19 +375,21 @@ def test_compound_scene_export(self):

         # get the material names specified
         mtl_names = [
-            L.strip().split()[-1].strip() for L in
-            mtl['material.mtl'].decode('utf-8').split('\n')
-            if 'newmtl' in L]
+            L.strip().split()[-1].strip()
+            for L in mtl["material.mtl"].decode("utf-8").split("\n")
+            if "newmtl" in L
+        ]
         # there should be 3 unique material names
         assert len(set(mtl_names)) == 3

         # now reload the compound scene
         t = g.trimesh.load(
             file_obj=g.trimesh.util.wrap_as_stream(obj),
-            file_type='obj',
+            file_type="obj",
             resolver=g.trimesh.resolvers.ZipResolver(mtl),
             group_material=False,
-            split_object=True)
+            split_object=True,
+        )
         # these names should match eventually
         assert len(t.geometry.keys()) == len(c.geometry.keys())
         assert g.np.isclose(t.area, c.area)

     def test_face_parsing_in_group_names(self):
         # Checks that an obj with a g tag containing a face like name (an 'f '
         # followed by three space separated text chunks, ex: f 1 2 3) does load
         # properly
-        m = g.get_mesh('face_in_group_name.obj')
+        m = g.get_mesh("face_in_group_name.obj")
         assert len(m.vertices) == 1

     def test_face_parsing_in_group_names_with_object_tag(self):
         # Checks that an obj with a g tag in the middle of a file,
         # containing a face like name (an 'f ' followed by three
         # space separated text chunks, ex: f 1 2 3), does load properly
-        m = g.get_mesh('face_in_group_name_mid_file.obj')
+        m = g.get_mesh("face_in_group_name_mid_file.obj")
         assert len(m.vertices) == 5
         assert len(m.faces) == 2

     def test_chunk_parsing_with_no_faces_but_with_f_in_chunk(self):
         # Checks that a chunk with no faces but with 'f ' in it loads properly
-        m = g.get_mesh('obj_with_no_face_in_chunk.obj')
+        m = g.get_mesh("obj_with_no_face_in_chunk.obj")
         assert len(m.vertices) == 3
         assert len(m.faces) == 1

     def test_export_normals(self):
         m = g.trimesh.creation.box()
-        assert 'vertex_normals' not in m._cache.cache
+        assert "vertex_normals" not in m._cache.cache

-        e = m.export(file_type='obj', include_normals=None)
-        assert 'vn ' not in e
-        e = m.export(file_type='obj', include_normals=True)
+        e = m.export(file_type="obj", include_normals=None)
+        assert "vn " not in e
+        e = m.export(file_type="obj", include_normals=True)
         # should have included normals
-        assert 'vn ' in e
+        assert "vn " in e
         # should have forced generation of normals
-        assert 'vertex_normals' in m._cache.cache
+        assert "vertex_normals" in m._cache.cache
         # now that they're in cache include_normals=None should get them
-        e = m.export(file_type='obj', include_normals=None)
-        assert 'vn ' in e
+        e = m.export(file_type="obj", include_normals=None)
+        assert "vn " in e
         # or skip them if explicitly asked
-        e = m.export(file_type='obj', include_normals=False)
-        assert 'vn ' not in e
+        e = m.export(file_type="obj", include_normals=False)
+        assert "vn " not in e

-    def test_export_mtl_args():
+    def test_export_mtl_args(self):
         mesh = g.trimesh.creation.box()
         # check for a crash with 
no materials defined - a, b = g.trimesh.exchange.obj.export_obj(mesh, return_texture=True, mtl_name='hi.mtl') - - + a, b = g.trimesh.exchange.obj.export_obj( + mesh, return_texture=True, mtl_name="hi.mtl" + ) def simple_load(text): @@ -485,15 +456,14 @@ def simple_load(text): line = line.strip() if len(line) < 2: continue - elif line.startswith('f '): - if '/' in line: - f.append([int(i.split('/', 1)[0]) - for i in line[1:].strip().split()]) + elif line.startswith("f "): + if "/" in line: + f.append([int(i.split("/", 1)[0]) for i in line[1:].strip().split()]) else: f.append(line[1:].strip().split()) - elif line.startswith('v '): + elif line.startswith("v "): v.append(line[1:].strip().split()) - elif line.startswith('vt '): + elif line.startswith("vt "): vt.append(line[2:].strip().split()) # get faces as basic numpy array @@ -504,6 +474,6 @@ def simple_load(text): return f, v, vt -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() From 49ec9aab81f4f0840235041dedfac83d827e074c Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Sun, 17 Sep 2023 16:48:41 -0400 Subject: [PATCH 67/84] fix formatting --- tests/test_cache.py | 5 ++++- trimesh/transformations.py | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/test_cache.py b/tests/test_cache.py index d74e376f5..09dda4ed2 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -263,8 +263,11 @@ def test_method_combinations(self): if not g.PY3: return - import itertools, warnings + import itertools + import warnings + import numpy as np + from trimesh.caching import tracked_array dim = (100, 3) diff --git a/trimesh/transformations.py b/trimesh/transformations.py index fc8efed44..bc2861b93 100644 --- a/trimesh/transformations.py +++ b/trimesh/transformations.py @@ -2144,7 +2144,7 @@ def transform_points(points, matrix, translate=True): Transformed points. 
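
     Examples
     ----------
     A minimal sketch of the `matrix is None` passthrough:
     >>> points = np.arange(6, dtype=np.float64).reshape((-1, 2))
     >>> bool(np.allclose(transform_points(points, None), points))
     True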
""" points = np.asanyarray(points, dtype=np.float64) - if len(points) == 0: + if len(points) == 0 or matrix is None: return points.copy() # check the matrix against the points From af19e8be6c8f14642afe5ff42e6e258cc40bf9ac Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 18 Sep 2023 14:55:09 -0400 Subject: [PATCH 68/84] update domain trimsh.org->trimesh.org --- docs/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/Makefile b/docs/Makefile index 4f9ef3903..f993b0d68 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -21,7 +21,7 @@ example_rsts = $(foreach name, $(example_names), examples.$(name).rst) html: conf.py index.rst *.md README.rst trimesh.rst examples.md $(example_rsts) $(example_htmls) .deps @$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) touch "$(BUILDDIR)/html/.nojekyll" - echo "trimsh.org" > "$(BUILDDIR)/html/CNAME" + echo "trimesh.org" > "$(BUILDDIR)/html/CNAME" mv "$(BUILDDIR)/html/_static/examples" "$(BUILDDIR)/html/examples" || true mv "$(BUILDDIR)/html/_static/images" "$(BUILDDIR)/html/images" || true cp "$(STATICDIR)/favicon.ico" "$(BUILDDIR)/html/favicon.ico" || true From ba4857ad50cbdb424e65f381fab37c1fa1e685ca Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 14:07:16 -0400 Subject: [PATCH 69/84] attempt --- trimesh/voxel/base.py | 25 ++++++------ trimesh/voxel/encoding.py | 82 ++++++++++++++++++++++----------------- 2 files changed, 59 insertions(+), 48 deletions(-) diff --git a/trimesh/voxel/base.py b/trimesh/voxel/base.py index c7c3d40e2..1f4ccbe80 100644 --- a/trimesh/voxel/base.py +++ b/trimesh/voxel/base.py @@ -29,18 +29,17 @@ def __init__(self, encoding, transform=None, metadata=None): """ if transform is None: transform = np.eye(4) - if isinstance(encoding, np.ndarray): - encoding = DenseEncoding(encoding.astype(bool)) - if encoding.dtype != bool: - raise ValueError("encoding must have dtype bool") + self._data = caching.DataStore() self._cache = caching.Cache(id_function=self._data.__hash__) + self._transform = transforms.Transform(transform, datastore=self._data) + # use our setter self.encoding = encoding - self.metadata = {} # update the mesh metadata with passed metadata + self.metadata = {} if isinstance(metadata, dict): self.metadata.update(metadata) elif metadata is not None: @@ -64,21 +63,23 @@ def encoding(self): See `trimesh.voxel.encoding` for implementations. 
""" - return self._data["encoding"] + return self._encoding @encoding.setter def encoding(self, encoding): if isinstance(encoding, np.ndarray): encoding = DenseEncoding(encoding) elif not isinstance(encoding, Encoding): - raise ValueError("encoding must be an Encoding, got %s" % str(encoding)) + raise TypeError(type(encoding)) + if len(encoding.shape) != 3: - raise ValueError( - "encoding must be rank 3, got shape %s" % str(encoding.shape) - ) + raise ValueError(f"encoding.shape: (3,) != {encoding.shape}") if encoding.dtype != bool: - raise ValueError("encoding must be binary, got %s" % encoding.dtype) - self._data["encoding"] = encoding + raise ValueError(f"encoding.dtype: bool != {encoding.dtype}") + + self._data.data.update(encoding._data.data) + encoding._data = self._data + self._encoding = encoding @property def transform(self) -> NDArray[float64]: diff --git a/trimesh/voxel/encoding.py b/trimesh/voxel/encoding.py index 6254475c1..644b6c4a3 100644 --- a/trimesh/voxel/encoding.py +++ b/trimesh/voxel/encoding.py @@ -34,7 +34,7 @@ class Encoding(ABC): and dense encodings (wrappers around np.ndarrays). """ - def __init__(self, data): + def __init__(self, data=None): # a key-value store of numpy arrays self._data = caching.DataStore() @@ -43,7 +43,9 @@ def __init__(self, data): if isinstance(data, np.ndarray): self._data["encoding"] = data - else: + elif isinstance(data, Encoding): + self._data.data.update(data._data.data) + elif data is not None: raise TypeError(type(data)) @abc.abstractproperty @@ -188,8 +190,6 @@ class DenseEncoding(Encoding): """Simple `Encoding` implementation based on a numpy ndarray.""" def __init__(self, data): - if not isinstance(data, np.ndarray): - raise ValueError("DenseEncoding data must be a numpy array") super().__init__(data=data) @property @@ -277,32 +277,35 @@ def __init__(self, indices, values, shape=None): """ Parameters ------------ - indices: (m, n)-sized int array of indices - values: (m, n)-sized dtype array of values at the specified indices - shape: (n,) iterable of integers. If None, the maximum value of indices + indices : (m, n)-sized int array of indices + values : (m, n)-sized dtype array of values at the specified indices + shape : (n,) iterable of integers. If None, the maximum value of indices + 1 is used. 
""" - data = caching.DataStore() - super().__init__(data) - data["indices"] = indices - data["values"] = values - indices = data["indices"] + + # create the datastore and cache + super().__init__() + + indices = np.asanyarray(indices, dtype=np.int64) + values = np.asanyarray(values) + + if not np.all(indices >= 0): + raise ValueError("all indices must be non-negative") + if len(indices.shape) != 2: raise ValueError("indices must be 2D, got shaped %s" % str(indices.shape)) - if data["values"].shape != (indices.shape[0],): - raise ValueError( - "values and indices shapes inconsistent: {} and {}".format( - data["values"], data["indices"] - ) - ) + if len(values) != len(indices): + raise ValueError("values and indices shapes inconsistent") if shape is None: - self._shape = tuple(data["indices"].max(axis=0) + 1) + shape = tuple(indices.max(axis=0) + 1) else: - self._shape = tuple(shape) - if not np.all(indices < self._shape): + shape = tuple(shape) + if (indices > shape).any(): raise ValueError("all indices must be less than shape") - if not np.all(indices >= 0): - raise ValueError("all indices must be non-negative") + + self._data["indices"] = indices + self._data["values"] = values + self._data["shape"] = shape @staticmethod def from_dense(dense_data): @@ -321,11 +324,11 @@ def copy(self): @property def sparse_indices(self): - return self._data["encoding"]["indices"] + return self._data["indices"] @property def sparse_values(self): - return self._data["encoding"]["values"] + return self._data["values"] @property def dtype(self): @@ -341,7 +344,7 @@ def ndims(self): @property def shape(self): - return self._shape + return self._data["shape"] @property def size(self): @@ -439,12 +442,12 @@ def __init__(self, data, dtype=None): dtype: dtype of encoded data. Each second value of data is cast will be cast to this dtype if provided. 
""" - super().__init__(data=caching.tracked_array(data)) - if dtype is None: - dtype = self._data.dtype - if len(self._data["encoding"].shape) != 1: + super().__init__() + data = np.asanyarray(data, dtype=dtype) + if len(data.shape) != 1: raise ValueError("data must be 1D numpy array") - self._dtype = dtype + + self._data["encoding"] = data @caching.cache_decorator def is_empty(self): @@ -462,7 +465,7 @@ def shape(self): @property def dtype(self): - return self._dtype + return self._data["encoding"].dtype def __hash__(self): """ @@ -495,7 +498,7 @@ def from_brle(brle_data, dtype=None): def stripped(self): if self.is_empty: return _empty_stripped(self.shape) - data, padding = runlength.rle_strip(self._data) + data, padding = runlength.rle_strip(self._data["encoding"]) if padding == (0, 0): encoding = self else: @@ -509,12 +512,12 @@ def sum(self): @caching.cache_decorator def size(self): - return runlength.rle_length(self._data) + return runlength.rle_length(self._data["encoding"]) def _flip(self, axes): if axes != (0,): raise ValueError("encoding is 1D - cannot flip on axis %s" % str(axes)) - return RunLengthEncoding(runlength.rle_reverse(self._data)) + return RunLengthEncoding(runlength.rle_reverse(self._data["encoding"])) @caching.cache_decorator def sparse_components(self): @@ -698,7 +701,10 @@ def sum(self): @property def size(self): - return self._data.size + from IPython import embed + + embed() + return self._data["encoding"].size @property def sparse_indices(self): @@ -734,6 +740,10 @@ def _from_base_indices(self, base_indices): def shape(self): return (self.size,) + @property + def size(self): + return np.prod(self._data["shape"]) + @property def dense(self): return self._data["encoding"].dense.reshape((-1,)) From b8ca1bbbc9a1907d67622680617f3072a963d101 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 14:15:47 -0400 Subject: [PATCH 70/84] revert broken voxel changes --- trimesh/voxel/base.py | 25 +++-- trimesh/voxel/encoding.py | 215 +++++++++++++++++++------------------- 2 files changed, 122 insertions(+), 118 deletions(-) diff --git a/trimesh/voxel/base.py b/trimesh/voxel/base.py index 1f4ccbe80..5d2f361c2 100644 --- a/trimesh/voxel/base.py +++ b/trimesh/voxel/base.py @@ -29,17 +29,18 @@ def __init__(self, encoding, transform=None, metadata=None): """ if transform is None: transform = np.eye(4) + if isinstance(encoding, np.ndarray): + encoding = DenseEncoding(encoding.astype(bool)) + if encoding.dtype != bool: + raise ValueError("encoding must have dtype bool") self._data = caching.DataStore() self._cache = caching.Cache(id_function=self._data.__hash__) - self._transform = transforms.Transform(transform, datastore=self._data) - - # use our setter self.encoding = encoding + self.metadata = {} # update the mesh metadata with passed metadata - self.metadata = {} if isinstance(metadata, dict): self.metadata.update(metadata) elif metadata is not None: @@ -63,23 +64,21 @@ def encoding(self): See `trimesh.voxel.encoding` for implementations. 
""" - return self._encoding + return self._data["encoding"] @encoding.setter def encoding(self, encoding): if isinstance(encoding, np.ndarray): encoding = DenseEncoding(encoding) elif not isinstance(encoding, Encoding): - raise TypeError(type(encoding)) - + raise ValueError("encoding must be an Encoding, got %s" % str(encoding)) if len(encoding.shape) != 3: - raise ValueError(f"encoding.shape: (3,) != {encoding.shape}") + raise ValueError( + "encoding must be rank 3, got shape %s" % str(encoding.shape) + ) if encoding.dtype != bool: - raise ValueError(f"encoding.dtype: bool != {encoding.dtype}") - - self._data.data.update(encoding._data.data) - encoding._data = self._data - self._encoding = encoding + raise ValueError("encoding must be binary, got %s" % encoding.dtype) + self._data["encoding"] = encoding @property def transform(self) -> NDArray[float64]: diff --git a/trimesh/voxel/encoding.py b/trimesh/voxel/encoding.py index 644b6c4a3..3942a27e0 100644 --- a/trimesh/voxel/encoding.py +++ b/trimesh/voxel/encoding.py @@ -4,7 +4,7 @@ import numpy as np from .. import caching -from ..util import ABC +from ..util import ABC, log from . import runlength try: @@ -34,20 +34,10 @@ class Encoding(ABC): and dense encodings (wrappers around np.ndarrays). """ - def __init__(self, data=None): - # a key-value store of numpy arrays - self._data = caching.DataStore() - - # dumped when cache changes + def __init__(self, data): + self._data = data self._cache = caching.Cache(id_function=self._data.__hash__) - if isinstance(data, np.ndarray): - self._data["encoding"] = data - elif isinstance(data, Encoding): - self._data.data.update(data._data.data) - elif data is not None: - raise TypeError(type(data)) - @abc.abstractproperty def dtype(self): pass @@ -128,6 +118,22 @@ def stripped(self): def _flip(self, axes): return FlippedEncoding(self, axes) + def crc(self): + log.warning( + "`geometry.crc()` is deprecated and will " + + "be removed in October 2023: replace " + + "with `geometry.__hash__()` or `hash(geometry)`" + ) + return self.__hash__() + + def hash(self): + log.warning( + "`geometry.hash()` is deprecated and will " + + "be removed in October 2023: replace " + + "with `geometry.__hash__()` or `hash(geometry)`" + ) + return self.__hash__() + def __hash__(self): """ Get the hash of the current transformation matrix. 
@@ -190,27 +196,31 @@ class DenseEncoding(Encoding): """Simple `Encoding` implementation based on a numpy ndarray.""" def __init__(self, data): + if not isinstance(data, caching.TrackedArray): + if not isinstance(data, np.ndarray): + raise ValueError("DenseEncoding data must be a numpy array") + data = caching.tracked_array(data) super().__init__(data=data) @property def dtype(self): - return self._data["encoding"].dtype + return self._data.dtype @property def shape(self): - return self._data["encoding"].shape + return self._data.shape @caching.cache_decorator def sum(self): - return self._data["encoding"].sum() + return self._data.sum() @caching.cache_decorator def is_empty(self): - return not np.any(self._data["encoding"]) + return not np.any(self._data) @property def size(self): - return self._data["encoding"].size + return self._data.size @property def sparse_components(self): @@ -220,7 +230,7 @@ def sparse_components(self): @caching.cache_decorator def sparse_indices(self): - return np.column_stack(np.where(self._data["encoding"])) + return np.column_stack(np.where(self._data)) @caching.cache_decorator def sparse_values(self): @@ -234,21 +244,19 @@ def _flip(self, axes): @property def dense(self): - return self._data["encoding"] + return self._data def gather(self, indices): - return self._data["encoding"][indices] + return self._data[indices] def gather_nd(self, indices): - return self._data["encoding"][tuple(indices.T)] + return self._data[tuple(indices.T)] def mask(self, mask): - return self._data["encoding"][ - mask if isinstance(mask, np.ndarray) else mask.dense - ] + return self._data[mask if isinstance(mask, np.ndarray) else mask.dense] def get_value(self, index): - return self._data["encoding"][tuple(index)] + return self._data[tuple(index)] def reshape(self, shape): return DenseEncoding(self._data.reshape(shape)) @@ -277,35 +285,32 @@ def __init__(self, indices, values, shape=None): """ Parameters ------------ - indices : (m, n)-sized int array of indices - values : (m, n)-sized dtype array of values at the specified indices - shape : (n,) iterable of integers. If None, the maximum value of indices + indices: (m, n)-sized int array of indices + values: (m, n)-sized dtype array of values at the specified indices + shape: (n,) iterable of integers. If None, the maximum value of indices + 1 is used. 
""" - - # create the datastore and cache - super().__init__() - - indices = np.asanyarray(indices, dtype=np.int64) - values = np.asanyarray(values) - - if not np.all(indices >= 0): - raise ValueError("all indices must be non-negative") - + data = caching.DataStore() + super().__init__(data) + data["indices"] = indices + data["values"] = values + indices = data["indices"] if len(indices.shape) != 2: raise ValueError("indices must be 2D, got shaped %s" % str(indices.shape)) - if len(values) != len(indices): - raise ValueError("values and indices shapes inconsistent") + if data["values"].shape != (indices.shape[0],): + raise ValueError( + "values and indices shapes inconsistent: {} and {}".format( + data["values"], data["indices"] + ) + ) if shape is None: - shape = tuple(indices.max(axis=0) + 1) + self._shape = tuple(data["indices"].max(axis=0) + 1) else: - shape = tuple(shape) - if (indices > shape).any(): + self._shape = tuple(shape) + if not np.all(indices < self._shape): raise ValueError("all indices must be less than shape") - - self._data["indices"] = indices - self._data["values"] = values - self._data["shape"] = shape + if not np.all(indices >= 0): + raise ValueError("all indices must be non-negative") @staticmethod def from_dense(dense_data): @@ -344,7 +349,7 @@ def ndims(self): @property def shape(self): - return self._data["shape"] + return self._shape @property def size(self): @@ -442,18 +447,16 @@ def __init__(self, data, dtype=None): dtype: dtype of encoded data. Each second value of data is cast will be cast to this dtype if provided. """ - super().__init__() - data = np.asanyarray(data, dtype=dtype) - if len(data.shape) != 1: + super().__init__(data=caching.tracked_array(data)) + if dtype is None: + dtype = self._data.dtype + if len(self._data.shape) != 1: raise ValueError("data must be 1D numpy array") - - self._data["encoding"] = data + self._dtype = dtype @caching.cache_decorator def is_empty(self): - return not np.any( - np.logical_and(self._data["encoding"][::2], self._data["encoding"][1::2]) - ) + return not np.any(np.logical_and(self._data[::2], self._data[1::2])) @property def ndims(self): @@ -465,7 +468,23 @@ def shape(self): @property def dtype(self): - return self._data["encoding"].dtype + return self._dtype + + def crc(self): + log.warning( + "`geometry.crc()` is deprecated and will " + + "be removed in October 2023: replace " + + "with `geometry.__hash__()` or `hash(geometry)`" + ) + return self.__hash__() + + def hash(self): + log.warning( + "`geometry.hash()` is deprecated and will " + + "be removed in October 2023: replace " + + "with `geometry.__hash__()` or `hash(geometry)`" + ) + return self.__hash__() def __hash__(self): """ @@ -498,7 +517,7 @@ def from_brle(brle_data, dtype=None): def stripped(self): if self.is_empty: return _empty_stripped(self.shape) - data, padding = runlength.rle_strip(self._data["encoding"]) + data, padding = runlength.rle_strip(self._data) if padding == (0, 0): encoding = self else: @@ -508,16 +527,16 @@ def stripped(self): @caching.cache_decorator def sum(self): - return (self._data["encoding"][::2] * self._data["encoding"][1::2]).sum() + return (self._data[::2] * self._data[1::2]).sum() @caching.cache_decorator def size(self): - return runlength.rle_length(self._data["encoding"]) + return runlength.rle_length(self._data) def _flip(self, axes): if axes != (0,): raise ValueError("encoding is 1D - cannot flip on axis %s" % str(axes)) - return RunLengthEncoding(runlength.rle_reverse(self._data["encoding"])) + return 
RunLengthEncoding(runlength.rle_reverse(self._data)) @caching.cache_decorator def sparse_components(self): @@ -581,7 +600,7 @@ def __init__(self, data): @caching.cache_decorator def is_empty(self): - return not np.any(self._data["encoding"][1::2]) + return not np.any(self._data[1::2]) @staticmethod def from_dense(dense_data, encoding_dtype=np.int64): @@ -613,7 +632,7 @@ def stripped(self): @caching.cache_decorator def sum(self): - return self._data["encoding"][1::2].sum() + return self._data[1::2].sum() @caching.cache_decorator def size(self): @@ -701,10 +720,7 @@ def sum(self): @property def size(self): - from IPython import embed - - embed() - return self._data["encoding"].size + return self._data.size @property def sparse_indices(self): @@ -718,7 +734,7 @@ def gather_nd(self, indices): return self._data.gather_nd(self._to_base_indices(indices)) def get_value(self, index): - return self._data["encoding"][tuple(self._to_base_indices(index))] + return self._data[tuple(self._to_base_indices(index))] class FlattenedEncoding(LazyIndexMap): @@ -729,34 +745,30 @@ class FlattenedEncoding(LazyIndexMap): """ def _to_base_indices(self, indices): - return np.column_stack(np.unravel_index(indices, self._data["encoding"].shape)) + return np.column_stack(np.unravel_index(indices, self._data.shape)) def _from_base_indices(self, base_indices): return np.expand_dims( - np.ravel_multi_index(base_indices.T, self._data["encoding"].shape), axis=-1 + np.ravel_multi_index(base_indices.T, self._data.shape), axis=-1 ) @property def shape(self): return (self.size,) - @property - def size(self): - return np.prod(self._data["shape"]) - @property def dense(self): - return self._data["encoding"].dense.reshape((-1,)) + return self._data.dense.reshape((-1,)) def mask(self, mask): - return self._data["encoding"].mask(mask.reshape(self._data["encoding"].shape)) + return self._data.mask(mask.reshape(self._data.shape)) @property def flat(self): return self def copy(self): - return FlattenedEncoding(self._data["encoding"].copy()) + return FlattenedEncoding(self._data.copy()) class ShapedEncoding(LazyIndexMap): @@ -778,19 +790,19 @@ def __init__(self, encoding, shape): size = np.prod(self._shape) if nn == 1: size = np.abs(size) - if self._data["encoding"].size % size != 0: + if self._data.size % size != 0: raise ValueError( "cannot reshape encoding of size %d into shape %s" - % (self._data["encoding"].size, str(self._shape)) + % (self._data.size, str(self._shape)) ) - rem = self._data["encoding"].size // size + rem = self._data.size // size self._shape = tuple(rem if s == -1 else s for s in self._shape) elif nn > 2: raise ValueError("shape cannot have more than one -1 value") - elif np.prod(self._shape) != self._data["encoding"].size: + elif np.prod(self._shape) != self._data.size: raise ValueError( "cannot reshape encoding of size %d into shape %s" - % (self._data["encoding"].size, str(self._shape)) + % (self._data.size, str(self._shape)) ) def _from_base_indices(self, base_indices): @@ -809,13 +821,13 @@ def shape(self): @property def dense(self): - return self._data["encoding"].dense.reshape(self.shape) + return self._data.dense.reshape(self.shape) def mask(self, mask): - return self._data["encoding"].mask(mask.flat) + return self._data.mask(mask.flat) def copy(self): - return ShapedEncoding(encoding=self._data["encoding"].copy(), shape=self.shape) + return ShapedEncoding(encoding=self._data.copy(), shape=self.shape) class TransposedEncoding(LazyIndexMap): @@ -856,7 +868,7 @@ def perm(self): @property def shape(self): - 
shape = self._data["encoding"].shape + shape = self._data.shape return tuple(shape[p] for p in self._perm) def _to_base_indices(self, indices): @@ -873,29 +885,23 @@ def _from_base_indices(self, base_indices): @property def dense(self): - return self._data["encoding"].dense.transpose(self._perm) + return self._data.dense.transpose(self._perm) def gather(self, indices): - return self._data["encoding"].gather(self._base_indices(indices)) + return self._data.gather(self._base_indices(indices)) def mask(self, mask): - return ( - self._data["encoding"] - .mask(mask.transpose(self._inv_perm)) - .transpose(self._perm) - ) + return self._data.mask(mask.transpose(self._inv_perm)).transpose(self._perm) def get_value(self, index): - return self._data["encoding"][tuple(self._base_indices(index))] + return self._data[tuple(self._base_indices(index))] @property def data(self): return self._data def copy(self): - return TransposedEncoding( - base_encoding=self._data["encoding"].copy(), perm=self._perm - ) + return TransposedEncoding(base_encoding=self._data.copy(), perm=self._perm) class FlippedEncoding(LazyIndexMap): @@ -916,10 +922,9 @@ def __init__(self, encoding, axes): if len(set(self._axes)) != len(self._axes): raise ValueError("Axes cannot contain duplicates, got %s" % str(self._axes)) super().__init__(encoding) - if not all(0 <= a < self._data["encoding"].ndims for a in axes): + if not all(0 <= a < self._data.ndims for a in axes): raise ValueError( - "Invalid axes %s for %d-d encoding" - % (str(axes), self._data["encoding"].ndims) + "Invalid axes %s for %d-d encoding" % (str(axes), self._data.ndims) ) def _to_base_indices(self, indices): @@ -935,11 +940,11 @@ def _from_base_indices(self, base_indices): @property def shape(self): - return self._data["encoding"].shape + return self._data.shape @property def dense(self): - dense = self._data["encoding"].dense + dense = self._data.dense for a in self._axes: dense = np.flip(dense, a) return dense @@ -948,10 +953,10 @@ def mask(self, mask): if not isinstance(mask, Encoding): mask = DenseEncoding(mask) mask = mask.flip(self._axes) - return self._data["encoding"].mask(mask).flip(self._axes) + return self._data.mask(mask).flip(self._axes) def copy(self): - return FlippedEncoding(self._data["encoding"].copy(), self._axes) + return FlippedEncoding(self._data.copy(), self._axes) def flip(self, axis=0): if isinstance(axis, np.ndarray): From 0dc74111cbd3e67eccdace3c3ec78839d230c0aa Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 14:38:56 -0400 Subject: [PATCH 71/84] add a test for #2016 --- README.md | 4 +- tests/generic.py | 171 ++++++------- tests/test_util.py | 255 ++++++++++--------- trimesh/base.py | 1 + trimesh/util.py | 592 +++++++++++++++++++++------------------------ 5 files changed, 503 insertions(+), 520 deletions(-) diff --git a/README.md b/README.md index 6aaa40602..104ae2b04 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[![trimesh](https://trimsh.org/images/logotype-a.svg)](http://trimsh.org) +[![trimesh](https://trimesh.org/images/logotype-a.svg)](http://trimesh.org) ----------- [![Github Actions](https://github.com/mikedh/trimesh/workflows/Release%20Trimesh/badge.svg)](https://github.com/mikedh/trimesh/actions) [![codecov](https://codecov.io/gh/mikedh/trimesh/branch/main/graph/badge.svg?token=4PVRQXyl2h)](https://codecov.io/gh/mikedh/trimesh) [![Docker Image Version (latest by date)](https://img.shields.io/docker/v/trimesh/trimesh?label=docker&sort=semver)](https://hub.docker.com/r/trimesh/trimesh/tags) 
[![PyPI version](https://badge.fury.io/py/trimesh.svg)](https://badge.fury.io/py/trimesh) @@ -9,7 +9,7 @@ |---------------------------| | `trimesh >= 4.0.0` on `main` makes minimum Python 3.7 and is in pre-release | | Testing with `pip install --pre trimesh` would be much appreciated! | -| Projects that support Python < 3.7 should update requirement to `trimesh<4` | +| Projects that support Python<3.7 should update requirement to `trimesh<4` | Trimesh is a pure Python 3.7+ library for loading and using [triangular meshes](https://en.wikipedia.org/wiki/Triangle_mesh) with an emphasis on watertight surfaces. The goal of the library is to provide a full featured and well tested Trimesh object which allows for easy manipulation and analysis, in the style of the Polygon object in the [Shapely library](https://github.com/Toblerity/Shapely). diff --git a/tests/generic.py b/tests/generic.py index 871a5e305..d766c66ab 100644 --- a/tests/generic.py +++ b/tests/generic.py @@ -32,22 +32,14 @@ from trimesh.constants import tol, tol_path from collections import deque from copy import deepcopy +from http.server import SimpleHTTPRequestHandler +import socketserver -tf = trimesh.transformations -if sys.version_info >= (3, 1): - # Python 3 - from http.server import SimpleHTTPRequestHandler - import socketserver +tf = trimesh.transformations -else: - # Python 2 - from SimpleHTTPServer import SimpleHTTPRequestHandler - import SocketServer as socketserver # make a dummy profiler which does nothing - - class DummyProfiler(object): def __enter__(self, *args, **kwargs): return self @@ -56,7 +48,7 @@ def __exit__(*args, **kwargs): pass def output_text(*args, **kwargs): - return 'no `pyinstrument`' + return "no `pyinstrument`" # make sure dummy profiler works @@ -73,14 +65,14 @@ def output_text(*args, **kwargs): # should we require all soft dependencies # this is set in the docker images to catch missing packages -argv = ''.join(sys.argv) +argv = "".join(sys.argv) # if we're supposed to have everything -all_dependencies = 'ALL_DEPENDENCIES' in argv +all_dependencies = "ALL_DEPENDENCIES" in argv # if we're testing rendering scenes -include_rendering = 'INCLUDE_RENDERING' in argv +include_rendering = "INCLUDE_RENDERING" in argv if all_dependencies and not trimesh.ray.has_embree: - raise ValueError('missing embree!') + raise ValueError("missing embree!") try: import sympy as sp @@ -101,6 +93,7 @@ def output_text(*args, **kwargs): try: from mapbox_earcut import triangulate_float64 + has_earcut = True except BaseException as E: if all_dependencies: @@ -110,6 +103,7 @@ def output_text(*args, **kwargs): try: from shapely.geometry import Point, Polygon, LineString + has_path = True except ImportError as E: if all_dependencies: @@ -130,46 +124,45 @@ def output_text(*args, **kwargs): # find_executable for binvox has_binvox = trimesh.exchange.binvox.binvox_encoder is not None if all_dependencies and not has_binvox: - raise ValueError('missing binvox') + raise ValueError("missing binvox") # Python version as a tuple, i.e. 
[3, 6] -PY_VER = (sys.version_info.major, - sys.version_info.minor) +PY_VER = (sys.version_info.major, sys.version_info.minor) # some repeatable homogeneous transforms to use in tests -transforms = [trimesh.transformations.euler_matrix(np.pi / 4, i, 0) - for i in np.linspace(0.0, np.pi * 2.0, 100)] +transforms = [ + trimesh.transformations.euler_matrix(np.pi / 4, i, 0) + for i in np.linspace(0.0, np.pi * 2.0, 100) +] # should be a (100, 4, 4) float transforms = np.array(transforms) try: # do the imports for Python 2 from cStringIO import StringIO + PY3 = False except ImportError: # if that didn't work we're probably on Python 3 from io import StringIO from io import BytesIO + PY3 = True # are we on linux -is_linux = 'linux' in platform.system().lower() +is_linux = "linux" in platform.system().lower() # find the current absolute path using inspect -dir_current = os.path.dirname( - os.path.abspath(os.path.expanduser(__file__))) +dir_current = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) # the absolute path for our reference models -dir_models = os.path.abspath( - os.path.join(dir_current, '..', 'models')) +dir_models = os.path.abspath(os.path.join(dir_current, "..", "models")) # the absolute path for our 2D reference models -dir_2D = os.path.abspath( - os.path.join(dir_current, '..', 'models', '2D')) +dir_2D = os.path.abspath(os.path.join(dir_current, "..", "models", "2D")) # the absolute path for our test data and truth -dir_data = os.path.abspath( - os.path.join(dir_current, 'data')) +dir_data = os.path.abspath(os.path.join(dir_current, "data")) # a logger for tests to call -log = logging.getLogger('trimesh') +log = logging.getLogger("trimesh") log.addHandler(logging.NullHandler()) # turn strings / bytes into file- like objects @@ -216,8 +209,7 @@ def random_transforms(count, translate=1000): # random should be deterministic assert np.allclose(random(10), random(10)) -assert np.allclose(list(random_transforms(10)), - list(random_transforms(10))) +assert np.allclose(list(random_transforms(10)), list(random_transforms(10))) def _load_data(): @@ -227,15 +219,14 @@ def _load_data(): data = {} for file_name in os.listdir(dir_data): name, extension = os.path.splitext(file_name) - if extension != '.json': + if extension != ".json": continue file_path = os.path.join(dir_data, file_name) - with open(file_path, 'r') as file_obj: + with open(file_path, "r") as file_obj: data[name] = json.load(file_obj) - data['model_paths'] = [os.path.join(dir_models, f) - for f in os.listdir(dir_models)] - data['2D_files'] = [os.path.join(dir_2D, f) for f in os.listdir(dir_2D)] + data["model_paths"] = [os.path.join(dir_models, f) for f in os.listdir(dir_models)] + data["2D_files"] = [os.path.join(dir_2D, f) for f in os.listdir(dir_2D)] return data @@ -258,7 +249,7 @@ def get_mesh(file_name, *args, **kwargs): meshes = collections.deque() for name in np.append(file_name, args): location = get_path(name) - log.info('loading mesh from: %s', location) + log.info("loading mesh from: %s", location) meshes.append(trimesh.load(location, **kwargs)) if len(meshes) == 1: return meshes[0] @@ -279,8 +270,7 @@ def get_path(file_name): full : str Full absolute path to model. """ - return os.path.abspath( - os.path.join(dir_models, file_name)) + return os.path.abspath(os.path.join(dir_models, file_name)) @contextlib.contextmanager @@ -289,11 +279,12 @@ def serve_meshes(): This context manager serves meshes over HTTP at some available port. 
""" + class _ServerThread(threading.Thread): def run(self): os.chdir(dir_models) Handler = SimpleHTTPRequestHandler - self.httpd = socketserver.TCPServer(('', 0), Handler) + self.httpd = socketserver.TCPServer(("", 0), Handler) _, self.port = self.httpd.server_address self.httpd.serve_forever() @@ -301,16 +292,14 @@ def run(self): t.daemon = False t.start() time.sleep(0.2) - yield 'http://localhost:{}'.format(t.port) + yield "http://localhost:{}".format(t.port) t.httpd.shutdown() t.join() -def get_meshes(count=np.inf, - raise_error=False, - split=False, - min_volume=None, - only_watertight=True): +def get_meshes( + count=np.inf, raise_error=False, split=False, min_volume=None, only_watertight=True +): """ Get meshes to test with. @@ -358,8 +347,7 @@ def check(item): extension = trimesh.util.split_extension(file_name).lower() if extension in trimesh.available_formats(): try: - loaded = trimesh.load( - os.path.join(dir_models, file_name)) + loaded = trimesh.load(os.path.join(dir_models, file_name)) except BaseException as E: if raise_error: raise E @@ -367,21 +355,21 @@ def check(item): batched = [] if isinstance(loaded, trimesh.Scene): - batched.extend(m for m in loaded.geometry.values() - if isinstance(m, trimesh.Trimesh)) + batched.extend( + m for m in loaded.geometry.values() if isinstance(m, trimesh.Trimesh) + ) elif isinstance(loaded, trimesh.Trimesh): batched.append(loaded) for mesh in batched: - mesh.metadata['file_name'] = file_name + mesh.metadata["file_name"] = file_name # only return our limit if returned[0] >= count: return # previous checks should ensure only trimesh assert isinstance(mesh, trimesh.Trimesh) if split: - for submesh in mesh.split( - only_watertight=only_watertight): + for submesh in mesh.split(only_watertight=only_watertight): checked = check(submesh) if checked is not None: yield checked @@ -390,8 +378,7 @@ def check(item): if checked is not None: yield checked else: - log.warning('%s has no loader, not running test on!', - file_name) + log.warning("%s has no loader, not running test on!", file_name) def get_2D(count=None): @@ -429,8 +416,7 @@ def get_2D(count=None): try: paths.append(trimesh.load(location)) except BaseException as E: - log.error('failed on: {}'.format(file_name), - exc_info=True) + log.error("failed on: {}".format(file_name), exc_info=True) raise E yield paths[-1] @@ -448,18 +434,17 @@ def check_path2D(path): assert len(path.root) == len(path.polygons_full) # make sure polygons are really polygons - assert all(type(i).__name__ == 'Polygon' - for i in path.polygons_full) - assert all(type(i).__name__ == 'Polygon' - for i in path.polygons_closed) + assert all(type(i).__name__ == "Polygon" for i in path.polygons_full) + assert all(type(i).__name__ == "Polygon" for i in path.polygons_closed) # these should all correspond to each other assert len(path.discrete) == len(path.polygons_closed) assert len(path.discrete) == len(path.paths) # make sure None polygons are not referenced in graph - assert all(path.polygons_closed[i] is not None - for i in path.enclosure_directed.nodes()) + assert all( + path.polygons_closed[i] is not None for i in path.enclosure_directed.nodes() + ) if any(e.color is not None for e in path.entities): assert path.colors.shape == (len(path.entities), 4) @@ -482,8 +467,7 @@ def scene_equal(a, b): for k, m in a.geometry.items(): # each mesh should correspond by name # and have the same volume - assert np.isclose( - m.volume, b.geometry[k].volume, rtol=0.001) + assert np.isclose(m.volume, b.geometry[k].volume, rtol=0.001) # the 
axis aligned bounding box should be the same assert np.allclose(a.bounds, b.bounds) @@ -503,14 +487,12 @@ def texture_equal(a, b): try: from scipy.spatial import cKDTree except BaseException: - log.error('no scipy for check!', exc_info=True) + log.error("no scipy for check!", exc_info=True) return # an ordered position-face-UV blob to check - pa = np.hstack((a.vertices, a.visual.uv))[ - a.faces].reshape((-1, 15)) - pb = np.hstack((b.vertices, b.visual.uv))[ - b.faces].reshape((-1, 15)) + pa = np.hstack((a.vertices, a.visual.uv))[a.faces].reshape((-1, 15)) + pb = np.hstack((b.vertices, b.visual.uv))[b.faces].reshape((-1, 15)) # query their actual ordered values against each other q = cKDTree(pa).query_ball_tree(cKDTree(pb), r=1e-4) assert all(i in match for i, match in enumerate(q)) @@ -521,8 +503,7 @@ def check_fuze(fuze): Check the classic textured mesh: a fuze bottle """ # these loaded fuze bottles should have textures - assert isinstance( - fuze.visual, trimesh.visual.TextureVisuals) + assert isinstance(fuze.visual, trimesh.visual.TextureVisuals) # image should be loaded with correct resolution assert fuze.visual.material.image.size == (1024, 1024) # UV coordinates should be unmerged correctly @@ -533,9 +514,11 @@ def check_fuze(fuze): assert fuze.visual.uv.min() > -tol.merge assert fuze.visual.uv.max() < 1 + tol.merge # check color factors - factors = [fuze.visual.material.ambient, - fuze.visual.material.diffuse, - fuze.visual.material.specular] + factors = [ + fuze.visual.material.ambient, + fuze.visual.material.diffuse, + fuze.visual.material.specular, + ] for f in factors: # should be RGBA assert len(f) == 4 @@ -550,7 +533,7 @@ def check_fuze(fuze): assert fuze.vertices.shape == (664, 3) # convert TextureVisuals to ColorVisuals viz = fuze.visual.to_color() - assert viz.kind == 'vertex' + assert viz.kind == "vertex" # should be actual colors defined assert viz.vertex_colors.ptp(axis=0).ptp() != 0 # shouldn't crash @@ -576,9 +559,9 @@ def wrapload(exported, file_type, **kwargs): loaded : trimesh.Trimesh Loaded result """ - return trimesh.load(file_obj=trimesh.util.wrap_as_stream(exported), - file_type=file_type, - **kwargs) + return trimesh.load( + file_obj=trimesh.util.wrap_as_stream(exported), file_type=file_type, **kwargs + ) TemporaryDirectory = trimesh.util.TemporaryDirectory @@ -590,8 +573,28 @@ def wrapload(exported, file_type, **kwargs): # formats supported by meshlab for export tests try: import pymeshlab - meshlab_formats = ['3ds', 'ply', 'stl', 'obj', 'qobj', 'off', 'ptx', 'vmi', - 'bre', 'dae', 'ctm', 'pts', 'apts', 'gts', 'pdb', - 'tri', 'asc', 'x3d', 'x3dv', 'wrl'] + + meshlab_formats = [ + "3ds", + "ply", + "stl", + "obj", + "qobj", + "off", + "ptx", + "vmi", + "bre", + "dae", + "ctm", + "pts", + "apts", + "gts", + "pdb", + "tri", + "asc", + "x3d", + "x3dv", + "wrl", + ] except BaseException: meshlab_formats = [] diff --git a/tests/test_util.py b/tests/test_util.py index 398dae83a..60d88c610 100644 --- a/tests/test_util.py +++ b/tests/test_util.py @@ -14,12 +14,11 @@ TOL_ZERO = 1e-9 TOL_CHECK = 1e-2 -log = logging.getLogger('trimesh') +log = logging.getLogger("trimesh") log.addHandler(logging.NullHandler()) class VectorTests(unittest.TestCase): - def setUp(self): self.test_dim = TEST_DIM @@ -31,14 +30,14 @@ def test_unitize_multi(self): assert not valid[0] assert valid[1:].all() - length = np.sum(vectors[1:] ** 2, axis=1) ** .5 + length = np.sum(vectors[1:] ** 2, axis=1) ** 0.5 assert np.allclose(length, 1.0) def test_align(self): - log.info('Testing vector alignment') 
+ log.info("Testing vector alignment") target = np.array([0, 0, 1]) for _i in range(100): - vector = trimesh.unitize(np.random.random(3) - .5) + vector = trimesh.unitize(np.random.random(3) - 0.5) T = trimesh.geometry.align_vectors(vector, target) result = np.dot(T, np.append(vector, 1))[0:3] aligned = np.abs(result - target).sum() < TOL_ZERO @@ -46,7 +45,6 @@ def test_align(self): class UtilTests(unittest.TestCase): - def test_bounds_tree(self): for _attempt in range(3): for dimension in [2, 3]: @@ -78,9 +76,8 @@ def test_stack(self): pass def test_has_module(self): - - assert g.trimesh.util.has_module('collections') - assert not g.trimesh.util.has_module('foobarrionananan') + assert g.trimesh.util.has_module("collections") + assert not g.trimesh.util.has_module("foobarrionananan") def test_strips(self): """ @@ -95,8 +92,7 @@ def strips_to_faces(strips): for s in strips: s = g.np.asanyarray(s, dtype=g.np.int64) # each triangle is defined by one new vertex - tri = g.np.column_stack([g.np.roll(s, -i) - for i in range(3)])[:-2] + tri = g.np.column_stack([g.np.roll(s, -i) for i in range(3)])[:-2] # we need to flip ever other triangle idx = (g.np.arange(len(tri)) % 2).astype(bool) tri[idx] = g.np.fliplr(tri[idx]) @@ -108,10 +104,7 @@ def strips_to_faces(strips): # test 4- triangle strip s = [g.np.arange(6)] f = g.trimesh.util.triangle_strips_to_faces(s) - assert (f == g.np.array([[0, 1, 2], - [3, 2, 1], - [2, 3, 4], - [5, 4, 3]])).all() + assert (f == g.np.array([[0, 1, 2], [3, 2, 1], [2, 3, 4], [5, 4, 3]])).all() assert len(f) + 2 == len(s[0]) assert (f == strips_to_faces(s)).all() @@ -141,9 +134,8 @@ def test_pairwise(self): assert all(len(i) == 2 for i in pa) def test_concat(self): - - a = g.get_mesh('ballA.off') - b = g.get_mesh('ballB.off') + a = g.get_mesh("ballA.off") + b = g.get_mesh("ballB.off") hA = a.__hash__() hB = b.__hash__() @@ -151,8 +143,7 @@ def test_concat(self): # make sure we're not mutating original mesh for _i in range(4): c = a + b - assert g.np.isclose(c.volume, - a.volume + b.volume) + assert g.np.isclose(c.volume, a.volume + b.volume) assert a.__hash__() == hA assert b.__hash__() == hB @@ -165,10 +156,48 @@ def test_concat(self): # do a multimesh concatenate r = g.trimesh.util.concatenate(meshes) - assert g.np.isclose(r.volume, - a.volume * count) + assert g.np.isclose(r.volume, a.volume * count) assert a.__hash__() == hA + def test_concat_vertex_normals(self): + # vertex normals should only be included if they already exist + + a = g.trimesh.creation.icosphere().apply_translation([1, 0, 0]) + assert "vertex_normals" not in a._cache + + b = g.trimesh.creation.icosphere().apply_translation([-1, 0, 0]) + assert "vertex_normals" not in b._cache + + c = g.trimesh.util.concatenate([a, b]) + assert "vertex_normals" not in c._cache + + rando = g.trimesh.unitize(g.random(a.vertices.shape)) + a.vertex_normals = rando + assert "vertex_normals" in a._cache + + c = g.trimesh.util.concatenate([a, b]) + assert "vertex_normals" in c._cache + # should have included the rando normals + assert g.np.allclose(c.vertex_normals[: len(a.vertices)], rando) + + def test_concat_face_normals(self): + # face normals should only be included if they already exist + a = g.trimesh.creation.icosphere().apply_translation([1, 0, 0]) + assert "face_normals" not in a._cache + + b = g.trimesh.creation.icosphere().apply_translation([-1, 0, 0]) + assert "face_normals" not in b._cache + + c = g.trimesh.util.concatenate([a, b]) + assert "face_normals" not in c._cache + + # will generate normals + _ = 
a.face_normals + assert "face_normals" in a._cache + + c = g.trimesh.util.concatenate([a, b]) + assert "face_normals" in c._cache + def test_unique_id(self): num_ids = 10000 @@ -194,17 +223,17 @@ def test_unique_name(self): from trimesh.util import unique_name assert len(unique_name(None, {})) > 0 - assert len(unique_name('', {})) > 0 + assert len(unique_name("", {})) > 0 count = 10 names = set() for _i in range(count): - names.add(unique_name('hi', names)) + names.add(unique_name("hi", names)) assert len(names) == count names = set() for _i in range(count): - names.add(unique_name('', names)) + names.add(unique_name("", names)) assert len(names) == count # Try with a larger set of names @@ -213,7 +242,7 @@ def test_unique_name(self): # make it a whole lotta duplicates names = names * 1000 # add a non-int postfix to test - names.extend(['suppp_hi'] * 10) + names.extend(["suppp_hi"] * 10) assigned = set() with g.Profiler() as P: @@ -226,10 +255,7 @@ def test_unique_name(self): counts = {} with g.Profiler() as P: for name in names: - assigned_new.add(unique_name( - name, - contains=assigned_new, - counts=counts)) + assigned_new.add(unique_name(name, contains=assigned_new, counts=counts)) g.log.debug(P.output_text()) # new scheme should match the old one @@ -239,57 +265,51 @@ def test_unique_name(self): class ContainsTest(unittest.TestCase): - def test_inside(self): sphere = g.trimesh.primitives.Sphere(radius=1.0, subdivisions=4) - g.log.info('Testing contains function with sphere') - samples = (np.random.random((1000, 3)) - .5) * 5 + g.log.info("Testing contains function with sphere") + samples = (np.random.random((1000, 3)) - 0.5) * 5 radius = np.linalg.norm(samples, axis=1) - margin = .05 + margin = 0.05 truth_in = radius < (1.0 - margin) truth_out = radius > (1.0 + margin) contains = sphere.contains(samples) if not contains[truth_in].all(): - raise ValueError('contains test does not match truth!') + raise ValueError("contains test does not match truth!") if contains[truth_out].any(): - raise ValueError('contains test does not match truth!') + raise ValueError("contains test does not match truth!") class IOWrapTests(unittest.TestCase): - def test_io_wrap(self): - util = g.trimesh.util # check wrap_as_stream test_b = g.random(1).tobytes() - test_s = 'this is a test yo' + test_s = "this is a test yo" res_b = util.wrap_as_stream(test_b).read() res_s = util.wrap_as_stream(test_s).read() assert res_b == test_b assert res_s == test_s # check __enter__ and __exit__ - hi = b'hi' + hi = b"hi" with util.BytesIO(hi) as f: assert f.read() == hi # check __enter__ and __exit__ - hi = 'hi' + hi = "hi" with util.StringIO(hi) as f: assert f.read() == hi class CompressTests(unittest.TestCase): - def test_compress(self): - - source = {'hey': 'sup', - 'naa': '2002211'} + source = {"hey": "sup", "naa": "2002211"} # will return bytes c = g.trimesh.util.compress(source) @@ -297,23 +317,23 @@ def test_compress(self): # wrap bytes as file- like object f = g.trimesh.util.wrap_as_stream(c) # try to decompress file- like object - d = g.trimesh.util.decompress(f, file_type='zip') + d = g.trimesh.util.decompress(f, file_type="zip") # make sure compressed- decompressed items # are the same after a cycle for key, value in source.items(): - result = d[key].read().decode('utf-8') + result = d[key].read().decode("utf-8") assert result == value class UniqueTests(unittest.TestCase): - def test_unique(self): - - options = [np.array([0, 1, 2, 3, 1, 3, 10, 20]), - np.arange(100), - np.array([], dtype=np.int64), - 
(np.random.random(1000) * 10).astype(int)] + options = [ + np.array([0, 1, 2, 3, 1, 3, 10, 20]), + np.arange(100), + np.array([], dtype=np.int64), + (np.random.random(1000) * 10).astype(int), + ] for values in options: if len(values) > 0: @@ -323,21 +343,19 @@ def test_unique(self): # try our unique bincount function unique, inverse, counts = g.trimesh.grouping.unique_bincount( - values, - minlength=minlength, - return_inverse=True, - return_counts=True) + values, minlength=minlength, return_inverse=True, return_counts=True + ) # make sure inverse is correct assert (unique[inverse] == values).all() # make sure that the number of counts matches # the number of unique values - assert (len(unique) == len(counts)) + assert len(unique) == len(counts) # get the truth - truth_unique, truth_inverse, truth_counts = np.unique(values, - return_inverse=True, - return_counts=True) + truth_unique, truth_inverse, truth_counts = np.unique( + values, return_inverse=True, return_counts=True + ) # make sure truth is doing what we think assert (truth_unique[truth_inverse] == values).all() @@ -352,47 +370,43 @@ def test_unique(self): class CommentTests(unittest.TestCase): - def test_comment(self): # test our comment stripping logic f = g.trimesh.util.comment_strip - text = 'hey whats up' + text = "hey whats up" assert f(text) == text - text = '#hey whats up' - assert f(text) == '' + text = "#hey whats up" + assert f(text) == "" - text = ' # hey whats up ' - assert f(text) == '' + text = " # hey whats up " + assert f(text) == "" - text = '# naahah\nhey whats up' - assert f(text) == 'hey whats up' + text = "# naahah\nhey whats up" + assert f(text) == "hey whats up" - text = '#naahah\nhey whats up\nhi' - assert f(text) == 'hey whats up\nhi' + text = "#naahah\nhey whats up\nhi" + assert f(text) == "hey whats up\nhi" - text = '#naahah\nhey whats up\n hi' - assert f(text) == 'hey whats up\n hi' + text = "#naahah\nhey whats up\n hi" + assert f(text) == "hey whats up\n hi" - text = '#naahah\nhey whats up\n hi#' - assert f(text) == 'hey whats up\n hi' + text = "#naahah\nhey whats up\n hi#" + assert f(text) == "hey whats up\n hi" - text = 'hey whats up# see here\n hi#' - assert f(text) == 'hey whats up\n hi' + text = "hey whats up# see here\n hi#" + assert f(text) == "hey whats up\n hi" class ArrayToString(unittest.TestCase): def test_converts_an_unstructured_1d_array(self): - self.assertEqual( - g.trimesh.util.array_to_string(np.array([1, 2, 3])), - '1 2 3' - ) + self.assertEqual(g.trimesh.util.array_to_string(np.array([1, 2, 3])), "1 2 3") def test_converts_an_unstructured_int_array(self): self.assertEqual( g.trimesh.util.array_to_string(np.array([[1, 2, 3], [4, 5, 6]])), - '1 2 3\n4 5 6' + "1 2 3\n4 5 6", ) def test_converts_an_unstructured_float_array(self): @@ -400,51 +414,54 @@ def test_converts_an_unstructured_float_array(self): g.trimesh.util.array_to_string( np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float64) ), - '1.00000000 2.00000000 3.00000000\n4.00000000 5.00000000 6.00000000' + "1.00000000 2.00000000 3.00000000\n4.00000000 5.00000000 6.00000000", ) def test_uses_the_specified_column_delimiter(self): self.assertEqual( g.trimesh.util.array_to_string( - np.array([[1, 2, 3], [4, 5, 6]]), col_delim='col'), - '1col2col3\n4col5col6' + np.array([[1, 2, 3], [4, 5, 6]]), col_delim="col" + ), + "1col2col3\n4col5col6", ) def test_uses_the_specified_row_delimiter(self): self.assertEqual( g.trimesh.util.array_to_string( - np.array([[1, 2, 3], [4, 5, 6]]), row_delim='row'), - '1 2 3row4 5 6' + np.array([[1, 2, 3], [4, 5, 
6]]), row_delim="row" + ), + "1 2 3row4 5 6", ) def test_uses_the_specified_value_format(self): self.assertEqual( g.trimesh.util.array_to_string( - np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float64), - value_format='{:.1f}'), - '1.0 2.0 3.0\n4.0 5.0 6.0' + np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float64), value_format="{:.1f}" + ), + "1.0 2.0 3.0\n4.0 5.0 6.0", ) def test_supports_uints(self): self.assertEqual( - g.trimesh.util.array_to_string( - np.array([1, 2, 3], dtype=np.uint8)), - '1 2 3' + g.trimesh.util.array_to_string(np.array([1, 2, 3], dtype=np.uint8)), "1 2 3" ) def test_supports_repeat_format(self): self.assertEqual( g.trimesh.util.array_to_string( - np.array([[1, 2, 3], [4, 5, 6]]), value_format='{} {}'), - '1 1 2 2 3 3\n4 4 5 5 6 6' + np.array([[1, 2, 3], [4, 5, 6]]), value_format="{} {}" + ), + "1 1 2 2 3 3\n4 4 5 5 6 6", ) def test_raises_if_array_is_structured(self): with self.assertRaises(ValueError): - g.trimesh.util.array_to_string(np.array( - [(1, 1.1), (2, 2.2)], - dtype=[('some_int', np.int64), ('some_float', np.float64)] - )) + g.trimesh.util.array_to_string( + np.array( + [(1, 1.1), (2, 2.2)], + dtype=[("some_int", np.int64), ("some_float", np.float64)], + ) + ) def test_raises_if_array_is_not_flat(self): with self.assertRaises(ValueError): @@ -452,16 +469,15 @@ def test_raises_if_array_is_not_flat(self): class StructuredArrayToString(unittest.TestCase): - def test_converts_a_structured_array_with_1d_elements(self): self.assertEqual( g.trimesh.util.structured_array_to_string( np.array( [(1, 1.1), (2, 2.2)], - dtype=[('some_int', np.int64), ('some_float', np.float64)] + dtype=[("some_int", np.int64), ("some_float", np.float64)], ) ), - '1 1.10000000\n2 2.20000000' + "1 1.10000000\n2 2.20000000", ) def test_converts_a_structured_array_with_2d_elements(self): @@ -469,11 +485,10 @@ def test_converts_a_structured_array_with_2d_elements(self): g.trimesh.util.structured_array_to_string( np.array( [([1, 2], 1.1), ([3, 4], 2.2)], - dtype=[('some_int', np.int64, 2), - ('some_float', np.float64)] + dtype=[("some_int", np.int64, 2), ("some_float", np.float64)], ) ), - '1 2 1.10000000\n3 4 2.20000000' + "1 2 1.10000000\n3 4 2.20000000", ) def test_uses_the_specified_column_delimiter(self): @@ -481,11 +496,11 @@ def test_uses_the_specified_column_delimiter(self): g.trimesh.util.structured_array_to_string( np.array( [(1, 1.1), (2, 2.2)], - dtype=[('some_int', np.int64), ('some_float', np.float64)] + dtype=[("some_int", np.int64), ("some_float", np.float64)], ), - col_delim='col' + col_delim="col", ), - '1col1.10000000\n2col2.20000000' + "1col1.10000000\n2col2.20000000", ) def test_uses_the_specified_row_delimiter(self): @@ -493,11 +508,11 @@ def test_uses_the_specified_row_delimiter(self): g.trimesh.util.structured_array_to_string( np.array( [(1, 1.1), (2, 2.2)], - dtype=[('some_int', np.int64), ('some_float', np.float64)] + dtype=[("some_int", np.int64), ("some_float", np.float64)], ), - row_delim='row' + row_delim="row", ), - '1 1.10000000row2 2.20000000' + "1 1.10000000row2 2.20000000", ) def test_uses_the_specified_value_format(self): @@ -505,11 +520,11 @@ def test_uses_the_specified_value_format(self): g.trimesh.util.structured_array_to_string( np.array( [(1, 1.1), (2, 2.2)], - dtype=[('some_int', np.int64), ('some_float', np.float64)] + dtype=[("some_int", np.int64), ("some_float", np.float64)], ), - value_format='{:.1f}' + value_format="{:.1f}", ), - '1.0 1.1\n2.0 2.2' + "1.0 1.1\n2.0 2.2", ) def test_supports_uints(self): @@ -517,10 +532,10 @@ def 
test_supports_uints(self): g.trimesh.util.structured_array_to_string( np.array( [(1, 1.1), (2, 2.2)], - dtype=[('some_int', np.uint8), ('some_float', np.float64)] + dtype=[("some_int", np.uint8), ("some_float", np.float64)], ) ), - '1 1.10000000\n2 2.20000000' + "1 1.10000000\n2 2.20000000", ) def test_raises_if_array_is_unstructured(self): @@ -532,9 +547,9 @@ def test_raises_if_value_format_specifies_repeats(self): g.trimesh.util.structured_array_to_string( np.array( [(1, 1.1), (2, 2.2)], - dtype=[('some_int', np.int64), ('some_float', np.float64)] + dtype=[("some_int", np.int64), ("some_float", np.float64)], ), - value_format='{} {}' + value_format="{} {}", ) def test_raises_if_array_is_not_flat(self): @@ -542,11 +557,11 @@ def test_raises_if_array_is_not_flat(self): g.trimesh.util.structured_array_to_string( np.array( [[(1, 1.1), (2, 2.2)], [(1, 1.1), (2, 2.2)]], - dtype=[('some_int', np.int64), ('some_float', np.float64)] + dtype=[("some_int", np.int64), ("some_float", np.float64)], ) ) -if __name__ == '__main__': +if __name__ == "__main__": trimesh.util.attach_to_log() unittest.main() diff --git a/trimesh/base.py b/trimesh/base.py index 32fb01078..5ff980828 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -435,6 +435,7 @@ def face_normals(self, values): if not np.allclose(compare, values[:20]): log.debug("face_normals didn't match triangles, ignoring!") return + # otherwise store face normals self._cache["face_normals"] = values diff --git a/trimesh/util.py b/trimesh/util.py index 698ae1b6d..088f2985e 100644 --- a/trimesh/util.py +++ b/trimesh/util.py @@ -17,44 +17,23 @@ import logging import random import shutil -import sys import tempfile +import time import uuid import zipfile -import numpy as np - -ABC = abc.ABC - -# a flag we can check elsewhere for Python 3 -PY3 = sys.version_info.major >= 3 +# for type checking +from collections.abc import Mapping +from io import BytesIO, StringIO -if PY3: - # for type checking - basestring = str - # Python 3 - from io import BytesIO, StringIO - from shutil import which - from time import perf_counter as now -else: - # Python 2 - from distutils.spawn import find_executable as which # noqa - - from StringIO import StringIO - # monkey patch StringIO so `with` statements work - StringIO.__enter__ = lambda a: a - StringIO.__exit__ = lambda a, b, c, d: a.close() - BytesIO = StringIO - from time import time as now # noqa - - -try: - from collections.abc import Mapping -except ImportError: - from collections.abc import Mapping +import numpy as np # create a default logger -log = logging.getLogger('trimesh') +log = logging.getLogger("trimesh") + +ABC = abc.ABC +now = time.time +which = shutil.which # include constants here so we don't have to import # a floating point threshold for 0.0 @@ -67,7 +46,7 @@ _STRICT = False _IDENTITY = np.eye(4, dtype=np.float64) -_IDENTITY.flags['WRITEABLE'] = False +_IDENTITY.flags["WRITEABLE"] = False def has_module(name): @@ -87,12 +66,11 @@ def has_module(name): """ # this should work on Python 2.7 and 3.4+ import pkgutil + return pkgutil.find_loader(name) is not None -def unitize(vectors, - check_valid=False, - threshold=None): +def unitize(vectors, check_valid=False, threshold=None): """ Unitize a vector or an array or row-vectors. 
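The reflowed `unitize` keeps its behavior: with `check_valid=True` it returns only the rows with nonzero norm plus a validity mask. A short sketch, with made-up input values for illustration:

    import numpy as np
    import trimesh

    vectors = np.array([[3.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
    unit, valid = trimesh.unitize(vectors, check_valid=True)
    # the zero-length row is dropped and flagged invalid
    assert valid.tolist() == [True, False]
    assert np.allclose(unit, [[1.0, 0.0, 0.0]])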
@@ -123,8 +101,7 @@ def unitize(vectors, # for (m, d) arrays take the per-row unit vector # using sqrt and avoiding exponents is slightly faster # also dot with ones is faser than .sum(axis=1) - norm = np.sqrt(np.dot(vectors * vectors, - [1.0] * vectors.shape[1])) + norm = np.sqrt(np.dot(vectors * vectors, [1.0] * vectors.shape[1])) # non-zero norms valid = norm > threshold # in-place reciprocal of nonzero norms @@ -141,7 +118,7 @@ def unitize(vectors, else: unit = vectors.copy() else: - raise ValueError('vectors must be (n, ) or (n, d)!') + raise ValueError("vectors must be (n, ) or (n, d)!") if check_valid: return unit[valid], valid @@ -183,7 +160,7 @@ def is_file(obj): is_file : bool True if object is a file """ - return hasattr(obj, 'read') or hasattr(obj, 'write') + return hasattr(obj, "read") or hasattr(obj, "write") def is_pathlib(obj): @@ -202,7 +179,7 @@ def is_pathlib(obj): """ # check class name rather than a pathlib import name = obj.__class__.__name__ - return hasattr(obj, 'absolute') and name.endswith('Path') + return hasattr(obj, "absolute") and name.endswith("Path") def is_string(obj): @@ -219,7 +196,7 @@ def is_string(obj): is_string : bool True if obj is a string """ - return isinstance(obj, basestring) + return isinstance(obj, str) def is_none(obj): @@ -240,9 +217,7 @@ def is_none(obj): """ if obj is None: return True - if (is_sequence(obj) and - len(obj) == 1 and - obj[0] is None): + if is_sequence(obj) and len(obj) == 1 and obj[0] is None: return True return False @@ -261,21 +236,21 @@ def is_sequence(obj): is_sequence : bool True if object is sequence """ - seq = (not hasattr(obj, "strip") and - hasattr(obj, "__getitem__") or - hasattr(obj, "__iter__")) + seq = ( + not hasattr(obj, "strip") + and hasattr(obj, "__getitem__") + or hasattr(obj, "__iter__") + ) # check to make sure it is not a set, string, or dictionary - seq = seq and all(not isinstance(obj, i) for i in (dict, - set, - basestring)) + seq = seq and all(not isinstance(obj, i) for i in (dict, set, str)) # PointCloud objects can look like an array but are not - seq = seq and type(obj).__name__ not in ['PointCloud'] + seq = seq and type(obj).__name__ not in ["PointCloud"] # numpy sometimes returns objects that are single float64 values # but sure look like sequences, so we check the shape - if hasattr(obj, 'shape'): + if hasattr(obj, "shape"): seq = seq and obj.shape != () return seq @@ -330,8 +305,7 @@ def is_shape(obj, shape, allow_zeros=False): # if the obj.shape is different length than # the goal shape it means they have different number # of dimensions and thus the obj is not the query shape - if (not hasattr(obj, 'shape') or - len(obj.shape) != len(shape)): + if not hasattr(obj, "shape") or len(obj.shape) != len(shape): return False # empty lists with any flexible dimensions match @@ -428,8 +402,7 @@ def vector_hemisphere(vectors, return_sign=False): # check the Y value and reverse vector # direction if negative. 
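The checks reformatted above implement a shape-template comparison where negative entries act as wildcard dimensions; a quick sketch of the intended semantics (assuming the documented wildcard behavior of `is_shape`, which is not shown in full in this hunk):

    import numpy as np
    from trimesh.util import is_shape

    points = np.random.random((10, 3))
    assert is_shape(points, (-1, 3))         # -1 matches any row count
    assert not is_shape(points, (-1, 2))     # column count differs
    assert not is_shape([1, 2, 3], (-1, 3))  # plain lists have no `.shape`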
negative = vectors < -TOL_ZERO - zero = np.logical_not( - np.logical_or(negative, vectors > TOL_ZERO)) + zero = np.logical_not(np.logical_or(negative, vectors > TOL_ZERO)) signs = np.ones(len(vectors), dtype=np.float64) # negative Y values are reversed @@ -441,8 +414,7 @@ def vector_hemisphere(vectors, return_sign=False): elif is_shape(vectors, (-1, 3)): # 3D vector case negative = vectors < -TOL_ZERO - zero = np.logical_not( - np.logical_or(negative, vectors > TOL_ZERO)) + zero = np.logical_not(np.logical_or(negative, vectors > TOL_ZERO)) # move all negative Z to positive # then for zero Z vectors, move all negative Y to positive # then for zero Y vectors, move all negative X to positive @@ -453,12 +425,12 @@ def vector_hemisphere(vectors, return_sign=False): signs[np.logical_and(zero[:, 2], negative[:, 1])] = -1.0 # all on-plane vectors with zero Y values # and negative X values - signs[np.logical_and(np.logical_and(zero[:, 2], - zero[:, 1]), - negative[:, 0])] = -1.0 + signs[ + np.logical_and(np.logical_and(zero[:, 2], zero[:, 1]), negative[:, 0]) + ] = -1.0 else: - raise ValueError('vectors must be (n, 3)!') + raise ValueError("vectors must be (n, 3)!") # apply the signs to the vectors oriented = vectors * signs.reshape((-1, 1)) @@ -486,15 +458,14 @@ def vector_to_spherical(cartesian): """ cartesian = np.asanyarray(cartesian, dtype=np.float64) if not is_shape(cartesian, (-1, 3)): - raise ValueError('Cartesian points must be (n, 3)!') + raise ValueError("Cartesian points must be (n, 3)!") unit, valid = unitize(cartesian, check_valid=True) unit[np.abs(unit) < TOL_MERGE] = 0.0 x, y, z = unit.T spherical = np.zeros((len(cartesian), 2), dtype=np.float64) - spherical[valid] = np.column_stack((np.arctan2(y, x), - np.arccos(z))) + spherical[valid] = np.column_stack((np.arctan2(y, x), np.arccos(z))) return spherical @@ -514,14 +485,12 @@ def spherical_to_vector(spherical): """ spherical = np.asanyarray(spherical, dtype=np.float64) if not is_shape(spherical, (-1, 2)): - raise ValueError('spherical coordinates must be (n, 2)!') + raise ValueError("spherical coordinates must be (n, 2)!") theta, phi = spherical.T st, ct = np.sin(theta), np.cos(theta) sp, cp = np.sin(phi), np.cos(phi) - vectors = np.column_stack((ct * sp, - st * sp, - cp)) + vectors = np.column_stack((ct * sp, st * sp, cp)) return vectors @@ -558,6 +527,7 @@ def pairwise(iterable): # if we have a normal iterable use itertools import itertools + a, b = itertools.tee(iterable) # pop the first element of the second item next(b) @@ -570,7 +540,7 @@ def pairwise(iterable): # only included in recent-ish version of numpy multi_dot = np.linalg.multi_dot except AttributeError: - log.debug('np.linalg.multi_dot not available, using fallback') + log.debug("np.linalg.multi_dot not available, using fallback") def multi_dot(arrays): """ @@ -662,7 +632,7 @@ def row_norm(data): norm : (n,) float Norm of each row of input array """ - return np.sqrt(np.dot(data ** 2, [1] * data.shape[1])) + return np.sqrt(np.dot(data**2, [1] * data.shape[1])) def stack_3D(points, return_2D=False): @@ -690,15 +660,14 @@ def stack_3D(points, return_2D=False): if shape == (0,): is_2D = False elif len(shape) != 2: - raise ValueError('Points must be 2D array!') + raise ValueError("Points must be 2D array!") elif shape[1] == 2: - points = np.column_stack(( - points, np.zeros(len(points)))) + points = np.column_stack((points, np.zeros(len(points)))) is_2D = True elif shape[1] == 3: is_2D = False else: - raise ValueError('Points must be (n, 2) or (n, 3)!') + raise 
ValueError("Points must be (n, 2) or (n, 3)!") if return_2D: return points, is_2D @@ -721,7 +690,7 @@ def grid_arange(bounds, step): """ bounds = np.asanyarray(bounds, dtype=np.float64) if len(bounds) != 2: - raise ValueError('bounds must be (2, dimension!') + raise ValueError("bounds must be (2, dimension!") # allow single float or per-dimension spacing step = np.asanyarray(step, dtype=np.float64) @@ -729,8 +698,11 @@ def grid_arange(bounds, step): step = np.tile(step, bounds.shape[1]) grid_elements = [np.arange(*b, step=s) for b, s in zip(bounds.T, step)] - grid = np.vstack(np.meshgrid(*grid_elements, indexing='ij') - ).reshape(bounds.shape[1], -1).T + grid = ( + np.vstack(np.meshgrid(*grid_elements, indexing="ij")) + .reshape(bounds.shape[1], -1) + .T + ) return grid @@ -749,15 +721,18 @@ def grid_linspace(bounds, count): """ bounds = np.asanyarray(bounds, dtype=np.float64) if len(bounds) != 2: - raise ValueError('bounds must be (2, dimension!') + raise ValueError("bounds must be (2, dimension!") count = np.asanyarray(count, dtype=np.int64) if count.shape == (): count = np.tile(count, bounds.shape[1]) grid_elements = [np.linspace(*b, num=c) for b, c in zip(bounds.T, count)] - grid = np.vstack(np.meshgrid(*grid_elements, indexing='ij') - ).reshape(bounds.shape[1], -1).T + grid = ( + np.vstack(np.meshgrid(*grid_elements, indexing="ij")) + .reshape(bounds.shape[1], -1) + .T + ) return grid @@ -861,12 +836,14 @@ def decimal_to_digits(decimal, min_digits=None): return digits -def attach_to_log(level=logging.DEBUG, - handler=None, - loggers=None, - colors=True, - capture_warnings=True, - blacklist=None): +def attach_to_log( + level=logging.DEBUG, + handler=None, + loggers=None, + colors=True, + capture_warnings=True, + blacklist=None, +): """ Attach a stream handler to all loggers. 
@@ -886,12 +863,14 @@ def attach_to_log(level=logging.DEBUG, # default blacklist includes ipython debugging stuff if blacklist is None: - blacklist = ['TerminalIPythonApp', - 'PYREADLINE', - 'pyembree', - 'shapely', - 'matplotlib', - 'parso'] + blacklist = [ + "TerminalIPythonApp", + "PYREADLINE", + "pyembree", + "shapely", + "matplotlib", + "parso", + ] # make sure we log warnings from the warnings module logging.captureWarnings(capture_warnings) @@ -899,20 +878,27 @@ def attach_to_log(level=logging.DEBUG, # create a basic formatter formatter = logging.Formatter( "[%(asctime)s] %(levelname)-7s (%(filename)s:%(lineno)3s) %(message)s", - "%Y-%m-%d %H:%M:%S") + "%Y-%m-%d %H:%M:%S", + ) if colors: try: from colorlog import ColoredFormatter + formatter = ColoredFormatter( - ("%(log_color)s%(levelname)-8s%(reset)s " + - "%(filename)17s:%(lineno)-4s %(blue)4s%(message)s"), + ( + "%(log_color)s%(levelname)-8s%(reset)s " + + "%(filename)17s:%(lineno)-4s %(blue)4s%(message)s" + ), datefmt=None, reset=True, - log_colors={'DEBUG': 'cyan', - 'INFO': 'green', - 'WARNING': 'yellow', - 'ERROR': 'red', - 'CRITICAL': 'red'}) + log_colors={ + "DEBUG": "cyan", + "INFO": "green", + "WARNING": "yellow", + "ERROR": "red", + "CRITICAL": "red", + }, + ) except ImportError: pass @@ -929,16 +915,17 @@ def attach_to_log(level=logging.DEBUG, # de-duplicate loggers using a set loggers = set(logging.Logger.manager.loggerDict.values()) # add the warnings logging - loggers.add(logging.getLogger('py.warnings')) + loggers.add(logging.getLogger("py.warnings")) # disable pyembree warnings - logging.getLogger('pyembree').disabled = True + logging.getLogger("pyembree").disabled = True # loop through all available loggers for logger in loggers: # skip loggers on the blacklist - if (logger.__class__.__name__ != 'Logger' or - any(logger.name.startswith(b) for b in blacklist)): + if logger.__class__.__name__ != "Logger" or any( + logger.name.startswith(b) for b in blacklist + ): continue logger.addHandler(handler) logger.setLevel(level) @@ -993,8 +980,7 @@ def stack_lines(indices): shape = (-1, len(indices[0])) else: shape = (-1, 2) - return np.column_stack((indices[:-1], - indices[1:])).reshape(shape) + return np.column_stack((indices[:-1], indices[1:])).reshape(shape) def append_faces(vertices_seq, faces_seq): @@ -1036,11 +1022,7 @@ def append_faces(vertices_seq, faces_seq): return vertices, faces -def array_to_string(array, - col_delim=' ', - row_delim='\n', - digits=8, - value_format='{}'): +def array_to_string(array, col_delim=" ", row_delim="\n", digits=8, value_format="{}"): """ Convert a 1 or 2D array into a string with a specified number of digits and delimiter. 
The reason this exists is that the @@ -1076,27 +1058,25 @@ def array_to_string, # abort for non-flat arrays if len(array.shape) > 2: - raise ValueError('conversion only works on 1D/2D arrays not %s!', - str(array.shape)) + raise ValueError( + "conversion only works on 1D/2D arrays not %s!", str(array.shape) + ) # abort for structured arrays if array.dtype.names is not None: - raise ValueError( - 'array is structured, use structured_array_to_string instead') + raise ValueError("array is structured, use structured_array_to_string instead") # allow a value to be repeated in a value format - repeats = value_format.count('{') + repeats = value_format.count("{") - if array.dtype.kind in ['i', 'u']: + if array.dtype.kind in ["i", "u"]: # integer types don't need a specified precision format_str = value_format + col_delim - elif array.dtype.kind == 'f': + elif array.dtype.kind == "f": # add the digits formatting to floats - format_str = value_format.replace( - '{}', '{:.' + str(digits) + 'f}') + col_delim + format_str = value_format.replace("{}", "{:." + str(digits) + "f}") + col_delim else: - raise ValueError('dtype %s not convertible!', - array.dtype.name) + raise ValueError("dtype %s not convertible!", array.dtype.name) # length of extra delimiters at the end end_junk = len(col_delim) @@ -1104,7 +1084,7 @@ if len(array.shape) == 2: format_str *= array.shape[1] # cut off the last column delimiter and add a row delimiter - format_str = format_str[:-len(col_delim)] + row_delim + format_str = format_str[: -len(col_delim)] + row_delim end_junk = len(row_delim) # expand format string to whole array @@ -1112,8 +1092,7 @@ # if an array is repeated in the value format # do the shaping here so we don't need to specify indexes - shaped = np.tile(array.reshape((-1, 1)), - (1, repeats)).reshape(-1) + shaped = np.tile(array.reshape((-1, 1)), (1, repeats)).reshape(-1) # run the format operation and remove the extra delimiters formatted = format_str.format(*shaped)[:-end_junk] @@ -1121,11 +1100,9 @@ return formatted -def structured_array_to_string(array, - col_delim=' ', - row_delim='\n', - digits=8, - value_format='{}'): +def structured_array_to_string( + array, col_delim=" ", row_delim="\n", digits=8, value_format="{}" +): """ Convert a structured array into a string with a specified number of digits and delimiter.
The reason this exists is @@ -1162,40 +1139,40 @@ def structured_array_to_string(array, # abort for non-flat arrays if len(array.shape) > 1: - raise ValueError('conversion only works on 1D/2D arrays not %s!', - str(array.shape)) + raise ValueError( + "conversion only works on 1D/2D arrays not %s!", str(array.shape) + ) # abort for unstructured arrays if array.dtype.names is None: - raise ValueError( - 'array is not structured, use array_to_string instead') + raise ValueError("array is not structured, use array_to_string instead") # do not allow a value to be repeated in a value format - if value_format.count('{') > 1: + if value_format.count("{") > 1: raise ValueError( - 'value_format %s is invalid, repeating unstructured array ' - + 'values is unsupported', value_format) + "value_format %s is invalid, repeating unstructured array " + + "values is unsupported", + value_format, + ) - format_str = '' + format_str = "" for name in array.dtype.names: kind = array[name].dtype.kind - element_row_length = ( - array[name].shape[1] if len(array[name].shape) == 2 else 1) - if kind in ['i', 'u']: + element_row_length = array[name].shape[1] if len(array[name].shape) == 2 else 1 + if kind in ["i", "u"]: # integer types need a no-decimal formatting - element_format_str = value_format.replace( - '{}', '{:0.0f}') + col_delim - elif kind == 'f': + element_format_str = value_format.replace("{}", "{:0.0f}") + col_delim + elif kind == "f": # add the digits formatting to floats - element_format_str = value_format.replace( - '{}', '{:.' + str(digits) + 'f}') + col_delim + element_format_str = ( + value_format.replace("{}", "{:." + str(digits) + "f}") + col_delim + ) else: - raise ValueError('dtype %s not convertible!', - array.dtype) + raise ValueError("dtype %s not convertible!", array.dtype) format_str += element_row_length * element_format_str # length of extra delimiters at the end - format_str = format_str[:-len(col_delim)] + row_delim + format_str = format_str[: -len(col_delim)] + row_delim # expand format string to whole array format_str *= len(array) @@ -1203,16 +1180,16 @@ count = len(array) # will upgrade everything to a float flattened = np.hstack( - [array[k].reshape((count, -1)) - for k in array.dtype.names]).reshape(-1) + [array[k].reshape((count, -1)) for k in array.dtype.names] + ).reshape(-1) # run the format operation and remove the extra delimiters - formatted = format_str.format(*flattened)[:-len(row_delim)] + formatted = format_str.format(*flattened)[: -len(row_delim)] return formatted -def array_to_encoded(array, dtype=None, encoding='base64'): +def array_to_encoded(array, dtype=None, encoding="base64"): """ Export a numpy array to a compact serializable dictionary.
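The next hunk reformats `array_to_encoded`, which pairs with `encoded_to_array` further down for round-tripping arrays through JSON-friendly dicts. A short sketch with illustrative values:

```python
import numpy as np

from trimesh import util

original = np.arange(6, dtype=np.float64).reshape((2, 3))

# pack into a serializable dict: dtype string, shape and base64-encoded bytes
encoded = util.array_to_encoded(original, encoding="base64")
assert set(encoded.keys()) == {"dtype", "shape", "base64"}

# decoding restores both the dtype and the original shape
restored = util.encoded_to_array(encoded)
assert restored.dtype == np.float64
assert np.allclose(original, restored)
```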
@@ -1240,21 +1217,20 @@ def array_to_encoded(array, dtype=None, encoding='base64'): if dtype is None: dtype = array.dtype - encoded = {'dtype': np.dtype(dtype).str, - 'shape': shape} - if encoding in ['base64', 'dict64']: + encoded = {"dtype": np.dtype(dtype).str, "shape": shape} + if encoding in ["base64", "dict64"]: packed = base64.b64encode(flat.astype(dtype).tobytes()) - if hasattr(packed, 'decode'): - packed = packed.decode('utf-8') - encoded['base64'] = packed - elif encoding == 'binary': - encoded['binary'] = array.tobytes(order='C') + if hasattr(packed, "decode"): + packed = packed.decode("utf-8") + encoded["base64"] = packed + elif encoding == "binary": + encoded["binary"] = array.tobytes(order="C") else: - raise ValueError(f'encoding {encoding} is not available!') + raise ValueError(f"encoding {encoding} is not available!") return encoded -def decode_keys(store, encoding='utf-8'): +def decode_keys(store, encoding="utf-8"): """ If a dictionary has keys that are bytes decode them to a str. @@ -1279,7 +1255,7 @@ def decode_keys(store, encoding='utf-8'): """ keys = store.keys() for key in keys: - if hasattr(key, 'decode'): + if hasattr(key, "decode"): decoded = key.decode(encoding) if key != decoded: store[key.decode(encoding)] = store[key] @@ -1287,7 +1263,7 @@ def decode_keys(store, encoding='utf-8'): return store -def comment_strip(text, starts_with='#', new_line='\n'): +def comment_strip(text, starts_with="#", new_line="\n"): """ Strip comments from a text block. @@ -1314,16 +1290,18 @@ def comment_strip(text, starts_with='#', new_line='\n'): # special case files that start with a comment if text.startswith(starts_with): - lead = '' + lead = "" else: lead = split[0] # take each comment up until the newline removed = [i.split(new_line, 1) for i in split] # add the leading string back on - result = lead + new_line + new_line.join( - i[1] for i in removed - if len(i) > 1 and len(i[1]) > 0) + result = ( + lead + + new_line + + new_line.join(i[1] for i in removed if len(i) > 1 and len(i[1]) > 0) + ) # strip leading and trailing whitespace result = result.strip() @@ -1353,19 +1331,17 @@ def encoded_to_array(encoded): as_array = np.asanyarray(encoded) return as_array else: - raise ValueError('Unable to extract numpy array from input') + raise ValueError("Unable to extract numpy array from input") encoded = decode_keys(encoded) - dtype = np.dtype(encoded['dtype']) - if 'base64' in encoded: - array = np.frombuffer(base64.b64decode(encoded['base64']), - dtype) - elif 'binary' in encoded: - array = np.frombuffer(encoded['binary'], - dtype=dtype) - if 'shape' in encoded: - array = array.reshape(encoded['shape']) + dtype = np.dtype(encoded["dtype"]) + if "base64" in encoded: + array = np.frombuffer(base64.b64decode(encoded["base64"]), dtype) + elif "binary" in encoded: + array = np.frombuffer(encoded["binary"], dtype=dtype) + if "shape" in encoded: + array = array.reshape(encoded["shape"]) return array @@ -1407,7 +1383,7 @@ def type_bases(obj, depth=4): bases = np.hstack(bases) except IndexError: bases = [] - return [i for i in bases if hasattr(i, '__name__')] + return [i for i in bases if hasattr(i, "__name__")] def type_named(obj, name): @@ -1434,7 +1410,7 @@ class : Optional[Callable] for base in type_bases(obj): if base.__name__ == name: return base - raise ValueError('Unable to extract class of name ' + name) + raise ValueError("Unable to extract class of name " + name) def concatenate(a, b=None): @@ -1475,11 +1451,12 @@ def concatenate(a, b=None): # if there are no meshes return an 
empty list return [] - is_mesh = [f for f in flat if is_instance_named(f, 'Trimesh')] - is_path = [f for f in flat if is_instance_named(f, 'Path')] + is_mesh = [f for f in flat if is_instance_named(f, "Trimesh")] + is_path = [f for f in flat if is_instance_named(f, "Path")] if len(is_path) > len(is_mesh): from .path.util import concatenate as concatenate_path + return concatenate_path(is_path) if len(is_mesh) == 0: @@ -1487,46 +1464,44 @@ def concatenate(a, b=None): # extract the trimesh type to avoid a circular import # and assert that all inputs are Trimesh objects - trimesh_type = type_named(is_mesh[0], 'Trimesh') + trimesh_type = type_named(is_mesh[0], "Trimesh") # append faces and vertices of meshes vertices, faces = append_faces( - [m.vertices.copy() for m in is_mesh], - [m.faces.copy() for m in is_mesh]) + [m.vertices.copy() for m in is_mesh], [m.faces.copy() for m in is_mesh] + ) - # only save face normals if already calculated + # save face normals if already calculated face_normals = None - if all('face_normals' in m._cache for m in is_mesh): - face_normals = np.vstack( - [m.face_normals for m in is_mesh]) - - # always save vertex normals - vertex_normals = vstack_empty( - [m.vertex_normals.copy() for m in is_mesh]) + if any("face_normals" in m._cache for m in is_mesh): + face_normals = np.vstack([m.face_normals for m in is_mesh]) + + # save vertex normals if any mesh has them + vertex_normals = None + if any("vertex_normals" in m._cache for m in is_mesh): + vertex_normals = vstack_empty([m.vertex_normals for m in is_mesh]) try: # concatenate visuals - visual = is_mesh[0].visual.concatenate( - [m.visual for m in is_mesh[1:]]) + visual = is_mesh[0].visual.concatenate([m.visual for m in is_mesh[1:]]) except BaseException: - log.debug('failed to combine visuals', exc_info=True) + log.debug("failed to combine visuals", exc_info=True) visual = None # create the mesh object - return trimesh_type(vertices=vertices, - faces=faces, - face_normals=face_normals, - vertex_normals=vertex_normals, - visual=visual, - process=False) + return trimesh_type( + vertices=vertices, + faces=faces, + face_normals=face_normals, + vertex_normals=vertex_normals, + visual=visual, + process=False, + ) -def submesh(mesh, - faces_sequence, - repair=True, - only_watertight=False, - min_faces=None, - append=False): +def submesh( + mesh, faces_sequence, repair=True, only_watertight=False, min_faces=None, append=False +): """ Return a subset of a mesh. 
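The `concatenate` hunk above changes when normals are stacked (now whenever any input mesh already has them cached) but the call signature is unchanged. A quick usage sketch, using two primitives purely as example inputs:

```python
import trimesh

a = trimesh.creation.box()
b = trimesh.creation.icosphere()
b.apply_translation([2.0, 0.0, 0.0])

# append the vertices and faces of both meshes into one Trimesh
combined = trimesh.util.concatenate([a, b])
assert len(combined.faces) == len(a.faces) + len(b.faces)
assert len(combined.vertices) == len(a.vertices) + len(b.vertices)
```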
@@ -1573,7 +1548,7 @@ def submesh(mesh, if len(index) == 0: # regardless of type empty arrays are useless continue - if index.dtype.kind == 'b': + if index.dtype.kind == "b": # if passed a bool with no true continue if not index.any(): continue @@ -1602,7 +1577,7 @@ def submesh(mesh, # we use type(mesh) rather than importing Trimesh from base # to avoid a circular import - trimesh_type = type_named(mesh, 'Trimesh') + trimesh_type = type_named(mesh, "Trimesh") if append: visual = None try: @@ -1617,29 +1592,30 @@ def submesh(mesh, faces=faces, face_normals=np.vstack(normals), visual=visual, - process=False) + process=False, + ) return appended if visuals is None: visuals = [None] * len(vertices) # generate a list of Trimesh objects - result = [trimesh_type( - vertices=v, - faces=f, - face_normals=n, - visual=c, - metadata=copy.deepcopy(mesh.metadata), - process=False) for v, f, n, c in zip(vertices, - faces, - normals, - visuals)] + result = [ + trimesh_type( + vertices=v, + faces=f, + face_normals=n, + visual=c, + metadata=copy.deepcopy(mesh.metadata), + process=False, + ) + for v, f, n, c in zip(vertices, faces, normals, visuals) + ] result = np.array(result) if only_watertight or repair: # fill_holes will attempt a repair and returns the # watertight status at the end of the repair attempt - watertight = np.array([i.fill_holes() and len(i.faces) >= 4 - for i in result]) + watertight = np.array([i.fill_holes() and len(i.faces) >= 4 for i in result]) if only_watertight: # remove unrepairable meshes result = result[watertight] @@ -1666,9 +1642,9 @@ def zero_pad(data, count, right=True): elif len(data) < count: padded = np.zeros(count) if right: - padded[-len(data):] = data + padded[-len(data) :] = data else: - padded[:len(data)] = data + padded[: len(data)] = data return padded else: return np.asanyarray(data) @@ -1691,15 +1667,17 @@ def jsonify(obj, **kwargs): dumped : str JSON dump of obj """ + class EdgeEncoder(json.JSONEncoder): def default(self, obj): # will work for numpy.ndarrays # as well as their int64/etc objects - if hasattr(obj, 'tolist'): + if hasattr(obj, "tolist"): return obj.tolist() - elif hasattr(obj, 'timestamp'): + elif hasattr(obj, "timestamp"): return obj.timestamp() return json.JSONEncoder.default(self, obj) + # run the dumps using our encoder return json.dumps(obj, cls=EdgeEncoder, **kwargs) @@ -1729,16 +1707,18 @@ def convert_like(item, like): return item # if it's an array with one item return it - if (is_sequence(item) and len(item) == 1 and - isinstance(item[0], like.__class__)): + if is_sequence(item) and len(item) == 1 and isinstance(item[0], like.__class__): return item[0] - if (isinstance(item, str) and - like.__class__.__name__ == 'Polygon' and - item.startswith('POLYGON')): + if ( + isinstance(item, str) + and like.__class__.__name__ == "Polygon" + and item.startswith("POLYGON") + ): # break our rule on imports but only a little bit # the import was a WKT serialized polygon from shapely import wkt + return wkt.loads(item) # otherwise just run the conversion @@ -1772,16 +1752,16 @@ def bounds_tree(bounds): if len(bounds.shape) == 3: # should be min-max per bound if bounds.shape[1] != 2: - raise ValueError('bounds not (n, 2, dimension)!') + raise ValueError("bounds not (n, 2, dimension)!") # reshape to one-row-per-hyperrectangle bounds = bounds.reshape((len(bounds), -1)) elif len(bounds.shape) != 2 or bounds.size == 0: - raise ValueError('Bounds must be (n, dimension * 2)!') + raise ValueError("Bounds must be (n, dimension * 2)!") # check to make sure we have 
correct shape dimension = bounds.shape[1] if (dimension % 2) != 0: - raise ValueError('Bounds must be (n,dimension*2)!') + raise ValueError("Bounds must be (n,dimension*2)!") dimension = int(dimension / 2) # some versions of rtree screw up indexes on stream loading @@ -1789,20 +1769,20 @@ def bounds_tree(bounds): # or if we have to do a loop to insert things which is 5x slower rtree_test = rtree.index.Index( [(1564, [0, 0, 0, 10, 10, 10], None)], - properties=rtree.index.Property(dimension=3)) - rtree_stream_ok = next(rtree_test.intersection( - [1, 1, 1, 2, 2, 2])) == 1564 + properties=rtree.index.Property(dimension=3), + ) + rtree_stream_ok = next(rtree_test.intersection([1, 1, 1, 2, 2, 2])) == 1564 properties = rtree.index.Property(dimension=dimension) if rtree_stream_ok: # stream load was verified working on import above - tree = rtree.index.Index(zip(np.arange(len(bounds)), - bounds, - [None] * len(bounds)), - properties=properties) + tree = rtree.index.Index( + zip(np.arange(len(bounds)), bounds, [None] * len(bounds)), + properties=properties, + ) else: # in some rtree versions stream loading goofs the index - log.warning('rtree stream loading broken! Try upgrading rtree!') + log.warning("rtree stream loading broken! Try upgrading rtree!") tree = rtree.index.Index(properties=properties) for i, b in enumerate(bounds): tree.insert(i, b) @@ -1823,14 +1803,11 @@ def wrap_as_stream(item): wrapped : file-like object Contains data from item """ - if not PY3: - # in python 2 StringIO handles bytes and str - return StringIO(item) if isinstance(item, str): return StringIO(item) elif isinstance(item, bytes): return BytesIO(item) - raise ValueError(f'{type(item).__name__} is not wrappable!') + raise ValueError(f"{type(item).__name__} is not wrappable!") def sigfig_round(values, sigfig=1): @@ -1862,7 +1839,7 @@ def sigfig_round(values, sigfig=1): Out[3]: 0.0001405 """ as_int, multiplier = sigfig_int(values, sigfig) - rounded = as_int * (10 ** multiplier) + rounded = as_int * (10**multiplier) return rounded @@ -1892,7 +1869,7 @@ def sigfig_int(values, sigfig): sigfig = np.asanyarray(sigfig, dtype=np.int64).reshape(-1) if sigfig.shape != values.shape: - raise ValueError('sigfig must match identifier') + raise ValueError("sigfig must match identifier") exponent = np.zeros(len(values)) nonzero = np.abs(values) > TOL_ZERO @@ -1926,16 +1903,15 @@ def decompress(file_obj, file_type): if isinstance(file_obj, bytes): file_obj = wrap_as_stream(file_obj) - if file_type.endswith('zip'): + if file_type.endswith("zip"): archive = zipfile.ZipFile(file_obj) - return {name: wrap_as_stream(archive.read(name)) - for name in archive.namelist()} - if 'tar' in file_type[-6:]: + return {name: wrap_as_stream(archive.read(name)) for name in archive.namelist()} + if "tar" in file_type[-6:]: import tarfile - archive = tarfile.open(fileobj=file_obj, mode='r') - return {name: archive.extractfile(name) - for name in archive.getnames()} - raise ValueError('Unsupported type passed!') + + archive = tarfile.open(fileobj=file_obj, mode="r") + return {name: archive.extractfile(name) for name in archive.getnames()} + raise ValueError("Unsupported type passed!") def compress(info, **kwargs): @@ -1954,17 +1930,12 @@ def compress(info, **kwargs): compressed : bytes Compressed file data """ - if PY3: - file_obj = BytesIO() - else: - file_obj = StringIO() - + file_obj = BytesIO() with zipfile.ZipFile( - file_obj, - mode='w', - compression=zipfile.ZIP_DEFLATED, **kwargs) as zipper: + file_obj, mode="w", compression=zipfile.ZIP_DEFLATED, 
**kwargs + ) as zipper: for name, data in info.items(): - if hasattr(data, 'read'): + if hasattr(data, "read"): # if we were passed a file object, read it data = data.read() zipper.writestr(name, data) @@ -1995,12 +1966,12 @@ def split_extension(file_name, special=None): file_name = str(file_name) if special is None: - special = ['tar.bz2', 'tar.gz'] + special = ["tar.bz2", "tar.gz"] if file_name.endswith(tuple(special)): for end in special: if file_name.endswith(end): return end - return file_name.split('.')[-1] + return file_name.split(".")[-1] def triangle_strips_to_faces(strips): @@ -2039,7 +2010,7 @@ # preallocate and slice the blob into rough triangles tri = np.zeros((len(blob) - 2, 3), dtype=np.int64) for i in range(3): - tri[:len(blob) - 3, i] = blob[i:-3 + i] + tri[: len(blob) - 3, i] = blob[i : -3 + i] # the last triangle is left off from the slicing, add it back tri[-1] = blob[-3:] @@ -2055,7 +2026,7 @@ length_index = np.append(0, np.cumsum(lengths - 2)) flip = np.zeros(length_index[-1], dtype=bool) for i in range(len(length_index) - 1): - flip[length_index[i] + 1:length_index[i + 1]][::2] = True + flip[length_index[i] + 1 : length_index[i + 1]][::2] = True tri[flip] = np.fliplr(tri[flip]) return tri @@ -2076,11 +2047,10 @@ Vertex indices representing triangles """ - faces = [np.transpose([ - fan[0] * np.ones(len(fan) - 2, dtype=int), - fan[1:-1], - fan[2:] - ]) for fan in fans] + faces = [ + np.transpose([fan[0] * np.ones(len(fan) - 2, dtype=int), fan[1:-1], fan[2:]]) + for fan in fans + ] return np.concatenate(faces, axis=0) @@ -2111,9 +2081,7 @@ return np.vstack(stackable) -def write_encoded(file_obj, - stuff, - encoding='utf-8'): +def write_encoded(file_obj, stuff, encoding="utf-8"): """ If a file is open in binary mode and a string is passed, encode and write.
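For the archive helpers touched above, `compress` and `decompress` are inverses: one zips a mapping of names to data into raw bytes, the other expands bytes back into a mapping of names to file-like objects. A round-trip sketch with made-up file names:

```python
from trimesh import util

# zip a {name: bytes} mapping into raw archive bytes
archive = util.compress({"hello.txt": b"hi", "data/values.csv": b"1,2,3"})

# decompress accepts the raw bytes directly and wraps entries as streams
files = util.decompress(archive, file_type="zip")
assert files["hello.txt"].read() == b"hi"
```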
@@ -2133,13 +2101,11 @@ def write_encoded(file_obj, encoding : str Encoding of text """ - binary_file = 'b' in getattr(file_obj, 'mode', 'b') - string_stuff = isinstance(stuff, basestring) + binary_file = "b" in getattr(file_obj, "mode", "b") + string_stuff = isinstance(stuff, str) binary_stuff = isinstance(stuff, bytes) - if not PY3: - file_obj.write(stuff) - elif binary_file and string_stuff: + if binary_file and string_stuff: file_obj.write(stuff.encode(encoding)) elif not binary_file and binary_stuff: file_obj.write(stuff.decode(encoding)) @@ -2164,8 +2130,7 @@ def unique_id(length=12): unique : str Unique alphanumeric identifier """ - return uuid.UUID(int=random.getrandbits(128), - version=4).hex[:length] + return uuid.UUID(int=random.getrandbits(128), version=4).hex[:length] def generate_basis(z, epsilon=1e-12): @@ -2193,7 +2158,7 @@ def generate_basis(z, epsilon=1e-12): z = np.array(z, dtype=np.float64, copy=True) # must be a 3D vector if z.shape != (3,): - raise ValueError('z must be (3,) float!') + raise ValueError("z must be (3,) float!") z_norm = np.linalg.norm(z) if z_norm < epsilon: @@ -2299,11 +2264,11 @@ def __getitem__(self, key): def __setitem__(self, key, value): if not isinstance(key, str): - raise ValueError('key must be a string, got %s' % str(key)) + raise ValueError("key must be a string, got %s" % str(key)) if key in self: - raise KeyError('Cannot set new value to existing key %s' % key) + raise KeyError("Cannot set new value to existing key %s" % key) if not callable(value): - raise ValueError('Cannot set value which is not callable.') + raise ValueError("Cannot set value which is not callable.") self._dict[key] = value def __iter__(self): @@ -2340,7 +2305,7 @@ def __exit__(self, *args, **kwargs): shutil.rmtree(self.path) -def decode_text(text, initial='utf-8'): +def decode_text(text, initial="utf-8"): """ Try to decode byte input as a string. @@ -2360,7 +2325,7 @@ def decode_text(text, initial='utf-8'): Data as a string """ # if not bytes just return input - if not hasattr(text, 'decode'): + if not hasattr(text, "decode"): return text try: # initially guess file is UTF-8 or specified encoding @@ -2368,18 +2333,19 @@ def decode_text(text, initial='utf-8'): except UnicodeDecodeError: # detect different file encodings import chardet + # try to detect the encoding of the file # only look at the first 1000 characters otherwise # for big files chardet looks at everything and is slow detect = chardet.detect(text[:1000]) # warn on files that aren't UTF-8 log.debug( - 'Data not {}! Trying {} (confidence {})'.format( - initial, - detect['encoding'], - detect['confidence'])) + "Data not {}! 
Trying {} (confidence {})".format( + initial, detect["encoding"], detect["confidence"] + ) + ) # try to decode again, unwrap in try - text = text.decode(detect['encoding'], errors='ignore') + text = text.decode(detect["encoding"], errors="ignore") return text @@ -2397,13 +2363,12 @@ def to_ascii(text): ascii : str Input as an ASCII string """ - if hasattr(text, 'encode'): + if hasattr(text, "encode"): # case for existing strings - return text.encode( - 'ascii', errors='ignore').decode('ascii') - elif hasattr(text, 'decode'): + return text.encode("ascii", errors="ignore").decode("ascii") + elif hasattr(text, "decode"): # case for bytes - return text.decode('ascii', errors='ignore') + return text.decode("ascii", errors="ignore") # otherwise just wrap as a string return str(text) @@ -2431,7 +2396,7 @@ def is_ccw(points, return_all=False): points = np.array(points, dtype=np.float64) if len(points.shape) != 2 or points.shape[1] != 2: - raise ValueError('only defined for `(n, 2)` points') + raise ValueError("only defined for `(n, 2)` points") # the "shoelace formula" product = np.subtract(*(points[:-1, [1, 0]] * points[1:]).T) @@ -2444,8 +2409,9 @@ def is_ccw(points, return_all=False): return ccw # the centroid of the polygon uses the same formula - centroid = ((points[:-1] + points[1:]) * - product.reshape((-1, 1))).sum(axis=0) / (6.0 * area) + centroid = ((points[:-1] + points[1:]) * product.reshape((-1, 1))).sum(axis=0) / ( + 6.0 * area + ) return ccw, area, centroid @@ -2477,9 +2443,7 @@ def unique_name(start, contains, counts=None): A name that is not contained in `contains` """ # exit early if name is not in bundle - if (start is not None and - len(start) > 0 and - start not in contains): + if start is not None and len(start) > 0 and start not in contains: return start # start checking with zero index unless found @@ -2488,9 +2452,9 @@ def unique_name(start, contains, counts=None): else: increment = counts.get(start, 0) if start is not None and len(start) > 0: - formatter = start + '_{}' + formatter = start + "_{}" # split by our delimiter once - split = start.rsplit('_', 1) + split = start.rsplit("_", 1) if len(split) == 2 and increment == 0: try: # start incrementing from the existing @@ -2498,11 +2462,11 @@ def unique_name(start, contains, counts=None): # if it is not an integer this will fail increment = int(split[1]) # include the first split value - formatter = split[0] + '_{}' + formatter = split[0] + "_{}" except BaseException: pass else: - formatter = 'geometry_{}' + formatter = "geometry_{}" # if contains is empty we will only need to check once for i in range(increment + 1, 2 + increment + len(contains)): @@ -2514,4 +2478,4 @@ def unique_name(start, contains, counts=None): # this should really never happen since we looped # through the full length of contains - raise ValueError('Unable to establish unique name!') + raise ValueError("Unable to establish unique name!") From a393abba772d8f379570eca76d13664e8a3defab Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 15:01:20 -0400 Subject: [PATCH 72/84] add in-process check --- tests/test_gltf.py | 24 ++++++++---------------- trimesh/util.py | 4 +++- trimesh/voxel/encoding.py | 16 ---------------- 3 files changed, 11 insertions(+), 33 deletions(-) diff --git a/tests/test_gltf.py b/tests/test_gltf.py index 825c95b1d..018e39f19 100644 --- a/tests/test_gltf.py +++ b/tests/test_gltf.py @@ -42,22 +42,14 @@ def validate_glb(data, name=None): capture_output=True) # -o prints JSON to stdout content = 
report.stdout.decode('utf-8') - # log the GLTF validator report if - # there are any warnings or hints - decode = g.json.loads(content) - - if (decode['issues']['numErrors'] > 0 or - report.returncode != 0): - # log the whole error report - g.log.error(content) - if name is not None: - g.log.error('failed on: %s', name) - raise ValueError(content) - - # print all warnings: extremely verbose - # if any(decode['issues'][i] > 0 for i in - # ['numWarnings', 'numInfos', 'numHints']): - # g.log.debug(content) + returncode = report.returncode + + if returncode != 0: + g.log.error(f'failed on: `{name}`') + g.log.error(f'validator: `{content}`') + g.log.error(f'stderr: `{report.stderr}`') + + raise ValueError("gltf_validator failed") class GLTFTest(g.unittest.TestCase): diff --git a/trimesh/util.py b/trimesh/util.py index 088f2985e..0da9aa344 100644 --- a/trimesh/util.py +++ b/trimesh/util.py @@ -1474,12 +1474,14 @@ def concatenate(a, b=None): # save face normals if already calculated face_normals = None if any("face_normals" in m._cache for m in is_mesh): - face_normals = np.vstack([m.face_normals for m in is_mesh]) + face_normals = vstack_empty([m.face_normals for m in is_mesh]) + assert face_normals.shape == faces.shape # save vertex normals if any mesh has them vertex_normals = None if any("vertex_normals" in m._cache for m in is_mesh): vertex_normals = vstack_empty([m.vertex_normals for m in is_mesh]) + assert vertex_normals.shape == vertices.shape try: # concatenate visuals diff --git a/trimesh/voxel/encoding.py b/trimesh/voxel/encoding.py index 3942a27e0..1f21d6c0d 100644 --- a/trimesh/voxel/encoding.py +++ b/trimesh/voxel/encoding.py @@ -118,22 +118,6 @@ def stripped(self): def _flip(self, axes): return FlippedEncoding(self, axes) - def crc(self): - log.warning( - "`geometry.crc()` is deprecated and will " - + "be removed in October 2023: replace " - + "with `geometry.__hash__()` or `hash(geometry)`" - ) - return self.__hash__() - - def hash(self): - log.warning( - "`geometry.hash()` is deprecated and will " - + "be removed in October 2023: replace " - + "with `geometry.__hash__()` or `hash(geometry)`" - ) - return self.__hash__() - def __hash__(self): """ Get the hash of the current transformation matrix. 
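The encoding hunk above deletes the long-deprecated `.crc()` and `.hash()` shims, which had been forwarding to `__hash__` anyway; the builtin `hash()` is the replacement. A sketch of the migration pattern, using a primitive mesh as an arbitrary example:

```python
import trimesh

mesh = trimesh.creation.box()

# previously `mesh.crc()` or `mesh.hash()`, now just the builtin
before = hash(mesh)

# the hash tracks the underlying data, so edits invalidate it
mesh.apply_translation([1.0, 0.0, 0.0])
assert hash(mesh) != before  # barring an astronomically unlikely collision
```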
From f7121ca78438b3a90efa3a80dc40b3ed003b751c Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 15:54:01 -0400 Subject: [PATCH 73/84] fix subtle caching bug --- trimesh/caching.py | 174 +++++++++++++++++++------------------- trimesh/voxel/base.py | 33 +++----- trimesh/voxel/creation.py | 6 +- trimesh/voxel/encoding.py | 22 +---- 4 files changed, 100 insertions(+), 135 deletions(-) diff --git a/trimesh/caching.py b/trimesh/caching.py index a41396a9c..2a1c11484 100644 --- a/trimesh/caching.py +++ b/trimesh/caching.py @@ -48,6 +48,7 @@ def sha256(item): def hash_fallback(item): return int(_blake2b(item).hexdigest(), 16) + except BaseException: # fallback to sha256 hash_fallback = sha256 @@ -63,9 +64,11 @@ def hash_fallback(item): from xxhash import xxh64_intdigest as hash_fast except BaseException: # use hashlib as a fallback hashing library - log.debug('falling back to hashlib ' + - 'hashing: `pip install xxhash`' + - 'for 50x faster cache checks') + log.debug( + "falling back to hashlib " + + "hashing: `pip install xxhash`" + + "for 50x faster cache checks" + ) hash_fast = hash_fallback @@ -92,10 +95,9 @@ def tracked_array(array, dtype=None): if array is None: array = [] # make sure it is contiguous then view it as our subclass - tracked = np.ascontiguousarray( - array, dtype=dtype).view(TrackedArray) + tracked = np.ascontiguousarray(array, dtype=dtype).view(TrackedArray) # should always be contiguous here - assert tracked.flags['C_CONTIGUOUS'] + assert tracked.flags["C_CONTIGUOUS"] return tracked @@ -138,8 +140,11 @@ def get_cached(*args, **kwargs): # value not in cache so execute the function value = function(*args, **kwargs) # store the value - if self._cache.force_immutable and hasattr( - value, 'flags') and len(value.shape) > 0: + if ( + self._cache.force_immutable + and hasattr(value, "flags") + and len(value.shape) > 0 + ): value.flags.writeable = False self._cache.cache[name] = value @@ -189,14 +194,13 @@ def __array_wrap__(self, out_arr, context=None): See https://github.com/numpy/numpy/issues/5819 """ if out_arr.ndim: - return np.ndarray.__array_wrap__( - self, out_arr, context) + return np.ndarray.__array_wrap__(self, out_arr, context) # Match numpy's behavior and return a numpy dtype scalar return out_arr[()] @property def mutable(self): - return self.flags['WRITEABLE'] + return self.flags["WRITEABLE"] @mutable.setter def mutable(self, value): @@ -212,12 +216,12 @@ def __hash__(self): A hash of the array contents. """ # repeat the bookkeeping to get a contiguous array - if not self._dirty_hash and hasattr(self, '_hashed'): + if not self._dirty_hash and hasattr(self, "_hashed"): # we have a valid hash without recomputing. 
return self._hashed # run a hashing function on the C-order bytes copy - hashed = hash_fast(self.tobytes(order='C')) + hashed = hash_fast(self.tobytes(order="C")) # assign the value and set the flag self._hashed = hashed @@ -234,118 +238,95 @@ def __iadd__(self, *args, **kwargs): """ self._dirty_hash = True - return super(self.__class__, self).__iadd__( - *args, **kwargs) + return super(self.__class__, self).__iadd__(*args, **kwargs) def __isub__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__isub__( - *args, **kwargs) + return super(self.__class__, self).__isub__(*args, **kwargs) def fill(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).fill( - *args, **kwargs) + return super(self.__class__, self).fill(*args, **kwargs) def partition(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).partition( - *args, **kwargs) + return super(self.__class__, self).partition(*args, **kwargs) def put(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).put( - *args, **kwargs) + return super(self.__class__, self).put(*args, **kwargs) def byteswap(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).byteswap( - *args, **kwargs) + return super(self.__class__, self).byteswap(*args, **kwargs) def itemset(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).itemset( - *args, **kwargs) + return super(self.__class__, self).itemset(*args, **kwargs) def sort(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).sort( - *args, **kwargs) + return super(self.__class__, self).sort(*args, **kwargs) def setflags(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).setflags( - *args, **kwargs) + return super(self.__class__, self).setflags(*args, **kwargs) def __imul__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__imul__( - *args, **kwargs) + return super(self.__class__, self).__imul__(*args, **kwargs) def __idiv__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__idiv__( - *args, **kwargs) + return super(self.__class__, self).__idiv__(*args, **kwargs) def __itruediv__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__itruediv__( - *args, **kwargs) + return super(self.__class__, self).__itruediv__(*args, **kwargs) def __imatmul__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__imatmul__( - *args, **kwargs) + return super(self.__class__, self).__imatmul__(*args, **kwargs) def __ipow__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__ipow__( - *args, **kwargs) + return super(self.__class__, self).__ipow__(*args, **kwargs) def __imod__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__imod__( - *args, **kwargs) + return super(self.__class__, self).__imod__(*args, **kwargs) def __ifloordiv__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__ifloordiv__( - *args, **kwargs) + return super(self.__class__, self).__ifloordiv__(*args, **kwargs) def __ilshift__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__ilshift__( - *args, **kwargs) + return super(self.__class__, self).__ilshift__(*args, **kwargs) def __irshift__(self, *args, **kwargs): self._dirty_hash = True - 
return super(self.__class__, self).__irshift__( - *args, **kwargs) + return super(self.__class__, self).__irshift__(*args, **kwargs) def __iand__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__iand__( - *args, **kwargs) + return super(self.__class__, self).__iand__(*args, **kwargs) def __ixor__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__ixor__( - *args, **kwargs) + return super(self.__class__, self).__ixor__(*args, **kwargs) def __ior__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__ior__( - *args, **kwargs) + return super(self.__class__, self).__ior__(*args, **kwargs) def __setitem__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__setitem__( - *args, **kwargs) + return super(self.__class__, self).__setitem__(*args, **kwargs) def __setslice__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__setslice__( - *args, **kwargs) + return super(self.__class__, self).__setslice__(*args, **kwargs) class Cache: @@ -398,9 +379,11 @@ def verify(self): # things changed if id_new != self.id_current: if len(self.cache) > 0: - log.debug('%d items cleared from cache: %s', - len(self.cache), - str(list(self.cache.keys()))) + log.debug( + "%d items cleared from cache: %s", + len(self.cache), + str(list(self.cache.keys())), + ) # hash changed, so dump the cache # do it manually rather than calling clear() # as we are internal logic and can avoid function calls @@ -420,8 +403,7 @@ def clear(self, exclude=None): if exclude is None: self.cache = {} else: - self.cache = {k: v for k, v in self.cache.items() - if k in exclude} + self.cache = {k: v for k, v in self.cache.items() if k in exclude} def update(self, items): """ @@ -432,7 +414,7 @@ def update(self, items): if self.force_immutable: for v in self.cache.values(): - if hasattr(v, 'flags') and len(v.shape) > 0: + if hasattr(v, "flags") and len(v.shape) > 0: v.flags.writeable = False self.id_set() @@ -476,8 +458,7 @@ def __setitem__(self, key, value): # dumpy cache if ID function has changed self.verify() # make numpy arrays read-only if asked to - if self.force_immutable and hasattr( - value, 'flags') and len(value.shape) > 0: + if self.force_immutable and hasattr(value, "flags") and len(value.shape) > 0: value.flags.writeable = False # assign data to dict self.cache[key] = value @@ -523,8 +504,7 @@ def __init__(self, path, expire_days=30): # store how old we allow results to be self.expire_days = expire_days # store the location for saving results - self.path = os.path.abspath( - os.path.expanduser(path)) + self.path = os.path.abspath(os.path.expanduser(path)) # make sure the specified path exists os.makedirs(self.path, exist_ok=True) @@ -541,7 +521,7 @@ def get(self, key, fetch): function and store its result on disk. 
""" # hash the key so we have a fixed length string - key_hash = _sha256(key.encode('utf-8')).hexdigest() + key_hash = _sha256(key.encode("utf-8")).hexdigest() # full path of result on local disk path = os.path.join(self.path, key_hash) @@ -553,15 +533,15 @@ def get(self, key, fetch): # this nested condition means that # the file both exists and is recent # enough, so just return its contents - with open(path, 'rb') as f: + with open(path, "rb") as f: return f.read() - log.debug(f'not in cache fetching: `{key}`') + log.debug(f"not in cache fetching: `{key}`") # since we made it here our data isn't cached # run the expensive function to fetch the file raw = fetch() # write the data so we can save it - with open(path, 'wb') as f: + with open(path, "wb") as f: f.write(raw) # return the data @@ -598,7 +578,7 @@ def mutable(self): is_mutable : bool Can data be altered in the DataStore """ - return getattr(self, '_mutable', True) + return getattr(self, "_mutable", True) @mutable.setter def mutable(self, value): @@ -650,21 +630,36 @@ def __getitem__(self, key): def __setitem__(self, key, data): """ - Store an item in the DataStore + Store an item in the DataStore. + + Parameters + ------------- + key + A hashable key to store under + data + Usually a numpy array which will be subclassed + but anything hashable should be able to be stored. """ # we shouldn't allow setting on immutable datastores if not self.mutable: - raise ValueError('DataStore is configured immutable!') + raise ValueError("DataStore is configured immutable!") - if hasattr(data, 'hash'): + if isinstance(data, TrackedArray): # don't bother to re-track TrackedArray tracked = data - else: - # otherwise wrap data + elif isinstance(data, (np.ndarray, list, set, tuple)): + # wrap data if it is array-like tracked = tracked_array(data) - # apply our mutability setting + else: + try: + # will raise if this is not a hashable type + hash(data) + except BaseException: + raise ValueError("unhashable `{key}:{type(data)}`") + tracked = data - if hasattr(self, '_mutable'): + # apply our mutability setting + if hasattr(self, "_mutable"): # apply our mutability setting only if it was explicitly set tracked.mutable = self.mutable # store data @@ -678,7 +673,7 @@ def __len__(self): def update(self, values): if not isinstance(values, dict): - raise ValueError('Update only implemented for dicts') + raise ValueError("Update only implemented for dicts") for key, value in values.items(): self[key] = value @@ -693,8 +688,13 @@ def __hash__(self): """ # only hash values that aren't None # or if they are arrays require length greater than zero - return hash_fast(np.array( - [hash(v) for v in self.data.values() - if v is not None and - (not hasattr(v, '__len__') or len(v) > 0)], - dtype=np.int64).tobytes()) + return hash_fast( + np.array( + [ + hash(v) + for v in self.data.values() + if v is not None and (not hasattr(v, "__len__") or len(v) > 0) + ], + dtype=np.int64, + ).tobytes() + ) diff --git a/trimesh/voxel/base.py b/trimesh/voxel/base.py index 5d2f361c2..e4e6f10f5 100644 --- a/trimesh/voxel/base.py +++ b/trimesh/voxel/base.py @@ -12,39 +12,33 @@ from ..constants import log from ..exchange.binvox import export_binvox from ..parent import Geometry -from ..typed import NDArray, float64 from . import morphology, ops, transforms from .encoding import DenseEncoding, Encoding class VoxelGrid(Geometry): - def __init__(self, encoding, transform=None, metadata=None): - """ - Store 3D voxels. + """ + Store 3D voxels. 
+ """ - Parameters - -------------- - encoding - A numpy array of voxels, or an encoding object - """ + def __init__(self, encoding, transform=None, metadata=None): if transform is None: transform = np.eye(4) if isinstance(encoding, np.ndarray): encoding = DenseEncoding(encoding.astype(bool)) if encoding.dtype != bool: raise ValueError("encoding must have dtype bool") - self._data = caching.DataStore() - self._cache = caching.Cache(id_function=self._data.__hash__) - self._transform = transforms.Transform(transform, datastore=self._data) self.encoding = encoding - self.metadata = {} + self._transform = transforms.Transform(transform, datastore=self._data) + self._cache = caching.Cache(id_function=self._data.__hash__) + self.metadata = {} # update the mesh metadata with passed metadata if isinstance(metadata, dict): self.metadata.update(metadata) elif metadata is not None: - raise ValueError(f"metadata should be a dict or None, not {type(metadata)}") + raise ValueError("metadata should be a dict or None, got %s" % str(metadata)) def __hash__(self): """ @@ -81,7 +75,7 @@ def encoding(self, encoding): self._data["encoding"] = encoding @property - def transform(self) -> NDArray[float64]: + def transform(self): """4x4 homogeneous transformation matrix.""" return self._transform.matrix @@ -95,12 +89,6 @@ def translation(self): """Location of voxel at [0, 0, 0].""" return self._transform.translation - @property - def origin(self): - """Deprecated. Use `self.translation`.""" - # DEPRECATED. Use translation instead - return self.translation - @property def scale(self): """ @@ -204,8 +192,7 @@ def is_filled(self, point): point = np.asanyarray(point) indices = self.points_to_indices(point) in_range = np.logical_and( - np.all(indices < np.array(self.shape), axis=-1), - np.all(indices >= 0, axis=-1), + np.all(indices < np.array(self.shape), axis=-1), np.all(indices >= 0, axis=-1) ) is_filled = np.zeros_like(in_range) diff --git a/trimesh/voxel/creation.py b/trimesh/voxel/creation.py index 85ee37507..2acc8bc88 100644 --- a/trimesh/voxel/creation.py +++ b/trimesh/voxel/creation.py @@ -39,11 +39,7 @@ def voxelize_subdivide(mesh, pitch, max_iter=10, edge_factor=2.0): # get the same mesh sudivided so every edge is shorter # than a factor of our pitch v, f, idx = remesh.subdivide_to_size( - mesh.vertices, - mesh.faces, - max_edge=max_edge, - max_iter=max_iter, - return_index=True, + mesh.vertices, mesh.faces, max_edge=max_edge, max_iter=max_iter, return_index=True ) # convert the vertices to their voxel grid position diff --git a/trimesh/voxel/encoding.py b/trimesh/voxel/encoding.py index 1f21d6c0d..f350962f9 100644 --- a/trimesh/voxel/encoding.py +++ b/trimesh/voxel/encoding.py @@ -4,7 +4,7 @@ import numpy as np from .. import caching -from ..util import ABC, log +from ..util import ABC from . 
import runlength try: @@ -283,9 +283,7 @@ def __init__(self, indices, values, shape=None): raise ValueError("indices must be 2D, got shaped %s" % str(indices.shape)) if data["values"].shape != (indices.shape[0],): raise ValueError( - "values and indices shapes inconsistent: {} and {}".format( - data["values"], data["indices"] - ) + "values and indices shapes inconsistent: {} and {}".format(data["values"], data["indices"]) ) if shape is None: self._shape = tuple(data["indices"].max(axis=0) + 1) @@ -454,22 +452,6 @@ def shape(self): def dtype(self): return self._dtype - def crc(self): - log.warning( - "`geometry.crc()` is deprecated and will " - + "be removed in October 2023: replace " - + "with `geometry.__hash__()` or `hash(geometry)`" - ) - return self.__hash__() - - def hash(self): - log.warning( - "`geometry.hash()` is deprecated and will " - + "be removed in October 2023: replace " - + "with `geometry.__hash__()` or `hash(geometry)`" - ) - return self.__hash__() - def __hash__(self): """ Get the hash of the current transformation matrix. From a22222356dac5ee2333270bbf92133862883fe97 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 15:59:57 -0400 Subject: [PATCH 74/84] run black on trimesh-setup --- docker/trimesh-setup | 133 ++++++++++++++++++++++--------------------- 1 file changed, 67 insertions(+), 66 deletions(-) mode change 100644 => 100755 docker/trimesh-setup diff --git a/docker/trimesh-setup b/docker/trimesh-setup old mode 100644 new mode 100755 index 53a70c289..7646d27b3 --- a/docker/trimesh-setup +++ b/docker/trimesh-setup @@ -6,17 +6,17 @@ environment for `trimesh` in a Debian Docker image. It probably isn't useful for most people unless you are running this exact configuration. """ -import os -import sys +import argparse import json +import logging +import os import shutil +import subprocess +import sys import tarfile import tempfile -import logging -import argparse -import subprocess -from io import BytesIO from fnmatch import fnmatch +from io import BytesIO # define system packages for our debian docker image # someday possibly add this to the `pyproject.toml` config @@ -74,7 +74,7 @@ config_json = """ """ -log = logging.getLogger('trimesh') +log = logging.getLogger("trimesh") log.setLevel(logging.DEBUG) log.addHandler(logging.StreamHandler(sys.stdout)) @@ -94,20 +94,18 @@ def apt(packages): return # start with updating the sources - log.debug(subprocess.check_output( - 'apt-get update -qq'.split()).decode('utf-8')) + log.debug(subprocess.check_output("apt-get update -qq".split()).decode("utf-8")) # the install command - install = 'apt-get install -qq --no-install-recommends'.split() + install = "apt-get install -qq --no-install-recommends".split() # de-duplicate package list install.extend(set(packages)) # call the install command - log.debug(subprocess.check_output(install).decode('utf-8')) + log.debug(subprocess.check_output(install).decode("utf-8")) # delete any temporary files - subprocess.check_output( - 'rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*'.split()) + subprocess.check_output("rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*".split()) def argsort(items): @@ -125,8 +123,7 @@ def argsort(items): index : int Index such `items[index] == min(items)` """ - return [i for (v, i) in sorted( - (v, i) for (i, v) in enumerate(items))] + return [i for (v, i) in sorted((v, i) for (i, v) in enumerate(items))] def fetch(url, sha256): @@ -152,13 +149,13 @@ def fetch(url, sha256): data = urlopen(url).read() hashed = hashlib.sha256(data).hexdigest() 
if hashed != sha256: - log.error(f'`{hashed}` != `{sha256}`') - raise ValueError('sha256 hash does not match!') + log.error(f"`{hashed}` != `{sha256}`") + raise ValueError("sha256 hash does not match!") return data -def copy_to_path(file_path, prefix='~'): +def copy_to_path(file_path, prefix="~"): """ Copy an executable file onto `PATH`, typically one of the options in the current user's home directory. @@ -172,44 +169,50 @@ def copy_to_path(file_path, prefix='~'): typically `~` for `/home/{current_user}`. """ # get the full path of the requested file - source = os.path.abspath( - os.path.expanduser(file_path)) + source = os.path.abspath(os.path.expanduser(file_path)) # get the file name file_name = os.path.split(source)[-1] # make sure the source file is readable and not empty - with open(source, 'rb') as f: + with open(source, "rb") as f: file_data = f.read() # check for empty files if len(file_data) == 0: - raise ValueError('empty file: {}'.format(file_path)) + raise ValueError(f"empty file: {file_path}") # get all locations in PATH - candidates = [os.path.abspath(os.path.expanduser(i)) - for i in os.environ['PATH'].split(':')] + candidates = [ + os.path.abspath(os.path.expanduser(i)) for i in os.environ["PATH"].split(":") + ] # cull candidates that don't start with our prefix if prefix is not None: # expand shortcut for user's home directory prefix = os.path.abspath(os.path.expanduser(prefix)) # if we are the root user don't cull the available copy locations - if not prefix.endswith('root'): + if not prefix.endswith("root"): # cull non-prefixed path entries candidates = [c for c in candidates if c.startswith(prefix)] + # we want to encourage it to put stuff in the home directory + encourage = [os.path.expanduser("~"), ".local"] + + # rank the candidate paths + scores = [len(c) - sum(len(e) for e in encourage if e in c) for c in candidates] + # try writing to the shortest paths first - for index in argsort([len(c) for c in candidates]): + for index in argsort(scores): path = os.path.join(candidates[index], file_name) try: shutil.copy(source, path) - print('wrote `{}`'.format(path)) + print(f"wrote `{path}`") return path except BaseException: pass # none of our candidates worked - raise ValueError('unable to write to file') + raise ValueError("unable to write to file") def extract(tar, member, path, chmod): @@ -219,7 +222,7 @@ def extract(tar, member, path, chmod): if os.path.isdir(path): return data = tar.extractfile(member=member) - if not hasattr(data, 'read'): + if not hasattr(data, "read"): return data = data.read() if len(data) == 0: @@ -228,7 +231,7 @@ def extract(tar, member, path, chmod): # make sure root path exists os.makedirs(os.path.dirname(path), exist_ok=True) - with open(path, 'wb') as f: + with open(path, "wb") as f: f.write(data) if chmod is not None: @@ -236,13 +239,15 @@ def extract(tar, member, path, chmod): os.chmod(path, int(str(chmod), base=8)) -def handle_fetch(url, - sha256, - target, - chmod=None, - extract_skip=None, - extract_only=None, - strip_components=0): +def handle_fetch( + url, + sha256, + target, + chmod=None, + extract_skip=None, + extract_only=None, + strip_components=0, +): """ A macro to fetch a remote resource (usually an executable) and move it somewhere on the file system. @@ -267,14 +272,14 @@ def handle_fetch(url, in the archive, i.e. 
at `1`, `a/b/c` is extracted to `target/b/c` """ # get the raw bytes - log.debug(f'fetching: `{url}`') + log.debug(f"fetching: `{url}`") raw = fetch(url=url, sha256=sha256) if len(raw) == 0: - raise ValueError(f'{url} is empty!') + raise ValueError(f"{url} is empty!") # if we have an archive that tar supports - if url.endswith(('.tar.gz', '.tar.xz', 'tar.bz2')): + if url.endswith((".tar.gz", ".tar.xz", "tar.bz2")): # mode needs to know what type of compression mode = f'r:{url.split(".")[-1]}' # get the archive @@ -285,46 +290,45 @@ def handle_fetch(url, for member in tar.getmembers(): # final name after stripping components - name = '/'.join(member.name.split('/')[strip_components:]) + name = "/".join(member.name.split("/")[strip_components:]) # if any of the skip patterns match continue if any(fnmatch(name, p) for p in extract_skip): - log.debug(f'skipping: `{name}`') + log.debug(f"skipping: `{name}`") continue if extract_only is None: path = os.path.join(target, name) - log.debug(f'extracting: `{path}`') + log.debug(f"extracting: `{path}`") extract(tar=tar, member=member, path=path, chmod=chmod) else: - name = name.split('/')[-1] + name = name.split("/")[-1] if name == extract_only: - if target.lower() == '$path': + if target.lower() == "$path": with tempfile.TemporaryDirectory() as D: path = os.path.join(D, name) - log.debug(f'extracting `{path}`') - extract( - tar=tar, member=member, path=path, chmod=chmod) + log.debug(f"extracting `{path}`") + extract(tar=tar, member=member, path=path, chmod=chmod) copy_to_path(path) return path = os.path.join(target, name) - log.debug(f'extracting `{path}`') + log.debug(f"extracting `{path}`") extract(tar=tar, member=member, path=path, chmod=chmod) return else: # a single file - name = url.split('/')[-1].strip() - if target.lower() == '$path': + name = url.split("/")[-1].strip() + if target.lower() == "$path": with tempfile.TemporaryDirectory() as D: temp_path = os.path.join(D, name) - with open(temp_path, 'wb') as f: + with open(temp_path, "wb") as f: f.write(raw) # move the file somewhere on the path path = copy_to_path(temp_path) else: path = target - with open(path, 'wb') as f: + with open(path, "wb") as f: f.write(raw) # apply chmod if requested @@ -334,39 +338,36 @@ def load_config(): - """ - """ + """ """ return json.loads(config_json) -if __name__ == '__main__': - +if __name__ == "__main__": config = load_config() options = set() for v in config.values(): options.update(v.keys()) - parser = argparse.ArgumentParser( - description='Install system packages for trimesh.') + parser = argparse.ArgumentParser(description="Install system packages for trimesh.") parser.add_argument( - '--install', - type=str, - action='append', - help=f'Install metapackages: {options}') + "--install", type=str, action="append", help=f"Install metapackages: {options}" ) args = parser.parse_args() # collect `apt-get install`-able packages apt_select = [] - handlers = {'fetch': lambda x: handle_fetch(**x), - 'apt': lambda x: apt_select.extend(x)} + handlers = { + "fetch": lambda x: handle_fetch(**x), + "apt": lambda x: apt_select.extend(x), + } # allow comma delimiters and de-duplicate if args.install is None: parser.print_help() exit() else: - select = set(' '.join(args.install).replace(',', ' ').split()) + select = set(" ".join(args.install).replace(",", " ").split()) log.debug(f'installing metapackages: `{", ".join(select)}`') From 40b468987a05f9a8878155a2282c1ea99fe183ae Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep
2023 16:03:39 -0400 Subject: [PATCH 75/84] simplify arraylike --- trimesh/base.py | 2 +- trimesh/path/arc.py | 4 ++-- trimesh/typed.py | 30 ++++-------------------------- 3 files changed, 7 insertions(+), 29 deletions(-) diff --git a/trimesh/base.py b/trimesh/base.py index 5ff980828..25c3d93d5 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -2987,7 +2987,7 @@ def intersection( ) return result - def contains(self, points: ArrayLike[float64]) -> NDArray[bool]: + def contains(self, points: ArrayLike) -> NDArray[bool]: """ Given an array of points determine whether or not they are inside the mesh. This raises an error if called on a diff --git a/trimesh/path/arc.py b/trimesh/path/arc.py index 271d0e807..fff7c1a59 100644 --- a/trimesh/path/arc.py +++ b/trimesh/path/arc.py @@ -6,7 +6,7 @@ from ..constants import log from ..constants import res_path as res from ..constants import tol_path as tol -from ..typed import ArrayLike, FloatLike, NDArray, Optional, float64 +from ..typed import ArrayLike, NDArray, Optional, float64 # floating point zero _TOL_ZERO = 1e-12 @@ -36,7 +36,7 @@ def __getitem__(self, item): def arc_center( - points: ArrayLike[FloatLike], return_normal: bool = True, return_angle: bool = True + points: ArrayLike, return_normal: bool = True, return_angle: bool = True ) -> ArcInfo: """ Given three points on a 2D or 3D arc find the center, diff --git a/trimesh/typed.py b/trimesh/typed.py index 435b2049f..3226760c1 100644 --- a/trimesh/typed.py +++ b/trimesh/typed.py @@ -1,35 +1,13 @@ -from typing import List, Optional, Sequence, Tuple, Union - -import numpy as np +from typing import List, Optional, Sequence, Tuple # our default integer and floating point types from numpy import float64, int64 try: - from numpy.typing import NDArray + from numpy.typing import ArrayLike, NDArray except BaseException: NDArray = Sequence - -# for input arrays we want to say "list[int], ndarray[int64], etc" -# all the integer types -IntLike = Union[ - int, - np.int8, - np.int16, - np.int32, - int64, - np.intc, - np.intp, - np.uint8, - np.uint16, - np.uint32, - np.uint64, -] - -FloatLike = Union[float, np.float16, np.float32, float64, np.float_] -BoolLike = Union[bool, np.bool_] - -ArrayLike = Sequence + ArrayLike = Sequence -__all__ = ["NDArray", "ArrayLike", "Optional", "FloatLike", "IntLike", "BoolLike", "List", "Tuple"] +__all__ = ["NDArray", "ArrayLike", "Optional", "List", "Tuple", "float64", "int64"] From e95ccad2196eefb8c88df61cb4d17c54b4759fde Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 16:06:49 -0400 Subject: [PATCH 76/84] fix test_bounds --- trimesh/base.py | 3 +-- trimesh/caching.py | 3 ++- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/trimesh/base.py b/trimesh/base.py index 25c3d93d5..ef28adad3 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -2732,9 +2732,8 @@ def mass_properties(self) -> Dict: 'center_mass' : Center of mass location, in global coordinate system """ # if the density or center of mass was overridden they will be put into data - density = self._data.data.get("density", [None])[0] + density = self._data.data.get("density", None) center_mass = self._data.data.get("center_mass", None) - mass = triangles.mass_properties( triangles=self.triangles, crosses=self.triangles_cross, diff --git a/trimesh/caching.py b/trimesh/caching.py index 2a1c11484..c05f6d871 100644 --- a/trimesh/caching.py +++ b/trimesh/caching.py @@ -594,7 +594,8 @@ def mutable(self, value): is_mutable = bool(value) # apply the flag to any data stored for v 
in self.data.values(): - v.mutable = value + if isinstance(v, TrackedArray): + v.mutable = value # save the mutable setting self._mutable = is_mutable From 19a7301b2f439849e5194127e6617aa8342f0e39 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 16:14:30 -0400 Subject: [PATCH 77/84] update points --- trimesh/base.py | 38 +++++++------- trimesh/points.py | 128 ++++++++++++++++++++++++---------------------- 2 files changed, 86 insertions(+), 80 deletions(-) diff --git a/trimesh/base.py b/trimesh/base.py index ef28adad3..555198d02 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -307,7 +307,7 @@ def faces(self) -> NDArray[int64]: faces : (n, 3) int64 References for `self.vertices` for triangles. """ - return self._data.get("faces", np.empty(shape=(0, 3), dtype=np.int64)) + return self._data.get("faces", np.empty(shape=(0, 3), dtype=int64)) @faces.setter def faces(self, values: Union[List[List[int]], NDArray[int64]]): @@ -321,8 +321,8 @@ def faces(self, values: Union[List[List[int]], NDArray[int64]]): """ if values is None or len(values) == 0: return self._data.data.pop("faces", None) - if not (isinstance(values, np.ndarray) and values.dtype == np.int64): - values = np.asanyarray(values, dtype=np.int64) + if not (isinstance(values, np.ndarray) and values.dtype == int64): + values = np.asanyarray(values, dtype=int64) # automatically triangulate quad faces if len(values.shape) == 2 and values.shape[1] != 3: @@ -355,7 +355,7 @@ def face_normals(self): Returns ----------- - normals : (len(self.faces), 3) np.float64 + normals : (len(self.faces), 3) float64 Normal vectors of each face """ # check shape of cached normals @@ -368,7 +368,7 @@ def face_normals(self): # if we have no faces exit early if faces is None or len(faces) == 0: - return np.array([], dtype=np.int64).reshape((0, 3)) + return np.array([], dtype=int64).reshape((0, 3)) # if the shape of cached normals equals the shape of faces return if np.shape(cached) == np.shape(faces): @@ -389,7 +389,7 @@ def face_normals(self): return normals # make a padded list of normals for correct shape - padded = np.zeros((len(self.triangles), 3), dtype=np.float64) + padded = np.zeros((len(self.triangles), 3), dtype=float64) padded[valid] = normals # put calculated face normals into cache manually @@ -411,7 +411,7 @@ def face_normals(self, values): if values is None: return # make sure candidate face normals are C-contiguous float - values = np.asanyarray(values, order="C", dtype=np.float64) + values = np.asanyarray(values, order="C", dtype=float64) # face normals need to correspond to faces if len(values) == 0 or values.shape != self.faces.shape: log.debug("face_normals incorrect shape, ignoring!") @@ -454,7 +454,7 @@ def vertices(self): vertices : (n, 3) float Points in cartesian space referenced by self.faces """ - return self._data.get("vertices", np.empty(shape=(0, 3), dtype=np.float64)) + return self._data.get("vertices", np.empty(shape=(0, 3), dtype=float64)) @vertices.setter def vertices(self, values): @@ -466,7 +466,9 @@ def vertices(self, values): values : (n, 3) float Points in space """ - self._data["vertices"] = np.asanyarray(values, order="C", dtype=np.float64) + if values is None or len(values) == 0: + return self._data.data.pop("vertices", None) + self._data["vertices"] = np.asanyarray(values, order="C", dtype=float64) @caching.cache_decorator def vertex_normals(self): @@ -505,7 +507,7 @@ def vertex_normals(self, values: NDArray[float64]): Unit normal vectors for each vertex """ if values is not None: - 
values = np.asanyarray(values, order="C", dtype=np.float64) + values = np.asanyarray(values, order="C", dtype=float64) if values.shape == self.vertices.shape: # check to see if they assigned all zeros if values.ptp() < tol.merge: @@ -635,7 +637,7 @@ def center_mass(self, value): center_mass : (3, ) float Volumetric center of mass of the mesh. """ - value = np.array(value, dtype=np.float64) + value = np.array(value, dtype=float64) if value.shape != (3,): raise ValueError("shape must be (3,) float!") self._data["center_mass"] = value @@ -1220,7 +1222,7 @@ def update_vertices( # create the inverse mask if not passed if inverse is None: - inverse = np.zeros(len(self.vertices), dtype=np.int64) + inverse = np.zeros(len(self.vertices), dtype=int64) if mask.dtype.kind == "b": inverse[mask] = np.arange(mask.sum()) elif mask.dtype.kind == "i": @@ -1784,7 +1786,7 @@ def facets_area(self) -> NDArray[float64]: # use native python sum in tight loop as opposed to array.sum() # as in this case the lower function call overhead of # native sum provides roughly a 50% speedup - areas = np.array([sum(area_faces[i]) for i in self.facets], dtype=np.float64) + areas = np.array([sum(area_faces[i]) for i in self.facets], dtype=float64) return areas @caching.cache_decorator @@ -2367,12 +2369,12 @@ def unwrap(self, image=None): export = result.export(file_type="obj") uv_recon = np.array( [L[3:].split() for L in str.splitlines(export) if L.startswith("vt ")], - dtype=np.float64, + dtype=float64, ) assert np.allclose(uv_recon, uv) v_recon = np.array( [L[2:].split() for L in str.splitlines(export) if L.startswith("v ")], - dtype=np.float64, + dtype=float64, ) assert np.allclose(v_recon, self.vertices[vmap]) @@ -2430,7 +2432,7 @@ def remove_unreferenced_vertices(self) -> None: referenced = np.zeros(len(self.vertices), dtype=bool) referenced[self.faces] = True - inverse = np.zeros(len(self.vertices), dtype=np.int64) + inverse = np.zeros(len(self.vertices), dtype=int64) inverse[referenced] = np.arange(referenced.sum()) self.update_vertices(mask=referenced, inverse=inverse) @@ -2441,7 +2443,7 @@ def unmerge_vertices(self) -> None: three unique vertex indices and no faces are adjacent. """ # new faces are incrementing so every vertex is unique - faces = np.arange(len(self.faces) * 3, dtype=np.int64).reshape((-1, 3)) + faces = np.arange(len(self.faces) * 3, dtype=int64).reshape((-1, 3)) # use update_vertices to apply mask to # all properties that are per-vertex @@ -2465,7 +2467,7 @@ def apply_transform(self, matrix: NDArray[float64]) -> "Trimesh": Homogeneous transformation matrix """ # get c-order float64 matrix - matrix = np.asanyarray(matrix, order="C", dtype=np.float64) + matrix = np.asanyarray(matrix, order="C", dtype=float64) # only support homogeneous transformations if matrix.shape != (4, 4): diff --git a/trimesh/points.py b/trimesh/points.py index 76909ec13..b03fd01af 100644 --- a/trimesh/points.py +++ b/trimesh/points.py @@ -7,6 +7,7 @@ import copy import numpy as np +from numpy import float64 from . import caching, grouping, transformations, util from .constants import tol @@ -15,9 +16,7 @@ from .visual.color import VertexColor -def point_plane_distance(points, - plane_normal, - plane_origin=None): +def point_plane_distance(points, plane_normal, plane_origin=None): """ The minimum perpendicular distance of a point to a plane. 
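The hunks around `point_plane_distance` only reflow its signature, so the behavior is unchanged: the signed perpendicular distance it documents reduces to a dot product of origin-relative points with the unit plane normal. A minimal standalone sketch of that computation, using hypothetical sample arrays that are not part of this patch:

import numpy as np

# points relative to the plane origin, projected onto the unit normal,
# yield the signed perpendicular distance described in the docstring
points = np.array([[0.0, 0.0, 1.5], [0.0, 0.0, -2.0]])
plane_normal = np.array([0.0, 0.0, 1.0])
plane_origin = np.zeros(3)
distances = np.dot(points - plane_origin, plane_normal)
assert np.allclose(distances, [1.5, -2.0])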
@@ -35,7 +34,7 @@ def point_plane_distance(points, distances : (n,) float Distance from point to plane """ - points = np.asanyarray(points, dtype=np.float64) + points = np.asanyarray(points, dtype=float64) if plane_origin is None: w = points else: @@ -83,7 +82,7 @@ def plane_fit(points): Unit normal vector of plane """ # make sure input is numpy array - points = np.asanyarray(points, dtype=np.float64) + points = np.asanyarray(points, dtype=float64) assert points.ndim == 2 or points.ndim == 3 # with only one point set, np.dot is faster if points.ndim == 2: @@ -99,17 +98,14 @@ def plane_fit(points): # points offset by the plane origin x = points - C[:, None, :] # create a (p, 3, 3) matrix - M = np.einsum('pnd, pnm->pdm', x, x) + M = np.einsum("pnd, pnm->pdm", x, x) # run SVD N = np.linalg.svd(M)[0][..., -1] # return the centroid(s) and normal(s) return C, N -def radial_sort(points, - origin, - normal, - start=None): +def radial_sort(points, origin, normal, start=None): """ Sorts a set of points radially (by angle) around an axis specified by origin and normal vector. @@ -141,23 +137,24 @@ def radial_sort(points, else: normal, start = util.unitize([normal, start]) if np.abs(1 - np.abs(np.dot(normal, start))) < tol.zero: - raise ValueError('start must not parallel with normal') + raise ValueError("start must not parallel with normal") axis0 = np.cross(start, normal) axis1 = np.cross(axis0, normal) vectors = points - origin # calculate the angles of the points on the axis - angles = np.arctan2(np.dot(vectors, axis0), - np.dot(vectors, axis1)) + angles = np.arctan2(np.dot(vectors, axis0), np.dot(vectors, axis1)) # return the points sorted by angle return points[angles.argsort()[::-1]] -def project_to_plane(points, - plane_normal, - plane_origin, - transform=None, - return_transform=False, - return_planar=True): +def project_to_plane( + points, + plane_normal, + plane_origin, + transform=None, + return_transform=False, + return_planar=True, +): """ Project (n, 3) points onto a plane. 
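The `plane_fit` hunk above is likewise formatting-only, but the einsum it touches is the heart of the fit: it builds the per-set scatter matrix whose singular vector with the smallest singular value is the fitted normal. A quick sanity sketch, assuming `trimesh.points.plane_fit` keeps the `(centroid, normal)` return shown in the surrounding context:

import numpy as np
import trimesh

# exactly planar samples: the fit should recover the XY plane
points = np.random.random((100, 3))
points[:, 2] = 0.0

C, N = trimesh.points.plane_fit(points)
assert np.isclose(C[2], 0.0)       # centroid lies on the fitted plane
assert np.isclose(abs(N[2]), 1.0)  # recovered normal is +/- Z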
@@ -178,13 +175,13 @@ def project_to_plane(points, """ if np.all(np.abs(plane_normal) < tol.zero): - raise NameError('Normal must be nonzero!') + raise NameError("Normal must be nonzero!") if transform is None: transform = plane_transform(plane_origin, plane_normal) transformed = transformations.transform_points(points, transform) - transformed = transformed[:, 0:(3 - int(return_planar))] + transformed = transformed[:, 0 : (3 - int(return_planar))] if return_transform: polygon_to_3D = np.linalg.inv(transform) @@ -215,7 +212,7 @@ def remove_close(points, radius): tree = cKDTree(points) # get the index of every pair of points closer than our radius - pairs = tree.query_pairs(radius, output_type='ndarray') + pairs = tree.query_pairs(radius, output_type="ndarray") # how often each vertex index appears in a pair # this is essentially a cheaply computed "vertex degree" @@ -267,7 +264,7 @@ def k_means(points, k, **kwargs): from scipy.cluster.vq import kmeans from scipy.spatial import cKDTree - points = np.asanyarray(points, dtype=np.float64) + points = np.asanyarray(points, dtype=float64) points_std = points.std(axis=0) points_std[points_std < tol.zero] = 1 whitened = points / points_std @@ -310,10 +307,10 @@ def tsp(points, start=0): The euclidean distance between points in traversal """ # points should be float - points = np.asanyarray(points, dtype=np.float64) + points = np.asanyarray(points, dtype=float64) if len(points.shape) != 2: - raise ValueError('points must be (n, dimension)!') + raise ValueError("points must be (n, dimension)!") # start should be an index start = int(start) @@ -326,7 +323,7 @@ def tsp(points, start=0): traversal = np.zeros(len(points), dtype=np.int64) - 1 traversal[0] = start # list of distances - distances = np.zeros(len(points) - 1, dtype=np.float64) + distances = np.zeros(len(points) - 1, dtype=float64) # a mask of indexes in order index_mask = np.arange(len(points), dtype=np.int64) @@ -343,8 +340,7 @@ def tsp(points, start=0): # do NlogN distance query # use dot instead of .sum(axis=1) or np.linalg.norm # as it is faster, also don't square root here - dist = np.dot((points[unvisited] - current) ** 2, - sum_ones) + dist = np.dot((points[unvisited] - current) ** 2, sum_ones) # minimum distance index min_index = dist.argmin() @@ -377,19 +373,19 @@ def plot_points(points, show=True): import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D # NOQA - points = np.asanyarray(points, dtype=np.float64) + points = np.asanyarray(points, dtype=float64) if len(points.shape) != 2: - raise ValueError('Points must be (n, 2|3)!') + raise ValueError("Points must be (n, 2|3)!") if points.shape[1] == 3: fig = plt.figure() - ax = fig.add_subplot(111, projection='3d') + ax = fig.add_subplot(111, projection="3d") ax.scatter(*points.T) elif points.shape[1] == 2: plt.scatter(*points.T) else: - raise ValueError(f'points not 2D/3D: {points.shape}') + raise ValueError(f"points not 2D/3D: {points.shape}") if show: plt.show() @@ -424,8 +420,8 @@ def __init__(self, vertices, colors=None, metadata=None, **kwargs): # load vertices self.vertices = vertices - if 'vertex_colors' in kwargs and colors is None: - colors = kwargs['vertex_colors'] + if "vertex_colors" in kwargs and colors is None: + colors = kwargs["vertex_colors"] # save visual data to vertex color object self.visual = VertexColor(colors=colors, obj=self) @@ -520,8 +516,7 @@ def merge_vertices(self): self.vertices = self.vertices[unique] # apply unique mask to colors - if (self.colors is not None and - len(self.colors) == 
len(inverse)): + if self.colors is not None and len(self.colors) == len(inverse): self.colors = self.colors[unique] def apply_transform(self, transform): @@ -534,8 +529,7 @@ def apply_transform(self, transform): transform : (4, 4) float Homogeneous transformation to apply to PointCloud """ - self.vertices = transformations.transform_points( - self.vertices, matrix=transform) + self.vertices = transformations.transform_points(self.vertices, matrix=transform) return self @property def bounds(self): @@ -548,8 +542,7 @@ def bounds(self): bounds : (2, 3) float Minimum, Maximum vertex """ - return np.array([self.vertices.min(axis=0), - self.vertices.max(axis=0)]) + return np.array([self.vertices.min(axis=0), self.vertices.max(axis=0)]) @property def extents(self): @@ -585,18 +578,21 @@ def vertices(self): vertices : (n, 3) float Points in the PointCloud """ - return self._data['vertices'] + return self._data.get("vertices", np.empty(shape=(0, 3), dtype=float64)) @vertices.setter - def vertices(self, data): - if data is None: - self._data['vertices'] = None - else: - # we want to copy data for new object - data = np.array(data, dtype=np.float64, copy=True) - if not util.is_shape(data, (-1, 3)): - raise ValueError('Point clouds must be (n, 3)!') - self._data['vertices'] = data + def vertices(self, values): + """ + Assign vertex values to the point cloud. + + Parameters + -------------- + values : (n, 3) float + Points in space + """ + if values is None or len(values) == 0: + return self._data.data.pop("vertices", None) + self._data["vertices"] = np.asanyarray(values, order="C", dtype=float64) @property def colors(self): @@ -627,6 +623,7 @@ def kdtree(self): """ from scipy.spatial import cKDTree + tree = cKDTree(self.vertices.view(np.ndarray)) return tree @@ -641,6 +638,7 @@ def convex_hull(self): A watertight mesh of the hull of the points """ from . import convex + return convex.convex_hull(self.vertices) def scene(self): @@ -653,6 +651,7 @@ def scene(self): Scene object containing this PointCloud """ from .scene.scene import Scene + return Scene(self) def show(self, **kwargs): @@ -676,10 +675,8 @@ def export(self, file_obj=None, file_type=None, **kwargs): If file name is passed this is not required """ from .exchange.export import export_mesh - return export_mesh(self, - file_obj=file_obj, - file_type=file_type, - **kwargs) + + return export_mesh(self, file_obj=file_obj, file_type=file_type, **kwargs) def query(self, input_points, **kwargs): """ @@ -694,8 +691,8 @@ def query(self, input_points, **kwargs): Result of the query.
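Examples
--------------
A short usage sketch with hypothetical random arrays; only
the call signature comes from this method, and the returned
object's attributes are deliberately not inspected here.

>>> cloud = PointCloud(np.random.random((100, 3)))
>>> result = cloud.query(np.random.random((10, 3)))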
""" from .proximity import query_from_points - return query_from_points( - self.vertices, input_points, self.kdtree, **kwargs) + + return query_from_points(self.vertices, input_points, self.kdtree, **kwargs) def __add__(self, other): if len(other.colors) == len(self.colors) == 0: @@ -703,10 +700,17 @@ def __add__(self, other): else: # preserve colors # if one point cloud has no color property use black - other_colors = [[0, 0, 0, 255]] * \ - len(other.vertices) if len(other.colors) == 0 else other.colors - self_colors = [[0, 0, 0, 255]] * \ - len(self.vertices) if len(self.colors) == 0 else self.colors + other_colors = ( + [[0, 0, 0, 255]] * len(other.vertices) + if len(other.colors) == 0 + else other.colors + ) + self_colors = ( + [[0, 0, 0, 255]] * len(self.vertices) + if len(self.colors) == 0 + else self.colors + ) colors = np.vstack((self_colors, other_colors)) - return PointCloud(vertices=np.vstack( - (self.vertices, other.vertices)), colors=colors) + return PointCloud( + vertices=np.vstack((self.vertices, other.vertices)), colors=colors + ) From 7c251e02bfc2f4625338622ee200214c0b1eb573 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 16:15:36 -0400 Subject: [PATCH 78/84] remove floatlike --- tests/test_typed.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_typed.py b/tests/test_typed.py index 677bb7869..bd50c835d 100644 --- a/tests/test_typed.py +++ b/tests/test_typed.py @@ -1,10 +1,10 @@ import numpy as np -from trimesh.typed import ArrayLike, FloatLike, NDArray, float64, int64 +from trimesh.typed import ArrayLike, NDArray, float64, int64 # see if we pass mypy -def _check(values: ArrayLike[FloatLike]) -> NDArray[int64]: +def _check(values: ArrayLike) -> NDArray[int64]: return (np.array(values, dtype=float64) * 100).astype(int64) def _run() -> NDArray[int64]: From 3efcbf159a1c6463454b1529d83a30e6a39f7f8c Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 16:51:19 -0400 Subject: [PATCH 79/84] try embed --- tests/test_gltf.py | 716 ++++++++++++++---------------- trimesh/exchange/gltf.py | 920 ++++++++++++++++++++------------------- trimesh/util.py | 2 +- 3 files changed, 797 insertions(+), 841 deletions(-) diff --git a/tests/test_gltf.py b/tests/test_gltf.py index 018e39f19..da3491fca 100644 --- a/tests/test_gltf.py +++ b/tests/test_gltf.py @@ -6,7 +6,7 @@ # Khronos' official file validator # can be installed with the helper script: # `trimesh/docker/builds/gltf_validator.bash` -_gltf_validator = g.trimesh.util.which('gltf_validator') +_gltf_validator = g.trimesh.util.which("gltf_validator") def validate_glb(data, name=None): @@ -26,36 +26,33 @@ def validate_glb(data, name=None): ValueError If Khronos validator reports errors. 
""" - # subprocess options not in old python - if g.PY_VER < (3, 7): - return if _gltf_validator is None: - g.log.warning('no gltf_validator!') + g.log.warning("no gltf_validator!") return - with g.tempfile.NamedTemporaryFile(suffix='.glb') as f: + with g.tempfile.NamedTemporaryFile(suffix=".glb") as f: f.write(data) f.flush() # run the khronos gltf-validator - report = g.subprocess.run( - [_gltf_validator, f.name, '-o'], - capture_output=True) + report = g.subprocess.run([_gltf_validator, f.name, "-o"], capture_output=True) # -o prints JSON to stdout - content = report.stdout.decode('utf-8') + content = report.stdout.decode("utf-8") returncode = report.returncode if returncode != 0: - g.log.error(f'failed on: `{name}`') - g.log.error(f'validator: `{content}`') - g.log.error(f'stderr: `{report.stderr}`') + from IPython import embed + + embed() + g.log.error(f"failed on: `{name}`") + g.log.error(f"validator: `{content}`") + g.log.error(f"stderr: `{report.stderr}`") raise ValueError("gltf_validator failed") class GLTFTest(g.unittest.TestCase): - def test_duck(self): - scene = g.get_mesh('Duck.glb', process=False) + scene = g.get_mesh("Duck.glb", process=False) # should have one mesh assert len(scene.geometry) == 1 @@ -64,31 +61,28 @@ def test_duck(self): geom = next(iter(scene.geometry.values())) # vertex normals should have been loaded - assert 'vertex_normals' in geom._cache.cache + assert "vertex_normals" in geom._cache.cache # should not be watertight assert not geom.is_volume # make sure export doesn't crash - export = scene.export(file_type='glb') - validate_glb(export) + export = scene.export(file_type="glb") + validate_glb(export, "Duck.glb") # check a roundtrip - reloaded = g.trimesh.load( - g.trimesh.util.wrap_as_stream(export), - file_type='glb') + reloaded = g.trimesh.load(g.trimesh.util.wrap_as_stream(export), file_type="glb") # make basic assertions g.scene_equal(scene, reloaded) # if we merge ugly it should now be watertight - geom.merge_vertices( - merge_tex=True, merge_norm=True) + geom.merge_vertices(merge_tex=True, merge_norm=True) assert geom.is_volume def test_strips(self): - a = g.get_mesh('mode5.gltf') + a = g.get_mesh("mode5.gltf") assert len(a.geometry) > 0 - b = g.get_mesh('mode5.gltf', merge_primitives=True) + b = g.get_mesh("mode5.gltf", merge_primitives=True) assert len(b.geometry) > 0 def test_buffer_dedupe(self): @@ -99,102 +93,87 @@ def test_buffer_dedupe(self): box_3.visual.face_colors = [0, 255, 0, 255] tm = g.trimesh.transformations.translation_matrix - scene.add_geometry( - box_1, 'box_1', - transform=tm((1, 1, 1))) - scene.add_geometry( - box_2, 'box_2', - transform=tm((-1, -1, -1))) - scene.add_geometry( - box_3, 'box_3', - transform=tm((-1, 20, -1))) - a = g.json.loads(scene.export( - file_type='gltf')['model.gltf'].decode('utf-8')) - assert len(a['buffers']) <= 3 + scene.add_geometry(box_1, "box_1", transform=tm((1, 1, 1))) + scene.add_geometry(box_2, "box_2", transform=tm((-1, -1, -1))) + scene.add_geometry(box_3, "box_3", transform=tm((-1, 20, -1))) + a = g.json.loads(scene.export(file_type="gltf")["model.gltf"].decode("utf-8")) + assert len(a["buffers"]) <= 3 def test_tex_export(self): # load textured PLY - mesh = g.get_mesh('fuze.ply') - assert hasattr(mesh.visual, 'uv') + mesh = g.get_mesh("fuze.ply") + assert hasattr(mesh.visual, "uv") # make sure export as GLB doesn't crash on scenes - export = mesh.scene().export(file_type='glb', unitize_normals=True) - validate_glb(export) + export = mesh.scene().export(file_type="glb", unitize_normals=True) + 
validate_glb(export, "fuze.ply") # make sure it works on meshes - export = mesh.export(file_type='glb', unitize_normals=True) - validate_glb(export) + export = mesh.export(file_type="glb", unitize_normals=True) + validate_glb(export, "fuze.ply") def test_cesium(self): # A GLTF with a multi- primitive mesh - s = g.get_mesh('CesiumMilkTruck.glb') + s = g.get_mesh("CesiumMilkTruck.glb") # should be one Trimesh object per GLTF "primitive" assert len(s.geometry) == 4 # every geometry displayed once, except wheels twice assert len(s.graph.nodes_geometry) == 5 # make sure export doesn't crash - export = s.export(file_type='glb') + export = s.export(file_type="glb") validate_glb(export) - reloaded = g.trimesh.load( - g.trimesh.util.wrap_as_stream(export), - file_type='glb') + reloaded = g.trimesh.load(g.trimesh.util.wrap_as_stream(export), file_type="glb") # make basic assertions g.scene_equal(s, reloaded) def test_alphamode(self): # A GLTF with combinations of AlphaMode and AlphaCutoff - s = g.get_mesh('AlphaBlendModeTest.glb') + s = g.get_mesh("AlphaBlendModeTest.glb") # should be 5 test geometries - assert len([geom for geom in - s.geometry if geom.startswith('Test')]) == 5 - assert s.geometry['TestCutoffDefaultMesh'].visual.material.alphaMode == 'MASK' - assert s.geometry['TestCutoff25Mesh'].visual.material.alphaMode == 'MASK' - assert s.geometry['TestCutoff25Mesh'].visual.material.alphaCutoff == 0.25 - assert s.geometry['TestCutoff75Mesh'].visual.material.alphaMode == 'MASK' - assert s.geometry['TestCutoff75Mesh'].visual.material.alphaCutoff == 0.75 - assert s.geometry['TestBlendMesh'].visual.material.alphaMode == 'BLEND' + assert len([geom for geom in s.geometry if geom.startswith("Test")]) == 5 + assert s.geometry["TestCutoffDefaultMesh"].visual.material.alphaMode == "MASK" + assert s.geometry["TestCutoff25Mesh"].visual.material.alphaMode == "MASK" + assert s.geometry["TestCutoff25Mesh"].visual.material.alphaCutoff == 0.25 + assert s.geometry["TestCutoff75Mesh"].visual.material.alphaMode == "MASK" + assert s.geometry["TestCutoff75Mesh"].visual.material.alphaCutoff == 0.75 + assert s.geometry["TestBlendMesh"].visual.material.alphaMode == "BLEND" # defaults OPAQUE - assert s.geometry['TestOpaqueMesh'].visual.material.alphaMode is None + assert s.geometry["TestOpaqueMesh"].visual.material.alphaMode is None - export = s.export(file_type='glb') + export = s.export(file_type="glb") validate_glb(export) # roundtrip it - rs = g.trimesh.load( - g.trimesh.util.wrap_as_stream(export), - file_type='glb') + rs = g.trimesh.load(g.trimesh.util.wrap_as_stream(export), file_type="glb") # make basic assertions g.scene_equal(s, rs) # make sure export keeps alpha modes # should be the same - assert len( - [geom for geom in rs.geometry if geom.startswith('Test')]) == 5 - assert rs.geometry['TestCutoffDefaultMesh'].visual.material.alphaMode == 'MASK' - assert rs.geometry['TestCutoff25Mesh'].visual.material.alphaMode == 'MASK' - assert rs.geometry['TestCutoff25Mesh'].visual.material.alphaCutoff == 0.25 - assert rs.geometry['TestCutoff75Mesh'].visual.material.alphaMode == 'MASK' - assert rs.geometry['TestCutoff75Mesh'].visual.material.alphaCutoff == 0.75 - assert rs.geometry['TestBlendMesh'].visual.material.alphaMode == 'BLEND' + assert len([geom for geom in rs.geometry if geom.startswith("Test")]) == 5 + assert rs.geometry["TestCutoffDefaultMesh"].visual.material.alphaMode == "MASK" + assert rs.geometry["TestCutoff25Mesh"].visual.material.alphaMode == "MASK" + assert 
rs.geometry["TestCutoff25Mesh"].visual.material.alphaCutoff == 0.25 + assert rs.geometry["TestCutoff75Mesh"].visual.material.alphaMode == "MASK" + assert rs.geometry["TestCutoff75Mesh"].visual.material.alphaCutoff == 0.75 + assert rs.geometry["TestBlendMesh"].visual.material.alphaMode == "BLEND" # defaults OPAQUE - assert rs.geometry['TestOpaqueMesh'].visual.material.alphaMode is None + assert rs.geometry["TestOpaqueMesh"].visual.material.alphaMode is None def test_units(self): - # Trimesh will store units as a GLTF extra if they # are defined so check that. - original = g.get_mesh('pins.glb') + original = g.get_mesh("pins.glb") # export it as a a GLB file - export = original.export(file_type='glb') + export = original.export(file_type="glb") validate_glb(export) - kwargs = g.trimesh.exchange.gltf.load_glb( - g.trimesh.util.wrap_as_stream(export)) + kwargs = g.trimesh.exchange.gltf.load_glb(g.trimesh.util.wrap_as_stream(export)) # roundtrip it reloaded = g.trimesh.exchange.load.load_kwargs(kwargs) # make basic assertions @@ -203,7 +182,7 @@ def test_units(self): # make assertions on original and reloaded for scene in [original, reloaded]: # units should be stored as an extra - assert scene.units == 'mm' + assert scene.units == "mm" # make sure we have two unique geometries assert len(scene.geometry) == 2 @@ -211,32 +190,27 @@ def test_units(self): assert len(scene.graph.nodes_geometry) == 7 # all meshes should be well constructed - assert all(m.is_volume for m in - scene.geometry.values()) + assert all(m.is_volume for m in scene.geometry.values()) # check unit conversions for fun extents = scene.extents.copy() - as_in = scene.convert_units('in') + as_in = scene.convert_units("in") # should all be exactly mm -> in conversion factor - assert g.np.allclose( - extents / as_in.extents, 25.4, atol=.001) + assert g.np.allclose(extents / as_in.extents, 25.4, atol=0.001) - m = g.get_mesh('testplate.glb') - assert m.units == 'meters' + m = g.get_mesh("testplate.glb") + assert m.units == "meters" def test_basic(self): # split a multibody mesh into a scene - scene = g.trimesh.scene.split_scene( - g.get_mesh('cycloidal.ply')) + scene = g.trimesh.scene.split_scene(g.get_mesh("cycloidal.ply")) # should be 117 geometries assert len(scene.geometry) >= 117 # a dict with {file name: str} - export = scene.export(file_type='gltf') + export = scene.export(file_type="gltf") # load from just resolver - r = g.trimesh.load(file_obj=None, - file_type='gltf', - resolver=export) + r = g.trimesh.load(file_obj=None, file_type="gltf", resolver=export) # will assert round trip is roughly equal g.scene_equal(r, scene) @@ -244,28 +218,26 @@ def test_basic(self): # try loading from a ZIP archive zipped = g.trimesh.util.compress(export) r = g.trimesh.load( - file_obj=g.trimesh.util.wrap_as_stream(zipped), - file_type='zip') + file_obj=g.trimesh.util.wrap_as_stream(zipped), file_type="zip" + ) # try loading from a file name # will require a file path resolver with g.TemporaryDirectory() as d: for file_name, data in export.items(): - with open(g.os.path.join(d, file_name), 'wb') as f: + with open(g.os.path.join(d, file_name), "wb") as f: f.write(data) # load from file path of header GLTF - rd = g.trimesh.load( - g.os.path.join(d, 'model.gltf')) + rd = g.trimesh.load(g.os.path.join(d, "model.gltf")) # will assert round trip is roughly equal g.scene_equal(rd, scene) def test_merge_buffers(self): # split a multibody mesh into a scene - scene = g.trimesh.scene.split_scene( - g.get_mesh('cycloidal.ply')) + scene = 
g.trimesh.scene.split_scene(g.get_mesh("cycloidal.ply")) # export a gltf with the merge_buffers option set to true - export = scene.export(file_type='gltf', merge_buffers=True) + export = scene.export(file_type="gltf", merge_buffers=True) # We should end up with a single .bin and scene.gltf assert len(export.keys()) == 2 @@ -273,28 +245,28 @@ def test_merge_buffers(self): # reload the export reloaded = g.trimesh.exchange.load.load_kwargs( g.trimesh.exchange.gltf.load_gltf( - file_obj=None, - resolver=g.trimesh.visual.resolvers.ZipResolver(export))) + file_obj=None, resolver=g.trimesh.visual.resolvers.ZipResolver(export) + ) + ) # check to make sure the geometry keys are the same assert set(reloaded.geometry.keys()) == set(scene.geometry.keys()) def test_merge_primitives(self): # test to see if the `merge_primitives` logic is working - a = g.get_mesh('CesiumMilkTruck.glb') + a = g.get_mesh("CesiumMilkTruck.glb") assert len(a.geometry) == 4 # should combine the multiple primitives into a single mesh - b = g.get_mesh( - 'CesiumMilkTruck.glb', merge_primitives=True) + b = g.get_mesh("CesiumMilkTruck.glb", merge_primitives=True) assert len(b.geometry) == 2 def test_specular_glossiness(self): - s = g.get_mesh('pyramid.zip') + s = g.get_mesh("pyramid.zip") assert len(s.geometry) > 0 - assert 'GLTF' in s.geometry + assert "GLTF" in s.geometry - mat = s.geometry['GLTF'].visual.material + mat = s.geometry["GLTF"].visual.material assert isinstance(mat, g.trimesh.visual.material.PBRMaterial) color = g.np.array(mat.baseColorTexture)[:, :, :3] @@ -309,9 +281,9 @@ def test_specular_glossiness(self): assert color.dtype == g.np.uint8 assert g.np.allclose(color, [255, 255, 255, 255]) - metallic_roughness = g.np.array( - mat.metallicRoughnessTexture, - dtype=g.np.float32) / 255.0 + metallic_roughness = ( + g.np.array(mat.metallicRoughnessTexture, dtype=g.np.float32) / 255.0 + ) assert metallic_roughness.shape[0] == 84 and metallic_roughness.shape[1] == 71 metallic = metallic_roughness[:, :, 0] @@ -332,11 +304,11 @@ def test_specular_glossiness(self): def test_write_dir(self): # try loading from a file name # will require a file path resolver - original = g.get_mesh('fuze.obj') + original = g.get_mesh("fuze.obj") assert isinstance(original, g.trimesh.Trimesh) s = original.scene() with g.TemporaryDirectory() as d: - path = g.os.path.join(d, 'heyy.gltf') + path = g.os.path.join(d, "heyy.gltf") s.export(file_obj=path) r = g.trimesh.load(path) assert isinstance(r, g.trimesh.Scene) @@ -346,72 +318,60 @@ def test_write_dir(self): def test_merge_primitives_materials(self): # test to see if the `merge_primitives` logic is working - a = g.get_mesh('rgb_cube_with_primitives.gltf', - merge_primitives=True) - assert len(a.geometry['Cube'].visual.material) == 3 + a = g.get_mesh("rgb_cube_with_primitives.gltf", merge_primitives=True) + assert len(a.geometry["Cube"].visual.material) == 3 # what the face materials should be - truth = [0, 0, 0, 0, 1, 1, - 1, 1, 2, 2, 2, 2] - assert g.np.allclose( - a.geometry['Cube'].visual.face_materials, - truth) + truth = [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2] + assert g.np.allclose(a.geometry["Cube"].visual.face_materials, truth) # make sure copying did the things correctly c = a.copy() - assert g.np.allclose( - c.geometry['Cube'].visual.face_materials, - truth) + assert g.np.allclose(c.geometry["Cube"].visual.face_materials, truth) def test_merge_primitives_materials_roundtrip(self): # test to see if gltf loaded with `merge_primitives` # and then exported back # to gltf, produces a valid 
gltf. - a = g.get_mesh('rgb_cube_with_primitives.gltf', - merge_primitives=True) - result = a.export(file_type='gltf', merge_buffers=True) + a = g.get_mesh("rgb_cube_with_primitives.gltf", merge_primitives=True) + result = a.export(file_type="gltf", merge_buffers=True) with g.TemporaryDirectory() as d: for file_name, data in result.items(): - with open(g.os.path.join(d, file_name), 'wb') as f: + with open(g.os.path.join(d, file_name), "wb") as f: f.write(data) - rd = g.trimesh.load( - g.os.path.join(d, 'model.gltf'), merge_primitives=True) + rd = g.trimesh.load(g.os.path.join(d, "model.gltf"), merge_primitives=True) assert isinstance(rd, g.trimesh.Scene) # will assert round trip is roughly equal # TODO : restore # g.scene_equal(rd, a) def test_optional_camera(self): - gltf_cameras_key = 'cameras' + gltf_cameras_key = "cameras" # if there's no camera in the scene, then it shouldn't be added to the # gltf box = g.trimesh.creation.box([1, 1, 1]) scene = g.trimesh.Scene(box) - export = scene.export(file_type='gltf') - assert gltf_cameras_key not in g.json.loads( - export['model.gltf'].decode('utf8')) + export = scene.export(file_type="gltf") + assert gltf_cameras_key not in g.json.loads(export["model.gltf"].decode("utf8")) # `scene.camera` creates a camera if it does not exist. # once in the scene, it should be added to the gltf. box = g.trimesh.creation.box([1, 1, 1]) scene = g.trimesh.Scene(box) scene.set_camera() - export = scene.export(file_type='gltf') - assert gltf_cameras_key in g.json.loads( - export['model.gltf'].decode('utf8')) + export = scene.export(file_type="gltf") + assert gltf_cameras_key in g.json.loads(export["model.gltf"].decode("utf8")) def test_gltf_pole(self): - scene = g.get_mesh('simple_pole.glb') + scene = g.get_mesh("simple_pole.glb") # should have multiple primitives assert len(scene.geometry) == 11 - export = scene.export(file_type='glb') + export = scene.export(file_type="glb") validate_glb(export) # check a roundtrip - reloaded = g.trimesh.load( - g.trimesh.util.wrap_as_stream(export), - file_type='glb') + reloaded = g.trimesh.load(g.trimesh.util.wrap_as_stream(export), file_type="glb") # make basic assertions g.scene_equal(scene, reloaded) @@ -423,26 +383,22 @@ def test_material_primary_colors(self): scene = g.trimesh.Scene([sphere]) def to_integer(args): - args['materials'][0]['pbrMetallicRoughness']['baseColorFactor'] = [ - 1, 0, 0, 1] + args["materials"][0]["pbrMetallicRoughness"]["baseColorFactor"] = [1, 0, 0, 1] - export = scene.export(file_type='glb', tree_postprocessor=to_integer) + export = scene.export(file_type="glb", tree_postprocessor=to_integer) validate_glb(export) reloaded = g.trimesh.load( - file_obj=g.trimesh.util.wrap_as_stream(export), - file_type='glb') + file_obj=g.trimesh.util.wrap_as_stream(export), file_type="glb" + ) assert len(reloaded.geometry) == 1 # get meshes back sphere_b = list(reloaded.geometry.values())[0] - assert ( - sphere_b.visual.material.baseColorFactor == ( - 255, 0, 0, 255)).all() + assert (sphere_b.visual.material.baseColorFactor == (255, 0, 0, 255)).all() def test_material_hash(self): - # load mesh twice independently - a = g.get_mesh('fuze.obj') - b = g.get_mesh('fuze.obj') + a = g.get_mesh("fuze.obj") + b = g.get_mesh("fuze.obj") # move one of the meshes away from the other a.apply_translation([a.scale, 0, 0]) @@ -454,19 +410,22 @@ def test_material_hash(self): # create a scene with two meshes scene = g.trimesh.Scene([a, b]) # get the exported GLTF header of a scene with both meshes - header = 
g.json.loads(scene.export( - file_type='gltf', unitize_normals=True)['model.gltf'].decode('utf-8')) + header = g.json.loads( + scene.export(file_type="gltf", unitize_normals=True)["model.gltf"].decode( + "utf-8" + ) + ) # header should contain exactly one material - assert len(header['materials']) == 1 + assert len(header["materials"]) == 1 # both meshes should be contained in the export - assert len(header['meshes']) == 2 + assert len(header["meshes"]) == 2 # get a reloaded version - export = scene.export(file_type='glb', unitize_normals=True) + export = scene.export(file_type="glb", unitize_normals=True) validate_glb(export) reloaded = g.trimesh.load( - file_obj=g.trimesh.util.wrap_as_stream(export), - file_type='glb') + file_obj=g.trimesh.util.wrap_as_stream(export), file_type="glb" + ) # meshes should have survived assert len(reloaded.geometry) == 2 @@ -474,8 +433,7 @@ def test_material_hash(self): ar, br = reloaded.geometry.values() # should have been loaded as a PBR material - assert isinstance(ar.visual.material, - g.trimesh.visual.material.PBRMaterial) + assert isinstance(ar.visual.material, g.trimesh.visual.material.PBRMaterial) # materials should have the same memory location assert id(ar.visual.material) == id(br.visual.material) @@ -491,144 +449,138 @@ def test_node_name(self): # an export-import cycle. # a scene - s = g.get_mesh('cycloidal.3DXML') + s = g.get_mesh("cycloidal.3DXML") # export as GLB then re-load - export = s.export(file_type='glb') + export = s.export(file_type="glb") validate_glb(export) - r = g.trimesh.load( - g.trimesh.util.wrap_as_stream(export), - file_type='glb') + r = g.trimesh.load(g.trimesh.util.wrap_as_stream(export), file_type="glb") # make sure we have the same geometries before and after assert set(s.geometry.keys()) == set(r.geometry.keys()) # make sure the node names are the same before and after - assert (set(s.graph.nodes_geometry) == - set(r.graph.nodes_geometry)) + assert set(s.graph.nodes_geometry) == set(r.graph.nodes_geometry) def test_nested_scale(self): # nested transforms with scale - s = g.get_mesh('nested.glb') + s = g.get_mesh("nested.glb") assert len(s.graph.nodes_geometry) == 3 assert g.np.allclose( - [[-1.16701, -2.3366, -0.26938], - [0.26938, 1., 0.26938]], - s.bounds, atol=1e-4) + [[-1.16701, -2.3366, -0.26938], [0.26938, 1.0, 0.26938]], s.bounds, atol=1e-4 + ) def test_schema(self): # get a copy of the GLTF schema and do simple checks s = g.trimesh.exchange.gltf.get_schema() # make sure it has at least the keys we expect - assert set(s['properties'].keys()).issuperset( - {'accessors', - 'animations', - 'asset', - 'buffers', - 'bufferViews', - 'cameras', - 'images', - 'materials', - 'meshes', - 'nodes', - 'samplers', - 'scene', - 'scenes', - 'skins', - 'textures', - 'extensions', - 'extras'}) + assert set(s["properties"].keys()).issuperset( + { + "accessors", + "animations", + "asset", + "buffers", + "bufferViews", + "cameras", + "images", + "materials", + "meshes", + "nodes", + "samplers", + "scene", + "scenes", + "skins", + "textures", + "extensions", + "extras", + } + ) # lightly check to see that no references exist - assert '$ref' not in g.json.dumps(s) + assert "$ref" not in g.json.dumps(s) def test_export_custom_attributes(self): # Write and read custom vertex attributes to gltf sphere = g.trimesh.primitives.Sphere() v_count, _ = sphere.vertices.shape - sphere.vertex_attributes[ - '_CustomFloat32Scalar'] = g.np.random.rand( - v_count, 1).astype(g.np.float32) - sphere.vertex_attributes[ - '_CustomFloat32Vec3'] = 
g.np.random.rand( - v_count, 3).astype(g.np.float32) - sphere.vertex_attributes[ - '_CustomFloat32Mat4'] = g.np.random.rand( - v_count, 4, 4).astype(g.np.float32) + sphere.vertex_attributes["_CustomFloat32Scalar"] = g.np.random.rand( + v_count, 1 + ).astype(g.np.float32) + sphere.vertex_attributes["_CustomFloat32Vec3"] = g.np.random.rand( + v_count, 3 + ).astype(g.np.float32) + sphere.vertex_attributes["_CustomFloat32Mat4"] = g.np.random.rand( + v_count, 4, 4 + ).astype(g.np.float32) # export as GLB bytes - export = sphere.export(file_type='glb') + export = sphere.export(file_type="glb") # this should validate just fine validate_glb(export) # uint32 is slightly off-label and may cause # validators to fail but if you're a bad larry who # doesn't follow the rules it should be fine - sphere.vertex_attributes[ - '_CustomUInt32Scalar'] = g.np.random.randint( - 0, 1000, size=(v_count, 1)).astype(g.np.uint32) + sphere.vertex_attributes["_CustomUInt32Scalar"] = g.np.random.randint( + 0, 1000, size=(v_count, 1) + ).astype(g.np.uint32) # when you add a uint16/int16 the gltf-validator # complains about the 4-byte boundaries even though # all their lengths and offsets mod 4 are zero # not sure if that's a validator bug or what - sphere.vertex_attributes[ - '_CustomUInt16Scalar'] = g.np.random.randint( - 0, 1000, size=(v_count, 1)).astype(g.np.uint16) - sphere.vertex_attributes[ - '_CustomInt16Scalar'] = g.np.random.randint( - 0, 1000, size=(v_count, 1)).astype(g.np.int16) + sphere.vertex_attributes["_CustomUInt16Scalar"] = g.np.random.randint( + 0, 1000, size=(v_count, 1) + ).astype(g.np.uint16) + sphere.vertex_attributes["_CustomInt16Scalar"] = g.np.random.randint( + 0, 1000, size=(v_count, 1) + ).astype(g.np.int16) # export as GLB then re-load - export = sphere.export(file_type='glb') + export = sphere.export(file_type="glb") - r = g.trimesh.load( - g.trimesh.util.wrap_as_stream(export), - file_type='glb') + r = g.trimesh.load(g.trimesh.util.wrap_as_stream(export), file_type="glb") for _, val in r.geometry.items(): - assert set( - val.vertex_attributes.keys()) == set( - sphere.vertex_attributes.keys()) + assert set(val.vertex_attributes.keys()) == set( + sphere.vertex_attributes.keys() + ) for key in val.vertex_attributes: is_same = g.np.array_equal( - val.vertex_attributes[key], - sphere.vertex_attributes[key]) + val.vertex_attributes[key], sphere.vertex_attributes[key] + ) assert is_same is True def test_extras(self): # if GLTF extras are defined, make sure they survive a round trip - s = g.get_mesh('cycloidal.3DXML') + s = g.get_mesh("cycloidal.3DXML") - scene_extensions = {'mesh_ext': {'ext_data': 1.23}} + scene_extensions = {"mesh_ext": {"ext_data": 1.23}} # some dummy data dummy = { - 'who': 'likes cheese', - 'potatoes': 25, - 'gtlf_extensions': scene_extensions} + "who": "likes cheese", + "potatoes": 25, + "gtlf_extensions": scene_extensions, + } # export as GLB with extras passed to the exporter then re-load s.metadata = dummy - export = s.export(file_type='glb') + export = s.export(file_type="glb") validate_glb(export) - r = g.trimesh.load( - g.trimesh.util.wrap_as_stream(export), - file_type='glb') + r = g.trimesh.load(g.trimesh.util.wrap_as_stream(export), file_type="glb") # make sure extras survived a round trip - assert all(r.metadata[k] == v - for k, v in dummy.items()) + assert all(r.metadata[k] == v for k, v in dummy.items()) def test_extras_nodes(self): - - mesh_extensions = {'mesh_ext': {'ext_data': 1.23}} + mesh_extensions = {"mesh_ext": {"ext_data": 1.23}} test_metadata = { - 
'test_str': 'test_value', - 'test_int': 1, - 'test_float': 0.123456789, - 'test_bool': True, - 'test_array': [1, 2, 3], - 'test_dict': {'a': 1, 'b': 2}, - 'gltf_extensions': mesh_extensions + "test_str": "test_value", + "test_int": 1, + "test_float": 0.123456789, + "test_bool": True, + "test_array": [1, 2, 3], + "test_dict": {"a": 1, "b": 2}, + "gltf_extensions": mesh_extensions, } sphere1 = g.trimesh.primitives.Sphere(radius=1.0) @@ -645,66 +597,64 @@ def test_extras_nodes(self): node_name="Sphere1", geom_name="Geom Sphere1", transform=tf1, - metadata={'field': 'extra_data1'}) - node_extensions = {'mesh_ext': {'ext_data': 1.23}} - sphere2_metadata = { - 'field': 'extra_data2', - 'gltf_extensions': node_extensions} - s.add_geometry(sphere2, - node_name="Sphere2", - geom_name="Geom Sphere2", - parent_node_name="Sphere1", - transform=tf2, - metadata=sphere2_metadata) + metadata={"field": "extra_data1"}, + ) + node_extensions = {"mesh_ext": {"ext_data": 1.23}} + sphere2_metadata = {"field": "extra_data2", "gltf_extensions": node_extensions} + s.add_geometry( + sphere2, + node_name="Sphere2", + geom_name="Geom Sphere2", + parent_node_name="Sphere1", + transform=tf2, + metadata=sphere2_metadata, + ) # Test extras appear in the exported model nodes files = s.export(None, "gltf") gltf_data = files["model.gltf"] - assert 'test_value' in gltf_data.decode('utf8') + assert "test_value" in gltf_data.decode("utf8") # Check node extras survive a round trip - export = s.export(file_type='glb') + export = s.export(file_type="glb") validate_glb(export) - r = g.trimesh.load( - g.trimesh.util.wrap_as_stream(export), - file_type='glb') + r = g.trimesh.load(g.trimesh.util.wrap_as_stream(export), file_type="glb") files = r.export(None, "gltf") gltf_data = files["model.gltf"] # Check that the mesh and node metadata/extras survived - assert 'test_value' in gltf_data.decode('utf8') - assert 'extra_data1' in gltf_data.decode('utf8') + assert "test_value" in gltf_data.decode("utf8") + assert "extra_data1" in gltf_data.decode("utf8") # Check that the extensions were removed from the metadata; # they should be saved as 'extensions' in the gltf file - assert 'gltf_extensions' not in gltf_data.decode('utf8') + assert "gltf_extensions" not in gltf_data.decode("utf8") # Check that the node transforms and metadata/extras survived edge = r.graph.transforms.edge_data[("world", "Sphere1")] - assert g.np.allclose(edge['matrix'], tf1) - assert edge['metadata']['field'] == 'extra_data1' + assert g.np.allclose(edge["matrix"], tf1) + assert edge["metadata"]["field"] == "extra_data1" edge = r.graph.transforms.edge_data[("Sphere1", "Sphere2")] - assert g.np.allclose(edge['matrix'], tf2) - assert edge['metadata']['field'] == 'extra_data2' + assert g.np.allclose(edge["matrix"], tf2) + assert edge["metadata"]["field"] == "extra_data2" # Check that the node's extensions survived - assert edge['metadata']['gltf_extensions'] == node_extensions + assert edge["metadata"]["gltf_extensions"] == node_extensions # Check that the mesh extensions survived for mesh in r.geometry.values(): - assert mesh.metadata['gltf_extensions'] == mesh_extensions + assert mesh.metadata["gltf_extensions"] == mesh_extensions # all geometry should be the same assert set(r.geometry.keys()) == set(s.geometry.keys()) for mesh in r.geometry.values(): # metadata should have all survived - assert all(mesh.metadata[k] == v - for k, v in test_metadata.items()) + assert all(mesh.metadata[k] == v for k, v in test_metadata.items()) def test_read_scene_extras(self): # loads a 
glb with scene extras - scene = g.get_mesh('monkey.glb', process=False) + scene = g.get_mesh("monkey.glb", process=False) # expected data - check = {'name': 'monkey', 'age': 32, 'height': 0.987} + check = {"name": "monkey", "age": 32, "height": 0.987} meta = scene.metadata for key in check: @@ -714,21 +664,29 @@ def test_read_scene_extras(self): def test_load_empty_nodes(self): # loads a glb with no meshes - scene = g.get_mesh('empty_nodes.glb', process=False) + scene = g.get_mesh("empty_nodes.glb", process=False) # expected data - check = {"parent": [[1.0, 0.0, 0.0, 0.0], - [0.0, 1.0, 0.0, 0.0], - [0.0, 0.0, 1.0, 0.0], - [0.0, 0.0, 0.0, 1.0]], - "children_1": [[1.0, 0.0, 0.0, -5.0], - [0.0, 1.0, 0.0, 5.0], - [0.0, 0.0, 1.0, 0.0], - [0.0, 0.0, 0.0, 1.0]], - "children_2": [[1.0, 0.0, 0.0, 5.0], - [0.0, 1.0, 0.0, 5.0], - [0.0, 0.0, 1.0, 0.0], - [0.0, 0.0, 0.0, 1.0]]} + check = { + "parent": [ + [1.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + ], + "children_1": [ + [1.0, 0.0, 0.0, -5.0], + [0.0, 1.0, 0.0, 5.0], + [0.0, 0.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + ], + "children_2": [ + [1.0, 0.0, 0.0, 5.0], + [0.0, 1.0, 0.0, 5.0], + [0.0, 0.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + ], + } # get the scene nodes objs = scene.graph.to_flattened() @@ -741,7 +699,7 @@ def test_load_empty_nodes(self): assert objs[key]["transform"] == check[key] def test_same_name(self): - s = g.get_mesh('TestScene.gltf') + s = g.get_mesh("TestScene.gltf") # hardcode correct bounds to check against bounds = s.dump(concatenate=True).bounds @@ -752,118 +710,105 @@ def test_same_name(self): assert g.np.allclose(s.bounds, bounds, atol=1e-3) # if merged should have combined the icosahedrons - s = g.get_mesh('TestScene.gltf', merge_primitives=True) + s = g.get_mesh("TestScene.gltf", merge_primitives=True) assert len(s.graph.nodes_geometry) == 7 assert len(s.geometry) == 6 assert g.np.allclose(s.bounds, bounds, atol=1e-3) def test_vertex_colors(self): # get a mesh with face colors - m = g.get_mesh('machinist.XAML') + m = g.get_mesh("machinist.XAML") # export as GLB then re-import - export = m.export(file_type='glb') + export = m.export(file_type="glb") validate_glb(export) - r = next(iter( - g.trimesh.load(g.trimesh.util.wrap_as_stream( - export), - file_type='glb').geometry.values())) + r = next( + iter( + g.trimesh.load( + g.trimesh.util.wrap_as_stream(export), file_type="glb" + ).geometry.values() + ) + ) # original mesh should have vertex colors - assert m.visual.kind == 'face' + assert m.visual.kind == "face" assert m.visual.vertex_colors.ptp(axis=0).ptp() > 0 # vertex colors should have survived import-export - assert g.np.allclose(m.visual.vertex_colors, - r.visual.vertex_colors) + assert g.np.allclose(m.visual.vertex_colors, r.visual.vertex_colors) def test_vertex_attrib(self): # test concatenation with texture - m = g.get_mesh('fuze.obj') + m = g.get_mesh("fuze.obj") - colors = (g.random( - (len(m.vertices), 4)) * 255).astype(g.np.uint8) + colors = (g.random((len(m.vertices), 4)) * 255).astype(g.np.uint8) # set the color vertex attribute - m.visual.vertex_attributes['color'] = colors - export = m.export(file_type='glb', unitize_normals=True) + m.visual.vertex_attributes["color"] = colors + export = m.export(file_type="glb", unitize_normals=True) validate_glb(export) - r = next(iter( - g.trimesh.load( - g.trimesh.util.wrap_as_stream(export), - file_type='glb').geometry.values())) + r = next( + iter( + g.trimesh.load( + g.trimesh.util.wrap_as_stream(export), file_type="glb" 
+ ).geometry.values() + ) + ) # make sure the color vertex attributes survived the roundtrip - assert g.np.allclose( - r.visual.vertex_attributes['color'], colors) + assert g.np.allclose(r.visual.vertex_attributes["color"], colors) def test_export_postprocess(self): scene = g.trimesh.Scene() sphere = g.trimesh.primitives.Sphere() - sphere.visual.material = g.trimesh.visual.material.PBRMaterial( - name='unlit_test') + sphere.visual.material = g.trimesh.visual.material.PBRMaterial(name="unlit_test") scene.add_geometry(sphere) def add_unlit(gltf_tree): - for material_dict in gltf_tree['materials']: - if 'unlit' in material_dict.get('name', '').lower(): - material_dict["extensions"] = { - "KHR_materials_unlit": {} - } + for material_dict in gltf_tree["materials"]: + if "unlit" in material_dict.get("name", "").lower(): + material_dict["extensions"] = {"KHR_materials_unlit": {}} gltf_tree["extensionsUsed"] = ["KHR_materials_unlit"] gltf_1 = g.trimesh.exchange.gltf.export_gltf(scene) - gltf_2 = g.trimesh.exchange.gltf.export_gltf( - scene, tree_postprocessor=add_unlit) + gltf_2 = g.trimesh.exchange.gltf.export_gltf(scene, tree_postprocessor=add_unlit) def extract_materials(gltf_files): - return g.json.loads(gltf_files['model.gltf'].decode('utf8'))[ - 'materials'] + return g.json.loads(gltf_files["model.gltf"].decode("utf8"))["materials"] assert "extensions" not in extract_materials(gltf_1)[-1] assert "extensions" in extract_materials(gltf_2)[-1] def test_primitive_geometry_meta(self): # Model with primitives - s = g.get_mesh('CesiumMilkTruck.glb') + s = g.get_mesh("CesiumMilkTruck.glb") # check to see if names are somewhat sane assert set(s.geometry.keys()) == { - 'Cesium_Milk_Truck', - 'Cesium_Milk_Truck_1', - 'Cesium_Milk_Truck_2', - 'Wheels'} + "Cesium_Milk_Truck", + "Cesium_Milk_Truck_1", + "Cesium_Milk_Truck_2", + "Wheels", + } # Assert that primitive geometries are marked as such - assert s.geometry['Cesium_Milk_Truck'].metadata[ - 'from_gltf_primitive'] - assert s.geometry['Cesium_Milk_Truck_1'].metadata[ - 'from_gltf_primitive'] - assert s.geometry['Cesium_Milk_Truck_2'].metadata[ - 'from_gltf_primitive'] + assert s.geometry["Cesium_Milk_Truck"].metadata["from_gltf_primitive"] + assert s.geometry["Cesium_Milk_Truck_1"].metadata["from_gltf_primitive"] + assert s.geometry["Cesium_Milk_Truck_2"].metadata["from_gltf_primitive"] # Assert that geometries that are not primitives # are not marked as such - assert not s.geometry['Wheels'].metadata[ - 'from_gltf_primitive'] + assert not s.geometry["Wheels"].metadata["from_gltf_primitive"] # make sure the flags survive being merged - m = g.get_mesh('CesiumMilkTruck.glb', - merge_primitives=True) + m = g.get_mesh("CesiumMilkTruck.glb", merge_primitives=True) # names should be non-insane - assert set(m.geometry.keys()) == { - 'Cesium_Milk_Truck', 'Wheels'} - assert not s.geometry['Wheels'].metadata[ - 'from_gltf_primitive'] - assert s.geometry['Cesium_Milk_Truck'].metadata[ - 'from_gltf_primitive'] + assert set(m.geometry.keys()) == {"Cesium_Milk_Truck", "Wheels"} + assert not s.geometry["Wheels"].metadata["from_gltf_primitive"] + assert s.geometry["Cesium_Milk_Truck"].metadata["from_gltf_primitive"] def test_points(self): # test a simple pointcloud export-import cycle points = g.np.arange(30).reshape((-1, 3)) - export = g.trimesh.Scene( - g.trimesh.PointCloud(points)).export(file_type='glb') + export = g.trimesh.Scene(g.trimesh.PointCloud(points)).export(file_type="glb") validate_glb(export) - reloaded = g.trimesh.load( - 
g.trimesh.util.wrap_as_stream(export), - file_type='glb') + reloaded = g.trimesh.load(g.trimesh.util.wrap_as_stream(export), file_type="glb") # make sure points survived export and reload - assert g.np.allclose(next(iter( - reloaded.geometry.values())).vertices, points) + assert g.np.allclose(next(iter(reloaded.geometry.values())).vertices, points) def test_bulk(self): # Try exporting every loadable model to GLTF and checking @@ -873,7 +818,7 @@ def test_bulk(self): assert g.trimesh.tol.strict # check mesh, path, pointcloud exports - for root in [g.dir_models, g.os.path.join(g.dir_models, '2D')]: + for root in [g.dir_models, g.os.path.join(g.dir_models, "2D")]: for fn in g.os.listdir(root): path_in = g.os.path.join(root, fn) try: @@ -886,31 +831,30 @@ def test_bulk(self): # voxels don't have an export to gltf mode if isinstance(geom, g.trimesh.voxel.VoxelGrid): try: - geom.export(file_type='glb') + geom.export(file_type="glb") except ValueError: # should have raised so all good continue - raise ValueError( - 'voxel was allowed to export wrong GLB!') - if hasattr(geom, 'vertices') and len(geom.vertices) == 0: + raise ValueError("voxel was allowed to export wrong GLB!") + if hasattr(geom, "vertices") and len(geom.vertices) == 0: continue - if hasattr(geom, 'geometry') and len(geom.geometry) == 0: + if hasattr(geom, "geometry") and len(geom.geometry) == 0: continue - g.log.info(f'Testing: {fn}') + g.log.info(f"Testing: {fn}") # check a roundtrip which will validate on export # and crash on reload if we've done anything screwey # unitize normals will unitize any normals to comply with # the validator although there are probably reasons you'd # want to roundtrip non-unit normals for things, stuff, and # activities - export = geom.export(file_type='glb', unitize_normals=True) + export = geom.export(file_type="glb", unitize_normals=True) validate_glb(export, name=fn) # shouldn't crash on a reload reloaded = g.trimesh.load( - file_obj=g.trimesh.util.wrap_as_stream(export), - file_type='glb') + file_obj=g.trimesh.util.wrap_as_stream(export), file_type="glb" + ) if isinstance(geom, g.trimesh.Trimesh): assert g.np.isclose(geom.area, reloaded.area) @@ -921,38 +865,34 @@ def test_bulk(self): def test_interleaved(self): # do a quick check on a mesh that uses byte stride - with open(g.get_path('BoxInterleaved.glb'), 'rb') as f: + with open(g.get_path("BoxInterleaved.glb"), "rb") as f: k = g.trimesh.exchange.gltf.load_glb(f) # get the kwargs for the mesh constructor - c = k['geometry']['Mesh'] + c = k["geometry"]["Mesh"] # should have vertex normals - assert c['vertex_normals'].shape == c['vertices'].shape + assert c["vertex_normals"].shape == c["vertices"].shape # interleaved vertex normals should all be unit vectors - assert g.np.allclose( - 1.0, g.np.linalg.norm(c['vertex_normals'], axis=1)) + assert g.np.allclose(1.0, g.np.linalg.norm(c["vertex_normals"], axis=1)) # should also load as a box - m = g.get_mesh('BoxInterleaved.glb').geometry['Mesh'] + m = g.get_mesh("BoxInterleaved.glb").geometry["Mesh"] assert g.np.isclose(m.volume, 1.0) def test_equal_by_default(self): # all things being equal we shouldn't be moving things # for the usual load-export loop - s = g.get_mesh('fuze.obj') + s = g.get_mesh("fuze.obj") # export as GLB then re-load - export = s.export(file_type='glb', unitize_normals=True) + export = s.export(file_type="glb", unitize_normals=True) validate_glb(export) reloaded = g.trimesh.load( - g.trimesh.util.wrap_as_stream(export), - file_type='glb', process=False) + 
g.trimesh.util.wrap_as_stream(export), file_type="glb", process=False + ) assert len(reloaded.geometry) == 1 m = next(iter(reloaded.geometry.values())) - assert g.np.allclose(m.visual.uv, - s.visual.uv) - assert g.np.allclose(m.vertices, - s.vertices) - assert g.np.allclose(m.faces, - s.faces) + assert g.np.allclose(m.visual.uv, s.visual.uv) + assert g.np.allclose(m.vertices, s.vertices) + assert g.np.allclose(m.faces, s.faces) # will run a kdtree check g.texture_equal(s, m) @@ -962,7 +902,7 @@ def test_gltf_by_name(self): with g.TemporaryDirectory() as d: # export the GLTF file by name - file_path = g.os.path.join(d, 'hi.gltf') + file_path = g.os.path.join(d, "hi.gltf") # export the file by path m.export(file_path) # reload the gltf from the file path @@ -970,20 +910,18 @@ def test_gltf_by_name(self): assert isinstance(r, g.trimesh.Scene) assert len(r.geometry) == 1 - assert g.np.isclose( - next( - iter( - r.geometry.values())).volume, - m.volume) + assert g.np.isclose(next(iter(r.geometry.values())).volume, m.volume) def test_embed_buffer(self): - - scene = g.trimesh.Scene({ - 'thing': g.trimesh.primitives.Sphere(), - 'other': g.trimesh.creation.capsule()}) + scene = g.trimesh.Scene( + { + "thing": g.trimesh.primitives.Sphere(), + "other": g.trimesh.creation.capsule(), + } + ) with g.trimesh.util.TemporaryDirectory() as D: - path = g.os.path.join(D, 'hi.gltf') + path = g.os.path.join(D, "hi.gltf") scene.export(path) # should export with separate buffers assert len(g.os.listdir(D)) == 3 reloaded = g.trimesh.load(path) assert set(reloaded.geometry.keys()) == set(scene.geometry.keys()) with g.trimesh.util.TemporaryDirectory() as D: - path = g.os.path.join(D, 'hi.gltf') + path = g.os.path.join(D, "hi.gltf") scene.export(path, embed_buffers=True) # should export with embedded buffers assert len(g.os.listdir(D)) == 1 reloaded = g.trimesh.load(path) assert set(reloaded.geometry.keys()) == set(scene.geometry.keys()) def test_webp(self): # load textured file - mesh = g.get_mesh('fuze.ply') - assert hasattr(mesh.visual, 'uv') + mesh = g.get_mesh("fuze.ply") + assert hasattr(mesh.visual, "uv") for extension in ["glb"]: export = mesh.export(file_type=extension, extension_webp=True) validate_glb(export) # roundtrip reloaded = g.trimesh.load( - g.trimesh.util.wrap_as_stream(export), - file_type=extension) + g.trimesh.util.wrap_as_stream(export), file_type=extension + ) g.scene_equal(g.trimesh.Scene(mesh), reloaded) def test_relative_paths(self): # try with a relative path with g.TemporaryDirectory() as d: - g.os.makedirs(g.os.path.join(d, 'fused')) + g.os.makedirs(g.os.path.join(d, "fused")) g.os.chdir(d) - g.trimesh.creation.box().export('fused/hi.gltf') - r = g.trimesh.load('fused/hi.gltf') + g.trimesh.creation.box().export("fused/hi.gltf") + r = g.trimesh.load("fused/hi.gltf") assert g.np.isclose(r.volume, 1.0) with g.TemporaryDirectory() as d: # now try it without changing to that directory - full = g.os.path.join(d, 'hi', 'there', 'different', 'levels') - path = g.os.path.join(full, 'hey.gltf') + full = g.os.path.join(d, "hi", "there", "different", "levels") + path = g.os.path.join(full, "hey.gltf") g.os.makedirs(full) g.trimesh.creation.box().export(path) r = g.trimesh.load(path) assert g.np.isclose(r.volume, 1.0) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/trimesh/exchange/gltf.py b/trimesh/exchange/gltf.py index 209175716..33aafdfd8 100644 --- a/trimesh/exchange/gltf.py +++ b/trimesh/exchange/gltf.py @@ -20,17 +20,10 @@ # magic numbers which have meaning in GLTF # most are uint32's of UTF-8 text
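That comment is literal: each magic number in the hunk below is an ASCII chunk tag packed into a little-endian uint32, matching the little-endian numpy dtypes used throughout this module. A quick check of the constants using plain numpy (not part of the patch):

import numpy as np

# GLB header and chunk tags are ASCII bytes read as little-endian uint32
assert np.frombuffer(b"glTF", dtype="<u4")[0] == 1179937895
assert np.frombuffer(b"JSON", dtype="<u4")[0] == 1313821514
assert np.frombuffer(b"BIN\x00", dtype="<u4")[0] == 5130562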
diff --git a/trimesh/exchange/gltf.py b/trimesh/exchange/gltf.py
index 209175716..33aafdfd8 100644
--- a/trimesh/exchange/gltf.py
+++ b/trimesh/exchange/gltf.py
@@ -20,17 +20,10 @@
 # magic numbers which have meaning in GLTF
 # most are uint32's of UTF-8 text
-_magic = {"gltf": 1179937895,
-          "json": 1313821514,
-          "bin": 5130562}
+_magic = {"gltf": 1179937895, "json": 1313821514, "bin": 5130562}
 
 # GLTF data type codes: little endian numpy dtypes
-_dtypes = {5120: "<i1",
-           5121: "<u1",
-           5122: "<i2",
-           5123: "<u2",
-           5125: "<u4",
-           5126: "<f4"}
+_dtypes = {5120: "<i1", 5121: "<u1", 5122: "<i2", 5123: "<u2", 5125: "<u4", 5126: "<f4"}
 
     if len(buffer_data) > 0:
         tree["buffers"] = buffers
         tree["bufferViews"] = views
 
     # dump tree with compact separators
-    files["model.gltf"] = util.jsonify(
-        tree, separators=(',', ':')).encode("utf-8")
+    files["model.gltf"] = util.jsonify(tree, separators=(",", ":")).encode("utf-8")
 
     if tol.strict:
         validate(tree)
@@ -165,12 +158,13 @@ def export_gltf(scene,
 
 
 def export_glb(
-        scene,
-        include_normals=None,
-        unitize_normals=False,
-        tree_postprocessor=None,
-        buffer_postprocessor=None,
-        extension_webp=False):
+    scene,
+    include_normals=None,
+    unitize_normals=False,
+    tree_postprocessor=None,
+    buffer_postprocessor=None,
+    extension_webp=False,
+):
     """
     Export a scene as a binary GLTF (GLB) file.
 
@@ -194,8 +188,7 @@ def export_glb(
         Exported result in GLB 2.0
     """
     # if we were passed a bare Trimesh or Path3D object
-    if (not util.is_instance_named(scene, "Scene") and
-            hasattr(scene, "scene")):
+    if not util.is_instance_named(scene, "Scene") and hasattr(scene, "scene"):
         # generate a scene with just that mesh in it
         scene = scene.scene()
 
@@ -204,7 +197,8 @@ def export_glb(
         unitize_normals=unitize_normals,
         include_normals=include_normals,
         buffer_postprocessor=buffer_postprocessor,
-        extension_webp=extension_webp)
+        extension_webp=extension_webp,
+    )
 
     # allow custom postprocessing
     if tree_postprocessor is not None:
@@ -222,7 +216,7 @@ def export_glb(
         tree["bufferViews"] = views
 
     # export the tree to JSON for the header
-    content = util.jsonify(tree, separators=(',', ':'))
+    content = util.jsonify(tree, separators=(",", ":"))
     # add spaces to content, so the start of the data
     # is 4 byte aligned as per spec
     content += (4 - ((len(content) + 20) % 4)) * " "
 
     # the initial header of the file
     header = _byte_pad(
-        np.array([_magic["gltf"],  # magic, turns into glTF
-                  2,  # GLTF version
-                  # length is the total length of the Binary glTF
-                  # including Header and all Chunks, in bytes.
-                  len(content) + len(buffer_data) + 28,
-                  # contentLength is the length, in bytes,
-                  # of the glTF content (JSON)
-                  len(content),
-                  # magic number which is 'JSON'
-                  _magic["json"]],
-                 dtype="<u4").tobytes())
+        np.array(
+            [
+                _magic["gltf"],  # magic, turns into glTF
+                2,  # GLTF version
+                # length is the total length of the Binary glTF
+                # including Header and all Chunks, in bytes.
+                len(content) + len(buffer_data) + 28,
+                # contentLength is the length, in bytes,
+                # of the glTF content (JSON)
+                len(content),
+                # magic number which is 'JSON'
+                _magic["json"],
+            ],
+            dtype="<u4",
+        ).tobytes()
+    )
 
     if len(extensions_used) > 0:
-        tree['extensionsUsed'] = list(extensions_used)
+        tree["extensionsUsed"] = list(extensions_used)
 
     # Also add WebP to required (no fallback currently implemented)
     # 'extensionsRequired' aren't currently used so this doesn't overwrite
     if extension_webp:
-        tree['extensionsRequired'] = ["EXT_texture_webp"]
+        tree["extensionsRequired"] = ["EXT_texture_webp"]
 
     if buffer_postprocessor is not None:
         buffer_postprocessor(buffer_items, tree)
 
     # convert accessors back to a flat list
-    tree['accessors'] = list(tree['accessors'].values())
+    tree["accessors"] = list(tree["accessors"].values())
 
     # cull empty or unpopulated fields
     # check keys that might be empty so we can remove them
-    check = ['textures', 'materials', 'images', 'accessors', 'meshes']
+    check = ["textures", "materials", "images", "accessors", "meshes"]
     # remove the keys with nothing stored in them
     [tree.pop(key) for key in check if len(tree[key]) == 0]
 
     return tree, buffer_items
 
 
-def _append_mesh(mesh,
-                 name,
-                 tree,
-                 buffer_items,
-                 include_normals,
-                 unitize_normals,
-                 mat_hashes,
-                 extension_webp):
+def _append_mesh(
+    mesh,
+    name,
+    tree,
+    buffer_items,
+    include_normals,
+    unitize_normals,
+    mat_hashes,
+    extension_webp,
+):
     """
     Append a mesh to the scene structure and put the
     data into buffer_items.
 
@@ -781,34 +771,40 @@ def _append_mesh(
     """
     # return early from empty meshes to avoid crashing later
     if len(mesh.faces) == 0 or len(mesh.vertices) == 0:
-        log.debug('skipping empty mesh!')
+        log.debug("skipping empty mesh!")
         return
 
     # convert mesh data to the correct dtypes
     # faces: 5125 is an unsigned 32 bit integer
     # accessors refer to data locations
     # mesh faces are stored as flat list of integers
-    acc_face = _data_append(acc=tree['accessors'],
-                            buff=buffer_items,
-                            blob={"componentType": 5125,
-                                  "type": "SCALAR"},
-                            data=mesh.faces.astype(uint32))
+    acc_face = _data_append(
+        acc=tree["accessors"],
+        buff=buffer_items,
+        blob={"componentType": 5125, "type": "SCALAR"},
+        data=mesh.faces.astype(uint32),
+    )
     # vertices: 5126 is a float32
     # create or reuse an accessor for these vertices
-    acc_vertex = _data_append(acc=tree['accessors'],
-                              buff=buffer_items,
-                              blob={"componentType": 5126,
-                                    "type": "VEC3",
-                                    "byteOffset": 0},
-                              data=mesh.vertices.astype(float32))
+    acc_vertex = _data_append(
+        acc=tree["accessors"],
+        buff=buffer_items,
+        blob={"componentType": 5126, "type": "VEC3", "byteOffset": 0},
+        data=mesh.vertices.astype(float32),
+    )
     # meshes reference accessor indexes
-    current = {"name": name,
-               "extras": {},
-               "primitives": [{
-                   "attributes": {"POSITION": acc_vertex},
-                   "indices": acc_face,
-                   "mode": _GL_TRIANGLES}]}
+    current = {
+        "name": name,
+        "extras": {},
+        "primitives": [
+            {
+                "attributes": {"POSITION": acc_vertex},
+                "indices": acc_face,
+                "mode": _GL_TRIANGLES,
+            }
+        ],
+    }
     # if units are defined, store them as an extra
     # the GLTF spec says everything is implicit meters
     # we're not doing that as our unit conversions are expensive
     # https://github.com/KhronosGroup/glTF/tree/master/extensions
     try:
         # skip jsonify any metadata, skipping internal keys
-        current['extras'] = _jsonify(mesh.metadata)
+        current["extras"] = _jsonify(mesh.metadata)
 
         # extract extensions if any
-        extensions = current['extras'].pop('gltf_extensions', None)
+        extensions = 
current["extras"].pop("gltf_extensions", None) if isinstance(extensions, dict): - current['extensions'] = extensions + current["extensions"] = extensions - if mesh.units not in [None, 'm', 'meters', 'meter']: + if mesh.units not in [None, "m", "meters", "meter"]: current["extras"]["units"] = str(mesh.units) except BaseException: - log.debug('metadata not serializable, dropping!', - exc_info=True) + log.debug("metadata not serializable, dropping!", exc_info=True) # check to see if we have vertex or face colors # or if a TextureVisual has colors included as an attribute - if mesh.visual.kind in ['vertex', 'face']: + if mesh.visual.kind in ["vertex", "face"]: vertex_colors = mesh.visual.vertex_colors - elif (hasattr(mesh.visual, 'vertex_attributes') and - 'color' in mesh.visual.vertex_attributes): - vertex_colors = mesh.visual.vertex_attributes['color'] + elif ( + hasattr(mesh.visual, "vertex_attributes") + and "color" in mesh.visual.vertex_attributes + ): + vertex_colors = mesh.visual.vertex_attributes["color"] else: vertex_colors = None if vertex_colors is not None: # convert color data to bytes and append acc_color = _data_append( - acc=tree['accessors'], + acc=tree["accessors"], buff=buffer_items, - blob={"componentType": 5121, - "normalized": True, - "type": "VEC4", - "byteOffset": 0}, - data=vertex_colors.astype(uint8)) + blob={ + "componentType": 5121, + "normalized": True, + "type": "VEC4", + "byteOffset": 0, + }, + data=vertex_colors.astype(uint8), + ) # add the reference for vertex color - current["primitives"][0]["attributes"][ - "COLOR_0"] = acc_color + current["primitives"][0]["attributes"]["COLOR_0"] = acc_color - if hasattr(mesh.visual, 'material'): + if hasattr(mesh.visual, "material"): # append the material and then set from returned index current_material = _append_material( mat=mesh.visual.material, tree=tree, buffer_items=buffer_items, mat_hashes=mat_hashes, - extension_webp=extension_webp) + extension_webp=extension_webp, + ) # if mesh has UV coordinates defined export them - has_uv = (hasattr(mesh.visual, 'uv') and - mesh.visual.uv is not None and - len(mesh.visual.uv) == len(mesh.vertices)) + has_uv = ( + hasattr(mesh.visual, "uv") + and mesh.visual.uv is not None + and len(mesh.visual.uv) == len(mesh.vertices) + ) if has_uv: # slice off W if passed uv = mesh.visual.uv.copy()[:, :2] # reverse the Y for GLTF uv[:, 1] = 1.0 - uv[:, 1] # add an accessor describing the blob of UV's - acc_uv = _data_append(acc=tree['accessors'], - buff=buffer_items, - blob={"componentType": 5126, - "type": "VEC2", - "byteOffset": 0}, - data=uv.astype(float32)) + acc_uv = _data_append( + acc=tree["accessors"], + buff=buffer_items, + blob={"componentType": 5126, "type": "VEC2", "byteOffset": 0}, + data=uv.astype(float32), + ) # add the reference for UV coordinates current["primitives"][0]["attributes"]["TEXCOORD_0"] = acc_uv # only reference the material if we had UV coordinates current["primitives"][0]["material"] = current_material - if (include_normals or - (include_normals is None and - 'vertex_normals' in mesh._cache.cache)): + if include_normals or ( + include_normals is None and "vertex_normals" in mesh._cache.cache + ): # store vertex normals if requested if unitize_normals: normals = mesh.vertex_normals.copy() @@ -899,16 +901,18 @@ def _append_mesh(mesh, normals = mesh.vertex_normals acc_norm = _data_append( - acc=tree['accessors'], + acc=tree["accessors"], buff=buffer_items, - blob={"componentType": 5126, - "count": len(mesh.vertices), - "type": "VEC3", - "byteOffset": 0}, - 
data=normals.astype(float32)) + blob={ + "componentType": 5126, + "count": len(mesh.vertices), + "type": "VEC3", + "byteOffset": 0, + }, + data=normals.astype(float32), + ) # add the reference for vertex color - current["primitives"][0]["attributes"][ - "NORMAL"] = acc_norm + current["primitives"][0]["attributes"]["NORMAL"] = acc_norm # for each attribute with a leading underscore, assign them to trimesh # vertex_attributes @@ -920,18 +924,18 @@ def _append_mesh(mesh, # GLTF has no floating point type larger than 32 bits so clip # any float64 or larger to float32 - if attrib.dtype.kind == 'f' and attrib.dtype.itemsize > 4: + if attrib.dtype.kind == "f" and attrib.dtype.itemsize > 4: data = attrib.astype(np.float32) else: data = attrib # store custom vertex attributes - current["primitives"][0][ - "attributes"][key] = _data_append( - acc=tree['accessors'], - buff=buffer_items, - blob=_build_accessor(data), - data=data) + current["primitives"][0]["attributes"][key] = _data_append( + acc=tree["accessors"], + buff=buffer_items, + blob=_build_accessor(data), + data=data, + ) tree["meshes"].append(current) @@ -956,9 +960,8 @@ def _build_views(buffer_items): current_pos = 0 for current_item in buffer_items.values(): views.append( - {"buffer": 0, - "byteOffset": current_pos, - "byteLength": len(current_item)}) + {"buffer": 0, "byteOffset": current_pos, "byteLength": len(current_item)} + ) assert (current_pos % 4) == 0 assert (len(current_item) % 4) == 0 current_pos += len(current_item) @@ -984,8 +987,7 @@ def _build_accessor(array): if len(shape) == 2: vec_length = shape[1] if vec_length > 4: - raise ValueError( - "The GLTF spec does not support vectors larger than 4") + raise ValueError("The GLTF spec does not support vectors larger than 4") if vec_length > 1: data_type = "VEC%d" % vec_length else: @@ -997,20 +999,17 @@ def _build_accessor(array): data_type = "MAT%d" % shape[2] # get the array data type as a str stripping off endian - lookup = array.dtype.str.lstrip('<>') + lookup = array.dtype.str.lstrip("<>") - if lookup == 'u4': + if lookup == "u4": # spec: UNSIGNED_INT is only allowed when the accessor # contains indices i.e. the accessor is only referenced # by `primitive.indices` - log.debug('custom uint32 may cause validation failures') + log.debug("custom uint32 may cause validation failures") # map the numpy dtype to a GLTF code (i.e. 5121) componentType = _dtypes_lookup[lookup] - accessor = { - "componentType": componentType, - "type": data_type, - "byteOffset": 0} + accessor = {"componentType": componentType, "type": data_type, "byteOffset": 0} if len(shape) < 3: accessor["max"] = array.max(axis=0).tolist() @@ -1043,14 +1042,16 @@ def _byte_pad(data, bound=4): # extra bytes to pad with count = bound - (len(data) % bound) # bytes(count) only works on Python 3 - pad = (' ' * count).encode('utf-8') + pad = (" " * count).encode("utf-8") # combine the padding and data result = b"".join([data, pad]) # we should always divide evenly if tol.strict and (len(result) % bound) != 0: raise ValueError( - 'byte_pad failed! ori:{} res:{} pad:{} req:{}'.format( - len(data), len(result), count, bound)) + "byte_pad failed! 
ori:{} res:{} pad:{} req:{}".format(
+                len(data), len(result), count, bound
+            )
+        )
         return result
     return data
 
 
@@ -1091,36 +1092,42 @@ def _append_path(path, name, tree, buffer_items):
     # data is the second value of the fifth field
     # which is a (data type, data) tuple
     acc_vertex = _data_append(
-        acc=tree['accessors'],
+        acc=tree["accessors"],
         buff=buffer_items,
-        blob={"componentType": 5126,
-              "type": "VEC3",
-              "byteOffset": 0},
-        data=vxlist[4][1].astype(float32))
+        blob={"componentType": 5126, "type": "VEC3", "byteOffset": 0},
+        data=vxlist[4][1].astype(float32),
+    )
 
     current = {
         "name": name,
-        "primitives": [{
-            "attributes": {"POSITION": acc_vertex},
-            "mode": _GL_LINES,  # i.e. 1
-            "material": material_idx}]}
+        "primitives": [
+            {
+                "attributes": {"POSITION": acc_vertex},
+                "mode": _GL_LINES,  # i.e. 1
+                "material": material_idx,
+            }
+        ],
+    }
 
     # if units are defined, store them as an extra:
     # https://github.com/KhronosGroup/glTF/tree/master/extensions
     try:
         current["extras"] = _jsonify(path.metadata)
     except BaseException:
-        log.debug('failed to serialize metadata, dropping!',
-                  exc_info=True)
+        log.debug("failed to serialize metadata, dropping!", exc_info=True)
 
     if path.colors is not None:
-        acc_color = _data_append(acc=tree['accessors'],
-                                 buff=buffer_items,
-                                 blob={"componentType": 5121,
-                                       "normalized": True,
-                                       "type": "VEC4",
-                                       "byteOffset": 0},
-                                 data=np.array(vxlist[5][1]).astype(uint8))
+        acc_color = _data_append(
+            acc=tree["accessors"],
+            buff=buffer_items,
+            blob={
+                "componentType": 5121,
+                "normalized": True,
+                "type": "VEC4",
+                "byteOffset": 0,
+            },
+            data=np.array(vxlist[5][1]).astype(uint8),
+        )
         # add color to attributes
         current["primitives"][0]["attributes"]["COLOR_0"] = acc_color
 
@@ -1146,22 +1153,26 @@ def _append_point(points, name, tree, buffer_items):
 
     # convert the points to the unnamed args for
     # a pyglet vertex list
-    vxlist = rendering.points_to_vertexlist(
-        points=points.vertices, colors=points.colors)
+    vxlist = rendering.points_to_vertexlist(points=points.vertices, colors=points.colors)
 
     # data is the second value of the fifth field
     # which is a (data type, data) tuple
-    acc_vertex = _data_append(acc=tree['accessors'],
-                              buff=buffer_items,
-                              blob={"componentType": 5126,
-                                    "type": "VEC3",
-                                    "byteOffset": 0},
-                              data=vxlist[4][1].astype(float32))
+    acc_vertex = _data_append(
+        acc=tree["accessors"],
+        buff=buffer_items,
+        blob={"componentType": 5126, "type": "VEC3", "byteOffset": 0},
+        data=vxlist[4][1].astype(float32),
+    )
-    current = {"name": name,
-               "primitives": [{
-                   "attributes": {"POSITION": acc_vertex},
-                   "mode": _GL_POINTS,
-                   "material": len(tree["materials"])}]}
+    current = {
+        "name": name,
+        "primitives": [
+            {
+                "attributes": {"POSITION": acc_vertex},
+                "mode": _GL_POINTS,
+                "material": len(tree["materials"]),
+            }
+        ],
+    }
 
     # TODO add color support to Points object
     # this is just exporting everything as black
     if len(np.shape(points.colors)) == 2:
         # colors may be returned as "c3f" or other RGBA
         color_type, color_data = vxlist[5]
-        if '3' in color_type:
-            kind = 'VEC3'
-        elif '4' in color_type:
-            kind = 'VEC4'
+        if "3" in color_type:
+            kind = "VEC3"
+        elif "4" in color_type:
+            kind = "VEC4"
         else:
-            raise ValueError('unknown color: %s', color_type)
+            raise 
ValueError("unknown color: %s", color_type) + acc_color = _data_append( + acc=tree["accessors"], + buff=buffer_items, + blob={ + "componentType": 5121, + "count": vxlist[0], + "normalized": True, + "type": kind, + "byteOffset": 0, + }, + data=np.array(color_data).astype(uint8), + ) # add color to attributes current["primitives"][0]["attributes"]["COLOR_0"] = acc_color tree["meshes"].append(current) @@ -1204,13 +1219,13 @@ def _parse_textures(header, views, resolver=None): # loop through images for i, img in enumerate(header["images"]): # get the bytes representing an image - if 'bufferView' in img: + if "bufferView" in img: blob = views[img["bufferView"]] - elif 'uri' in img: + elif "uri" in img: # will get bytes from filesystem or base64 URI - blob = _uri_to_bytes(uri=img['uri'], resolver=resolver) + blob = _uri_to_bytes(uri=img["uri"], resolver=resolver) else: - log.debug(f'unable to load image from: {img.keys()}') + log.debug(f"unable to load image from: {img.keys()}") continue # i.e. 'image/jpeg' # mime = img['mimeType'] @@ -1239,6 +1254,7 @@ def _parse_materials(header, views, resolver=None): materials : list List of trimesh.visual.texture.Material objects """ + def parse_values_and_textures(input_dict): result = {} for k, v in input_dict.items(): @@ -1255,9 +1271,11 @@ def parse_values_and_textures(input_dict): # check to see if this is using a webp extension texture # should this be case sensitive? - webp = texture.get( - 'extensions', {}).get( - 'EXT_texture_webp', {}).get('source') + webp = ( + texture.get("extensions", {}) + .get("EXT_texture_webp", {}) + .get("source") + ) if webp is not None: idx = webp else: @@ -1267,8 +1285,7 @@ def parse_values_and_textures(input_dict): # store the actual image as the value result[k] = images[idx] except BaseException: - log.debug('unable to store texture', - exc_info=True) + log.debug("unable to store texture", exc_info=True) return result images = _parse_textures(header, views, resolver) @@ -1284,8 +1301,9 @@ def parse_values_and_textures(input_dict): # add keys of keys to top level dict loopable.update(loopable.pop("pbrMetallicRoughness")) - ext = mat.get('extensions', {}).get( - 'KHR_materials_pbrSpecularGlossiness', None) + ext = mat.get("extensions", {}).get( + "KHR_materials_pbrSpecularGlossiness", None + ) if isinstance(ext, dict): ext_params = parse_values_and_textures(ext) loopable.update(specular_to_pbr(**ext_params)) @@ -1298,12 +1316,14 @@ def parse_values_and_textures(input_dict): return materials -def _read_buffers(header, - buffers, - mesh_kwargs, - ignore_broken=False, - merge_primitives=False, - resolver=None): +def _read_buffers( + header, + buffers, + mesh_kwargs, + ignore_broken=False, + merge_primitives=False, + resolver=None, +): """ Given binary data and a layout return the kwargs to create a scene object. @@ -1345,10 +1365,10 @@ def _read_buffers(header, # load data from buffers into numpy arrays # using the layout described by accessors - access = [None] * len(header['accessors']) + access = [None] * len(header["accessors"]) for index, a in enumerate(header["accessors"]): # number of items - count = a['count'] + count = a["count"] # what is the datatype dtype = np.dtype(_dtypes[a["componentType"]]) # basically how many columns @@ -1359,7 +1379,7 @@ def _read_buffers(header, # number of items when flattened # i.e. 
a (4, 4) MAT4 has 16 per_count = np.abs(np.prod(per_item)) - if 'bufferView' in a: + if "bufferView" in a: # data was stored in a buffer view so get raw bytes # load the bytes data into correct dtype and shape @@ -1373,7 +1393,7 @@ def _read_buffers(header, # both bufferView *and* accessors are allowed # to have a byteOffset - start = a.get('byteOffset', 0) + start = a.get("byteOffset", 0) if "byteStride" in buffer_view: # how many bytes for each chunk @@ -1386,25 +1406,24 @@ def _read_buffers(header, # and then pull chunks per-stride # do as a list comprehension as the numpy # buffer wangling was - raw = b''.join( - data[i:i + per_row] for i in - range(start, start + length, stride)) + raw = b"".join( + data[i : i + per_row] + for i in range(start, start + length, stride) + ) # the reshape should fail if we screwed up - access[index] = np.frombuffer( - raw, dtype=dtype).reshape(shape) + access[index] = np.frombuffer(raw, dtype=dtype).reshape(shape) else: # length is the number of bytes per item times total length = dtype.itemsize * count * per_count access[index] = np.frombuffer( - data[start:start + length], dtype=dtype).reshape(shape) + data[start : start + length], dtype=dtype + ).reshape(shape) else: # a "sparse" accessor should be initialized as zeros - access[index] = np.zeros( - count * per_count, dtype=dtype).reshape(shape) + access[index] = np.zeros(count * per_count, dtype=dtype).reshape(shape) # load images and textures into material objects - materials = _parse_materials( - header, views=views, resolver=resolver) + materials = _parse_materials(header, views=views, resolver=resolver) mesh_prim = collections.defaultdict(list) # load data from accessors into Trimesh objects @@ -1417,13 +1436,13 @@ def _read_buffers(header, for index, m in enumerate(header.get("meshes", [])): try: # GLTF spec indicates implicit units are meters - metadata = {'units': 'meters'} + metadata = {"units": "meters"} # try to load all mesh metadata - if isinstance(m.get('extras'), dict): - metadata.update(m['extras']) + if isinstance(m.get("extras"), dict): + metadata.update(m["extras"]) # put any mesh extensions in a field of the metadata - if 'extensions' in m: - metadata['gltf_extensions'] = m['extensions'] + if "extensions" in m: + metadata["gltf_extensions"] = m["extensions"] for p in m["primitives"]: # if we don't have a triangular mesh continue @@ -1433,50 +1452,48 @@ def _read_buffers(header, kwargs["metadata"].update(metadata) # i.e. 
GL_LINES, GL_TRIANGLES, etc # specification says the default mode is GL_TRIANGLES - mode = p.get('mode', _GL_TRIANGLES) + mode = p.get("mode", _GL_TRIANGLES) # colors, normals, etc - attr = p['attributes'] + attr = p["attributes"] # create a unique mesh name per- primitive - name = m.get('name', 'GLTF') + name = m.get("name", "GLTF") # make name unique across multiple meshes name = unique_name(name, meshes, counts=name_counts) if mode == _GL_LINES: # load GL_LINES into a Path object from ..path.entities import Line + kwargs["vertices"] = access[attr["POSITION"]] - kwargs['entities'] = [Line( - points=np.arange(len(kwargs['vertices'])))] + kwargs["entities"] = [Line(points=np.arange(len(kwargs["vertices"])))] elif mode == _GL_POINTS: kwargs["vertices"] = access[attr["POSITION"]] elif mode in (_GL_TRIANGLES, _GL_STRIP): # get vertices from accessors kwargs["vertices"] = access[attr["POSITION"]] # get faces from accessors - if 'indices' in p: + if "indices" in p: if mode == _GL_STRIP: # this is triangle strips - flat = access[p['indices']].reshape(-1) - kwargs['faces'] = util.triangle_strips_to_faces( - [flat]) + flat = access[p["indices"]].reshape(-1) + kwargs["faces"] = util.triangle_strips_to_faces([flat]) else: - kwargs["faces"] = access[p["indices"] - ].reshape((-1, 3)) + kwargs["faces"] = access[p["indices"]].reshape((-1, 3)) else: # indices are apparently optional and we are supposed to # do the same thing as webGL drawArrays? - kwargs['faces'] = np.arange( - len(kwargs['vertices']) * 3, - dtype=np.int64).reshape((-1, 3)) + kwargs["faces"] = np.arange( + len(kwargs["vertices"]) * 3, dtype=np.int64 + ).reshape((-1, 3)) - if 'NORMAL' in attr: + if "NORMAL" in attr: # vertex normals are specified - kwargs['vertex_normals'] = access[attr['NORMAL']] + kwargs["vertex_normals"] = access[attr["NORMAL"]] # do we have UV coordinates visuals = None if "material" in p: if materials is None: - log.debug('no materials! `pip install pillow`') + log.debug("no materials! 
`pip install pillow`") else: uv = None if "TEXCOORD_0" in attr: @@ -1486,41 +1503,43 @@ def _read_buffers(header, uv[:, 1] = 1.0 - uv[:, 1] # create a texture visual visuals = visual.texture.TextureVisuals( - uv=uv, material=materials[p["material"]]) + uv=uv, material=materials[p["material"]] + ) - if 'COLOR_0' in attr: + if "COLOR_0" in attr: try: # try to load vertex colors from the accessors - colors = access[attr['COLOR_0']] - if len(colors) == len(kwargs['vertices']): + colors = access[attr["COLOR_0"]] + if len(colors) == len(kwargs["vertices"]): if visuals is None: # just pass to mesh as vertex color - kwargs['vertex_colors'] = colors + kwargs["vertex_colors"] = colors else: # we ALSO have texture so save as vertex # attribute - visuals.vertex_attributes['color'] = colors + visuals.vertex_attributes["color"] = colors except BaseException: # survive failed colors - log.debug('failed to load colors', exc_info=True) + log.debug("failed to load colors", exc_info=True) if visuals is not None: - kwargs['visual'] = visuals + kwargs["visual"] = visuals # By default the created mesh is not from primitive, # in case it is the value will be updated # each primitive gets it's own Trimesh object if len(m["primitives"]) > 1: - kwargs['metadata']['from_gltf_primitive'] = True + kwargs["metadata"]["from_gltf_primitive"] = True else: - kwargs['metadata']['from_gltf_primitive'] = False + kwargs["metadata"]["from_gltf_primitive"] = False # custom attributes starting with a `_` - custom = {a: access[attr[a]] for a in attr.keys() - if a.startswith('_')} + custom = { + a: access[attr[a]] for a in attr.keys() if a.startswith("_") + } if len(custom) > 0: kwargs["vertex_attributes"] = custom else: - log.debug('skipping primitive with mode %s!', mode) + log.debug("skipping primitive with mode %s!", mode) continue # this should absolutely not be stomping on itself assert name not in meshes @@ -1528,8 +1547,7 @@ def _read_buffers(header, mesh_prim[index].append(name) except BaseException as E: if ignore_broken: - log.debug('failed to load mesh', - exc_info=True), + log.debug("failed to load mesh", exc_info=True), else: raise E @@ -1557,27 +1575,28 @@ def _read_buffers(header, # get all meshes for this group current = [meshes[n] for n in names] - v_seq = [p['vertices'] for p in current] - f_seq = [p['faces'] for p in current] + v_seq = [p["vertices"] for p in current] + f_seq = [p["faces"] for p in current] v, f = util.append_faces(v_seq, f_seq) - materials = [p['visual'].material for p in current] + materials = [p["visual"].material for p in current] face_materials = [] for i, p in enumerate(current): - face_materials += [i] * len(p['faces']) + face_materials += [i] * len(p["faces"]) visuals = visual.texture.TextureVisuals( - material=visual.material.MultiMaterial( - materials=materials), - face_materials=face_materials) - if 'metadata' in meshes[names[0]]: - metadata = meshes[names[0]]['metadata'] + material=visual.material.MultiMaterial(materials=materials), + face_materials=face_materials, + ) + if "metadata" in meshes[names[0]]: + metadata = meshes[names[0]]["metadata"] else: metadata = {} meshes[name] = { - 'vertices': v, - 'faces': f, - 'visual': visuals, - 'metadata': metadata, - 'process': False} + "vertices": v, + "faces": f, + "visual": visuals, + "metadata": metadata, + "process": False, + } mesh_prim_replace[mesh_index] = [name] # avoid altering inside loop mesh_prim = mesh_prim_replace @@ -1594,11 +1613,7 @@ def _read_buffers(header, name_index = {} name_counts = {} for i, n in enumerate(nodes): - 
name_index[unique_name( - n.get('name', str(i)), - name_index, - counts=name_counts - )] = i + name_index[unique_name(n.get("name", str(i)), name_index, counts=name_counts)] = i # invert the dict so we can look up by index # node index (int) : name (str) names = {v: k for k, v in name_index.items()} @@ -1614,14 +1629,14 @@ def _read_buffers(header, # unvisited, pairs of node indexes queue = collections.deque() - if 'scene' in header: + if "scene" in header: # specify the index of scenes if specified - scene_index = header['scene'] + scene_index = header["scene"] else: # otherwise just use the first index scene_index = 0 - if 'scenes' in header: + if "scenes" in header: # start the traversal from the base frame to the roots for root in header["scenes"][scene_index].get("nodes", []): # add transform from base frame to these root nodes @@ -1658,9 +1673,9 @@ def _read_buffers(header, # parent -> child relationships have matrix stored in child # for the transform from parent to child if "matrix" in child: - kwargs["matrix"] = np.array( - child["matrix"], - dtype=np.float64).reshape((4, 4)).T + kwargs["matrix"] = ( + np.array(child["matrix"], dtype=np.float64).reshape((4, 4)).T + ) else: # if no matrix set identity kwargs["matrix"] = _EYE @@ -1669,20 +1684,21 @@ def _read_buffers(header, # GLTF applies these in order: T * R * S if "translation" in child: kwargs["matrix"] = np.dot( - kwargs["matrix"], - transformations.translation_matrix(child["translation"])) + kwargs["matrix"], transformations.translation_matrix(child["translation"]) + ) if "rotation" in child: # GLTF rotations are stored as (4,) XYZW unit quaternions # we need to re- order to our quaternion style, WXYZ quat = np.reshape(child["rotation"], 4)[[3, 0, 1, 2]] # add the rotation to the matrix kwargs["matrix"] = np.dot( - kwargs["matrix"], transformations.quaternion_matrix(quat)) + kwargs["matrix"], transformations.quaternion_matrix(quat) + ) if "scale" in child: # add scale to the matrix kwargs["matrix"] = np.dot( - kwargs["matrix"], - np.diag(np.concatenate((child['scale'], [1.0])))) + kwargs["matrix"], np.diag(np.concatenate((child["scale"], [1.0]))) + ) # treat node metadata similarly to mesh metadata if isinstance(child.get("extras"), dict): @@ -1707,32 +1723,33 @@ def _read_buffers(header, kwargs["geometry"] = geom_name # no transformations kwargs["matrix"] = _EYE - kwargs['frame_from'] = names[b] + kwargs["frame_from"] = names[b] # if we have more than one primitive assign a new UUID # frame name for the primitives after the first one - frame_to = f'{names[b]}_{util.unique_id(length=6)}' - kwargs['frame_to'] = frame_to + frame_to = f"{names[b]}_{util.unique_id(length=6)}" + kwargs["frame_to"] = frame_to # append the edge with the mesh frame graph.append(kwargs.copy()) elif len(geometries) == 1: kwargs["geometry"] = geometries[0] - if 'name' in child: - kwargs['frame_to'] = names[b] + if "name" in child: + kwargs["frame_to"] = names[b] graph.append(kwargs.copy()) else: # if the node doesn't have any geometry just add graph.append(kwargs) # kwargs for load_kwargs - result = {"class": "Scene", - "geometry": meshes, - "graph": graph, - "base_frame": base_frame} + result = { + "class": "Scene", + "geometry": meshes, + "graph": graph, + "base_frame": base_frame, + } try: # load any scene extras into scene.metadata # use a try except to avoid nested key checks - result['metadata'] = header['scenes'][ - header['scene']]['extras'] + result["metadata"] = header["scenes"][header["scene"]]["extras"] except BaseException: pass try: @@ 
-1740,7 +1757,7 @@ def _read_buffers(header, # use a try except to avoid nested key checks if "metadata" not in result: result["metadata"] = {} - result['metadata']['gltf_extensions'] = header['extensions'] + result["metadata"]["gltf_extensions"] = header["extensions"] except BaseException: pass @@ -1767,7 +1784,9 @@ def _convert_camera(camera): "perspective": { "aspectRatio": camera.fov[0] / camera.fov[1], "yfov": np.radians(camera.fov[1]), - "znear": float(camera.z_near)}} + "znear": float(camera.z_near), + }, + } return result @@ -1793,18 +1812,18 @@ def _append_image(img, tree, buffer_items, extension_webp): None if image append failed for any reason """ # probably not a PIL image so exit - if not hasattr(img, 'format'): + if not hasattr(img, "format"): return None if extension_webp: # support WebP if extension is specified - save_as = 'WEBP' - elif img.format == 'JPEG': + save_as = "WEBP" + elif img.format == "JPEG": # don't re-encode JPEGs - save_as = 'JPEG' + save_as = "JPEG" else: # for everything else just use PNG - save_as = 'png' + save_as = "png" # get the image data into a bytes object with util.BytesIO() as f: @@ -1814,12 +1833,10 @@ def _append_image(img, tree, buffer_items, extension_webp): index = _buffer_append(buffer_items, data) # append buffer index and the GLTF-acceptable mimetype - tree['images'].append({ - 'bufferView': index, - 'mimeType': f'image/{save_as.lower()}'}) + tree["images"].append({"bufferView": index, "mimeType": f"image/{save_as.lower()}"}) # index is length minus one - return len(tree['images']) - 1 + return len(tree["images"]) - 1 def _append_material(mat, tree, buffer_items, mat_hashes, extension_webp): @@ -1857,7 +1874,7 @@ def _append_material(mat, tree, buffer_items, mat_hashes, extension_webp): return mat_hashes[hashed] # convert passed input to PBR if necessary - if hasattr(mat, 'to_pbr'): + if hasattr(mat, "to_pbr"): as_pbr = mat.to_pbr() else: as_pbr = mat @@ -1866,89 +1883,92 @@ def _append_material(mat, tree, buffer_items, mat_hashes, extension_webp): result = {"pbrMetallicRoughness": {}} try: # try to convert base color to (4,) float color - result['baseColorFactor'] = visual.color.to_float( - as_pbr.baseColorFactor).reshape(4).tolist() + result["baseColorFactor"] = ( + visual.color.to_float(as_pbr.baseColorFactor).reshape(4).tolist() + ) except BaseException: pass try: - result['emissiveFactor'] = as_pbr.emissiveFactor.reshape(3).tolist() + result["emissiveFactor"] = as_pbr.emissiveFactor.reshape(3).tolist() except BaseException: pass # if name is defined, export if isinstance(as_pbr.name, str): - result['name'] = as_pbr.name + result["name"] = as_pbr.name # if alphaMode is defined, export if isinstance(as_pbr.alphaMode, str): - result['alphaMode'] = as_pbr.alphaMode + result["alphaMode"] = as_pbr.alphaMode # if alphaCutoff is defined, export if isinstance(as_pbr.alphaCutoff, float): - result['alphaCutoff'] = as_pbr.alphaCutoff + result["alphaCutoff"] = as_pbr.alphaCutoff # if doubleSided is defined, export if isinstance(as_pbr.doubleSided, bool): - result['doubleSided'] = as_pbr.doubleSided + result["doubleSided"] = as_pbr.doubleSided # if scalars are defined correctly export if isinstance(as_pbr.metallicFactor, float): - result['metallicFactor'] = as_pbr.metallicFactor + result["metallicFactor"] = as_pbr.metallicFactor if isinstance(as_pbr.roughnessFactor, float): - result['roughnessFactor'] = as_pbr.roughnessFactor + result["roughnessFactor"] = as_pbr.roughnessFactor # which keys of the PBRMaterial are images image_mapping = { - 
'baseColorTexture': as_pbr.baseColorTexture, - 'emissiveTexture': as_pbr.emissiveTexture, - 'normalTexture': as_pbr.normalTexture, - 'occlusionTexture': as_pbr.occlusionTexture, - 'metallicRoughnessTexture': as_pbr.metallicRoughnessTexture} + "baseColorTexture": as_pbr.baseColorTexture, + "emissiveTexture": as_pbr.emissiveTexture, + "normalTexture": as_pbr.normalTexture, + "occlusionTexture": as_pbr.occlusionTexture, + "metallicRoughnessTexture": as_pbr.metallicRoughnessTexture, + } for key, img in image_mapping.items(): if img is None: continue # try adding the base image to the export object index = _append_image( - img=img, - tree=tree, - buffer_items=buffer_items, - extension_webp=extension_webp) + img=img, tree=tree, buffer_items=buffer_items, extension_webp=extension_webp + ) # if the image was added successfully it will return index # if it failed for any reason, it will return None if index is not None: # add a reference to the base color texture - result[key] = {'index': len(tree['textures'])} + result[key] = {"index": len(tree["textures"])} # add an object for the texture according to the WebP extension if extension_webp: - tree['textures'].append({'extensions': {'EXT_texture_webp': - {'source': index}}}) + tree["textures"].append( + {"extensions": {"EXT_texture_webp": {"source": index}}} + ) else: - tree['textures'].append({'source': index}) + tree["textures"].append({"source": index}) # for our PBRMaterial object we flatten all keys # however GLTF would like some of them under the # "pbrMetallicRoughness" key - pbr_subset = ['baseColorTexture', - 'baseColorFactor', - 'roughnessFactor', - 'metallicFactor', - 'metallicRoughnessTexture'] + pbr_subset = [ + "baseColorTexture", + "baseColorFactor", + "roughnessFactor", + "metallicFactor", + "metallicRoughnessTexture", + ] # move keys down a level for key in pbr_subset: if key in result: result["pbrMetallicRoughness"][key] = result.pop(key) # if we didn't have any PBR keys remove the empty key - if len(result['pbrMetallicRoughness']) == 0: - result.pop('pbrMetallicRoughness') + if len(result["pbrMetallicRoughness"]) == 0: + result.pop("pbrMetallicRoughness") # which index are we inserting material at - index = len(tree['materials']) + index = len(tree["materials"]) # add the material to the data structure - tree['materials'].append(result) + tree["materials"].append(result) # add the material index in-place mat_hashes[hashed] = index @@ -1974,6 +1994,7 @@ def validate(header): """ # a soft dependency import jsonschema + # will do the reference replacement schema = get_schema() # validate the passed header against the schema @@ -1997,15 +2018,13 @@ def get_schema(): from ..schemas import resolve # get a blob of a zip file including the GLTF 2.0 schema - blob = resources.get( - 'schema/gltf2.schema.zip', decode=False) + blob = resources.get("schema/gltf2.schema.zip", decode=False) # get the zip file as a dict keyed by file name - archive = util.decompress(util.wrap_as_stream(blob), 'zip') + archive = util.decompress(util.wrap_as_stream(blob), "zip") # get a resolver object for accessing the schema resolver = ZipResolver(archive) # get a loaded dict from the base file - unresolved = json.loads(util.decode_text( - resolver.get('glTF.schema.json'))) + unresolved = json.loads(util.decode_text(resolver.get("glTF.schema.json"))) # resolve `$ref` references to other files in the schema schema = resolve(unresolved, resolver=resolver) @@ -2013,5 +2032,4 @@ def get_schema(): # exporters -_gltf_loaders = {"glb": load_glb, - "gltf": load_gltf} 
+_gltf_loaders = {"glb": load_glb, "gltf": load_gltf} diff --git a/trimesh/util.py b/trimesh/util.py index 0da9aa344..443b46dc0 100644 --- a/trimesh/util.py +++ b/trimesh/util.py @@ -1482,7 +1482,7 @@ def concatenate(a, b=None): if any("vertex_normals" in m._cache for m in is_mesh): vertex_normals = vstack_empty([m.vertex_normals for m in is_mesh]) assert vertex_normals.shape == vertices.shape - + try: # concatenate visuals visual = is_mesh[0].visual.concatenate([m.visual for m in is_mesh[1:]]) From befc4c1047d707b3b054d4a9e76402c25ff353a9 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 18:25:30 -0400 Subject: [PATCH 80/84] partially revert #1895 --- tests/test_gltf.py | 21 ++++++++++++++------- trimesh/exchange/gltf.py | 11 ++++++----- trimesh/util.py | 2 +- 3 files changed, 21 insertions(+), 13 deletions(-) diff --git a/tests/test_gltf.py b/tests/test_gltf.py index da3491fca..5acb56ce0 100644 --- a/tests/test_gltf.py +++ b/tests/test_gltf.py @@ -6,7 +6,7 @@ # Khronos' official file validator # can be installed with the helper script: # `trimesh/docker/builds/gltf_validator.bash` -_gltf_validator = g.trimesh.util.which("gltf_validator") +_gltf_validator = g.shutil.which("gltf_validator") def validate_glb(data, name=None): @@ -33,16 +33,19 @@ def validate_glb(data, name=None): with g.tempfile.NamedTemporaryFile(suffix=".glb") as f: f.write(data) f.flush() - # run the khronos gltf-validator - report = g.subprocess.run([_gltf_validator, f.name, "-o"], capture_output=True) + + # gltf_validator has occasional bugs being run outside + # of the current working directory + temp_dir, file_name = g.os.path.split(f.name) + # run khronos gltf_validator + report = g.subprocess.run( + [_gltf_validator, file_name, "-o"], cwd=temp_dir, capture_output=True + ) # -o prints JSON to stdout content = report.stdout.decode("utf-8") returncode = report.returncode if returncode != 0: - from IPython import embed - - embed() g.log.error(f"failed on: `{name}`") g.log.error(f"validator: `{content}`") g.log.error(f"stderr: `{report.stderr}`") @@ -378,8 +381,12 @@ def test_gltf_pole(self): def test_material_primary_colors(self): primary_color_material = g.trimesh.visual.material.PBRMaterial() primary_color_material.baseColorFactor = (255, 0, 0, 255) - sphere = g.trimesh.primitives.Sphere() + sphere = g.trimesh.creation.icosphere() + sphere.visual = g.trimesh.visual.TextureVisuals(material=primary_color_material) sphere.visual.material = primary_color_material + # material will *not* export without uv coordinates to gltf + # as GLTF requires TEXCOORD_0 be defined if there is a material + sphere.visual.uv = g.np.zeros((len(sphere.vertices), 2)) scene = g.trimesh.Scene([sphere]) def to_integer(args): diff --git a/trimesh/exchange/gltf.py b/trimesh/exchange/gltf.py index 33aafdfd8..e06318990 100644 --- a/trimesh/exchange/gltf.py +++ b/trimesh/exchange/gltf.py @@ -739,10 +739,10 @@ def _append_mesh( name, tree, buffer_items, - include_normals, - unitize_normals, - mat_hashes, - extension_webp, + include_normals: bool, + unitize_normals: bool, + mat_hashes: dict, + extension_webp: bool, ): """ Append a mesh to the scene structure and put the @@ -883,8 +883,9 @@ def _append_mesh( ) # add the reference for UV coordinates current["primitives"][0]["attributes"]["TEXCOORD_0"] = acc_uv + # only reference the material if we had UV coordinates - current["primitives"][0]["material"] = current_material + current["primitives"][0]["material"] = current_material if include_normals or ( include_normals is 
None and "vertex_normals" in mesh._cache.cache diff --git a/trimesh/util.py b/trimesh/util.py index 443b46dc0..0da9aa344 100644 --- a/trimesh/util.py +++ b/trimesh/util.py @@ -1482,7 +1482,7 @@ def concatenate(a, b=None): if any("vertex_normals" in m._cache for m in is_mesh): vertex_normals = vstack_empty([m.vertex_normals for m in is_mesh]) assert vertex_normals.shape == vertices.shape - + try: # concatenate visuals visual = is_mesh[0].visual.concatenate([m.visual for m in is_mesh[1:]]) From 2ef582ad9582e490edb12dfdfb4ef88f8a559ff5 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 18:55:37 -0400 Subject: [PATCH 81/84] remove stub temporarydirectory --- tests/generic.py | 4 +--- trimesh/util.py | 21 --------------------- 2 files changed, 1 insertion(+), 24 deletions(-) diff --git a/tests/generic.py b/tests/generic.py index d766c66ab..53076a2d9 100644 --- a/tests/generic.py +++ b/tests/generic.py @@ -37,7 +37,7 @@ tf = trimesh.transformations - +TemporaryDirectory = tempfile.TemporaryDirectory # make a dummy profiler which does nothing class DummyProfiler(object): @@ -564,8 +564,6 @@ def wrapload(exported, file_type, **kwargs): ) -TemporaryDirectory = trimesh.util.TemporaryDirectory - # all the JSON files with truth data data = _load_data() diff --git a/trimesh/util.py b/trimesh/util.py index 0da9aa344..a2904829f 100644 --- a/trimesh/util.py +++ b/trimesh/util.py @@ -2286,27 +2286,6 @@ def __call__(self, key, *args, **kwargs): return self[key](*args, **kwargs) -class TemporaryDirectory: - """ - Same basic usage as tempfile.TemporaryDirectory - but functional in Python 2.7+. - - Example - --------- - ``` - with trimesh.util.TemporaryDirectory() as path: - writable = os.path.join(path, 'hi.txt') - ``` - """ - - def __enter__(self): - self.path = tempfile.mkdtemp() - return self.path - - def __exit__(self, *args, **kwargs): - shutil.rmtree(self.path) - - def decode_text(text, initial="utf-8"): """ Try to decode byte input as a string. 
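The stub removed above existed only for Python 2 compatibility; the standard library context manager is a drop-in replacement at every call site. A minimal sketch of the replacement pattern, assuming nothing beyond the public export API already used in these tests:

import os
from tempfile import TemporaryDirectory

import trimesh

with TemporaryDirectory() as path:
    # the directory and everything in it is
    # removed when the context manager exits
    file_path = os.path.join(path, "hi.gltf")
    trimesh.creation.box().export(file_path)
    reloaded = trimesh.load(file_path)
assert len(reloaded.geometry) == 1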
From 6210b25110d6c3eb06c696e1294fd326931635bd Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 18:55:57 -0400 Subject: [PATCH 82/84] remove unused import --- trimesh/util.py | 1 - 1 file changed, 1 deletion(-) diff --git a/trimesh/util.py b/trimesh/util.py index a2904829f..f252b7ad6 100644 --- a/trimesh/util.py +++ b/trimesh/util.py @@ -17,7 +17,6 @@ import logging import random import shutil -import tempfile import time import uuid import zipfile From ab6d61ed8d7490819d670ea8f6b3d65a273ede1c Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 19:07:40 -0400 Subject: [PATCH 83/84] use built-in temporarydirectory --- tests/test_gltf.py | 4 ++-- tests/test_obj.py | 4 ++-- trimesh/exchange/binvox.py | 3 ++- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/test_gltf.py b/tests/test_gltf.py index 5acb56ce0..0900f7b8e 100644 --- a/tests/test_gltf.py +++ b/tests/test_gltf.py @@ -927,7 +927,7 @@ def test_embed_buffer(self): } ) - with g.trimesh.util.TemporaryDirectory() as D: + with g.TemporaryDirectory() as D: path = g.os.path.join(D, "hi.gltf") scene.export(path) @@ -937,7 +937,7 @@ def test_embed_buffer(self): reloaded = g.trimesh.load(path) assert set(reloaded.geometry.keys()) == set(scene.geometry.keys()) - with g.trimesh.util.TemporaryDirectory() as D: + with g.TemporaryDirectory() as D: path = g.os.path.join(D, "hi.gltf") scene.export(path, embed_buffers=True) diff --git a/tests/test_obj.py b/tests/test_obj.py index cb5e940b4..2a0037ea6 100644 --- a/tests/test_obj.py +++ b/tests/test_obj.py @@ -180,7 +180,7 @@ def test_mtl(self): m = g.get_mesh("fuze.obj") # export the mesh including data obj, data = g.trimesh.exchange.export.export_obj(m, return_texture=True) - with g.trimesh.util.TemporaryDirectory() as path: + with g.TemporaryDirectory() as path: # where is the OBJ file going to be saved obj_path = g.os.path.join(path, "test.obj") with open(obj_path, "w") as f: @@ -308,7 +308,7 @@ def test_mtl_color_roundtrip(self): m.visual.material.diffuse = colors[2] m.visual.material.glossiness = 0.52622 - with g.trimesh.util.TemporaryDirectory() as d: + with g.TemporaryDirectory() as d: # exporting by filename will automatically # create a FilePathResolver which writes the # `mtl` file to the same directory diff --git a/trimesh/exchange/binvox.py b/trimesh/exchange/binvox.py index df5924113..8bcbd7b51 100644 --- a/trimesh/exchange/binvox.py +++ b/trimesh/exchange/binvox.py @@ -9,6 +9,7 @@ import collections import os import subprocess +from tempfile import TemporaryDirectory import numpy as np @@ -565,7 +566,7 @@ def voxelize_mesh(mesh, if binvoxer.file_type != 'binvox': raise ValueError( 'Only "binvox" binvoxer `file_type` currently supported') - with util.TemporaryDirectory() as folder: + with TemporaryDirectory() as folder: model_path = os.path.join(folder, 'model.%s' % export_type) with open(model_path, 'wb') as fp: mesh.export(fp, file_type=export_type) From 7c1c45d2e61ce5ab2d1fa43bf0edffa2b52306b5 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 20:24:16 -0400 Subject: [PATCH 84/84] exit temporary directory for windows --- tests/test_gltf.py | 4 + tests/test_graph.py | 112 ++++++++++++++------------- trimesh/graph.py | 180 +++++++++++++++++--------------------------- 3 files changed, 129 insertions(+), 167 deletions(-) diff --git a/tests/test_gltf.py b/tests/test_gltf.py index 0900f7b8e..3c8e07f9d 100644 --- a/tests/test_gltf.py +++ b/tests/test_gltf.py @@ -965,6 +965,7 @@ def 
test_webp(self): def test_relative_paths(self): # try with a relative path + cwd = g.os.path.abspath(g.os.path.expanduser(".")) with g.TemporaryDirectory() as d: g.os.makedirs(g.os.path.join(d, "fused")) g.os.chdir(d) @@ -972,6 +973,9 @@ def test_relative_paths(self): r = g.trimesh.load("fused/hi.gltf") assert g.np.isclose(r.volume, 1.0) + # avoid a windows file-access error + g.os.chdir(cwd) + with g.TemporaryDirectory() as d: # now try it without chaging to that directory full = g.os.path.join(d, "hi", "there", "different", "levels") diff --git a/tests/test_graph.py b/tests/test_graph.py index 402ab2f2c..a389e2383 100644 --- a/tests/test_graph.py +++ b/tests/test_graph.py @@ -5,13 +5,20 @@ class GraphTest(g.unittest.TestCase): - def setUp(self): - self.engines = ['scipy', 'networkx'] + self.engines = [] + try: + self.engines.append("scipy") + except BaseException: + pass + try: + self.engines.append("networkx") + except BaseException: + pass def test_soup(self): # a soup of random triangles, with no adjacent pairs - soup = g.get_mesh('soup.stl') + soup = g.get_mesh("soup.stl") assert len(soup.face_adjacency) == 0 assert len(soup.face_adjacency_radius) == 0 @@ -23,13 +30,13 @@ def test_soup(self): def test_components(self): # a soup of random triangles, with no adjacent pairs - soup = g.get_mesh('soup.stl') + soup = g.get_mesh("soup.stl") # a mesh with multiple watertight bodies - mult = g.get_mesh('cycloidal.ply') + mult = g.get_mesh("cycloidal.ply") # a mesh with a single watertight body - sing = g.get_mesh('featuretype.STL') + sing = g.get_mesh("featuretype.STL") # mesh with a single tetrahedron - tet = g.get_mesh('tet.ply') + tet = g.get_mesh("tet.ply") for engine in self.engines: # without requiring watertight the split should be into every face @@ -77,7 +84,7 @@ def test_vertex_adjacency_graph(self): f = g.trimesh.graph.vertex_adjacency_graph # a mesh with a single watertight body - sing = g.get_mesh('featuretype.STL') + sing = g.get_mesh("featuretype.STL") vert_adj_g = f(sing) assert len(sing.vertices) == len(vert_adj_g) @@ -89,36 +96,35 @@ def test_engine_time(self): g.trimesh.graph.facets(mesh=mesh, engine=engine) tic.append(g.time.time()) - tic_diff = g.np.diff(tic) - tic_min = tic_diff.min() - tic_diff /= tic_min - g.log.info('graph engine on %s (scale %f sec):\n%s', - mesh.metadata['file_name'], - tic_min, - str(g.np.column_stack((self.engines, - tic_diff)))) + diff = g.np.abs(g.np.diff(tic)) + if diff.min() > 0.0: + diff /= diff.min() + + g.log.info( + "graph engine on %s (scale %f sec):\n%s", + mesh.metadata["file_name"], + diff.min(), + str(g.np.column_stack((self.engines, diff))), + ) def test_smoothed(self): # Make sure smoothing is keeping the same number # of faces. 
- for name in ['ADIS16480.STL', 'featuretype.STL']: + for name in ["ADIS16480.STL", "featuretype.STL"]: mesh = g.get_mesh(name) assert len(mesh.faces) == len(mesh.smoothed().faces) def test_engines(self): edges = g.np.arange(10).reshape((-1, 2)) for i in range(0, 20): - check_engines(nodes=g.np.arange(i), - edges=edges) - edges = g.np.column_stack((g.np.arange(1, 11), - g.np.arange(0, 10))) + check_engines(nodes=g.np.arange(i), edges=edges) + edges = g.np.column_stack((g.np.arange(1, 11), g.np.arange(0, 10))) for i in range(0, 20): - check_engines(nodes=g.np.arange(i), - edges=edges) + check_engines(nodes=g.np.arange(i), edges=edges) def test_watertight(self): - m = g.get_mesh('shared.STL') # NOQA + m = g.get_mesh("shared.STL") # NOQA # assert m.is_watertight # assert m.is_winding_consistent # assert m.is_volume @@ -128,15 +134,12 @@ def test_traversals(self): # generate some simple test data simple_nodes = g.np.arange(20) - simple_edges = g.np.column_stack((simple_nodes[:-1], - simple_nodes[1:])) - simple_edges = g.np.vstack(( - simple_edges, - [[19, 0], - [10, 1000], - [500, 501]])).astype(g.np.int64) - - all_edges = g.data['edges'] + simple_edges = g.np.column_stack((simple_nodes[:-1], simple_nodes[1:])) + simple_edges = g.np.vstack( + (simple_edges, [[19, 0], [10, 1000], [500, 501]]) + ).astype(g.np.int64) + + all_edges = g.data["edges"] all_edges.append(simple_edges) for edges in all_edges: @@ -147,8 +150,8 @@ def test_traversals(self): nodes = g.np.unique(edges) # the basic BFS/DFS traversal - dfs_basic = g.trimesh.graph.traversals(edges, 'dfs') - bfs_basic = g.trimesh.graph.traversals(edges, 'bfs') + dfs_basic = g.trimesh.graph.traversals(edges, "dfs") + bfs_basic = g.trimesh.graph.traversals(edges, "bfs") # check return types assert all(i.dtype == g.np.int64 for i in dfs_basic) assert all(i.dtype == g.np.int64 for i in bfs_basic) @@ -169,15 +172,13 @@ def test_traversals(self): dfs = g.trimesh.graph.fill_traversals(traversal, edges) # edges that are included in the new separated traversal inc = g.trimesh.util.vstack_empty( - [g.np.column_stack((i[:-1], i[1:])) - for i in dfs]) + [g.np.column_stack((i[:-1], i[1:])) for i in dfs] + ) # make a set from edges included in the traversal - inc_set = set(g.trimesh.grouping.hashable_rows( - g.np.sort(inc, axis=1))) + inc_set = set(g.trimesh.grouping.hashable_rows(g.np.sort(inc, axis=1))) # make a set of the source edges we were supposed to include - edge_set = set(g.trimesh.grouping.hashable_rows( - g.np.sort(edges, axis=1))) + edge_set = set(g.trimesh.grouping.hashable_rows(g.np.sort(edges, axis=1))) # we should have exactly the same edges # after the filled traversal as we started with @@ -192,7 +193,7 @@ def test_traversals(self): def test_adjacency(self): for add_degen in [False, True]: - for name in ['featuretype.STL', 'soup.stl']: + for name in ["featuretype.STL", "soup.stl"]: m = g.get_mesh(name) if add_degen: # make the first face degenerate @@ -202,15 +203,13 @@ def test_adjacency(self): # check the various paths of calling face adjacency a = g.trimesh.graph.face_adjacency( - m.faces.view(g.np.ndarray).copy(), - return_edges=False) + m.faces.view(g.np.ndarray).copy(), return_edges=False + ) b, be = g.trimesh.graph.face_adjacency( - m.faces.view(g.np.ndarray).copy(), - return_edges=True) - c = g.trimesh.graph.face_adjacency( - mesh=m, return_edges=False) - c, ce = g.trimesh.graph.face_adjacency( - mesh=m, return_edges=True) + m.faces.view(g.np.ndarray).copy(), return_edges=True + ) + c = g.trimesh.graph.face_adjacency(mesh=m, 
return_edges=False)
+        c, ce = g.trimesh.graph.face_adjacency(mesh=m, return_edges=True)
         # make sure they all return the expected result
         assert g.np.allclose(a, b)
         assert g.np.allclose(a, c)
@@ -218,9 +217,9 @@ def test_adjacency(self):
         assert len(ce) == len(a)
 
         # package properties to loop through
-        zips = zip(m.face_adjacency,
-                   m.face_adjacency_edges,
-                   m.face_adjacency_unshared)
+        zips = zip(
+            m.face_adjacency, m.face_adjacency_edges, m.face_adjacency_unshared
+        )
         for a, e, v in zips:
             # get two adjacent faces as a set
             fa = set(m.faces[a[0]])
@@ -249,11 +248,10 @@ def check_engines(edges, nodes):
     returning the exact same values
     """
     results = []
-    engines = [None, 'scipy', 'networkx']
+    engines = [None, "scipy", "networkx"]
     for engine in engines:
-        c = g.trimesh.graph.connected_components(
-            edges, nodes=nodes, engine=engine)
+        c = g.trimesh.graph.connected_components(edges, nodes=nodes, engine=engine)
 
         if len(c) > 0:
             # check to see if every resulting component
             # was in the passed set of nodes
@@ -270,6 +268,6 @@ def check_engines(edges, nodes):
         raise E
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     g.trimesh.util.attach_to_log()
     g.unittest.main()
diff --git a/trimesh/graph.py b/trimesh/graph.py
index 6ce4f1652..a816402d6 100644
--- a/trimesh/graph.py
+++ b/trimesh/graph.py
@@ -31,9 +31,7 @@
     nx = exceptions.ExceptionWrapper(E)
 
 
-def face_adjacency(faces=None,
-                   mesh=None,
-                   return_edges=False):
+def face_adjacency(faces=None, mesh=None, return_edges=False):
     """
     Returns an (n, 2) list of face indices.
     Each pair of faces in the list shares an edge, making them adjacent.
@@ -89,7 +87,7 @@ def face_adjacency(faces=None,
     edge_groups = grouping.group_rows(edges, require_count=2)
 
     if len(edge_groups) == 0:
-        log.debug('No adjacent faces detected! Did you merge vertices?')
+        log.debug("No adjacent faces detected! Did you merge vertices?")
 
     # the pairs of all adjacent faces
     # so for every row in face_idx, self.faces[face_idx[*][0]] and
@@ -153,8 +151,7 @@ def face_adjacency_unshared(mesh):
 
     # the non-shared vertex index is the same shape
     # as face_adjacency holding vertex indices vs face indices
-    vid_unshared = np.zeros_like(mesh.face_adjacency,
-                                 dtype=np.int64) - 1
+    vid_unshared = np.zeros_like(mesh.face_adjacency, dtype=np.int64) - 1
     # get the shared edges between adjacent faces
     edges = mesh.face_adjacency_edges
 
@@ -164,9 +161,12 @@ def face_adjacency_unshared(mesh):
     faces = mesh.faces[fid]
     # should have one True per row of (3,)
     # index of vertex not included in shared edge
-    unshared = np.logical_not(np.logical_or(
-        faces == edges[:, 0].reshape((-1, 1)),
-        faces == edges[:, 1].reshape((-1, 1))))
+    unshared = np.logical_not(
+        np.logical_or(
+            faces == edges[:, 0].reshape((-1, 1)),
+            faces == edges[:, 1].reshape((-1, 1)),
+        )
+    )
     # each row should have exactly one uncontained vertex
     row_ok = unshared.sum(axis=1) == 1
     # any degenerate row should be ignored
@@ -199,27 +199,23 @@ def face_adjacency_radius(mesh):
     #          distance
     # R = ---------------
     #      2 * sin(theta)
-    nonzero = mesh.face_adjacency_angles > np.radians(.01)
-    denominator = np.abs(
-        2.0 * np.sin(mesh.face_adjacency_angles[nonzero]))
+    nonzero = mesh.face_adjacency_angles > np.radians(0.01)
+    denominator = np.abs(2.0 * np.sin(mesh.face_adjacency_angles[nonzero]))
 
     # consider the distance between the non-shared vertices of the
     # face adjacency pair as the key distance
     point_pairs = mesh.vertices[mesh.face_adjacency_unshared]
-    vectors = np.diff(point_pairs,
-                      axis=1).reshape((-1, 3))
+    vectors = np.diff(point_pairs, axis=1).reshape((-1, 3))
 
     # the vertex indices of the shared edge for the adjacency pair
     edges = mesh.face_adjacency_edges
     # unit vector along the shared edge
-    edges_vec = util.unitize(np.diff(mesh.vertices[edges],
-                                     axis=1).reshape((-1, 3)))
+    edges_vec = util.unitize(np.diff(mesh.vertices[edges], axis=1).reshape((-1, 3)))
 
     # the vector of the perpendicular projection to the shared edge
     perp = np.subtract(
-        vectors, (util.diagonal_dot(
-            vectors, edges_vec).reshape(
-            (-1, 1)) * edges_vec))
+        vectors, (util.diagonal_dot(vectors, edges_vec).reshape((-1, 1)) * edges_vec)
+    )
     # the length of the perpendicular projection
     span = util.row_norm(perp)
 
@@ -276,8 +272,7 @@ def shared_edges(faces_a, faces_b):
     """
     e_a = np.sort(faces_to_edges(faces_a), axis=1)
     e_b = np.sort(faces_to_edges(faces_b), axis=1)
-    shared = grouping.boolean_rows(
-        e_a, e_b, operation=np.intersect1d)
+    shared = grouping.boolean_rows(e_a, e_b, operation=np.intersect1d)
     return shared
 
 
@@ -314,15 +309,15 @@ def facets(mesh, engine=None):
     # if span is zero we know faces are small/parallel
     nonzero = np.abs(span) > tol.zero
     # faces with a radii/span ratio larger than a threshold pass
-    parallel[nonzero] = (radii[nonzero] /
-                         span[nonzero]) ** 2 > tol.facet_threshold
+    parallel[nonzero] = (radii[nonzero] / span[nonzero]) ** 2 > tol.facet_threshold
 
     # run connected components on the parallel faces to group them
     components = connected_components(
         mesh.face_adjacency[parallel],
         nodes=np.arange(len(mesh.faces)),
         min_len=2,
-        engine=engine)
+        engine=engine,
+    )
 
     return components
 
@@ -361,19 +356,13 @@ def split(mesh, only_watertight=True, adjacency=None, engine=None, **kwargs):
         min_len = 1
 
     components = connected_components(
-        edges=adjacency,
-        nodes=np.arange(len(mesh.faces)),
-        min_len=min_len,
-        engine=engine)
-    meshes = mesh.submesh(
-        components, only_watertight=only_watertight, **kwargs)
+        edges=adjacency, nodes=np.arange(len(mesh.faces)), min_len=min_len, engine=engine
+    )
+    meshes = mesh.submesh(components, only_watertight=only_watertight, **kwargs)
 
     return meshes
 
 
-def connected_components(edges,
-                         min_len=1,
-                         nodes=None,
-                         engine=None):
+def connected_components(edges, min_len=1, nodes=None, engine=None):
     """
     Find groups of connected nodes from an edge list.
@@ -395,6 +384,7 @@ def connected_components(edges,
     components : (n,) sequence of (*,) int
       Nodes which are connected
     """
+
     def components_networkx():
         """
         Find connected components using networkx
@@ -411,8 +401,7 @@ def components_csgraph():
         Find connected components using scipy.sparse.csgraph
         """
         # label each node
-        labels = connected_component_labels(edges,
-                                            node_count=node_count)
+        labels = connected_component_labels(edges, node_count=node_count)
 
         # we have to remove results that contain nodes outside
         # of the specified node set and reindex
@@ -440,7 +429,7 @@
         return []
 
     if not util.is_shape(edges, (-1, 2)):
-        raise ValueError('edges must be (n, 2)!')
+        raise ValueError("edges must be (n, 2)!")
 
     # find the maximum index referenced in either nodes or edges
     counts = [0]
@@ -457,9 +446,9 @@
     edges = edges[edges_ok]
 
     # networkx is pure python and is usually 5-10x slower than scipy
-    engines = collections.OrderedDict((
-        ('scipy', components_csgraph),
-        ('networkx', components_networkx)))
+    engines = collections.OrderedDict(
+        (("scipy", components_csgraph), ("networkx", components_networkx))
+    )
 
     # if a graph engine has explicitly been requested use it
     if engine in engines:
@@ -473,7 +462,7 @@
         # will be raised if the library didn't import correctly above
         except BaseException:
             continue
-    raise ImportError('no graph engines available!')
+    raise ImportError("no graph engines available!")
 
 
 def connected_component_labels(edges, node_count=None):
@@ -493,8 +482,7 @@
       Component labels for each node
     """
     matrix = edges_to_coo(edges, node_count)
-    body_count, labels = csgraph.connected_components(
-        matrix, directed=False)
+    body_count, labels = csgraph.connected_components(matrix, directed=False)
 
     if node_count is not None:
         assert len(labels) == node_count
@@ -502,9 +490,7 @@
     return labels
 
 
-def split_traversal(traversal,
-                    edges,
-                    edges_hash=None):
+def split_traversal(traversal, edges, edges_hash=None):
     """
     Given a traversal as a list of nodes, split the traversal
     if a sequential index pair is not in the given edges.
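For context while reading these hunks, a minimal usage sketch of the connected_components API being reformatted above; the edge values and the result shown are illustrative assumptions, not output recorded in this patch.

import numpy as np

import trimesh

# two chains that never touch: {0, 1, 2} and {5, 6}
edges = np.array([[0, 1], [1, 2], [5, 6]])

# engine=None tries scipy's csgraph first and falls back to
# pure-python networkx, mirroring the OrderedDict fallback above
components = trimesh.graph.connected_components(edges, min_len=1)
# expected: two groups, e.g. [array([0, 1, 2]), array([5, 6])]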
@@ -523,20 +509,16 @@ def split_traversal(traversal,
     ---------------
     split : sequence of (p,) int
     """
-    traversal = np.asanyarray(traversal,
-                              dtype=np.int64)
+    traversal = np.asanyarray(traversal, dtype=np.int64)
 
     # hash edge rows for contains checks
     if edges_hash is None:
-        edges_hash = grouping.hashable_rows(
-            np.sort(edges, axis=1))
+        edges_hash = grouping.hashable_rows(np.sort(edges, axis=1))
 
     # turn the (n,) traversal into (n-1, 2) edges
-    trav_edge = np.column_stack((traversal[:-1],
-                                 traversal[1:]))
+    trav_edge = np.column_stack((traversal[:-1], traversal[1:]))
     # hash each edge so we can compare to edge set
-    trav_hash = grouping.hashable_rows(
-        np.sort(trav_edge, axis=1))
+    trav_hash = grouping.hashable_rows(np.sort(trav_edge, axis=1))
     # check if each edge is contained in edge set
     contained = np.in1d(trav_hash, edges_hash)
 
@@ -546,14 +528,10 @@ def split_traversal(traversal,
         split = [traversal]
     else:
         # find contiguous groups of contained edges
-        blocks = grouping.blocks(contained,
-                                 min_len=1,
-                                 only_nonzero=True)
+        blocks = grouping.blocks(contained, min_len=1, only_nonzero=True)
 
         # turn edges back into a sequence of traversals
-        split = [np.append(trav_edge[b][:, 0],
-                           trav_edge[b[-1]][1])
-                 for b in blocks]
+        split = [np.append(trav_edge[b][:, 0], trav_edge[b[-1]][1]) for b in blocks]
 
     # close traversals if necessary
     for i, t in enumerate(split):
@@ -612,22 +590,17 @@ def fill_traversals(traversals, edges, edges_hash=None):
     for nodes in traversals:
         # split traversals to remove edges
        # that don't actually exist
-        splits.extend(split_traversal(
-            traversal=nodes,
-            edges=edges,
-            edges_hash=edges_hash))
+        splits.extend(
+            split_traversal(traversal=nodes, edges=edges, edges_hash=edges_hash)
+        )
     # turn the split traversals back into (n, 2) edges
-    included = util.vstack_empty([np.column_stack((i[:-1], i[1:]))
-                                  for i in splits])
+    included = util.vstack_empty([np.column_stack((i[:-1], i[1:])) for i in splits])
     if len(included) > 0:
         # sort included edges in place
         included.sort(axis=1)
         # make sure any edges not included in split traversals
         # are just added as a length 2 traversal
-        splits.extend(grouping.boolean_rows(
-            edges,
-            included,
-            operation=np.setdiff1d))
+        splits.extend(grouping.boolean_rows(edges, included, operation=np.setdiff1d))
     else:
         # no edges were included, so our filled traversal
         # is just the original edges copied over
@@ -636,7 +609,7 @@ def fill_traversals(traversals, edges, edges_hash=None):
     return splits
 
 
-def traversals(edges, mode='bfs'):
+def traversals(edges, mode="bfs"):
     """
     Given an edge list generate a sequence of ordered
     breadth or depth first search traversals using scipy.csgraph routines.
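A hedged sketch of the split_traversal behavior documented above, since the docstring is easiest to follow with concrete values; the edge list and result are illustrative, not from this patch.

import numpy as np

import trimesh

# the pair (2, 3) is not in the edge list, so a traversal
# passing through it has to be split at that point
edges = np.array([[0, 1], [1, 2], [3, 4]])
split = trimesh.graph.split_traversal(traversal=[0, 1, 2, 3, 4], edges=edges)
# expected: two pieces, e.g. [array([0, 1, 2]), array([3, 4])]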
@@ -657,16 +630,16 @@ def traversals(edges, mode='bfs'):
     if len(edges) == 0:
         return []
     elif not util.is_shape(edges, (-1, 2)):
-        raise ValueError('edges are not (n, 2)!')
+        raise ValueError("edges are not (n, 2)!")
 
     # pick the traversal method
     mode = str(mode).lower().strip()
-    if mode == 'bfs':
+    if mode == "bfs":
         func = csgraph.breadth_first_order
-    elif mode == 'dfs':
+    elif mode == "dfs":
         func = csgraph.depth_first_order
     else:
-        raise ValueError('traversal mode must be either dfs or bfs')
+        raise ValueError("traversal mode must be either dfs or bfs")
 
     # make sure edges are sorted so we can query
     # an ordered pair later
@@ -683,10 +656,9 @@ def traversals(edges, mode='bfs'):
         # starting at any node
         start = nodes.pop()
         # get an (n,) ordered traversal
-        ordered = func(graph,
-                       i_start=start,
-                       return_predecessors=False,
-                       directed=False).astype(np.int64)
+        ordered = func(
+            graph, i_start=start, return_predecessors=False, directed=False
+        ).astype(np.int64)
         traversals.append(ordered)
         # remove the nodes we've consumed
         nodes.difference_update(ordered)
@@ -717,9 +689,8 @@ def edges_to_coo(edges, count=None, data=None):
       Sparse COO
     """
     edges = np.asanyarray(edges, dtype=np.int64)
-    if not (len(edges) == 0 or
-            util.is_shape(edges, (-1, 2))):
-        raise ValueError('edges must be (n, 2)!')
+    if not (len(edges) == 0 or util.is_shape(edges, (-1, 2))):
+        raise ValueError("edges must be (n, 2)!")
 
     # if count isn't specified just set it to largest
     # value referenced in edges
@@ -732,9 +703,7 @@ def edges_to_coo(edges, count=None, data=None):
     if data is None:
         data = np.ones(len(edges), dtype=bool)
 
-    matrix = coo_matrix((data, edges.T),
-                        dtype=data.dtype,
-                        shape=(count, count))
+    matrix = coo_matrix((data, edges.T), dtype=data.dtype, shape=(count, count))
 
     return matrix
 
@@ -758,12 +727,12 @@ def neighbors(edges, max_index=None, directed=False):
     """
     neighbors = collections.defaultdict(set)
     if directed:
-        [neighbors[edge[0]].add(edge[1])
-         for edge in edges]
+        [neighbors[edge[0]].add(edge[1]) for edge in edges]
     else:
-        [(neighbors[edge[0]].add(edge[1]),
-          neighbors[edge[1]].add(edge[0]))
-         for edge in edges]
+        [
+            (neighbors[edge[0]].add(edge[1]), neighbors[edge[1]].add(edge[0]))
+            for edge in edges
+        ]
 
     if max_index is None:
         max_index = edges.max() + 1
@@ -817,26 +786,20 @@ def smoothed(mesh, angle=None, facet_minarea=10):
     try:
         # we can survive not knowing facets
         # exclude facets with few faces
-        facets = [f for f in mesh.facets
-                  if areas[f].sum() > min_area]
+        facets = [f for f in mesh.facets if areas[f].sum() > min_area]
         if len(facets) > 0:
             # mask for removing adjacency pairs where
             # one of the faces is contained in a facet
-            mask = np.ones(len(mesh.faces),
-                           dtype=bool)
+            mask = np.ones(len(mesh.faces), dtype=bool)
             mask[np.hstack(facets)] = False
             # apply the mask to adjacency
             adjacency = adjacency[mask[adjacency].all(axis=1)]
             # nodes are no longer every face
             nodes = np.unique(adjacency)
     except BaseException:
-        log.warning('failed to calculate facets',
-                    exc_info=True)
+        log.warning("failed to calculate facets", exc_info=True)
     # run connected components on facet adjacency
-    components = connected_components(
-        adjacency,
-        min_len=2,
-        nodes=nodes)
+    components = connected_components(adjacency, min_len=2, nodes=nodes)
 
     # add back coplanar groups if any exist
     if len(facets) > 0:
@@ -852,19 +815,16 @@ def smoothed(mesh, angle=None, facet_minarea=10):
     if len(unique) != len(mesh.faces):
         # things like single loose faces
         # or groups below facet_minlen
-        broke = np.setdiff1d(
-            np.arange(len(mesh.faces)), unique)
+        broke = np.setdiff1d(np.arange(len(mesh.faces)), unique)
         components.extend(broke.reshape((-1, 1)))
 
     # get a submesh as a single appended Trimesh
-    smooth = mesh.submesh(components,
-                          only_watertight=False,
-                          append=True)
+    smooth = mesh.submesh(components, only_watertight=False, append=True)
     # store face indices from original mesh
-    smooth.metadata['original_components'] = components
+    smooth.metadata["original_components"] = components
     # smoothed should have exactly the same number of faces
     if len(smooth.faces) != len(mesh.faces):
-        log.warning('face count in smooth wrong!')
+        log.warning("face count in smooth wrong!")
     return smooth
 
 
@@ -890,8 +850,7 @@ def is_watertight(edges, edges_sorted=None):
     edges_sorted = np.sort(edges, axis=1)
 
     # group sorted edges
-    groups = grouping.group_rows(
-        edges_sorted, require_count=2)
+    groups = grouping.group_rows(edges_sorted, require_count=2)
     watertight = bool((len(groups) * 2) == len(edges))
 
     # are opposing edges reversed
@@ -918,9 +877,10 @@ def graph_to_svg(graph):
     import subprocess
     import tempfile
+
     with tempfile.NamedTemporaryFile() as dot_file:
         nx.drawing.nx_agraph.write_dot(graph, dot_file.name)
-        svg = subprocess.check_output(['dot', dot_file.name, '-Tsvg'])
+        svg = subprocess.check_output(["dot", dot_file.name, "-Tsvg"])
     return svg
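To close out this file, a minimal sketch of the is_watertight helper reformatted above; the box primitive is just an assumed example of a closed, consistently wound mesh.

import trimesh

m = trimesh.creation.box()
# every edge of a closed mesh appears exactly twice, and the
# winding check confirms opposing edge pairs run in reverse order
watertight, winding = trimesh.graph.is_watertight(m.edges)
assert watertight and winding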