From 8064f2fe7f8defc2189fa540618a5002d3db9928 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Wed, 23 Aug 2023 16:23:33 -0400 Subject: [PATCH 001/144] ruff --- docs/conf.py | 3 +- docs/examples.py | 8 +-- examples/convexify.py | 3 +- examples/docker/render/render.py | 2 +- examples/nricp.py | 7 ++- examples/offscreen_render.py | 2 +- examples/outlined.py | 3 +- examples/ray.py | 3 +- examples/raytrace.py | 3 +- examples/scan_register.py | 3 +- examples/shortest.py | 4 +- examples/sinter.py | 8 ++- examples/viewcallback.py | 4 +- examples/voxel.py | 10 ++-- examples/voxel_silhouette.py | 6 ++- examples/widget.py | 7 ++- pyproject.toml | 26 +++++++--- tests/corpus.py | 5 +- tests/helpers/dxfhelp.py | 8 +-- tests/helpers/id_helper.py | 19 +++---- tests/notebooks.py | 18 +++---- tests/test_base.py | 6 +-- tests/test_binvox.py | 3 +- tests/test_bounds.py | 2 +- tests/test_cache.py | 23 ++++----- tests/test_convex.py | 2 +- tests/test_creation.py | 6 +-- tests/test_dxf.py | 2 +- tests/test_export.py | 6 +-- tests/test_gltf.py | 10 ++-- tests/test_graph.py | 2 +- tests/test_grouping.py | 8 +-- tests/test_identifier.py | 10 ++-- tests/test_inertia.py | 2 +- tests/test_integrate.py | 3 +- tests/test_mesh.py | 3 +- tests/test_minimal.py | 5 +- tests/test_obj.py | 8 +-- tests/test_pbr.py | 3 +- tests/test_permutate.py | 6 +-- tests/test_polygons.py | 5 +- tests/test_primitives.py | 4 +- tests/test_proximity.py | 2 +- tests/test_resolvers.py | 12 ++--- tests/test_runlength.py | 1 + tests/test_scene.py | 4 +- tests/test_scenegraph.py | 4 +- tests/test_simplify.py | 4 +- tests/test_texture.py | 6 +-- tests/test_transformations.py | 7 +-- tests/test_util.py | 8 +-- tests/test_vhacd.py | 2 +- tests/test_voxel.py | 2 +- trimesh/__init__.py | 29 ++++------- trimesh/base.py | 69 +++++++++++++------------ trimesh/bounds.py | 11 ++-- trimesh/caching.py | 11 ++-- trimesh/collision.py | 14 ++--- trimesh/comparison.py | 4 +- trimesh/constants.py | 6 +-- trimesh/convex.py | 5 +- trimesh/creation.py | 28 ++++------ trimesh/exceptions.py | 6 +-- trimesh/exchange/binvox.py | 24 ++++----- trimesh/exchange/dae.py | 14 +++-- trimesh/exchange/export.py | 19 ++++--- trimesh/exchange/gltf.py | 42 ++++++--------- trimesh/exchange/load.py | 44 +++++++--------- trimesh/exchange/obj.py | 42 +++++++-------- trimesh/exchange/off.py | 3 +- trimesh/exchange/openctm.py | 5 +- trimesh/exchange/ply.py | 34 +++++------- trimesh/exchange/stl.py | 7 ++- trimesh/exchange/threedxml.py | 10 ++-- trimesh/exchange/threemf.py | 28 +++------- trimesh/exchange/urdf.py | 35 ++++++------- trimesh/exchange/xaml.py | 9 ++-- trimesh/graph.py | 10 ++-- trimesh/grouping.py | 3 +- trimesh/interfaces/blender.py | 10 ++-- trimesh/interfaces/generic.py | 14 ++--- trimesh/interfaces/gmsh.py | 3 +- trimesh/interfaces/scad.py | 4 +- trimesh/interfaces/vhacd.py | 5 +- trimesh/intersections.py | 14 +++-- trimesh/nsphere.py | 4 +- trimesh/parent.py | 15 +++--- trimesh/path/arc.py | 5 +- trimesh/path/creation.py | 8 ++- trimesh/path/entities.py | 8 +-- trimesh/path/exchange/dxf.py | 27 ++++------ trimesh/path/exchange/export.py | 5 +- trimesh/path/exchange/load.py | 10 ++-- trimesh/path/exchange/misc.py | 7 +-- trimesh/path/exchange/svg_io.py | 40 ++++++-------- trimesh/path/intersections.py | 1 - trimesh/path/packing.py | 8 +-- trimesh/path/path.py | 48 +++++++---------- trimesh/path/polygons.py | 8 +-- trimesh/path/raster.py | 6 +-- trimesh/path/repair.py | 8 +-- trimesh/path/segments.py | 7 +-- trimesh/path/simplify.py | 14 ++--- 
trimesh/path/traversal.py | 6 +-- trimesh/path/util.py | 3 +- trimesh/permutate.py | 3 +- trimesh/points.py | 14 ++--- trimesh/primitives.py | 37 ++++++------- trimesh/proximity.py | 5 +- trimesh/ray/__init__.py | 2 +- trimesh/ray/ray_pyembree.py | 18 +++---- trimesh/ray/ray_triangle.py | 14 ++--- trimesh/ray/ray_util.py | 4 +- trimesh/registration.py | 12 ++--- trimesh/remesh.py | 9 ++-- trimesh/rendering.py | 4 +- trimesh/repair.py | 7 +-- trimesh/resolvers.py | 10 ++-- trimesh/resources/__init__.py | 4 +- trimesh/resources/javascript/compile.py | 8 +-- trimesh/sample.py | 6 +-- trimesh/scene/__init__.py | 1 - trimesh/scene/cameras.py | 7 ++- trimesh/scene/lighting.py | 12 ++--- trimesh/scene/scene.py | 29 ++++------- trimesh/scene/transforms.py | 22 ++++---- trimesh/smoothing.py | 4 +- trimesh/transformations.py | 7 ++- trimesh/triangles.py | 5 +- trimesh/units.py | 2 +- trimesh/util.py | 34 ++++++------ trimesh/version.py | 2 +- trimesh/viewer/__init__.py | 8 +-- trimesh/viewer/notebook.py | 5 +- trimesh/viewer/trackball.py | 2 +- trimesh/viewer/widget.py | 5 +- trimesh/viewer/windowed.py | 27 +++++----- trimesh/visual/base.py | 1 + trimesh/visual/color.py | 13 ++--- trimesh/visual/gloss.py | 2 +- trimesh/visual/material.py | 33 ++++++------ trimesh/visual/objects.py | 2 +- trimesh/visual/texture.py | 12 ++--- trimesh/voxel/base.py | 16 ++---- trimesh/voxel/creation.py | 7 +-- trimesh/voxel/encoding.py | 23 ++++----- trimesh/voxel/morphology.py | 6 +-- trimesh/voxel/ops.py | 1 + trimesh/voxel/runlength.py | 1 + trimesh/voxel/transforms.py | 6 +-- 150 files changed, 674 insertions(+), 901 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index e33ea433f..fd4cbbfdb 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,7 +1,6 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- -import os import inspect +import os def abspath(rel): diff --git a/docs/examples.py b/docs/examples.py index 4c511db88..a1e90570a 100644 --- a/docs/examples.py +++ b/docs/examples.py @@ -6,10 +6,10 @@ of `../examples/*.ipynb` """ -import os -import sys import json import logging +import os +import sys log = logging.getLogger('trimesh') log.addHandler(logging.StreamHandler(sys.stdout)) @@ -62,10 +62,10 @@ def extract_docstring(loaded): if not fn.lower().endswith('.ipynb'): continue path = os.path.join(source, fn) - with open(path, 'r') as f: + with open(path) as f: raw = json.load(f) doc = extract_docstring(raw) - log.info('`{}`: "{}"\n'.format(fn, doc)) + log.info(f'`{fn}`: "{doc}"\n') link = f'examples.{fn.split(".")[0]}.html' markdown.append(f'### [{fn}]({link})') diff --git a/examples/convexify.py b/examples/convexify.py index 484b14323..c1b675b40 100644 --- a/examples/convexify.py +++ b/examples/convexify.py @@ -8,9 +8,10 @@ Useful for generating collision models of an object. 
""" -import trimesh import numpy as np +import trimesh + if __name__ == '__main__': # attach to trimesh logs diff --git a/examples/docker/render/render.py b/examples/docker/render/render.py index 172ccba18..fed7a97ad 100644 --- a/examples/docker/render/render.py +++ b/examples/docker/render/render.py @@ -1,6 +1,6 @@ -import trimesh from pyglet import gl +import trimesh if __name__ == '__main__': # print logged messages diff --git a/examples/nricp.py b/examples/nricp.py index 53e98243f..e95e608fd 100644 --- a/examples/nricp.py +++ b/examples/nricp.py @@ -9,12 +9,11 @@ """ -import trimesh import numpy as np -from trimesh.registration import (nricp_amberg, - nricp_sumner, - procrustes) + +import trimesh from trimesh.proximity import closest_point +from trimesh.registration import nricp_amberg, nricp_sumner, procrustes from trimesh.triangles import points_to_barycentric diff --git a/examples/offscreen_render.py b/examples/offscreen_render.py index e7930db34..1c855ea5b 100644 --- a/examples/offscreen_render.py +++ b/examples/offscreen_render.py @@ -1,7 +1,7 @@ import numpy as np -import trimesh +import trimesh if __name__ == '__main__': # print logged messages diff --git a/examples/outlined.py b/examples/outlined.py index da0da16e6..79401f5f5 100644 --- a/examples/outlined.py +++ b/examples/outlined.py @@ -5,9 +5,10 @@ Show a mesh with edges highlighted using GL_LINES """ -import trimesh import numpy as np +import trimesh + if __name__ == '__main__': mesh = trimesh.load('../models/featuretype.STL') diff --git a/examples/ray.py b/examples/ray.py index c6bb60522..372e456ab 100644 --- a/examples/ray.py +++ b/examples/ray.py @@ -7,9 +7,10 @@ same API with a roughly 50x speedup. """ -import trimesh import numpy as np +import trimesh + if __name__ == '__main__': # test on a sphere mesh diff --git a/examples/raytrace.py b/examples/raytrace.py index becbcd7f6..daf193568 100644 --- a/examples/raytrace.py +++ b/examples/raytrace.py @@ -7,12 +7,11 @@ Install `pyembree` for a speedup (600k+ rays per second) """ -from __future__ import division +import numpy as np import PIL.Image import trimesh -import numpy as np if __name__ == '__main__': diff --git a/examples/scan_register.py b/examples/scan_register.py index 9b5600acc..613c7a222 100644 --- a/examples/scan_register.py +++ b/examples/scan_register.py @@ -6,9 +6,10 @@ it to a "truth" mesh. """ -import trimesh import numpy as np +import trimesh + def simulated_brick(face_count, extents, noise, max_iter=10): """ diff --git a/examples/shortest.py b/examples/shortest.py index cc82810f2..4b96b91ac 100644 --- a/examples/shortest.py +++ b/examples/shortest.py @@ -7,10 +7,10 @@ of the mesh. """ -import trimesh - import networkx as nx +import trimesh + if __name__ == '__main__': # test on a sphere mesh diff --git a/examples/sinter.py b/examples/sinter.py index 02af16fe0..006bd13ce 100644 --- a/examples/sinter.py +++ b/examples/sinter.py @@ -3,14 +3,12 @@ might for a powder volume in a sintered printing process. 
""" import os -import trimesh import numpy as np - -from trimesh.path import packing - from pyinstrument import Profiler +import trimesh +from trimesh.path import packing # path with our sample models models = os.path.abspath(os.path.join( @@ -65,7 +63,7 @@ def collect_meshes(count=None, max_size=20.0): # get some sample data meshes = collect_meshes(max_size=size) - log.debug('loaded {} meshes'.format(len(meshes))) + log.debug(f'loaded {len(meshes)} meshes') # place the meshes into the volume with Profiler() as P: diff --git a/examples/viewcallback.py b/examples/viewcallback.py index 80af678c3..82ccff58a 100644 --- a/examples/viewcallback.py +++ b/examples/viewcallback.py @@ -7,9 +7,11 @@ """ import time -import trimesh + import numpy as np +import trimesh + def sinwave(scene): """ diff --git a/examples/voxel.py b/examples/voxel.py index 7e3a4fe89..e85ac7f08 100644 --- a/examples/voxel.py +++ b/examples/voxel.py @@ -1,14 +1,12 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +import inspect import os + import numpy as np -import inspect -import trimesh -from trimesh.exchange.binvox import voxelize_mesh +import trimesh from trimesh import voxel as v +from trimesh.exchange.binvox import voxelize_mesh log = trimesh.util.log diff --git a/examples/voxel_silhouette.py b/examples/voxel_silhouette.py index fd8551574..fd76a8476 100644 --- a/examples/voxel_silhouette.py +++ b/examples/voxel_silhouette.py @@ -1,8 +1,10 @@ import os -import trimesh + import numpy as np from PIL import Image +import trimesh + def vis(): # separate function to delay plt import @@ -42,7 +44,7 @@ def vis(): closest = np.min(dists) farthest = np.max(dists) z = np.linspace(closest, farthest, resolution) - log.debug('z range: %f, %f' % (closest, farthest)) + log.debug(f'z range: {closest:f}, {farthest:f}') vox = mesh.voxelized(1. / resolution, method='binvox') diff --git a/examples/widget.py b/examples/widget.py index 2e4ddd8bc..08011e1d8 100644 --- a/examples/widget.py +++ b/examples/widget.py @@ -7,13 +7,12 @@ import glooey import numpy as np - +import PIL.Image import pyglet + import trimesh -import trimesh.viewer import trimesh.transformations as tf -import PIL.Image - +import trimesh.viewer here = pathlib.Path(__file__).resolve().parent diff --git a/pyproject.toml b/pyproject.toml index 7818fa9c6..f8cc629bb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,12 +3,26 @@ build-backend = "setuptools.build_meta" requires = ["setuptools >= 40.8", "wheel"] [tool.ruff] -select = ["E", "F", # the default rules - "T201", # disallow print statements - "B"] # pass bugbear -ignore = ["B905", # `zip()` without an explicit `strict=` - "B904", # `raise ... from err` seems a bit silly - "B018"] # useless expression ideally ignore only on `tests` +# See https://github.com/charliermarsh/ruff#rules for error code definitions. 
+select = [ + # "ANN", # annotations + "B", # bugbear + "C", # comprehensions + "E", # style errors + "F", # flakes + "I", # import sorting + "RUF100", # meta + "U", # upgrade + "W", # style warnings + "YTT", # sys.version +] +ignore = [ + "C901", # Comprehension is too complex (11 > 10) + "N802", # Function name should be lowercase + "N806", # Variable in function should be lowercase + "E501", # Line too long ({width} > {limit} characters) + "B905", # zip() without an explicit strict= parameter +] line-length = 90 [tool.autopep8] diff --git a/tests/corpus.py b/tests/corpus.py index f5d16acee..c4f1127ae 100644 --- a/tests/corpus.py +++ b/tests/corpus.py @@ -5,12 +5,11 @@ Test loaders against large corpuses of test data from github: will download more than a gigabyte to your home directory! """ -import trimesh -from trimesh.util import wrap_as_stream, log import numpy as np - from pyinstrument import Profiler +import trimesh +from trimesh.util import log, wrap_as_stream # get a set with available extension available = trimesh.available_formats() diff --git a/tests/helpers/dxfhelp.py b/tests/helpers/dxfhelp.py index c881c6cee..088331791 100644 --- a/tests/helpers/dxfhelp.py +++ b/tests/helpers/dxfhelp.py @@ -6,8 +6,9 @@ than strings inside a JSON blob """ -import os import json +import os + import numpy as np @@ -15,7 +16,7 @@ def get_json(file_name='../templates/dxf.json'): """ Load the JSON blob into native objects """ - with open(file_name, 'r') as f: + with open(file_name) as f: t = json.load(f) return t @@ -82,7 +83,7 @@ def read_files(path): # skip emacs buffers if '~' in file_name: continue - with open(os.path.join(path, file_name), 'r') as f: + with open(os.path.join(path, file_name)) as f: template[file_name] = replace_whitespace( f.read(), reformat=False, insert=True) @@ -92,6 +93,7 @@ def read_files(path): if __name__ == '__main__': import sys + import trimesh trimesh.util.attach_to_log() diff --git a/tests/helpers/id_helper.py b/tests/helpers/id_helper.py index 3d656d8b8..b33dbb5c4 100644 --- a/tests/helpers/id_helper.py +++ b/tests/helpers/id_helper.py @@ -10,16 +10,15 @@ changes. We use this to generate the arbitrary sigfig thresholds. 
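Nearly every hunk in this patch is a mechanical application of the rule groups selected in the pyproject.toml change above: the upgrade rules rewrite legacy idioms to modern Python, the comprehension rules prefer literal/comprehension forms, and the import-sorting rules reorder import blocks. A minimal sketch of the before/after spellings these rules produce — the variable names here are invented for illustration, not taken from the diff:

values = [3, 1, 2]

# the removed lines throughout this patch use the older spellings:
old_set = set([v * 2 for v in values])
old_msg = 'got {} values'.format(len(values))
old_map = dict()

# the added lines use set literals/comprehensions and f-strings:
new_set = {v * 2 for v in values}
new_msg = f'got {len(values)} values'
new_map = {}

# the rewrites are behavior-preserving
assert old_set == new_set and old_msg == new_msg and old_map == new_map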
""" -import trimesh -import numpy as np - - -import time +import collections import json +import logging import os +import time -import collections -import logging +import numpy as np + +import trimesh log = trimesh.util.log TOL_ZERO = 1e-12 @@ -74,9 +73,7 @@ def permutations(mesh, identifiers.append(identifier) if (time.time() - start) > cutoff: - log.debug('bailing for time:{} count:{}'.format( - time.time() - start, - i)) + log.debug(f'bailing for time:{time.time() - start} count:{i}') return np.array(identifiers) return np.array(identifiers) @@ -182,7 +179,7 @@ def data_stats(data): result.append({'mean': mean.tolist(), 'percent': percent.tolist()}) - log.debug('\n\n{}/{}'.format(i, len(meshes) - 1)) + log.debug(f'\n\n{i}/{len(meshes) - 1}') log.debug('mean', mean) log.debug('percent', percent) log.debug('oom', mean / percent) diff --git a/tests/notebooks.py b/tests/notebooks.py index c82b96365..fdf0a8717 100644 --- a/tests/notebooks.py +++ b/tests/notebooks.py @@ -1,9 +1,10 @@ -import os -import sys -import json import inspect +import json import logging +import os import subprocess +import sys + import numpy as np # current working directory @@ -166,7 +167,7 @@ def main(): file_name = sys.argv[sys.argv.index("exec") + 1].strip() # we want to skip some of these examples in CI if 'ci' in sys.argv and os.path.basename(file_name) in ci_blacklist: - log.debug('{} in CI blacklist: skipping!'.format(file_name)) + log.debug(f'{file_name} in CI blacklist: skipping!') return # skip files that don't exist @@ -175,23 +176,22 @@ def main(): if file_name.lower().endswith('.ipynb'): # ipython notebooks - with open(file_name, 'r') as file_obj: + with open(file_name) as file_obj: script = load_notebook(file_obj) elif file_name.lower().endswith('.py'): # regular python files - with open(file_name, 'r') as file_obj: + with open(file_name) as file_obj: script = exclude_calls(file_obj.read().split('\n')) else: # skip other types of files return - log.debug('running {}'.format(file_name)) + log.debug(f'running {file_name}') try: exec(script, globals()) except BaseException as E: log.debug( - 'failed {}!\n\nscript was:\n{}\n\n'.format( - file_name, script)) + f'failed {file_name}!\n\nscript was:\n{script}\n\n') raise E diff --git a/tests/test_base.py b/tests/test_base.py index 722daaee2..11ced68ce 100644 --- a/tests/test_base.py +++ b/tests/test_base.py @@ -73,7 +73,7 @@ def test_none(self): if method.startswith('_'): continue # a string expression to evaluate - expr = 'mesh.{}'.format(method) + expr = f'mesh.{method}' try: # get the value of that expression @@ -94,12 +94,12 @@ def test_none(self): if method.startswith('_') or method in blacklist: continue # a string expression to evaluate - expr = 'scene.{}'.format(method) + expr = f'scene.{method}' # get the value of that expression res = eval(expr) # shouldn't be None! 
if res is None: - raise ValueError('"{}" is None!!'.format(expr)) + raise ValueError(f'"{expr}" is None!!') if __name__ == '__main__': diff --git a/tests/test_binvox.py b/tests/test_binvox.py index 9c1077d1b..dc19b3392 100644 --- a/tests/test_binvox.py +++ b/tests/test_binvox.py @@ -4,9 +4,10 @@ import generic as g from io import BytesIO + +from trimesh import voxel as v from trimesh.exchange import binvox from trimesh.voxel import runlength as rl -from trimesh import voxel as v class BinvoxTest(g.unittest.TestCase): diff --git a/tests/test_bounds.py b/tests/test_bounds.py index fef025720..6e9b1b6c4 100644 --- a/tests/test_bounds.py +++ b/tests/test_bounds.py @@ -291,7 +291,7 @@ def test_obb_corpus(self): meshes = list(g.get_meshes(split=True, min_volume=min_volume, only_watertight=True)) - g.log.debug('loaded {} meshes'.format(len(meshes))) + g.log.debug(f'loaded {len(meshes)} meshes') if g.PY3: # our models corpus should have 200+ models diff --git a/tests/test_cache.py b/tests/test_cache.py index cb8cd629d..f4b97cdd4 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -20,8 +20,7 @@ def test_track(self): g.trimesh.caching.sha256] for option in options: - g.log.info('testing hash function: {}'.format( - option.__name__)) + g.log.info(f'testing hash function: {option.__name__}') g.trimesh.caching.hash_fast = option # generate test data and perform numpy operations @@ -108,8 +107,7 @@ def test_contiguous(self): g.trimesh.caching.sha256] for option in options: - g.log.info('testing hash function: {}'.format( - option.__name__)) + g.log.info(f'testing hash function: {option.__name__}') g.trimesh.caching.hash_fast = option # hashing will fail on non- contiguous arrays # make sure our utility function has handled this @@ -196,7 +194,9 @@ def test_method_combinations(self): return import itertools + import numpy as np + from trimesh.caching import tracked_array dim = (100, 3) @@ -218,7 +218,7 @@ def test_method_combinations(self): 'shape'] # start with no arguments - attempts = [tuple()] + attempts = [()] # add a single argument from our guesses attempts.extend([(A,) for A in flat]) # add 2 and 3 length permutations of our guesses @@ -228,9 +228,9 @@ def test_method_combinations(self): # are suspicious of a method caching you could uncomment this out: # attempts.extend([tuple(G) for G in itertools.permutations(flat, 3)]) - skip = set(['__array_ufunc__', # segfaulting when called with `(2.3, 1)` + skip = {'__array_ufunc__', # segfaulting when called with `(2.3, 1)` 'astype', - ]) + } # collect functions which mutate arrays but don't change our hash broken = [] @@ -241,14 +241,14 @@ def test_method_combinations(self): continue failures = [] - g.log.debug('hash check: `{}`'.format(method)) + g.log.debug(f'hash check: `{method}`') for A in attempts: m = g.random((100, 3)) true_pre = m.tobytes() m = tracked_array(m) hash_pre = hash(m) try: - eval('m.{method}(*A)'.format(method=method)) + eval(f'm.{method}(*A)') except BaseException as J: failures.append(str(J)) @@ -261,10 +261,9 @@ def test_method_combinations(self): broken.append((method, A)) if len(broken) > 0: - method_busted = set([method for method, _ in broken]) + method_busted = {method for method, _ in broken} raise ValueError( - '`TrackedArray` incorrectly hashing methods: {}'.format( - method_busted)) + f'`TrackedArray` incorrectly hashing methods: {method_busted}') def test_validate(self): # create a mesh with two duplicate triangles diff --git a/tests/test_convex.py b/tests/test_convex.py index 7bf530bec..33968233f 100644 --- 
a/tests/test_convex.py +++ b/tests/test_convex.py @@ -56,7 +56,7 @@ def test_convex(self): close_ok = ratio > .9 if not close_ok: - g.log.error('volume inconsistent: {}'.format(volume)) + g.log.error(f'volume inconsistent: {volume}') raise ValueError('volume is inconsistent on {}'.format( mesh.metadata['file_name'])) assert min(volume) > 0.0 diff --git a/tests/test_creation.py b/tests/test_creation.py index f6511fa5f..2781f4d23 100644 --- a/tests/test_creation.py +++ b/tests/test_creation.py @@ -126,8 +126,7 @@ def test_camera_marker(self): assert isinstance(meshes, list) # all meshes should be viewable type for mesh in meshes: - assert isinstance(mesh, (g.trimesh.Trimesh, - g.trimesh.path.Path3D)) + assert isinstance(mesh, g.trimesh.Trimesh | g.trimesh.path.Path3D) def test_axis(self): # specify the size of the origin radius @@ -290,8 +289,7 @@ def test_triangulate(self): g.log.error( 'failed to benchmark triangle', exc_info=True) g.log.info( - 'benchmarked triangulation on {} polygons: {}'.format( - len(bench), str(times))) + f'benchmarked triangulation on {len(bench)} polygons: {str(times)}') def test_triangulate_plumbing(self): """ diff --git a/tests/test_dxf.py b/tests/test_dxf.py index c63351052..dd10e1cbb 100644 --- a/tests/test_dxf.py +++ b/tests/test_dxf.py @@ -32,7 +32,7 @@ def test_dxf(self): # try using ezdxf as a simple validator # it raises exceptions aggressively if ezdxf is not None: - with open(temp_name, 'r') as f: + with open(temp_name) as f: ezdxf.read(f) # export to a string diff --git a/tests/test_export.py b/tests/test_export.py index 14ea1b093..3ffd72f44 100644 --- a/tests/test_export.py +++ b/tests/test_export.py @@ -12,9 +12,9 @@ def test_export(self): from trimesh.exceptions import ExceptionWrapper - export_types = set(k for k, v in + export_types = {k for k, v in g.trimesh.exchange.export._mesh_exporters.items() - if not isinstance(v, ExceptionWrapper)) + if not isinstance(v, ExceptionWrapper)} meshes = list(g.get_meshes(8)) # make sure we've got something with texture @@ -252,7 +252,7 @@ def test_parse_file_args(self): RET_COUNT = 5 # a path that doesn't exist - nonexists = '/banana{}'.format(g.random()) + nonexists = f'/banana{g.random()}' assert not g.os.path.exists(nonexists) # loadable OBJ model diff --git a/tests/test_gltf.py b/tests/test_gltf.py index 767fefe53..c11a0daa5 100644 --- a/tests/test_gltf.py +++ b/tests/test_gltf.py @@ -829,11 +829,11 @@ def test_primitive_geometry_meta(self): # Model with primitives s = g.get_mesh('CesiumMilkTruck.glb') # check to see if names are somewhat sane - assert set(s.geometry.keys()) == set([ + assert set(s.geometry.keys()) == { 'Cesium_Milk_Truck', 'Cesium_Milk_Truck_1', 'Cesium_Milk_Truck_2', - 'Wheels']) + 'Wheels'} # Assert that primitive geometries are marked as such assert s.geometry['Cesium_Milk_Truck'].metadata[ 'from_gltf_primitive'] @@ -850,8 +850,8 @@ def test_primitive_geometry_meta(self): m = g.get_mesh('CesiumMilkTruck.glb', merge_primitives=True) # names should be non-insane - assert set(m.geometry.keys()) == set([ - 'Cesium_Milk_Truck', 'Wheels']) + assert set(m.geometry.keys()) == { + 'Cesium_Milk_Truck', 'Wheels'} assert not s.geometry['Wheels'].metadata[ 'from_gltf_primitive'] assert s.geometry['Cesium_Milk_Truck'].metadata[ @@ -902,7 +902,7 @@ def test_bulk(self): if hasattr(geom, 'geometry') and len(geom.geometry) == 0: continue - g.log.info('Testing: {}'.format(fn)) + g.log.info(f'Testing: {fn}') # check a roundtrip which will validate on export # and crash on reload if we've done anything 
screwey # unitize normals will unitize any normals to comply with diff --git a/tests/test_graph.py b/tests/test_graph.py index 53545933d..402ab2f2c 100644 --- a/tests/test_graph.py +++ b/tests/test_graph.py @@ -260,7 +260,7 @@ def check_engines(edges, nodes): diff = g.np.setdiff1d(g.np.hstack(c), nodes) assert len(diff) == 0 # store the result as a set of tuples so we can compare - results.append(set([tuple(sorted(i)) for i in c])) + results.append({tuple(sorted(i)) for i in c}) # make sure different engines are returning the same thing try: diff --git a/tests/test_grouping.py b/tests/test_grouping.py index c23eddcc6..711d391a6 100644 --- a/tests/test_grouping.py +++ b/tests/test_grouping.py @@ -77,7 +77,7 @@ def test_blocks(self): a[2] = False result = blocks(a, min_len=1, only_nonzero=True) assert len(result) == 2 - assert set(result[0]) == set([1]) + assert set(result[0]) == {1} assert all(a[i].all() for i in result) def test_block_wrap(self): @@ -175,7 +175,7 @@ def test_block_wrap(self): 'only_nonzero': True} r = blocks(**kwargs) assert len(r) == 1 - assert set(r[0]) == set([0, 4]) + assert set(r[0]) == {0, 4} check_roll_wrap(**kwargs) def test_runs(self): @@ -338,8 +338,8 @@ def check_roll_wrap(**kwargs): g.np.roll(data, -i), **kwargs) # get result as a set of tuples with the rolling index # removed through a modulus, so we can compare equality - check = set([tuple(((j + i) % len(data)).tolist()) - for j in block]) + check = {tuple(((j + i) % len(data)).tolist()) + for j in block} if current is None: current = check # all values should be the same diff --git a/tests/test_identifier.py b/tests/test_identifier.py index 030e69b85..16ef93483 100644 --- a/tests/test_identifier.py +++ b/tests/test_identifier.py @@ -66,7 +66,7 @@ def test_scene_id(self): if not all(meshes[0].identifier_hash == i.identifier_hash for i in meshes): raise ValueError( - '{} differs after transform!'.format(geom_name)) + f'{geom_name} differs after transform!') # check an example for a mirrored part assert (scenes[0].geometry['disc_cam_B'].identifier_hash != @@ -110,10 +110,10 @@ def clean_name(name): # should be the same in both forms assert len(a) == len(b) - a_set = set([tuple(sorted([clean_name(i) for i in group])) - for group in a]) - b_set = set([tuple(sorted([clean_name(i) for i in group])) - for group in b]) + a_set = {tuple(sorted([clean_name(i) for i in group])) + for group in a} + b_set = {tuple(sorted([clean_name(i) for i in group])) + for group in b} assert a_set == b_set ptp = [] diff --git a/tests/test_inertia.py b/tests/test_inertia.py index 040b7ca5e..a5302db57 100644 --- a/tests/test_inertia.py +++ b/tests/test_inertia.py @@ -405,7 +405,7 @@ class MassTests(g.unittest.TestCase): def setUp(self): # inertia numbers pulled from solidworks self.truth = g.data['mass_properties'] - self.meshes = dict() + self.meshes = {} for data in self.truth: filename = data['filename'] self.meshes[filename] = g.get_mesh(filename) diff --git a/tests/test_integrate.py b/tests/test_integrate.py index ca677f143..d8d0222a8 100644 --- a/tests/test_integrate.py +++ b/tests/test_integrate.py @@ -8,8 +8,9 @@ class IntegrateTest(g.unittest.TestCase): def test_integrate(self): try: - from trimesh.integrate import symbolic_barycentric import sympy as sp + + from trimesh.integrate import symbolic_barycentric except BaseException: g.log.warning('no sympy', exc_info=True) return diff --git a/tests/test_mesh.py b/tests/test_mesh.py index 49cd0cca6..1ebb2790f 100644 --- a/tests/test_mesh.py +++ b/tests/test_mesh.py @@ -119,8 +119,7 
@@ def test_meshes(self): # nothing in the cache should be writeable if cached.flags['WRITEABLE']: - raise ValueError('{} is writeable!'.format( - name)) + raise ValueError(f'{name} is writeable!') # only check int, float, and bool if cached.dtype.kind not in 'ibf': diff --git a/tests/test_minimal.py b/tests/test_minimal.py index 9d0e43448..a3c38ddc5 100644 --- a/tests/test_minimal.py +++ b/tests/test_minimal.py @@ -7,11 +7,12 @@ """ import os - import unittest -import trimesh + import numpy as np +import trimesh + # the path of the current directory _pwd = os.path.dirname( os.path.abspath(os.path.expanduser(__file__))) diff --git a/tests/test_obj.py b/tests/test_obj.py index b57343b7c..90db07741 100644 --- a/tests/test_obj.py +++ b/tests/test_obj.py @@ -115,7 +115,7 @@ def test_obj_simple_order(self): # load a simple OBJ file without merging vertices m = g.trimesh.load(file_name, process=False) # use trivial loading to compare with fancy performant one - with open(file_name, 'r') as f: + with open(file_name) as f: f, v, vt = simple_load(f.read()) # trimesh loader should return the same face order assert g.np.allclose(f, m.faces) @@ -132,7 +132,7 @@ def test_order_tex(self): process=False, maintain_order=True) # use trivial loading to compare with fancy performant one - with open(file_name, 'r') as f: + with open(file_name) as f: f, v, vt = simple_load(f.read()) # trimesh loader should return the same face order assert g.np.allclose(f, m.faces) @@ -361,8 +361,8 @@ def test_scene_export_material_name(self): mtl = r['mystuff.mtl'].decode('utf-8') assert mtl.count('newmtl') == 1 - assert 'newmtl {}'.format(dummy) in mtl - assert '{}.jpeg'.format(dummy) in r + assert f'newmtl {dummy}' in mtl + assert f'{dummy}.jpeg' in r def test_compound_scene_export(self): diff --git a/tests/test_pbr.py b/tests/test_pbr.py index 89365aa02..721599420 100644 --- a/tests/test_pbr.py +++ b/tests/test_pbr.py @@ -1,8 +1,9 @@ import unittest -import trimesh import numpy as np +import trimesh + class PBRTest(unittest.TestCase): diff --git a/tests/test_permutate.py b/tests/test_permutate.py index 3ef3b341b..7c701599b 100644 --- a/tests/test_permutate.py +++ b/tests/test_permutate.py @@ -23,8 +23,7 @@ def make_assertions(mesh, test, rigid=False): mesh.face_adjacency) and len(mesh.faces) > MIN_FACES): g.log.error( - 'face_adjacency unchanged: {}'.format( - str(test.face_adjacency))) + f'face_adjacency unchanged: {str(test.face_adjacency)}') raise ValueError( 'face adjacency of %s the same after permutation!', mesh.metadata['file_name']) @@ -33,8 +32,7 @@ def make_assertions(mesh, test, rigid=False): mesh.face_adjacency_edges) and len(mesh.faces) > MIN_FACES): g.log.error( - 'face_adjacency_edges unchanged: {}'.format( - str(test.face_adjacency_edges))) + f'face_adjacency_edges unchanged: {str(test.face_adjacency_edges)}') raise ValueError( 'face adjacency edges of %s the same after permutation!', mesh.metadata['file_name']) diff --git a/tests/test_polygons.py b/tests/test_polygons.py index 808d9fb12..2e93102f0 100644 --- a/tests/test_polygons.py +++ b/tests/test_polygons.py @@ -187,11 +187,10 @@ def truth_corner(bh): h * b**3 / 3.0, 0.5 * b**2 * 0.5 * h**2], dtype=g.np.float64) - from trimesh.path.polygons import second_moments - from trimesh.path.polygons import transform_polygon - from shapely.geometry import Polygon + from trimesh.path.polygons import second_moments, transform_polygon + heights = g.np.array([[0.01, 0.01], [1, 1], [10, 2], diff --git a/tests/test_primitives.py b/tests/test_primitives.py index 
0fb4bdde2..9ed1001b5 100644 --- a/tests/test_primitives.py +++ b/tests/test_primitives.py @@ -171,8 +171,8 @@ def test_mesh_schema(self): def test_primitives(self): - kind = set([i.__class__.__name__ - for i in self.primitives]) + kind = {i.__class__.__name__ + for i in self.primitives} # make sure our test data has every primitive kinds = {'Box', 'Capsule', 'Cylinder', 'Sphere'} if has_triangle: diff --git a/tests/test_proximity.py b/tests/test_proximity.py index 63be516ae..32bc58bec 100644 --- a/tests/test_proximity.py +++ b/tests/test_proximity.py @@ -61,7 +61,7 @@ def test_nearest_naive(self): assert g.np.ptp(data_points, axis=0).max() < g.tol.merge assert g.np.ptp(data_dist, axis=0).max() < g.tol.merge - log_msg = '\n'.join("{}: {}s".format(i, j) + log_msg = '\n'.join(f"{i}: {j}s" for i, j in zip( [i.__name__ for i in funs], g.np.diff(tic))) diff --git a/tests/test_resolvers.py b/tests/test_resolvers.py index f10b5dba8..ba1255f47 100644 --- a/tests/test_resolvers.py +++ b/tests/test_resolvers.py @@ -18,7 +18,7 @@ def test_filepath_namespace(self): assert len(resolver.get('rabbit.obj')) > 0 # check a few file path keys - check = set(['ballA.off', 'featuretype.STL']) + check = {'ballA.off', 'featuretype.STL'} assert set(resolver.keys()).issuperset(check) # try a namespaced resolver @@ -58,22 +58,22 @@ def test_items(self): assert len(set(resolver.keys())) == 0 resolver['hi'] = b'what' # should have one item - assert set(resolver.keys()) == set(['hi']) + assert set(resolver.keys()) == {'hi'} # should have the right value assert resolver['hi'] == b'what' # original archive should have been modified - assert set(archive.keys()) == set(['hi']) + assert set(archive.keys()) == {'hi'} # add a subdirectory key resolver['stuff/nah'] = b'sup' - assert set(archive.keys()) == set(['hi', 'stuff/nah']) - assert set(resolver.keys()) == set(['hi', 'stuff/nah']) + assert set(archive.keys()) == {'hi', 'stuff/nah'} + assert set(resolver.keys()) == {'hi', 'stuff/nah'} # try namespacing ns = resolver.namespaced('stuff') assert ns['nah'] == b'sup' g.log.debug(ns.keys()) - assert set(ns.keys()) == set(['nah']) + assert set(ns.keys()) == {'nah'} if __name__ == '__main__': diff --git a/tests/test_runlength.py b/tests/test_runlength.py index 6a38bacd5..c63d7136c 100644 --- a/tests/test_runlength.py +++ b/tests/test_runlength.py @@ -3,6 +3,7 @@ except BaseException: import generic as g from trimesh.voxel import runlength as rl + np = g.np diff --git a/tests/test_scene.py b/tests/test_scene.py index 12029c3a9..448c6017f 100644 --- a/tests/test_scene.py +++ b/tests/test_scene.py @@ -367,8 +367,8 @@ def test_doubling(self): r.extents) # duplicate node groups should be twice as long - set_ori = set([len(i) * 2 for i in s.duplicate_nodes]) - set_dbl = set([len(i) for i in r.duplicate_nodes]) + set_ori = {len(i) * 2 for i in s.duplicate_nodes} + set_dbl = {len(i) for i in r.duplicate_nodes} assert set_ori == set_dbl diff --git a/tests/test_scenegraph.py b/tests/test_scenegraph.py index 038a3eaf6..6a35b4d37 100644 --- a/tests/test_scenegraph.py +++ b/tests/test_scenegraph.py @@ -67,8 +67,8 @@ def test_nodes(self): # get a scene graph graph = g.get_mesh('cycloidal.3DXML').graph # get any non-root node - node = next(iter((set(graph.nodes).difference( - [graph.base_frame])))) + node = next(iter(set(graph.nodes).difference( + [graph.base_frame]))) # remove that node graph.transforms.remove_node(node) # should have dumped the cache and removed the node diff --git a/tests/test_simplify.py b/tests/test_simplify.py index 
a237132dd..50aaf26fc 100644 --- a/tests/test_simplify.py +++ b/tests/test_simplify.py @@ -33,9 +33,7 @@ def polygon_simplify(self, polygon, arc_count): g.log.debug(new_count, arc_count) if arc_count > 1: - g.log.info('originally were {} arcs, simplify found {}'.format( - arc_count, - new_count)) + g.log.info(f'originally were {arc_count} arcs, simplify found {new_count}') assert new_count > 0 assert new_count <= arc_count diff --git a/tests/test_texture.py b/tests/test_texture.py index 71db2c471..f590978b4 100644 --- a/tests/test_texture.py +++ b/tests/test_texture.py @@ -33,7 +33,7 @@ def test_order_kwarg(self): for file_name in ['ico4.obj', 'ico4uv.obj']: # get the location of the model file file_path = g.get_path(file_name) - with open(file_path, 'r') as f: + with open(file_path) as f: # get the raw ordered vertices from the file with basic string # ops v_raw = g.np.array( @@ -183,8 +183,8 @@ def test_concatentate_multi(self): unique = vertex_c[g.trimesh.grouping.unique_rows(vertex_c)[0]] # roundtripped colors should be a superset of original colors - assert set(tuple(c) for c in unique).issuperset( - set(tuple(c) for c in colors)) + assert {tuple(c) for c in unique}.issuperset( + {tuple(c) for c in colors}) def test_to_tex(self): m = g.trimesh.creation.box() diff --git a/tests/test_transformations.py b/tests/test_transformations.py index a32fd9e26..672fc3954 100644 --- a/tests/test_transformations.py +++ b/tests/test_transformations.py @@ -16,9 +16,10 @@ def test_doctest(self): but it depends on numpy string formatting and is very flaky. """ - import trimesh - import random import doctest + import random + + import trimesh # make sure formatting is the same as their docstrings g.np.set_printoptions(suppress=True, precision=5) @@ -31,7 +32,7 @@ def test_doctest(self): results = doctest.testmod(trimesh.transformations, verbose=False, raise_on_error=True) - g.log.info('transformations {}'.format(str(results))) + g.log.info(f'transformations {str(results)}') def test_downstream(self): """ diff --git a/tests/test_util.py b/tests/test_util.py index 04cf63782..398dae83a 100644 --- a/tests/test_util.py +++ b/tests/test_util.py @@ -1,8 +1,10 @@ -import trimesh -import unittest import logging +import unittest + import numpy as np +import trimesh + try: from . import generic as g except BaseException: @@ -272,7 +274,7 @@ def test_io_wrap(self): assert res_s == test_s # check __enter__ and __exit__ - hi = 'hi'.encode('utf-8') + hi = b'hi' with util.BytesIO(hi) as f: assert f.read() == hi diff --git a/tests/test_vhacd.py b/tests/test_vhacd.py index a2e75bbb2..3a24a38c9 100644 --- a/tests/test_vhacd.py +++ b/tests/test_vhacd.py @@ -23,7 +23,7 @@ def test_vhacd(self): if len(decomposed) != 10: # it should return the correct number of meshes - raise ValueError('{} != 10'.format(len(decomposed))) + raise ValueError(f'{len(decomposed)} != 10') # make sure everything is convex # also this will fail if the type is returned incorrectly diff --git a/tests/test_voxel.py b/tests/test_voxel.py index 334e2ecc6..9ea20dbf4 100644 --- a/tests/test_voxel.py +++ b/tests/test_voxel.py @@ -282,7 +282,7 @@ def _test_equiv(self, v0, v1, query_points=None): `is_filled` are tested for consistency. 
""" def array_as_set(array2d): - return set(tuple(x) for x in array2d) + return {tuple(x) for x in array2d} # all points are filled assert g.np.all(v0.is_filled(v1.points)) diff --git a/trimesh/__init__.py b/trimesh/__init__.py index 67de65cdd..591a78225 100644 --- a/trimesh/__init__.py +++ b/trimesh/__init__.py @@ -9,35 +9,24 @@ """ # current version -from .version import __version__ +# avoid a circular import in trimesh.base +from . import bounds, collision, nsphere, primitives, smoothing, voxel # geometry objects from .base import Trimesh -from .points import PointCloud -from .scene.scene import Scene - -# utility functions -from .util import unitize -from .transformations import transform_points # general numeric tolerances from .constants import tol # loader functions -from .exchange.load import ( - load, - load_mesh, - load_path, - load_remote, - available_formats) +from .exchange.load import available_formats, load, load_mesh, load_path, load_remote +from .points import PointCloud +from .scene.scene import Scene +from .transformations import transform_points -# avoid a circular import in trimesh.base -from . import voxel -from . import bounds -from . import nsphere -from . import collision -from . import smoothing -from . import primitives +# utility functions +from .util import unitize +from .version import __version__ try: # handle vector paths diff --git a/trimesh/base.py b/trimesh/base.py index 3385cd025..41b25ce2c 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -5,42 +5,43 @@ Library for importing, exporting and doing simple operations on triangular meshes. """ -from . import ray -from . import util -from . import units -from . import poses -from . import graph -from . import sample -from . import repair -from . import convex -from . import remesh -from . import caching -from . import inertia -from . import boolean -from . import grouping -from . import geometry -from . import permutate -from . import proximity -from . import triangles -from . import curvature -from . import smoothing # noqa -from . import comparison -from . import registration -from . import decomposition -from . import intersections -from . import transformations - -from .visual import create_visual, TextureVisuals -from .exchange.export import export_mesh -from .constants import log, log_time, tol - -from .scene import Scene -from .parent import Geometry3D - import copy import warnings + import numpy as np +from . 
import ( + boolean, + caching, + comparison, + convex, + curvature, + decomposition, + geometry, + graph, + grouping, + inertia, + intersections, + permutate, + poses, + proximity, + ray, + registration, + remesh, + repair, + sample, + smoothing, # noqa + transformations, + triangles, + units, + util, +) +from .constants import log, log_time, tol +from .exchange.export import export_mesh +from .parent import Geometry3D +from .scene import Scene +from .visual import TextureVisuals, create_visual + class Trimesh(Geometry3D): @@ -162,7 +163,7 @@ def __init__(self, self.nearest = proximity.ProximityQuery(self) # store metadata about the mesh in a dictionary - self.metadata = dict() + self.metadata = {} # update the mesh metadata with passed metadata if isinstance(metadata, dict): self.metadata.update(metadata) @@ -2641,8 +2642,8 @@ def projected(self, projected : trimesh.path.Path2D Outline of source mesh """ - from .path import Path2D from .exchange.load import load_path + from .path import Path2D from .path.polygons import projected projection = projected( diff --git a/trimesh/bounds.py b/trimesh/bounds.py index c7bee0936..377a2aaa3 100644 --- a/trimesh/bounds.py +++ b/trimesh/bounds.py @@ -1,17 +1,12 @@ import numpy as np -from .constants import log, now -from . import util -from . import convex -from . import nsphere -from . import geometry -from . import grouping -from . import transformations +from . import convex, geometry, grouping, nsphere, transformations, util +from .constants import log, now try: # scipy is a soft dependency - from scipy.spatial import ConvexHull from scipy import optimize + from scipy.spatial import ConvexHull except BaseException as E: # raise the exception when someone tries to use it from . import exceptions diff --git a/trimesh/caching.py b/trimesh/caching.py index a821bab41..ce429507f 100644 --- a/trimesh/caching.py +++ b/trimesh/caching.py @@ -22,16 +22,17 @@ import os import time import warnings +from functools import wraps + import numpy as np -from functools import wraps from .constants import log from .util import is_sequence try: from collections.abc import Mapping except BaseException: - from collections import Mapping + from collections.abc import Mapping # sha256 is always available @@ -372,7 +373,7 @@ def __setslice__(self, *args, **kwargs): *args, **kwargs) -class Cache(object): +class Cache: """ Class to cache values which will be stored until the result of an ID function changes. @@ -524,7 +525,7 @@ def __exit__(self, *args): self.id_current = self._id_function() -class DiskCache(object): +class DiskCache: """ Store results of expensive operations on disk with an option to expire the results. This is used @@ -580,7 +581,7 @@ def get(self, key, fetch): with open(path, 'rb') as f: return f.read() - log.debug('not in cache fetching: `{}`'.format(key)) + log.debug(f'not in cache fetching: `{key}`') # since we made it here our data isn't cached # run the expensive function to fetch the file raw = fetch() diff --git a/trimesh/collision.py b/trimesh/collision.py index 243125f3f..fd546de8a 100644 --- a/trimesh/collision.py +++ b/trimesh/collision.py @@ -1,7 +1,7 @@ -import numpy as np - import collections +import numpy as np + try: # pip install python-fcl import fcl @@ -9,7 +9,7 @@ fcl = None -class ContactData(object): +class ContactData: """ Data structure for holding information about a collision contact. 
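The trimesh/caching.py changes above are purely stylistic, but the invariant they must preserve is worth keeping in mind while reviewing: a TrackedArray hashes its contents, so mutating it in place has to change the reported hash — exactly what tests/test_cache.py earlier in this patch exercises. A minimal sketch using the tracked_array helper:

import numpy as np
from trimesh.caching import tracked_array

# wrap a plain array so that its hash tracks the underlying bytes
a = tracked_array(np.zeros(3))
before = hash(a)

# an in-place mutation must be reflected in the hash, which is
# what lets Cache drop values whose inputs have changed
a[0] = 1.0
assert hash(a) != before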
""" @@ -88,7 +88,7 @@ def index(self, name): return self._inds[name] -class DistanceData(object): +class DistanceData: """ Data structure for holding information about a distance query. """ @@ -161,7 +161,7 @@ def point(self, name): return self._points[name] -class CollisionManager(object): +class CollisionManager: """ A mesh-mesh collision manager. """ @@ -245,7 +245,7 @@ def remove_object(self, name): # remove names self._names.pop(geom_id) else: - raise ValueError('{} not in collision manager!'.format(name)) + raise ValueError(f'{name} not in collision manager!') def set_transform(self, name, transform): """ @@ -265,7 +265,7 @@ def set_transform(self, name, transform): o.setTranslation(transform[:3, 3]) self._manager.update(o) else: - raise ValueError('{} not in collision manager!'.format(name)) + raise ValueError(f'{name} not in collision manager!') def in_collision_single(self, mesh, diff --git a/trimesh/comparison.py b/trimesh/comparison.py index 68fa1dc68..02ac50ebc 100644 --- a/trimesh/comparison.py +++ b/trimesh/comparison.py @@ -5,11 +5,11 @@ Provide methods for quickly hashing and comparing meshes. """ +from hashlib import sha256 + import numpy as np from . import util - -from hashlib import sha256 from .constants import tol # how many significant figures to use for each diff --git a/trimesh/constants.py b/trimesh/constants.py index 375289fb2..30bc2a0da 100644 --- a/trimesh/constants.py +++ b/trimesh/constants.py @@ -3,7 +3,7 @@ from .util import log, now -class ToleranceMesh(object): +class ToleranceMesh: """ ToleranceMesh objects hold tolerance information about meshes. @@ -41,7 +41,7 @@ def __init__(self, **kwargs): self.__dict__.update(kwargs) -class TolerancePath(object): +class TolerancePath: """ TolerancePath objects contain tolerance information used in Path objects. @@ -102,7 +102,7 @@ def __init__(self, **kwargs): self.__dict__.update(kwargs) -class ResolutionPath(object): +class ResolutionPath: """ res.seg_frac : float When discretizing curves, what percentage of the drawing diff --git a/trimesh/convex.py b/trimesh/convex.py index a5a20b64f..6e0ccc05d 100644 --- a/trimesh/convex.py +++ b/trimesh/convex.py @@ -11,12 +11,9 @@ import numpy as np +from . import triangles, util from .constants import tol -from . import util -from . import triangles - - try: from scipy.spatial import ConvexHull except ImportError as E: diff --git a/trimesh/creation.py b/trimesh/creation.py index 4c7657680..39a7965ff 100644 --- a/trimesh/creation.py +++ b/trimesh/creation.py @@ -5,22 +5,16 @@ Create meshes from primitives, or with operations. """ -from .base import Trimesh -from .constants import log, tol -from .geometry import (faces_to_edges, - align_vectors, - plane_transform) - -from . import util -from . import grouping -from . import triangles -from . import exceptions -from . import transformations as tf +import collections +import warnings import numpy as np -import warnings -import collections +from . import exceptions, grouping, triangles, util +from . 
import transformations as tf +from .base import Trimesh +from .constants import log, tol +from .geometry import align_vectors, faces_to_edges, plane_transform try: # shapely is a soft dependency @@ -632,7 +626,7 @@ def box(extents=None, transform=None, bounds=None, **kwargs): dtype=np.float64).reshape(-1, 3) if 'metadata' not in kwargs: - kwargs['metadata'] = dict() + kwargs['metadata'] = {} kwargs['metadata'].update( {'shape': 'box', 'extents': extents}) @@ -850,7 +844,7 @@ def cone(radius, [0, height]] # revolve the profile to create a cone if 'metadata' not in kwargs: - kwargs['metadata'] = dict() + kwargs['metadata'] = {} kwargs['metadata'].update( {'shape': 'cone', 'radius': radius, @@ -907,7 +901,7 @@ def cylinder(radius, [radius, half], [0, half]] if 'metadata' not in kwargs: - kwargs['metadata'] = dict() + kwargs['metadata'] = {} kwargs['metadata'].update( {'shape': 'cylinder', 'height': height, @@ -977,7 +971,7 @@ def annulus(r_min, [r_min, -half]] if 'metadata' not in kwargs: - kwargs['metadata'] = dict() + kwargs['metadata'] = {} kwargs['metadata'].update( {'shape': 'annulus', 'r_min': r_min, diff --git a/trimesh/exceptions.py b/trimesh/exceptions.py index 11916d40f..1e4500262 100644 --- a/trimesh/exceptions.py +++ b/trimesh/exceptions.py @@ -6,7 +6,7 @@ """ -class ExceptionWrapper(object): +class ExceptionWrapper: """ Create a dummy object which will raise an exception when attributes are accessed (i.e. when used as a module) or when called (i.e. @@ -27,8 +27,8 @@ def __getattribute__(self, *args, **kwargs): if args[0] == '__class__': return None.__class__ # otherwise raise our original exception - raise super(ExceptionWrapper, self).__getattribute__('exception') + raise super().__getattribute__('exception') def __call__(self, *args, **kwargs): # will raise when this object is called like a function - raise super(ExceptionWrapper, self).__getattribute__('exception') + raise super().__getattribute__('exception') diff --git a/trimesh/exchange/binvox.py b/trimesh/exchange/binvox.py index 16195dd73..54b9342fa 100644 --- a/trimesh/exchange/binvox.py +++ b/trimesh/exchange/binvox.py @@ -6,10 +6,11 @@ Exporting meshes as binvox files requires the `binvox` executable to be in your path. """ +import collections import os import subprocess + import numpy as np -import collections from .. import util from ..base import Trimesh @@ -54,7 +55,7 @@ def parse_binvox_header(fp): binvox = '#binvox' space = ' ' if not line.startswith(binvox): - raise IOError('Not a binvox file') + raise OSError('Not a binvox file') shape = tuple( int(s) for s in fp.readline().strip().split(space)[1:]) translate = tuple( @@ -182,11 +183,10 @@ def voxel_from_binvox( Loaded voxels """ # shape must be uniform else scale is ambiguous + from .. import transformations from ..voxel import encoding as enc from ..voxel.base import VoxelGrid - from .. import transformations - if isinstance(rle_data, enc.RunLengthEncoding): encoding = rle_data else: @@ -267,7 +267,7 @@ def export_binvox(voxel, axis_order='xzy'): Representation according to binvox spec """ translate = voxel.translation - scale = voxel.scale * ((np.array(voxel.shape) - 1)) + scale = voxel.scale * (np.array(voxel.shape) - 1) neg_scale, = np.where(scale < 0) encoding = voxel.encoding.flip(neg_scale) scale = np.abs(scale) @@ -283,7 +283,7 @@ def export_binvox(voxel, axis_order='xzy'): rle_data, shape=voxel.shape, translate=translate, scale=scale) -class Binvoxer(object): +class Binvoxer: """ Interface for binvox CL tool. 
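For context on the binvox hunks: a binvox file begins with a short ASCII header which parse_binvox_header (shown above) consumes before the run-length payload. A sketch with an invented minimal header, assuming the parser accepts a binary stream the way these loaders do:

from io import BytesIO
from trimesh.exchange.binvox import parse_binvox_header

# magic line, grid dimensions, translation, scale, then the 'data' line
header = b'#binvox 1\ndim 2 2 2\ntranslate 0 0 0\nscale 1\ndata\n'
shape, translate, scale = parse_binvox_header(BytesIO(header))
assert shape == (2, 2, 2)
assert scale == 1.0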
@@ -420,7 +420,7 @@ def __init__( encoder = binvox_path if encoder is None: - raise IOError(' '.join([ + raise OSError(' '.join([ 'No `binvox_path` provided and no binvox executable found', 'on PATH, please go to https://www.patrickmin.com/binvox/ and', 'download the appropriate version.'])) @@ -430,8 +430,7 @@ def __init__( 'Maximum dimension using exact is 1024, got %d' % dimension) if file_type not in Binvoxer.SUPPORTED_OUTPUT_TYPES: raise ValueError( - 'file_type %s not in set of supported output types %s' % - (file_type, str(Binvoxer.SUPPORTED_OUTPUT_TYPES))) + 'file_type {} not in set of supported output types {}'.format(file_type, str(Binvoxer.SUPPORTED_OUTPUT_TYPES))) args = [encoder, '-d', str(dimension), '-t', file_type] if exact: args.append('-e') @@ -516,11 +515,10 @@ def __call__(self, path, overwrite=False): ext = ext[1:].lower() if ext not in Binvoxer.SUPPORTED_INPUT_TYPES: raise ValueError( - 'file_type %s not in set of supported input types %s' % - (ext, str(Binvoxer.SUPPORTED_INPUT_TYPES))) - out_path = '%s.%s' % (head, self._file_type) + 'file_type {} not in set of supported input types {}'.format(ext, str(Binvoxer.SUPPORTED_INPUT_TYPES))) + out_path = f'{head}.{self._file_type}' if os.path.isfile(out_path) and not overwrite: - raise IOError('Attempted to voxelize object at existing path') + raise OSError('Attempted to voxelize object at existing path') self._args[-1] = path # generalizes to python2 and python3 diff --git a/trimesh/exchange/dae.py b/trimesh/exchange/dae.py index 3831c9e45..95356bdb9 100644 --- a/trimesh/exchange/dae.py +++ b/trimesh/exchange/dae.py @@ -1,14 +1,12 @@ -import io import copy +import io import uuid import numpy as np -from .. import util -from .. import visual - -from ..util import unique_name +from .. import util, visual from ..constants import log +from ..util import unique_name _EYE = np.eye(4) _EYE.flags.writeable = False @@ -101,7 +99,7 @@ def export_collada(mesh, **kwargs): import collada meshes = mesh - if not isinstance(mesh, (list, tuple, set, np.ndarray)): + if not isinstance(mesh, list | tuple | set | np.ndarray): meshes = [mesh] c = collada.Collada() @@ -148,14 +146,14 @@ def export_collada(mesh, **kwargs): ) indices = np.repeat(m.faces.flatten(), len(arrays)) - matref = u'material{}'.format(i) + matref = f'material{i}' triset = geom.createTriangleSet(indices, input_list, matref) geom.primitives.append(triset) c.geometries.append(geom) matnode = collada.scene.MaterialNode(matref, mat, inputs=[]) geomnode = collada.scene.GeometryNode(geom, [matnode]) - node = collada.scene.Node(u'node{}'.format(i), children=[geomnode]) + node = collada.scene.Node(f'node{i}', children=[geomnode]) nodes.append(node) scene = collada.scene.Scene('scene', nodes) c.scenes.append(scene) diff --git a/trimesh/exchange/export.py b/trimesh/exchange/export.py index b4fd127fd..d65bd2da0 100644 --- a/trimesh/exchange/export.py +++ b/trimesh/exchange/export.py @@ -1,20 +1,19 @@ -import os import json +import os + import numpy as np +from .. import resolvers, util from ..constants import log -from .. import util -from .. 
import resolvers - -from .urdf import export_urdf # NOQA +from .dae import _collada_exporters from .gltf import export_glb, export_gltf from .obj import export_obj from .off import _off_exporters -from .stl import export_stl, export_stl_ascii from .ply import _ply_exporters -from .dae import _collada_exporters -from .xyz import _xyz_exporters +from .stl import export_stl, export_stl_ascii from .threemf import _3mf_exporters +from .urdf import export_urdf # NOQA +from .xyz import _xyz_exporters def export_mesh(mesh, @@ -68,7 +67,7 @@ def export_mesh(mesh, if file_type not in _mesh_exporters: raise ValueError('%s exporter not available!', file_type) - if isinstance(mesh, (list, tuple, set, np.ndarray)): + if isinstance(mesh, list | tuple | set | np.ndarray): faces = 0 for m in mesh: faces += len(m.faces) @@ -283,7 +282,7 @@ def export_scene(scene, data = _mesh_exporters['3mf'](scene, **kwargs) else: raise ValueError( - 'unsupported export format: {}'.format(file_type)) + f'unsupported export format: {file_type}') # now write the data or return bytes of result if isinstance(data, dict): diff --git a/trimesh/exchange/gltf.py b/trimesh/exchange/gltf.py index 822989db8..4c155510b 100644 --- a/trimesh/exchange/gltf.py +++ b/trimesh/exchange/gltf.py @@ -6,22 +6,16 @@ as GL_TRIANGLES, and trimesh.Path2D/Path3D as GL_LINES """ -import json import base64 import collections +import json import numpy as np -from .. import util -from .. import visual -from .. import rendering -from .. import resources -from .. import transformations - -from ..util import unique_name +from .. import rendering, resources, transformations, util, visual from ..caching import hash_fast from ..constants import log, tol - +from ..util import unique_name from ..visual.gloss import specular_to_pbr # magic numbers which have meaning in GLTF @@ -125,7 +119,7 @@ def export_gltf(scene, base64_buffer_format = "data:application/octet-stream;base64,{}" if merge_buffers: views = _build_views(buffer_items) - buffer_data = bytes().join(buffer_items.values()) + buffer_data = b"".join(buffer_items.values()) if embed_buffers: buffer_name = base64_buffer_format.format( base64.b64encode(buffer_data).decode()) @@ -148,7 +142,7 @@ def export_gltf(scene, buffer_name = base64_buffer_format.format( base64.b64encode(item).decode()) else: - buffer_name = "gltf_buffer_{}.bin".format(i) + buffer_name = f"gltf_buffer_{i}.bin" files[buffer_name] = item buffers[i] = {"uri": buffer_name, "byteLength": len(item)} @@ -211,7 +205,7 @@ def export_glb( views = _build_views(buffer_items) # combine bytes into a single blob - buffer_data = bytes().join(buffer_items.values()) + buffer_data = b"".join(buffer_items.values()) # add the information about the buffer data if len(buffer_data) > 0: @@ -247,7 +241,7 @@ def export_glb( np.array([len(buffer_data), 0x004E4942], dtype="{} skipping!'.format( - len(split))) + f'face needs more values 3>{len(split)} skipping!') continue # f is like: '76/558/76' @@ -557,7 +553,7 @@ def _parse_vertices(text): # up to the location of out our first vertex but we # are going to use this check for "do we have texture" # determination later so search the whole stupid file - starts = {k: text.find('\n{} '.format(k)) for k in + starts = {k: text.find(f'\n{k} ') for k in ['v', 'vt', 'vn']} # no valid values so exit early @@ -566,7 +562,7 @@ def _parse_vertices(text): # find the last position of each valid value ends = {k: text.find( - '\n', text.rfind('\n{} '.format(k)) + 2 + len(k)) + '\n', text.rfind(f'\n{k} ') + 2 + len(k)) for k, v in 
starts.items() if v >= 0} # take the first and last position of any vertex property @@ -577,7 +573,7 @@ def _parse_vertices(text): # get the clean-ish data from the file as python lists data = {k: [i.split('\n', 1)[0] - for i in chunk.split('\n{} '.format(k))[1:]] + for i in chunk.split(f'\n{k} ')[1:]] for k, v in starts.items() if v >= 0} # count the number of data values per row on a sample row @@ -909,7 +905,7 @@ def export_obj(mesh, # add the uv coordinates export.append('vt ' + converted) # add the directive to use the exported material - export.appendleft('usemtl {}'.format(tex_name)) + export.appendleft(f'usemtl {tex_name}') except BaseException: log.debug('failed to convert UV coordinates', exc_info=True) @@ -950,7 +946,7 @@ def export_obj(mesh, # things like images mtl_data[file_name] = file_data else: - log.warning('not writing {}'.format(file_name)) + log.warning(f'not writing {file_name}') if mtl_name is None: # if no name passed set a default @@ -958,18 +954,18 @@ def export_obj(mesh, # prepend a header to the MTL text if requested if header is not None: - prepend = '# {}\n\n'.format(header).encode('utf-8') + prepend = f'# {header}\n\n'.encode() else: prepend = b'' # save the material data mtl_data[mtl_name] = prepend + b'\n\n'.join(mtl_lib) # add the reference to the MTL file - objects.appendleft('mtllib {}'.format(mtl_name)) + objects.appendleft(f'mtllib {mtl_name}') if header is not None: # add a created-with header to the top of the file - objects.appendleft('# {}'.format(header)) + objects.appendleft(f'# {header}') # combine elements into a single string text = '\n'.join(objects) diff --git a/trimesh/exchange/off.py b/trimesh/exchange/off.py index d9a625513..cf8bfb2c6 100644 --- a/trimesh/exchange/off.py +++ b/trimesh/exchange/off.py @@ -1,4 +1,5 @@ import re + import numpy as np from .. import util @@ -29,7 +30,7 @@ def load_off(file_obj, **kwargs): _, header, raw = re.split('(COFF|OFF)', text, maxsplit=1) if header.upper() not in ['OFF', 'COFF']: raise NameError( - 'Not an OFF file! Header was: `{}`'.format(header)) + f'Not an OFF file! Header was: `{header}`') # split into lines and remove whitespace splits = [i.strip() for i in str.splitlines(str(raw))] diff --git a/trimesh/exchange/openctm.py b/trimesh/exchange/openctm.py index 5ccc5c3a2..323833fb1 100644 --- a/trimesh/exchange/openctm.py +++ b/trimesh/exchange/openctm.py @@ -28,11 +28,12 @@ # distribution. # ------------------------------------------------------------------------------ -import os import ctypes import ctypes.util +import os import numpy as np + _ctm_loaders = {} try: @@ -129,7 +130,7 @@ def load_ctm(file_obj, file_type=None, **kwargs): err = ctmGetError(ctm) if err != CTM_NONE: - raise IOError("Error loading file: " + str(ctmErrorString(err))) + raise OSError("Error loading file: " + str(ctmErrorString(err))) # get vertices vertex_count = ctmGetInteger(ctm, CTM_VERTEX_COUNT) diff --git a/trimesh/exchange/ply.py b/trimesh/exchange/ply.py index f450a7503..a6d11779a 100644 --- a/trimesh/exchange/ply.py +++ b/trimesh/exchange/ply.py @@ -1,18 +1,13 @@ -import numpy as np - -from string import Template - -import tempfile -import subprocess import collections +import subprocess +import tempfile +from string import Template -from .. import util -from .. import visual -from .. import grouping -from .. import resources +import numpy as np -from ..geometry import triangulate_quads +from .. 
import grouping, resources, util, visual from ..constants import log +from ..geometry import triangulate_quads # from ply specification, and additional dtypes found in the wild _dtypes = { @@ -149,7 +144,7 @@ def _add_attributes_to_dtype(dtype, attributes): else: attribute_dtype = data.dtype if len( data.dtype) == 0 else data.dtype[0] - dtype.append(('{}_count'.format(name), 'u1')) + dtype.append((f'{name}_count', 'u1')) dtype.append( (name, _numpy_type_to_ply_type(attribute_dtype), data.shape[1])) return dtype @@ -174,12 +169,10 @@ def _add_attributes_to_header(header, attributes): for name, data in attributes.items(): if data.ndim == 1: header.append( - 'property {} {}\n'.format( - _numpy_type_to_ply_type(data.dtype), name)) + f'property {_numpy_type_to_ply_type(data.dtype)} {name}\n') else: header.append( - 'property list uchar {} {}\n'.format( - _numpy_type_to_ply_type(data.dtype), name)) + f'property list uchar {_numpy_type_to_ply_type(data.dtype)} {name}\n') return header @@ -201,8 +194,7 @@ def _add_attributes_to_data_array(data_array, attributes): """ for name, data in attributes.items(): if data.ndim > 1: - data_array['{}_count'.format( - name)] = data.shape[1] * np.ones(data.shape[0]) + data_array[f'{name}_count'] = data.shape[1] * np.ones(data.shape[0]) data_array[name] = data return data_array @@ -487,7 +479,7 @@ def _elements_to_kwargs(elements, # return empty geometry if there are no vertices kwargs['geometry'] = {} return kwargs - + try: vertex_normals = np.column_stack([elements['vertex']['data'][j] for j in ('nx', 'ny', 'nz')]) @@ -495,7 +487,7 @@ def _elements_to_kwargs(elements, kwargs['vertex_normals'] = vertex_normals except BaseException: pass - + if 'face' in elements and elements['face']['length']: face_data = elements['face']['data'] else: @@ -876,7 +868,7 @@ def populate_data(file_obj, elements): elements[key]['data'] = np.frombuffer( data, dtype=dtype) except BaseException: - log.warning('PLY failed to populate: {}'.format(key)) + log.warning(f'PLY failed to populate: {key}') elements[key]['data'] = None return elements diff --git a/trimesh/exchange/stl.py b/trimesh/exchange/stl.py index 77dc58dc5..ff08b757b 100644 --- a/trimesh/exchange/stl.py +++ b/trimesh/exchange/stl.py @@ -1,7 +1,7 @@ -from .. import util - import numpy as np +from .. import util + class HeaderError(Exception): # the exception raised if an STL file object doesn't match its header @@ -114,8 +114,7 @@ def load_stl_binary(file_obj): # so it's much better to raise an exception here. if len_data != len_expected: raise HeaderError( - 'Binary STL has incorrect length in header: {} vs {}'.format( - len_data, len_expected)) + f'Binary STL has incorrect length in header: {len_data} vs {len_expected}') blob = np.frombuffer(file_obj.read(), dtype=_stl_dtype) diff --git a/trimesh/exchange/threedxml.py b/trimesh/exchange/threedxml.py index d9fe5b060..b47f516bd 100644 --- a/trimesh/exchange/threedxml.py +++ b/trimesh/exchange/threedxml.py @@ -16,8 +16,8 @@ from ..exceptions import ExceptionWrapper Image = ExceptionWrapper(E) -import json import collections +import json from .. 
import util from ..visual.texture import TextureVisuals @@ -136,7 +136,7 @@ def get_rgba(color): references[reference_id]['color'] = rgba # geometries will hold meshes - geometries = dict() + geometries = {} # get geometry for ReferenceRep in tree.iter(tag='{*}ReferenceRep'): @@ -147,7 +147,7 @@ def get_rgba(color): # the format of the geometry file part_format = ReferenceRep.attrib['format'] if part_format not in ('TESSELLATED', ): - util.log.warning('ReferenceRep %r unsupported format %r' % ( + util.log.warning('ReferenceRep {!r} unsupported format {!r}'.format( part_file, part_format)) continue @@ -246,7 +246,7 @@ def get_rgba(color): # save each mesh as the kwargs for a trimesh.Trimesh constructor # aka, a Trimesh object can be created with trimesh.Trimesh(**mesh) # this avoids needing trimesh- specific imports in this IO function - mesh = dict() + mesh = {} (mesh['vertices'], mesh['faces']) = util.append_faces(mesh_vertices, mesh_faces) @@ -395,8 +395,8 @@ def print_element(element): try: # soft dependencies - from lxml import etree import networkx as nx + from lxml import etree _threedxml_loaders = {'3dxml': load_3DXML} except BaseException as E: # set loader to exception wrapper diff --git a/trimesh/exchange/threemf.py b/trimesh/exchange/threemf.py index ee8d89bf4..69dc19490 100644 --- a/trimesh/exchange/threemf.py +++ b/trimesh/exchange/threemf.py @@ -1,14 +1,11 @@ +import collections import io -import sys import uuid import zipfile -import collections import numpy as np -from .. import util -from .. import graph - +from .. import graph, util from ..constants import log @@ -161,7 +158,7 @@ def load_3MF(file_obj, last = path[-1][0] # if someone included an undefined component, skip it if last not in id_name: - log.debug('id {} included but not defined!'.format(last)) + log.debug(f'id {last} included but not defined!') continue # frame names unique name = id_name[last] + util.unique_id() @@ -232,11 +229,6 @@ def export_3MF(mesh, Represents geometry as a 3MF file. 
""" - if sys.version_info < (3, 6): - # Python only added 'w' mode to `zipfile` in Python 3.6 - # and it is not worth the effort to work around - raise NotImplementedError( - "3MF export requires Python >= 3.6") from ..scene.scene import Scene if not isinstance(mesh, Scene): @@ -274,8 +266,7 @@ def model_id(x): # specify the parameters for the zip container zip_kwargs = {'compression': compression} # compresslevel was added in Python 3.7 - if sys.version_info >= (3, 7): - zip_kwargs['compresslevel'] = compresslevel + zip_kwargs['compresslevel'] = compresslevel with zipfile.ZipFile(file_obj, mode='w', **zip_kwargs) as z: # 3dmodel.model @@ -458,17 +449,10 @@ def _attrib_to_transform(attrib): # do import here to keep lxml a soft dependency try: - from lxml import etree import networkx as nx + from lxml import etree _three_loaders = {'3mf': load_3MF} - if sys.version_info < (3, 6): - # Python only added 'w' mode to `zipfile` in Python 3.6 - # and it is not worth the effort to work around - from ..exceptions import ExceptionWrapper - _3mf_exporters = {'3mf': ExceptionWrapper( - NotImplementedError("3MF export requires Python >= 3.6"))} - else: - _3mf_exporters = {'3mf': export_3MF} + _3mf_exporters = {'3mf': export_3MF} except BaseException as E: from ..exceptions import ExceptionWrapper _three_loaders = {'3mf': ExceptionWrapper(E)} diff --git a/trimesh/exchange/urdf.py b/trimesh/exchange/urdf.py index 81d52ef75..b60072201 100644 --- a/trimesh/exchange/urdf.py +++ b/trimesh/exchange/urdf.py @@ -31,9 +31,11 @@ def export_urdf(mesh, """ import lxml.etree as et + + from ..resources import get + # TODO: fix circular import from .export import export_mesh - from ..resources import get # Extract the save directory and the file name fullpath = os.path.abspath(directory) @@ -71,8 +73,8 @@ def export_urdf(mesh, for i, piece in enumerate(convex_pieces): # Save each nearly convex mesh out to a file - piece_name = '{}_convex_piece_{}'.format(name, i) - piece_filename = '{}.obj'.format(piece_name) + piece_name = f'{name}_convex_piece_{i}' + piece_filename = f'{piece_name}.obj' piece_filepath = os.path.join(fullpath, piece_filename) export_mesh(piece, piece_filepath) @@ -80,8 +82,8 @@ def export_urdf(mesh, piece.center_mass = mesh.center_mass piece.density = effective_density * mesh.density - link_name = 'link_{}'.format(piece_name) - geom_name = '{}'.format(piece_filename) + link_name = f'link_{piece_name}' + geom_name = f'{piece_filename}' I = [['{:.2E}'.format(y) for y in x] # NOQA for x in piece.moment_inertia] @@ -91,7 +93,7 @@ def export_urdf(mesh, # Inertial information inertial = et.SubElement(link, 'inertial') et.SubElement(inertial, 'origin', xyz="0 0 0", rpy="0 0 0") - et.SubElement(inertial, 'mass', value='{:.2E}'.format(piece.mass)) + et.SubElement(inertial, 'mass', value=f'{piece.mass:.2E}') et.SubElement( inertial, 'inertia', @@ -106,29 +108,23 @@ def export_urdf(mesh, et.SubElement(visual, 'origin', xyz="0 0 0", rpy="0 0 0") geometry = et.SubElement(visual, 'geometry') et.SubElement(geometry, 'mesh', filename=geom_name, - scale="{:.4E} {:.4E} {:.4E}".format(scale, - scale, - scale)) + scale=f"{scale:.4E} {scale:.4E} {scale:.4E}") material = et.SubElement(visual, 'material', name='') if color is not None: et.SubElement(material, 'color', - rgba="{:.2E} {:.2E} {:.2E} 1".format(color[0], - color[1], - color[2])) + rgba=f"{color[0]:.2E} {color[1]:.2E} {color[2]:.2E} 1") # Collision Information collision = et.SubElement(link, 'collision') et.SubElement(collision, 'origin', xyz="0 0 0", rpy="0 0 
0") geometry = et.SubElement(collision, 'geometry') et.SubElement(geometry, 'mesh', filename=geom_name, - scale="{:.4E} {:.4E} {:.4E}".format(scale, - scale, - scale)) + scale=f"{scale:.4E} {scale:.4E} {scale:.4E}") # Create rigid joint to previous link if prev_link_name is not None: - joint_name = '{}_joint'.format(link_name) + joint_name = f'{link_name}_joint' joint = et.SubElement(root, 'joint', name=joint_name, @@ -141,7 +137,7 @@ def export_urdf(mesh, # Write URDF file tree = et.ElementTree(root) - urdf_filename = '{}.urdf'.format(name) + urdf_filename = f'{name}.urdf' tree.write(os.path.join(fullpath, urdf_filename), pretty_print=True) @@ -152,11 +148,10 @@ def export_urdf(mesh, version = et.SubElement(root, 'version') version.text = '1.0' sdf = et.SubElement(root, 'sdf', version='1.4') - sdf.text = '{}.urdf'.format(name) + sdf.text = f'{name}.urdf' author = et.SubElement(root, 'author') - et.SubElement(author, 'name').text = 'trimesh {}'.format( - __version__) + et.SubElement(author, 'name').text = f'trimesh {__version__}' et.SubElement(author, 'email').text = 'blank@blank.blank' description = et.SubElement(root, 'description') diff --git a/trimesh/exchange/xaml.py b/trimesh/exchange/xaml.py index 8db0aef79..be7e21633 100644 --- a/trimesh/exchange/xaml.py +++ b/trimesh/exchange/xaml.py @@ -4,13 +4,12 @@ Load 3D XAMl files, an export option from Solidworks. """ -import numpy as np - import collections -from .. import util -from .. import visual +import numpy as np + from .. import transformations as tf +from .. import util, visual def load_XAML(file_obj, *args, **kwargs): @@ -139,7 +138,7 @@ def element_to_transform(element): normals.append(c_normals) # compile the results into clean numpy arrays - result = dict() + result = {} result['vertices'], result['faces'] = util.append_faces(vertices, faces) result['face_colors'] = np.vstack(colors) diff --git a/trimesh/graph.py b/trimesh/graph.py index 093d7da4c..6ce4f1652 100644 --- a/trimesh/graph.py +++ b/trimesh/graph.py @@ -8,18 +8,16 @@ Currently uses networkx or scipy.sparse.csgraph backend. """ -import numpy as np import collections -from . import util -from . import grouping -from . import exceptions +import numpy as np +from . import exceptions, grouping, util from .constants import log, tol from .geometry import faces_to_edges try: - from scipy.sparse import csgraph, coo_matrix + from scipy.sparse import coo_matrix, csgraph except BaseException as E: # re-raise exception when used csgraph = exceptions.ExceptionWrapper(E) @@ -918,8 +916,8 @@ def graph_to_svg(graph): svg: string, pictoral layout in SVG format """ - import tempfile import subprocess + import tempfile with tempfile.NamedTemporaryFile() as dot_file: nx.drawing.nx_agraph.write_dot(graph, dot_file.name) svg = subprocess.check_output(['dot', dot_file.name, '-Tsvg']) diff --git a/trimesh/grouping.py b/trimesh/grouping.py index ea634d969..d9d21b91f 100644 --- a/trimesh/grouping.py +++ b/trimesh/grouping.py @@ -8,7 +8,6 @@ import numpy as np from . import util - from .constants import log, tol try: @@ -557,7 +556,7 @@ def group_dict(): The loop and appends make this rather slow on large arrays but it works on irregular groups. 
""" - observed = dict() + observed = {} hashable = hashable_rows(data, digits=digits) for index, key in enumerate(hashable): key_string = key.tobytes() diff --git a/trimesh/interfaces/blender.py b/trimesh/interfaces/blender.py index b0448554d..e640c3d14 100644 --- a/trimesh/interfaces/blender.py +++ b/trimesh/interfaces/blender.py @@ -1,12 +1,10 @@ -from .. import util -from .. import resources - -from .generic import MeshScript -from ..constants import log - import os import platform +from .. import resources, util +from ..constants import log +from .generic import MeshScript + _search_path = os.environ.get('PATH', '') if platform.system() == 'Windows': # try to find Blender install on Windows diff --git a/trimesh/interfaces/generic.py b/trimesh/interfaces/generic.py index 3b2a26794..21785643b 100644 --- a/trimesh/interfaces/generic.py +++ b/trimesh/interfaces/generic.py @@ -1,10 +1,9 @@ import os import platform import subprocess - from string import Template +from subprocess import CalledProcessError, check_output from tempfile import NamedTemporaryFile -from subprocess import check_output, CalledProcessError from .. import exchange from ..util import log @@ -33,14 +32,12 @@ def __enter__(self): digit_count = len(str(len(self.meshes))) self.mesh_pre = [ NamedTemporaryFile( - suffix='.{}'.format( - self.exchange), - prefix='{}_'.format(str(i).zfill(digit_count)), + suffix=f'.{self.exchange}', + prefix=f'{str(i).zfill(digit_count)}_', mode='wb', delete=False) for i in range(len(self.meshes))] self.mesh_post = NamedTemporaryFile( - suffix='.{}'.format( - self.exchange), + suffix=f'.{self.exchange}', mode='rb', delete=False) self.script_out = NamedTemporaryFile( @@ -102,8 +99,7 @@ def run(self, command): def __exit__(self, *args, **kwargs): if self.debug: - log.info('MeshScript.debug: not deleting {}'.format( - self.script_out.name)) + log.info(f'MeshScript.debug: not deleting {self.script_out.name}') return # delete all the temporary files by name # they are closed but their names are still available diff --git a/trimesh/interfaces/gmsh.py b/trimesh/interfaces/gmsh.py index ebabcee0c..c36674ec1 100644 --- a/trimesh/interfaces/gmsh.py +++ b/trimesh/interfaces/gmsh.py @@ -32,10 +32,11 @@ def load_gmsh(file_name, gmsh_args=None): Surface mesh of input geometry """ # use STL as an intermediate format - from ..exchange.stl import load_stl # do import here to avoid very occasional segfaults import gmsh + from ..exchange.stl import load_stl + # start with default args for the meshing step # Mesh.Algorithm=2 MeshAdapt/Delaunay, there are others but they may include quads # With this planes are meshed using Delaunay and cylinders are meshed diff --git a/trimesh/interfaces/scad.py b/trimesh/interfaces/scad.py index 372f9b8f6..2400fdb91 100644 --- a/trimesh/interfaces/scad.py +++ b/trimesh/interfaces/scad.py @@ -2,9 +2,9 @@ import platform from subprocess import CalledProcessError +from ..constants import log from ..util import which from .generic import MeshScript -from ..constants import log # start the search with the user's PATH _search_path = os.environ.get('PATH', '') @@ -48,7 +48,7 @@ def interface_scad(meshes, script, debug=False, **kwargs): # OFF is a simple text format that references vertices by-index # making it slightly preferable to STL for this kind of exchange duty try: - with MeshScript(meshes=meshes, script=script, + with MeshScript(meshes=meshes, script=script, debug=debug, exchange='off') as scad: result = scad.run(_scad_executable + ' $SCRIPT -o $MESH_POST') except 
CalledProcessError as e: diff --git a/trimesh/interfaces/vhacd.py b/trimesh/interfaces/vhacd.py index 2cbdb21c0..5c7a15549 100644 --- a/trimesh/interfaces/vhacd.py +++ b/trimesh/interfaces/vhacd.py @@ -1,9 +1,9 @@ import os import platform -from .generic import MeshScript from ..constants import log from ..util import which +from .generic import MeshScript _search_path = os.environ.get("PATH", "") @@ -49,8 +49,7 @@ def convex_decomposition(mesh, debug=False, **kwargs): # pass through extra arguments from the input dictionary for key, value in kwargs.items(): - argstring += ' --{} {}'.format(str(key), - str(value)) + argstring += f' --{str(key)} {str(value)}' with MeshScript(meshes=[mesh], script='', diff --git a/trimesh/intersections.py b/trimesh/intersections.py index 2784e50c0..b3bb5f24c 100644 --- a/trimesh/intersections.py +++ b/trimesh/intersections.py @@ -6,12 +6,9 @@ """ import numpy as np -from . import util -from . import geometry -from . import grouping -from . import triangles as tm +from . import geometry, grouping, util from . import transformations as tf - +from . import triangles as tm from .constants import tol from .triangles import points_to_barycentric @@ -716,11 +713,12 @@ def slice_mesh_plane(mesh, return None # avoid circular import - from .base import Trimesh - from .visual import TextureVisuals - from .path import polygons from scipy.spatial import cKDTree + + from .base import Trimesh from .creation import triangulate_polygon + from .path import polygons + from .visual import TextureVisuals # check input plane plane_normal = np.asanyarray( diff --git a/trimesh/nsphere.py b/trimesh/nsphere.py index 3532ac27f..1a760a6c3 100644 --- a/trimesh/nsphere.py +++ b/trimesh/nsphere.py @@ -7,9 +7,7 @@ """ import numpy as np -from . import util -from . import convex - +from . import convex, util from .constants import log, tol try: diff --git a/trimesh/parent.py b/trimesh/parent.py index b50b7d1ef..5f63b7eb9 100644 --- a/trimesh/parent.py +++ b/trimesh/parent.py @@ -134,17 +134,14 @@ def __repr__(self): elements = [] if hasattr(self, 'vertices'): # for Trimesh and PointCloud - elements.append('vertices.shape={}'.format( - self.vertices.shape)) + elements.append(f'vertices.shape={self.vertices.shape}') if hasattr(self, 'faces'): # for Trimesh - elements.append('faces.shape={}'.format( - self.faces.shape)) + elements.append(f'faces.shape={self.faces.shape}') if hasattr(self, 'geometry') and isinstance( self.geometry, dict): # for Scene - elements.append('len(geometry)={}'.format( - len(self.geometry))) + elements.append(f'len(geometry)={len(self.geometry)}') if 'Voxel' in type(self).__name__: # for VoxelGrid objects elements.append(str(self.shape)[1:-1]) @@ -252,7 +249,7 @@ def bounding_box_oriented(self): representing the minimum volume oriented bounding box of the mesh """ - from . import primitives, bounds + from . import bounds, primitives to_origin, extents = bounds.oriented_bounds(self) obb = primitives.Box(transform=np.linalg.inv(to_origin), extents=extents, @@ -275,7 +272,7 @@ def bounding_sphere(self): minball : trimesh.primitives.Sphere Sphere primitive containing current mesh """ - from . import primitives, nsphere + from . import nsphere, primitives center, radius = nsphere.minimum_nsphere(self) minball = primitives.Sphere(center=center, radius=radius, @@ -292,7 +289,7 @@ def bounding_cylinder(self): mincyl : trimesh.primitives.Cylinder Cylinder primitive containing current mesh """ - from . import primitives, bounds + from . 
import bounds, primitives kwargs = bounds.minimum_cylinder(self) mincyl = primitives.Cylinder(mutable=False, **kwargs) return mincyl diff --git a/trimesh/path/arc.py b/trimesh/path/arc.py index 1e121a9e2..936e1c167 100644 --- a/trimesh/path/arc.py +++ b/trimesh/path/arc.py @@ -1,10 +1,9 @@ import numpy as np from .. import util - from ..constants import log -from ..constants import tol_path as tol from ..constants import res_path as res +from ..constants import tol_path as tol # floating point zero _TOL_ZERO = 1e-12 @@ -158,7 +157,7 @@ def discretize_arc(points, # the number of facets, based on the angle criteria count_a = angle / res.seg_angle - count_l = ((R * angle)) / (res.seg_frac * scale) + count_l = (R * angle) / (res.seg_frac * scale) # figure out the number of line segments count = np.max([count_a, count_l]) diff --git a/trimesh/path/creation.py b/trimesh/path/creation.py index e376dabc3..6ab272d1b 100644 --- a/trimesh/path/creation.py +++ b/trimesh/path/creation.py @@ -1,11 +1,9 @@ import numpy as np -from . import arc -from .entities import Line, Arc +from .. import transformations, util from ..geometry import plane_transform - -from .. import util -from .. import transformations +from . import arc +from .entities import Arc, Line def circle_pattern(pattern_radius, diff --git a/trimesh/path/entities.py b/trimesh/path/entities.py index 081369eed..e8a92dd76 100644 --- a/trimesh/path/entities.py +++ b/trimesh/path/entities.py @@ -5,14 +5,14 @@ Basic geometric primitives which only store references to vertex indices rather than vertices themselves. """ -import numpy as np - from copy import deepcopy -from .arc import discretize_arc, arc_center -from .curve import discretize_bezier, discretize_bspline + +import numpy as np from .. import util from ..util import ABC +from .arc import arc_center, discretize_arc +from .curve import discretize_bezier, discretize_bspline class Entity(ABC): diff --git a/trimesh/path/exchange/dxf.py b/trimesh/path/exchange/dxf.py index 9b968a73a..8768f55d2 100644 --- a/trimesh/path/exchange/dxf.py +++ b/trimesh/path/exchange/dxf.py @@ -1,19 +1,14 @@ -import numpy as np - from collections import defaultdict -from ..arc import to_threepoint -from ..entities import Line, Arc, BSpline, Text +import numpy as np -from ... import resources -from ...util import multi_dict +from ... import grouping, resources, util +from ... import transformations as tf from ...constants import log from ...constants import tol_path as tol - -from ... import util -from ... import grouping -from ... 
import transformations as tf - +from ...util import multi_dict +from ..arc import to_threepoint +from ..entities import Arc, BSpline, Line, Text # unit codes _DXF_UNITS = {1: 'inches', @@ -560,7 +555,7 @@ def convert_insert(e): unsupported[entity_type] += 1 if len(unsupported) > 0: log.debug('skipping dxf entities: {}'.format( - ', '.join('{}: {}'.format(k, v) for k, v + ', '.join(f'{k}: {v}' for k, v in unsupported.items()))) # stack vertices into single array vertices = util.vstack_empty(vertices).astype(np.float64) @@ -632,7 +627,7 @@ def format_points(points, group = group[:, :2] three = three[:, :2] # join into result string - packed = '\n'.join('{:d}\n{:.12g}'.format(g, v) + packed = '\n'.join(f'{g:d}\n{v:.12g}' for g, v in zip(group.reshape(-1), three.reshape(-1))) @@ -743,7 +738,7 @@ def convert_bspline(spline, vertices): normal = [0.0, 0.0, 1.0] n_code = [210, 220, 230] - n_str = '\n'.join('{:d}\n{:.12g}'.format(i, j) + n_str = '\n'.join(f'{i:d}\n{j:.12g}' for i, j in zip(n_code, normal)) subs = entity_info(spline) @@ -815,9 +810,9 @@ def convert_generic(entity, vertices): entities_str = '\n'.join(collected) # add in the extents of the document as explicit XYZ lines - hsub = {'EXTMIN_{}'.format(k): v for k, v in zip( + hsub = {f'EXTMIN_{k}': v for k, v in zip( 'XYZ', np.append(path.bounds[0], 0.0))} - hsub.update({'EXTMAX_{}'.format(k): v for k, v in zip( + hsub.update({f'EXTMAX_{k}': v for k, v in zip( 'XYZ', np.append(path.bounds[1], 0.0))}) # apply a units flag defaulting to `1` hsub['LUNITS'] = _UNITS_TO_DXF.get(path.units, 1) diff --git a/trimesh/path/exchange/export.py b/trimesh/path/exchange/export.py index b76e47677..96b1eb0df 100644 --- a/trimesh/path/exchange/export.py +++ b/trimesh/path/exchange/export.py @@ -1,8 +1,7 @@ import os -from ... import util -from . import dxf -from . import svg_io +from ... import util +from . import dxf, svg_io def export_path(path, diff --git a/trimesh/path/exchange/load.py b/trimesh/path/exchange/load.py index d2d0aec88..3cfb18278 100644 --- a/trimesh/path/exchange/load.py +++ b/trimesh/path/exchange/load.py @@ -1,11 +1,10 @@ import os -from .dxf import _dxf_loaders -from .svg_io import svg_to_path +from ... import util from ..path import Path - from . import misc -from ... import util +from .dxf import _dxf_loaders +from .svg_io import svg_to_path def load_path(file_obj, file_type=None, **kwargs): @@ -68,8 +67,7 @@ def load_path(file_obj, file_type=None, **kwargs): raise ValueError('Not a supported object type!') result = load_kwargs(kwargs) - util.log.debug('loaded {} in {:0.4f}s'.format( - str(result), util.now() - tic)) + util.log.debug(f'loaded {str(result)} in {util.now() - tic:0.4f}s') return result diff --git a/trimesh/path/exchange/misc.py b/trimesh/path/exchange/misc.py index 4d05c9863..57c958a7e 100644 --- a/trimesh/path/exchange/misc.py +++ b/trimesh/path/exchange/misc.py @@ -1,10 +1,7 @@ import numpy as np -from ... import util -from ... import graph -from ... import grouping - -from ..entities import Line, Arc +from ... import graph, grouping, util +from ..entities import Arc, Line def dict_to_path(as_dict): diff --git a/trimesh/path/exchange/svg_io.py b/trimesh/path/exchange/svg_io.py index 3c7bd73fd..111838fef 100644 --- a/trimesh/path/exchange/svg_io.py +++ b/trimesh/path/exchange/svg_io.py @@ -1,23 +1,16 @@ -import json import base64 import collections - -import numpy as np - +import json from copy import deepcopy -from ..arc import arc_center -from ..entities import Line, Arc, Bezier +import numpy as np +from ... 
import exceptions, grouping, resources, util
 from ...constants import log, tol
-
-from ... import util
-from ... import grouping
-from ... import resources
-from ... import exceptions
-
+from ...transformations import planar_matrix, transform_points
 from ...util import jsonify
-from ...transformations import transform_points, planar_matrix
+from ..arc import arc_center
+from ..entities import Arc, Bezier, Line
 
 try:
     # pip install svg.path
@@ -37,7 +30,7 @@
 # store any additional properties using a trimesh namespace
 _ns_name = 'trimesh'
 _ns_url = 'https://github.com/mikedh/trimesh'
-_ns = '{{{}}}'.format(_ns_url)
+_ns = f'{{{_ns_url}}}'
 
 _IDENTITY = np.eye(3)
 _IDENTITY.flags['WRITEABLE'] = False
@@ -209,7 +202,7 @@ def transform_to_matrices(transform):
             mat[:2, :2] *= values
             matrices.append(mat)
         else:
-            log.debug('unknown SVG transform: {}'.format(key))
+            log.debug(f'unknown SVG transform: {key}')
 
     return matrices
 
@@ -268,7 +261,7 @@ def load_cubic(svg_cubic):
                            svg_cubic.end])
         return Bezier(np.arange(4) + counts[name]), points
 
-    class MultiLine(object):
+    class MultiLine:
         # An object to hold one or multiple Line entities.
         def __init__(self, lines):
             if tol.strict:
@@ -434,7 +427,7 @@ def _entities_to_str(entities,
         points = vertices.copy()
 
     # generate a format string with the requested digits
-    temp_digits = '0.{}f'.format(int(digits))
+    temp_digits = f'0.{int(digits)}f'
     # generate a format string for circles as two arc segments
     temp_circle = ('M {x:DI},{y:DI}a{r:DI},{r:DI},0,1,0,{d:DI},' +
                    '0a{r:DI},{r:DI},0,1,0,-{d:DI},0Z').replace('DI', temp_digits)
@@ -573,9 +566,7 @@ def export_svg(drawing,
     elements = []
     for meta, path_string in pairs:
         # create a simple path element
-        elements.append('<path {attr} d="{d}"/>'.format(
-            d=path_string,
-            attr=_format_attrib(meta)))
+        elements.append(f'<path {_format_attrib(meta)} d="{path_string}"/>')
 
     # format as XML
     if 'stroke_width' in kwargs:
@@ -610,8 +601,7 @@ def _format_attrib(attrib):
         Bag of keys and values.
     """
     bag = {k: _encode(v) for k, v in attrib.items()}
-    return '\n'.join('{ns}:{key}="{value}"'.format(
-        ns=_ns_name, key=k, value=v)
+    return '\n'.join(f'{_ns_name}:{k}="{v}"'
                      for k, v in bag.items()
                      if len(k) > 0 and v is not None and len(v) > 0)
 
@@ -664,7 +654,7 @@ def _deep_same(original, other):
     # ndarrays will be converted to lists
     # but otherwise types should be identical
     if isinstance(original, np.ndarray):
-        assert isinstance(other, (list, np.ndarray))
+        assert isinstance(other, list | np.ndarray)
     elif util.is_string(original):
         # handle python 2+3 unicode vs str
         assert util.is_string(other)
@@ -672,11 +662,11 @@ def _deep_same(original, other):
     # otherwise they should be the same type
     assert isinstance(original, type(other))
 
-    if isinstance(original, (str, bytes)):
+    if isinstance(original, str | bytes):
         # string and bytes should just be identical
         assert original == other
         return
-    elif isinstance(original, (float, int, np.ndarray)):
+    elif isinstance(original, float | int | np.ndarray):
         # for numeric classes use numpy magic comparison
         # which includes an epsilon for floating point
         assert np.allclose(original, other)
diff --git a/trimesh/path/intersections.py b/trimesh/path/intersections.py
index 60a7ece61..fd70c7a02 100644
--- a/trimesh/path/intersections.py
+++ b/trimesh/path/intersections.py
@@ -1,7 +1,6 @@
 import numpy as np
 
 from .. 
import util - from ..constants import tol_path as tol diff --git a/trimesh/path/packing.py b/trimesh/path/packing.py index a72849e13..08bf3f8f8 100644 --- a/trimesh/path/packing.py +++ b/trimesh/path/packing.py @@ -6,8 +6,8 @@ """ import numpy as np -from ..util import allclose, bounds_tree from ..constants import log, tol +from ..util import allclose, bounds_tree # floating point zero _TOL_ZERO = 1e-12 @@ -377,7 +377,7 @@ def polygons(polygons, **kwargs): i.e. `consume.sum() == m` """ - from .polygons import polygons_obb, polygon_bounds + from .polygons import polygon_bounds, polygons_obb # find the oriented bounding box of the polygons obb, extents = polygons_obb(polygons) @@ -486,7 +486,7 @@ def rectangles(extents, # shrink the bounds by spacing result[0] += [[[spacing], [-spacing]]] - log.debug('packed with density {:0.5f}'.format(best_density)) + log.debug(f'packed with density {best_density:0.5f}') return result @@ -599,8 +599,8 @@ def visualize(extents, bounds): scene : trimesh.Scene Scene with boxes at requested locations. """ - from ..scene import Scene from ..creation import box + from ..scene import Scene from ..visual import random_color # use a roll transform to verify extents diff --git a/trimesh/path/path.py b/trimesh/path/path.py index 5c454c9af..4ba418698 100644 --- a/trimesh/path/path.py +++ b/trimesh/path/path.py @@ -5,38 +5,29 @@ A module designed to work with vector paths such as those stored in a DXF or SVG file. """ -import numpy as np - +import collections import copy import warnings -import collections - from hashlib import sha256 -from ..points import plane_fit -from ..geometry import plane_transform -from ..visual import to_rgba -from ..constants import log -from ..constants import tol_path as tol - -from .util import concatenate +import numpy as np -from .. import parent -from .. import util -from .. import units -from .. import bounds -from .. import caching -from .. import grouping -from .. import exceptions +from .. import bounds, caching, exceptions, grouping, parent, units, util from .. import transformations as tf - -from . import raster -from . import simplify -from . import creation # NOQA -from . import segments # NOQA -from . import traversal - +from ..constants import log +from ..constants import tol_path as tol +from ..geometry import plane_transform +from ..points import plane_fit +from ..visual import to_rgba +from . 
import ( + creation, # NOQA + raster, + segments, # NOQA + simplify, + traversal, +) from .exchange.export import export_path +from .util import concatenate # now import things which require non-minimal install of Trimesh # create a dummy module which will raise the ImportError @@ -102,7 +93,7 @@ def __init__(self, # assign each color to each entity self.colors = colors # collect metadata into new dictionary - self.metadata = dict() + self.metadata = {} if metadata.__class__.__name__ == 'dict': self.metadata.update(metadata) @@ -777,7 +768,7 @@ def copy(self): metadata[key] = copy.deepcopy(self.metadata[key]) except RuntimeError: # multiple threads - log.warning('key {} changed during copy'.format(key)) + log.warning(f'key {key} changed during copy') # copy the core data copied = type(self)(entities=copy.deepcopy(self.entities), @@ -892,8 +883,7 @@ def to_planar(self, N = normal else: log.debug( - "passed normal not used: {}".format( - normal.shape)) + f"passed normal not used: {normal.shape}") # create a transform from fit plane to XY to_2D = plane_transform(origin=C, normal=N) diff --git a/trimesh/path/polygons.py b/trimesh/path/polygons.py index e46140fe0..96315502a 100644 --- a/trimesh/path/polygons.py +++ b/trimesh/path/polygons.py @@ -1,17 +1,11 @@ import numpy as np - from shapely import ops from shapely.geometry import Polygon -from .. import bounds -from .. import graph -from .. import geometry -from .. import grouping - +from .. import bounds, geometry, graph, grouping from ..constants import log from ..constants import tol_path as tol from ..transformations import transform_points - from .simplify import fit_circle_check from .traversal import resample_path diff --git a/trimesh/path/raster.py b/trimesh/path/raster.py index ae5d7b70c..5967d308e 100644 --- a/trimesh/path/raster.py +++ b/trimesh/path/raster.py @@ -8,9 +8,7 @@ try: # keep pillow as a soft dependency - from PIL import (Image, - ImageDraw, - ImageChops) + from PIL import Image, ImageChops, ImageDraw except BaseException as E: from .. import exceptions # re-raise the useful exception when called @@ -55,7 +53,7 @@ def rasterize(path, if origin is None: origin = path.bounds[0] - (pitch * 2.0) - + # check inputs pitch = np.asanyarray(pitch, dtype=np.float64) origin = np.asanyarray(origin, dtype=np.float64) diff --git a/trimesh/path/repair.py b/trimesh/path/repair.py index c6b6a783a..504a029fd 100644 --- a/trimesh/path/repair.py +++ b/trimesh/path/repair.py @@ -4,12 +4,12 @@ Try to fix problems with closed regions. """ -from . import segments -from .. import util - import numpy as np from scipy.spatial import cKDTree +from .. import util +from . import segments + def fill_gaps(path, distance=.025): """ @@ -103,7 +103,7 @@ def fill_gaps(path, distance=.025): # add line segments in as line entities entities = [] - for i in range(len((final_seg))): + for i in range(len(final_seg)): entities.append( line_class( points=np.arange(2) + (i * 2) + len(path.vertices))) diff --git a/trimesh/path/segments.py b/trimesh/path/segments.py index 5475aa05c..7281ef60a 100644 --- a/trimesh/path/segments.py +++ b/trimesh/path/segments.py @@ -7,12 +7,7 @@ import numpy as np -from .. import util -from .. import grouping -from .. import geometry -from .. import interval -from .. import transformations - +from .. 
import geometry, grouping, interval, transformations, util from ..constants import tol diff --git a/trimesh/path/simplify.py b/trimesh/path/simplify.py index fb3657454..60b5f38ac 100644 --- a/trimesh/path/simplify.py +++ b/trimesh/path/simplify.py @@ -1,17 +1,13 @@ -import numpy as np - -import copy import collections +import copy -from . import arc -from . import entities +import numpy as np from .. import util - -from ..nsphere import fit_nsphere - from ..constants import log from ..constants import tol_path as tol +from ..nsphere import fit_nsphere +from . import arc, entities def fit_circle_check(points, @@ -243,7 +239,7 @@ def resample_spline(points, smooth=.001, count=None, degree=3): resampled : (count, dimension) float Points in space """ - from scipy.interpolate import splprep, splev + from scipy.interpolate import splev, splprep if count is None: count = len(points) points = np.asanyarray(points) diff --git a/trimesh/path/traversal.py b/trimesh/path/traversal.py index 4630ec291..75f256e66 100644 --- a/trimesh/path/traversal.py +++ b/trimesh/path/traversal.py @@ -1,12 +1,10 @@ import copy + import numpy as np +from .. import constants, grouping, util from .util import is_ccw -from .. import util -from .. import grouping -from .. import constants - try: import networkx as nx except BaseException as E: diff --git a/trimesh/path/util.py b/trimesh/path/util.py index 19d890e8b..683ff450d 100644 --- a/trimesh/path/util.py +++ b/trimesh/path/util.py @@ -1,4 +1,5 @@ import numpy as np + from ..util import is_ccw # NOQA @@ -21,7 +22,7 @@ def concatenate(paths): return paths[0].copy() # upgrade to 3D if we have mixed 2D and 3D paths - dimensions = set(i.vertices.shape[1] for i in paths) + dimensions = {i.vertices.shape[1] for i in paths} if len(dimensions) > 1: paths = [i.to_3D() if hasattr(i, 'to_3D') else i for i in paths] diff --git a/trimesh/permutate.py b/trimesh/permutate.py index d2928624b..24ae58aa5 100644 --- a/trimesh/permutate.py +++ b/trimesh/permutate.py @@ -7,9 +7,8 @@ import numpy as np -from . import transformations +from . import transformations, util from . import triangles as triangles_module -from . import util def transform(mesh, translation_scale=1000.0): diff --git a/trimesh/points.py b/trimesh/points.py index 01d8504f0..76909ec13 100644 --- a/trimesh/points.py +++ b/trimesh/points.py @@ -8,18 +8,13 @@ import numpy as np -from .parent import Geometry3D -from .geometry import plane_transform +from . import caching, grouping, transformations, util from .constants import tol +from .geometry import plane_transform +from .parent import Geometry3D from .visual.color import VertexColor -from . import util -from . import caching -from . import grouping -from . import transformations - - def point_plane_distance(points, plane_normal, plane_origin=None): @@ -394,8 +389,7 @@ def plot_points(points, show=True): elif points.shape[1] == 2: plt.scatter(*points.T) else: - raise ValueError('points not 2D/3D: {}'.format( - points.shape)) + raise ValueError(f'points not 2D/3D: {points.shape}') if show: plt.show() diff --git a/trimesh/primitives.py b/trimesh/primitives.py index 965d82742..33768741e 100644 --- a/trimesh/primitives.py +++ b/trimesh/primitives.py @@ -7,17 +7,12 @@ Useful because you can move boxes and spheres around and then use trimesh operations on them at any point. """ -import numpy as np import abc -from . import util -from . import sample -from . import caching -from . import inertia -from . import creation -from . import triangles -from . 
import transformations as tf
+import numpy as np
 
+from . import caching, creation, inertia, sample, triangles, util
+from . import transformations as tf
 from .base import Trimesh
 from .constants import log, tol
 
@@ -38,7 +33,7 @@ class _Primitive(Trimesh):
 
     def __init__(self):
         # run the Trimesh constructor with no arguments
-        super(_Primitive, self).__init__()
+        super().__init__()
         # remove any data
         self._data.clear()
 
@@ -49,8 +44,7 @@ def __init__(self):
         self._cache.force_immutable = True
 
     def __repr__(self):
-        return '<trimesh.primitives.{}>'.format(
-            type(self).__name__)
+        return f'<trimesh.primitives.{type(self).__name__}>'
 
     @property
     def faces(self):
@@ -225,7 +219,7 @@ def _create_mesh(self):
         raise ValueError('Primitive doesn\'t define mesh creation!')
 
 
-class _PrimitiveAttributes(object):
+class _PrimitiveAttributes:
     """
     Hold the mutable data which defines a primitive.
     """
@@ -287,7 +281,7 @@ def __doc__(self):
 
     def __getattr__(self, key):
         if key.startswith('_'):
-            return super(_PrimitiveAttributes, self).__getattr__(key)
+            return super().__getattr__(key)
         elif key == 'center':
             # this whole __getattr__ is a little hacky
             return self._data['transform'][:3, 3]
@@ -295,12 +289,11 @@ def __getattr__(self, key):
             return util.convert_like(self._data[key],
                                      self._defaults[key])
         raise AttributeError(
-            "primitive object has no attribute '{}' ".format(key))
+            f"primitive object has no attribute '{key}' ")
 
     def __setattr__(self, key, value):
         if key.startswith('_'):
-            return super(_PrimitiveAttributes,
-                         self).__setattr__(key, value)
+            return super().__setattr__(key, value)
         elif key == 'center':
             value = np.array(value, dtype=np.float64)
             transform = np.eye(4)
@@ -317,7 +310,7 @@ def __setattr__(self, key, value):
         else:
             keys = list(self._defaults.keys())
             raise ValueError(
-                'Only default attributes {} can be set!'.format(keys))
+                f'Only default attributes {keys} can be set!')
 
     def __dir__(self):
         result = sorted(dir(type(self)) +
@@ -349,7 +342,7 @@ def __init__(self,
         mutable : bool
             Are extents and transform mutable after creation.
         """
-        super(Cylinder, self).__init__()
+        super().__init__()
 
         defaults = {'height': 10.0,
                     'radius': 1.0,
@@ -503,7 +496,7 @@ def __init__(self,
         mutable : bool
             Are extents and transform mutable after creation.
         """
-        super(Capsule, self).__init__()
+        super().__init__()
 
         defaults = {'height': 1.0,
                     'radius': 1.0,
@@ -589,7 +582,7 @@ def __init__(self,
             Are extents and transform mutable after creation.
         """
 
-        super(Sphere, self).__init__()
+        super().__init__()
 
         defaults = {'radius': 1.0,
                     'transform': np.eye(4),
@@ -725,7 +718,7 @@ def __init__(self,
         mutable : bool
             Are extents and transform mutable after creation.
         """
-        super(Box, self).__init__()
+        super().__init__()
 
         defaults = {'transform': np.eye(4),
                     'extents': np.ones(3)}
@@ -903,7 +896,7 @@ def __init__(self,
         from shapely.geometry import Point
 
         # run the Trimesh init
-        super(Extrusion, self).__init__()
+        super().__init__()
 
         # set default values
         defaults = {'polygon': Point([0, 0]).buffer(1.0),
                     'transform': np.eye(4),
diff --git a/trimesh/proximity.py b/trimesh/proximity.py
index c341d761c..e3f19a3e4 100644
--- a/trimesh/proximity.py
+++ b/trimesh/proximity.py
@@ -7,9 +7,8 @@
 
 import numpy as np
 
 from . 
import util - +from .constants import log_time, tol from .grouping import group_min -from .constants import tol, log_time from .triangles import closest_point as _corresponding from .triangles import points_to_barycentric @@ -302,7 +301,7 @@ def has_normals(self): return self.normals is not None or self.interpolated_normals is not None -class ProximityQuery(object): +class ProximityQuery: """ Proximity queries for the current mesh. """ diff --git a/trimesh/ray/__init__.py b/trimesh/ray/__init__.py index 23ee5dfdd..a71eba216 100644 --- a/trimesh/ray/__init__.py +++ b/trimesh/ray/__init__.py @@ -1,4 +1,4 @@ -from .import ray_triangle +from . import ray_triangle # optionally load an interface to the embree raytracer try: diff --git a/trimesh/ray/ray_pyembree.py b/trimesh/ray/ray_pyembree.py index 77a014051..a8975fa1b 100644 --- a/trimesh/ray/ray_pyembree.py +++ b/trimesh/ray/ray_pyembree.py @@ -2,18 +2,13 @@ Ray queries using the embreex package with the API wrapped to match our native raytracer. """ -import numpy as np - from copy import deepcopy +import numpy as np -from .ray_util import contains_points - -from .. import util -from .. import caching -from .. import intersections - +from .. import caching, intersections, util from ..constants import log_time +from .ray_util import contains_points # the factor of geometry.scale to offset a ray from a triangle # to reliably not hit its origin triangle @@ -31,9 +26,8 @@ except BaseException as E: try: # this will be deprecated at some point hopefully soon - from pyembree import rtcore_scene + from pyembree import __version__, rtcore_scene from pyembree.mesh_construction import TriangleMesh - from pyembree import __version__ # see if we're using a newer version of the pyembree wrapper _embree_new = tuple([int(i) for i in __version__.split('.')]) >= (0, 1, 4) # both old and new versions require exact but different type @@ -43,7 +37,7 @@ raise E -class RayMeshIntersector(object): +class RayMeshIntersector: def __init__(self, geometry, @@ -330,7 +324,7 @@ def contains_points(self, points): return contains_points(self, points) -class _EmbreeWrap(object): +class _EmbreeWrap: """ A light wrapper for Embreex scene objects which allows queries to be scaled to help with precision diff --git a/trimesh/ray/ray_triangle.py b/trimesh/ray/ray_triangle.py index b502ad6f3..7a7a02593 100644 --- a/trimesh/ray/ray_triangle.py +++ b/trimesh/ray/ray_triangle.py @@ -3,19 +3,13 @@ """ import numpy as np - -from .ray_util import contains_points - -from ..constants import tol - -from .. import util -from .. import caching -from .. import grouping -from .. import intersections +from .. import caching, grouping, intersections, util from .. import triangles as triangles_mod +from ..constants import tol +from .ray_util import contains_points -class RayMeshIntersector(object): +class RayMeshIntersector: """ An object to query a mesh for ray intersections. Precomputes an r-tree for each triangle on the mesh. diff --git a/trimesh/ray/ray_util.py b/trimesh/ray/ray_util.py index fac7b58d4..68276213b 100644 --- a/trimesh/ray/ray_util.py +++ b/trimesh/ray/ray_util.py @@ -1,8 +1,6 @@ import numpy as np -from .. import util -from .. import bounds -from .. import constants +from .. import bounds, constants, util @constants.log_time diff --git a/trimesh/registration.py b/trimesh/registration.py index 42f5e2dc8..997d7759a 100644 --- a/trimesh/registration.py +++ b/trimesh/registration.py @@ -7,19 +7,15 @@ import numpy as np - -from . import util -from . import bounds -from . 
import transformations - -from .points import PointCloud, plane_fit +from . import bounds, transformations, util from .geometry import weighted_vertex_normals -from .triangles import normals, angles, cross +from .points import PointCloud, plane_fit from .transformations import transform_points +from .triangles import angles, cross, normals try: - from scipy.spatial import cKDTree import scipy.sparse as sparse + from scipy.spatial import cKDTree except BaseException as E: # wrapping just ImportError fails in some cases # will raise the error when someone tries to use KDtree diff --git a/trimesh/remesh.py b/trimesh/remesh.py index a27cbc5e8..3b2cde28a 100644 --- a/trimesh/remesh.py +++ b/trimesh/remesh.py @@ -6,12 +6,9 @@ """ import numpy as np -from . import util -from . import grouping -from . import graph - -from .geometry import faces_to_edges +from . import graph, grouping, util from .constants import tol +from .geometry import faces_to_edges def subdivide(vertices, @@ -111,7 +108,7 @@ def subdivide(vertices, stack = np.arange( start, start + len(f) * 4).reshape((-1, 4)) # reformat into a slightly silly dict for some reason - index_dict = {k: v for k, v in zip(nonzero, stack)} + index_dict = dict(zip(nonzero, stack)) return new_vertices, new_faces, index_dict diff --git a/trimesh/rendering.py b/trimesh/rendering.py index 9297d1139..155a7b49c 100644 --- a/trimesh/rendering.py +++ b/trimesh/rendering.py @@ -271,7 +271,7 @@ def colors_to_gl(colors, count): if dtype is not None and util.is_shape(colors, (count, (3, 4))): # save the shape and dtype for opengl color string - colors_type = 'c{}{}/static'.format(colors.shape[1], dtype) + colors_type = f'c{colors.shape[1]}{dtype}/static' # reshape the 2D array into a 1D one and then convert to a python list gl_colors = colors.reshape(-1).tolist() elif dtype is not None and colors.shape in [(3,), (4,)]: @@ -279,7 +279,7 @@ def colors_to_gl(colors, count): gl_colors = (np.ones((count, colors.size), dtype=colors.dtype) * colors).reshape(-1).tolist() # we know we're tiling - colors_type = 'c{}{}/static'.format(colors.size, dtype) + colors_type = f'c{colors.size}{dtype}/static' else: # case where colors are wrong shape # use black as the default color diff --git a/trimesh/repair.py b/trimesh/repair.py index b1a6913ad..a9fd7f70e 100644 --- a/trimesh/repair.py +++ b/trimesh/repair.py @@ -7,13 +7,10 @@ import numpy as np -from . import graph -from . import triangles - +from . import graph, triangles from .constants import log -from .grouping import group_rows from .geometry import faces_to_edges - +from .grouping import group_rows try: import networkx as nx diff --git a/trimesh/resolvers.py b/trimesh/resolvers.py index 09ead18f1..532c0c67d 100644 --- a/trimesh/resolvers.py +++ b/trimesh/resolvers.py @@ -7,12 +7,11 @@ archives, web assets, or a local file path. """ -import os import abc import itertools +import os -from . import util -from . import caching +from . 
import caching, util # URL parsing for remote resources via WebResolver try: @@ -84,8 +83,7 @@ def __init__(self, source): # exit if directory doesn't exist if not os.path.isdir(self.parent): raise ValueError( - 'path `{} `not a directory!'.format( - self.parent)) + f'path `{self.parent} `not a directory!') def keys(self): """ @@ -259,7 +257,7 @@ def get(self, name): # get the stored data obj = archive[name] # if the dict is storing data as bytes just return - if isinstance(obj, (bytes, str)): + if isinstance(obj, bytes | str): return obj # otherwise get it as a file object # read file object from beginning diff --git a/trimesh/resources/__init__.py b/trimesh/resources/__init__.py index 0d8ea9c3d..d01a0a091 100644 --- a/trimesh/resources/__init__.py +++ b/trimesh/resources/__init__.py @@ -1,5 +1,5 @@ -import os import json +import os from ..util import decode_text, wrap_as_stream @@ -76,8 +76,8 @@ def get_schema(name): schema : dict Loaded and resolved schema. """ - from ..schemas import resolve from ..resolvers import FilePathResolver + from ..schemas import resolve # get a resolver for our base path resolver = FilePathResolver( os.path.join(_pwd, 'schema', name)) diff --git a/trimesh/resources/javascript/compile.py b/trimesh/resources/javascript/compile.py index 2cecb624a..b5fc690a1 100644 --- a/trimesh/resources/javascript/compile.py +++ b/trimesh/resources/javascript/compile.py @@ -8,9 +8,9 @@ generate the template used in the trimesh viewer. """ import os + import jsmin import requests - from lxml import html @@ -26,7 +26,7 @@ def minify(path): if path.startswith('http'): data = requests.get(path).content.decode( 'ascii', errors='ignore') - print('downloaded', path, len(data)) # noqa + print('downloaded', path, len(data)) else: with open(path, 'rb') as f: # some upstream JS uses unicode spaces -_- @@ -55,12 +55,12 @@ def minify(path): # get a blob of file path = s.attrib['src'].strip() - print('minifying:', path) # noqa + print('minifying:', path) mini = minify(path) # replace test data in our file if path == 'load_base64.js': - print('replacing test data with "$B64GLTF"') # noqa + print('replacing test data with "$B64GLTF"') start = mini.find('base64_data') end = mini.find(';', start) # replace test data with a string we can replace diff --git a/trimesh/sample.py b/trimesh/sample.py index edf002eb8..ca1e1b28c 100644 --- a/trimesh/sample.py +++ b/trimesh/sample.py @@ -7,8 +7,7 @@ import numpy as np -from . import util -from . import transformations +from . import transformations, util from .visual import uv_to_interpolated_color if hasattr(np.random, 'default_rng'): @@ -219,8 +218,7 @@ def sample_surface_even(mesh, count, radius=None, seed=None): return points[:count], index[mask][:count] # warn if we didn't get all the samples we expect - util.log.warning('only got {}/{} samples!'.format( - len(points), count)) + util.log.warning(f'only got {len(points)}/{count} samples!') return points, index[mask] diff --git a/trimesh/scene/__init__.py b/trimesh/scene/__init__.py index 303754a90..1610837b9 100644 --- a/trimesh/scene/__init__.py +++ b/trimesh/scene/__init__.py @@ -1,5 +1,4 @@ from .cameras import Camera - from .scene import Scene, split_scene # add to __all__ as per pep8 diff --git a/trimesh/scene/cameras.py b/trimesh/scene/cameras.py index 6ce420af1..5fcd00bf8 100644 --- a/trimesh/scene/cameras.py +++ b/trimesh/scene/cameras.py @@ -5,7 +5,7 @@ from .. 
import util
 
 
-class Camera(object):
+class Camera:
 
     def __init__(
             self,
@@ -39,7 +39,7 @@ def __init__(
 
         if name is None:
             # if name is not passed, make it something unique
-            self.name = 'camera_{}'.format(util.unique_id(6).upper())
+            self.name = f'camera_{util.unique_id(6).upper()}'
         else:
             # otherwise assign it
             self.name = name
@@ -283,8 +283,7 @@ def look_at(self, points, **kwargs):
             **kwargs)
 
     def __repr__(self):
-        return '<trimesh.scene.Camera> FOV: {} Resolution: {}'.format(
-            self.fov, self.resolution)
+        return f'<trimesh.scene.Camera> FOV: {self.fov} Resolution: {self.resolution}'
 
 
 def look_at(points, fov, rotation=None, distance=None, center=None, pad=None):
diff --git a/trimesh/scene/lighting.py b/trimesh/scene/lighting.py
index 83c90ca3c..27f36367e 100644
--- a/trimesh/scene/lighting.py
+++ b/trimesh/scene/lighting.py
@@ -10,9 +10,7 @@
 
 import numpy as np
 
-from .. import util
-from .. import visual
-from .. import transformations
+from .. import transformations, util, visual
 
 # default light color
 _DEFAULT_RGBA = np.array([60, 60, 60, 255], dtype=np.uint8)
@@ -48,7 +46,7 @@ def __init__(self,
 
         if name is None:
             # if name is not passed, make it something unique
-            self.name = 'light_{}'.format(util.unique_id(6).upper())
+            self.name = f'light_{util.unique_id(6).upper()}'
         else:
             # otherwise assign it
             self.name = name
@@ -129,7 +127,7 @@ def __init__(self,
                  color=None,
                  intensity=None,
                  radius=None):
-        super(DirectionalLight, self).__init__(
+        super().__init__(
             name=name,
             color=color,
             intensity=intensity,
@@ -168,7 +166,7 @@ def __init__(self,
                  color=None,
                  intensity=None,
                  radius=None):
-        super(PointLight, self).__init__(
+        super().__init__(
             name=name,
             color=color,
             intensity=intensity,
@@ -220,7 +218,7 @@ def __init__(self,
                  radius=None,
                  innerConeAngle=0.0,
                  outerConeAngle=np.pi / 4.0):
-        super(SpotLight, self).__init__(
+        super().__init__(
             name=name,
             color=color,
             intensity=intensity,
diff --git a/trimesh/scene/scene.py b/trimesh/scene/scene.py
index 940a8428e..392f30e96 100644
--- a/trimesh/scene/scene.py
+++ b/trimesh/scene/scene.py
@@ -1,22 +1,13 @@
-import numpy as np
 import collections
 import uuid
 
-from . import cameras
-from . import lighting
-
-from .. import util
-from .. import units
-from .. import convex
-from .. import inertia
-from .. import caching
-from .. import grouping
-from .. import transformations
+import numpy as np
 
-from ..util import unique_name
+from .. import caching, convex, grouping, inertia, transformations, units, util
 from ..exchange import export
 from ..parent import Geometry3D
-
+from ..util import unique_name
+from . 
import cameras, lighting from .transforms import SceneGraph @@ -167,8 +158,7 @@ def add_geometry(self, return if not hasattr(geometry, 'vertices'): - util.log.debug('unknown type ({}) added to scene!'.format( - type(geometry).__name__)) + util.log.debug(f'unknown type ({type(geometry).__name__}) added to scene!') return # get or create a name to reference the geometry by @@ -897,8 +887,8 @@ def subscene(self, node): graph = SceneGraph(base_frame=node) graph.from_edgelist(edges) - geometry_names = set([e[2]['geometry'] for e in edges - if 'geometry' in e[2]]) + geometry_names = {e[2]['geometry'] for e in edges + if 'geometry' in e[2]} geometry = {k: self.geometry[k] for k in geometry_names} result = Scene(geometry=geometry, graph=graph) return result @@ -1102,7 +1092,7 @@ def scaled(self, scale): """ # convert 2D geometries to 3D for 3D scaling factors scale_is_3D = isinstance( - scale, (list, tuple, np.ndarray)) and len(scale) == 3 + scale, list | tuple | np.ndarray) and len(scale) == 3 if scale_is_3D and np.all(np.asarray(scale) == scale[0]): # scale is uniform @@ -1415,8 +1405,7 @@ def node_remap(node): s = s.scene() # if we don't have a scene raise an exception if not isinstance(s, Scene): - raise ValueError('{} is not a scene!'.format( - type(s).__name__)) + raise ValueError(f'{type(s).__name__} is not a scene!') # remap geometries if they have been consumed map_geom = {} diff --git a/trimesh/scene/transforms.py b/trimesh/scene/transforms.py index 28eac393e..09ffce6d4 100644 --- a/trimesh/scene/transforms.py +++ b/trimesh/scene/transforms.py @@ -1,22 +1,18 @@ -import numpy as np import collections - from copy import deepcopy -from .. import util -from .. import caching +import numpy as np -from ..transformations import (rotation_matrix, - quaternion_matrix, - fix_rigid) +from .. import caching, util from ..caching import hash_fast +from ..transformations import fix_rigid, quaternion_matrix, rotation_matrix # we compare to identity a lot _identity = np.eye(4) _identity.flags['WRITEABLE'] = False -class SceneGraph(object): +class SceneGraph: """ Hold data about positions and instances of geometry in a scene. This includes a forest (i.e. multi-root tree) @@ -172,7 +168,7 @@ def get(self, frame_to, frame_from=None): np.linalg.inv(backward['matrix'])) # filter out any identity matrices matrices = [m for m in matrices if - np.abs((m - _identity)).max() > 1e-8] + np.abs(m - _identity).max() > 1e-8] if len(matrices) == 0: matrix = _identity elif len(matrices) == 1: @@ -425,8 +421,8 @@ def show(self, **kwargs): kwargs : dict Passed to `networkx.draw_networkx` """ - import networkx import matplotlib.pyplot as plt + import networkx # default kwargs will only be set if not # passed explicitly to the show command defaults = {'with_labels': True} @@ -536,7 +532,7 @@ def clear(self): self._cache.clear() -class EnforcedForest(object): +class EnforcedForest: """ A simple forest graph data structure: every node is allowed to have exactly one parent. 
This makes @@ -708,7 +704,7 @@ def shortest_path(self, u, v): common = set(backward).intersection( forward).difference({None}) if len(common) == 0: - raise ValueError('No path from {}->{}!'.format(u, v)) + raise ValueError(f'No path from {u}->{v}!') elif len(common) > 1: # get the first occurring common element in "forward" link = next(f for f in forward if f in common) @@ -785,7 +781,7 @@ def successors(self, node): children = self.children # if node doesn't exist return early if node not in children: - return set([node]) + return {node} # children we need to collect queue = [node] diff --git a/trimesh/smoothing.py b/trimesh/smoothing.py index a8d9c5091..3830c6e2d 100644 --- a/trimesh/smoothing.py +++ b/trimesh/smoothing.py @@ -1,15 +1,15 @@ import numpy as np try: - from scipy.sparse.linalg import spsolve from scipy.sparse import coo_matrix, eye + from scipy.sparse.linalg import spsolve except ImportError: pass from . import triangles -from .util import unitize from .geometry import index_sparse from .triangles import mass_properties +from .util import unitize def filter_laplacian(mesh, diff --git a/trimesh/transformations.py b/trimesh/transformations.py index 48d25498e..0cfc67198 100644 --- a/trimesh/transformations.py +++ b/trimesh/transformations.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # transformations.py # Modified for inclusion in the `trimesh` library @@ -196,10 +195,10 @@ True """ -from __future__ import division, print_function import math + import numpy as np __version__ = '2017.02.17' @@ -1590,7 +1589,7 @@ def random_rotation_matrix(rand=None, num=1, translate=False): return matrix -class Arcball(object): +class Arcball: """Virtual Trackball Control. >>> ball = Arcball() @@ -1761,7 +1760,7 @@ def arcball_nearest_axis(point, axes): 'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1), 'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)} -_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items()) +_TUPLE2AXES = {v: k for k, v in _AXES2TUPLE.items()} def vector_norm(data, axis=None, out=None): diff --git a/trimesh/triangles.py b/trimesh/triangles.py index ed47cf424..a5835ebf3 100644 --- a/trimesh/triangles.py +++ b/trimesh/triangles.py @@ -7,10 +7,9 @@ import numpy as np from . import util - -from .util import unitize, diagonal_dot -from .points import point_plane_distance from .constants import tol +from .points import point_plane_distance +from .util import diagonal_dot, unitize def cross(triangles): diff --git a/trimesh/units.py b/trimesh/units.py index fc6f76dd4..ea09ee188 100644 --- a/trimesh/units.py +++ b/trimesh/units.py @@ -6,8 +6,8 @@ Very basic conversions, and no requirement for sympy.physics.units or pint. """ -from .constants import log from . 
import resources +from .constants import log def unit_conversion(current, desired): diff --git a/trimesh/util.py b/trimesh/util.py index 0be3003fd..872317031 100644 --- a/trimesh/util.py +++ b/trimesh/util.py @@ -10,26 +10,21 @@ """ import abc -import sys +import base64 +import collections import copy import json -import uuid -import base64 +import logging import random import shutil -import logging -import zipfile +import sys import tempfile -import collections +import uuid +import zipfile import numpy as np -if sys.version_info >= (3, 4): - # for newer version of python - ABC = abc.ABC -else: - # an abstract base class that works on older versions - ABC = abc.ABCMeta('ABC', (), {}) +ABC = abc.ABC # a flag we can check elsewhere for Python 3 PY3 = sys.version_info.major >= 3 @@ -39,12 +34,13 @@ basestring = str # Python 3 from io import BytesIO, StringIO - from shutil import which # noqa - from time import perf_counter as now # noqa + from shutil import which + from time import perf_counter as now else: # Python 2 - from StringIO import StringIO from distutils.spawn import find_executable as which # noqa + + from StringIO import StringIO # monkey patch StringIO so `with` statements work StringIO.__enter__ = lambda a: a StringIO.__exit__ = lambda a, b, c, d: a.close() @@ -55,7 +51,7 @@ try: from collections.abc import Mapping except ImportError: - from collections import Mapping + from collections.abc import Mapping # create a default logger log = logging.getLogger('trimesh') @@ -1254,7 +1250,7 @@ def array_to_encoded(array, dtype=None, encoding='base64'): elif encoding == 'binary': encoded['binary'] = array.tobytes(order='C') else: - raise ValueError('encoding {} is not available!'.format(encoding)) + raise ValueError(f'encoding {encoding} is not available!') return encoded @@ -1829,7 +1825,7 @@ def wrap_as_stream(item): return StringIO(item) elif isinstance(item, bytes): return BytesIO(item) - raise ValueError('{} is not wrappable!'.format(type(item).__name__)) + raise ValueError(f'{type(item).__name__} is not wrappable!') def sigfig_round(values, sigfig=1): @@ -2318,7 +2314,7 @@ def __call__(self, key, *args, **kwargs): return self[key](*args, **kwargs) -class TemporaryDirectory(object): +class TemporaryDirectory: """ Same basic usage as tempfile.TemporaryDirectory but functional in Python 2.7+. diff --git a/trimesh/version.py b/trimesh/version.py index b21b2f52f..85b1ed9b8 100644 --- a/trimesh/version.py +++ b/trimesh/version.py @@ -2,4 +2,4 @@ if __name__ == '__main__': # print version if run directly i.e. in a CI script - print(__version__) # noqa + print(__version__) diff --git a/trimesh/viewer/__init__.py b/trimesh/viewer/__init__.py index f8aaeded5..d235d200f 100644 --- a/trimesh/viewer/__init__.py +++ b/trimesh/viewer/__init__.py @@ -6,17 +6,13 @@ """ -from .notebook import (in_notebook, - scene_to_notebook, - scene_to_html) - from .. 
import exceptions +from .notebook import in_notebook, scene_to_html, scene_to_notebook try: # try importing windowed which will fail # if we can't create an openGL context - from .windowed import (SceneViewer, - render_scene) + from .windowed import SceneViewer, render_scene except BaseException as E: # if windowed failed to import only raise # the exception if someone tries to use them diff --git a/trimesh/viewer/notebook.py b/trimesh/viewer/notebook.py index bcb49915d..451471b1e 100644 --- a/trimesh/viewer/notebook.py +++ b/trimesh/viewer/notebook.py @@ -5,12 +5,11 @@ Render trimesh.Scene objects in HTML and jupyter notebooks using three.js """ -import os import base64 +import os # for our template -from .. import util -from .. import resources +from .. import resources, util def scene_to_html(scene): diff --git a/trimesh/viewer/trackball.py b/trimesh/viewer/trackball.py index cc2de2e8a..cf32cdefb 100644 --- a/trimesh/viewer/trackball.py +++ b/trimesh/viewer/trackball.py @@ -30,7 +30,7 @@ from .. import transformations -class Trackball(object): +class Trackball: """A trackball class for creating camera transforms from mouse movements. """ STATE_ROTATE = 0 diff --git a/trimesh/viewer/widget.py b/trimesh/viewer/widget.py index e37d9cd13..464a90f4f 100644 --- a/trimesh/viewer/widget.py +++ b/trimesh/viewer/widget.py @@ -13,8 +13,7 @@ from .. import rendering from .trackball import Trackball -from .windowed import geometry_hash -from .windowed import SceneViewer +from .windowed import SceneViewer, geometry_hash class SceneGroup(pyglet.graphics.Group): @@ -144,7 +143,7 @@ def __init__(self, scene, **kwargs): self._background = kwargs.pop('background', None) self._smooth = kwargs.pop('smooth', True) if kwargs: - raise TypeError('unexpected kwargs: {}'.format(kwargs)) + raise TypeError(f'unexpected kwargs: {kwargs}') @property def scene_group(self): diff --git a/trimesh/viewer/windowed.py b/trimesh/viewer/windowed.py index 7a7731abc..b728d53cc 100644 --- a/trimesh/viewer/windowed.py +++ b/trimesh/viewer/windowed.py @@ -8,8 +8,8 @@ Works on all major platforms: Windows, Linux, and OSX. """ import collections -import numpy as np +import numpy as np import pyglet # pyglet 2.0 is close to a re-write moving from fixed-function @@ -20,13 +20,10 @@ raise ImportError( '`trimesh.viewer.windowed` requires `pip install "pyglet<2"`') -from .trackball import Trackball - -from .. import util -from .. import rendering - -from ..visual import to_rgba +from .. 
import rendering, util from ..transformations import translation_matrix +from ..visual import to_rgba +from .trackball import Trackball pyglet.options['shadow_window'] = False @@ -172,7 +169,7 @@ def __init__(self, samples=4, depth_size=24, double_buffer=True) - super(SceneViewer, self).__init__(config=conf, + super().__init__(config=conf, visible=visible, resizable=True, width=resolution[0], @@ -180,7 +177,7 @@ def __init__(self, caption=caption) except pyglet.window.NoSuchConfigException: conf = gl.Config(double_buffer=True) - super(SceneViewer, self).__init__(config=conf, + super().__init__(config=conf, resizable=True, visible=visible, width=resolution[0], @@ -188,7 +185,7 @@ def __init__(self, caption=caption) else: # window config was manually passed - super(SceneViewer, self).__init__(config=window_conf, + super().__init__(config=window_conf, resizable=True, visible=visible, width=resolution[0], @@ -257,7 +254,7 @@ def add_geometry(self, name, geometry, **kwargs): # convert geometry to constructor args args = rendering.convert_to_vertexlist(geometry, **kwargs) except BaseException: - util.log.warning('failed to add geometry `{}`'.format(name), + util.log.warning(f'failed to add geometry `{name}`', exc_info=True) return @@ -286,8 +283,8 @@ def cleanup_geometries(self): # shorthand to scene graph graph = self.scene.graph # which parts of the graph still have geometry - geom_keep = set([graph[node][1] for - node in graph.nodes_geometry]) + geom_keep = {graph[node][1] for + node in graph.nodes_geometry} # which geometries no longer need to be kept geom_delete = [geom for geom in self.vertex_list if geom not in geom_keep] @@ -450,7 +447,7 @@ def _gl_enable_lighting(scene): # opengl only supports 7 lights? for i, light in enumerate(scene.lights[:7]): # the index of which light we have - lightN = eval('gl.GL_LIGHT{}'.format(i)) + lightN = eval(f'gl.GL_LIGHT{i}') # get the transform for the light by name matrix = scene.graph.get(light.name)[0] @@ -813,7 +810,7 @@ def on_draw(self): util.log.debug(profiler.output_text(unicode=True, color=True)) def flip(self): - super(SceneViewer, self).flip() + super().flip() if self._record: # will save a PNG-encoded bytes img = self.save_image(util.BytesIO()) diff --git a/trimesh/visual/base.py b/trimesh/visual/base.py index 02246625a..38f567fc6 100644 --- a/trimesh/visual/base.py +++ b/trimesh/visual/base.py @@ -5,6 +5,7 @@ The base class for `Visual` objects """ import abc + from ..util import ABC diff --git a/trimesh/visual/color.py b/trimesh/visual/color.py index dd85cc313..7ac684569 100644 --- a/trimesh/visual/color.py +++ b/trimesh/visual/color.py @@ -22,18 +22,15 @@ and setting or altering a value should automatically change the mode. """ -import numpy as np - -import copy import colorsys +import copy -from .base import Visuals - -from .. import util -from .. import caching +import numpy as np -from ..grouping import unique_rows +from .. 
import caching, util from ..constants import tol +from ..grouping import unique_rows +from .base import Visuals class ColorVisuals(Visuals): diff --git a/trimesh/visual/gloss.py b/trimesh/visual/gloss.py index 7ab55d0d3..edeb28b71 100644 --- a/trimesh/visual/gloss.py +++ b/trimesh/visual/gloss.py @@ -122,7 +122,7 @@ def get_diffuse(diffuseFactor, diffuseTexture): else: log.warning( '`diffuseFactor` and `diffuseTexture` have incompatible shapes: ' + - '{0} and {1}'.format(diffuseFactor.shape, diffuse.shape)) + f'{diffuseFactor.shape} and {diffuse.shape}') else: diffuse = diffuseFactor if diffuseFactor is not None else [1, 1, 1, 1] diffuse = np.array(diffuse, dtype=np.float32) diff --git a/trimesh/visual/material.py b/trimesh/visual/material.py index 4dc745b30..d330debf2 100644 --- a/trimesh/visual/material.py +++ b/trimesh/visual/material.py @@ -6,13 +6,12 @@ """ import abc import copy -import numpy as np -from . import color -from .. import util -from .. import exceptions +import numpy as np +from .. import exceptions, util from ..constants import tol +from . import color # epsilon for comparing floating point _eps = 1e-5 @@ -109,11 +108,11 @@ def to_obj(self, name=None): name = self.name # create an MTL file - mtl = ['newmtl {}'.format(name), + mtl = [f'newmtl {name}', 'Ka {:0.8f} {:0.8f} {:0.8f}'.format(*Ka), 'Kd {:0.8f} {:0.8f} {:0.8f}'.format(*Kd), 'Ks {:0.8f} {:0.8f} {:0.8f}'.format(*Ks), - 'Ns {:0.8f}'.format(self.glossiness)] + f'Ns {self.glossiness:0.8f}'] # collect the OBJ data into files data = {} @@ -123,9 +122,9 @@ def to_obj(self, name=None): # what is the name of the export image to save if image_type is None: image_type = 'png' - image_name = '{}.{}'.format(name, image_type.lower()) + image_name = f'{name}.{image_type.lower()}' # save the reference to the image - mtl.append('map_Kd {}'.format(image_name)) + mtl.append(f'map_Kd {image_name}') # save the image texture as bytes in the original format f_obj = util.BytesIO() @@ -133,7 +132,7 @@ def to_obj(self, name=None): f_obj.seek(0) data[image_name] = f_obj.read() - data['{}.mtl'.format(name)] = '\n'.join(mtl).encode('utf-8') + data[f'{name}.mtl'] = '\n'.join(mtl).encode('utf-8') return data, name @@ -525,7 +524,7 @@ def normalTexture(self): Normal texture. """ return self._data.get('normalTexture') - + @normalTexture.setter def normalTexture(self, value): if value is None: @@ -545,7 +544,7 @@ def emissiveTexture(self): Emissive texture. """ return self._data.get('emissiveTexture') - + @emissiveTexture.setter def emissiveTexture(self, value): if value is None: @@ -565,7 +564,7 @@ def occlusionTexture(self): Occlusion texture. """ return self._data.get('occlusionTexture') - + @occlusionTexture.setter def occlusionTexture(self, value): if value is None: @@ -606,7 +605,7 @@ def metallicRoughnessTexture(self): Metallic-roughness texture. """ return self._data.get('metallicRoughnessTexture') - + @metallicRoughnessTexture.setter def metallicRoughnessTexture(self, value): if value is None: @@ -618,7 +617,7 @@ def metallicRoughnessTexture(self, value): @property def name(self): return self._data.get('name') - + @name.setter def name(self, value): if value is None: @@ -692,7 +691,7 @@ def __hash__(self): hash : int Hash of image and parameters """ - return hash(bytes().join( + return hash(b''.join( np.asanyarray(v).tobytes() for v in self._data.values() if v is not None)) @@ -747,9 +746,11 @@ def pack(materials, uvs, deduplicate=True): Combined UV coordinates in the 0.0-1.0 range. 
""" + import collections + from PIL import Image + from ..path import packing - import collections def material_to_img(mat): """ diff --git a/trimesh/visual/objects.py b/trimesh/visual/objects.py index df10957d0..18ff302f3 100644 --- a/trimesh/visual/objects.py +++ b/trimesh/visual/objects.py @@ -7,9 +7,9 @@ """ import numpy as np +from .color import ColorVisuals, color_to_uv from .material import pack from .texture import TextureVisuals -from .color import ColorVisuals, color_to_uv def create_visual(**kwargs): diff --git a/trimesh/visual/texture.py b/trimesh/visual/texture.py index fcc02c341..10da4296a 100644 --- a/trimesh/visual/texture.py +++ b/trimesh/visual/texture.py @@ -2,14 +2,10 @@ import numpy as np -from .base import Visuals +from .. import caching, grouping, util from . import color - -from .. import util -from .. import caching -from .. import grouping - -from .material import SimpleMaterial, PBRMaterial, empty_material # NOQA +from .base import Visuals +from .material import PBRMaterial, SimpleMaterial, empty_material # NOQA class TextureVisuals(Visuals): @@ -192,7 +188,7 @@ def update_vertices(self, mask): updates[key] = value[mask] except BaseException: # usual reason is an incorrect size or index - util.log.warning('failed to update visual: `{}`'.format(key)) + util.log.warning(f'failed to update visual: `{key}`') # clear all values from the vertex attributes self.vertex_attributes.clear() # apply the updated values diff --git a/trimesh/voxel/base.py b/trimesh/voxel/base.py index d3b795d58..58ec95422 100644 --- a/trimesh/voxel/base.py +++ b/trimesh/voxel/base.py @@ -6,20 +6,14 @@ """ import numpy as np -from . import ops -from . import transforms -from . import morphology - -from .encoding import Encoding, DenseEncoding -from .. import util -from .. import caching from .. import bounds as bounds_module +from .. import caching, util from .. import transformations as tr - -from ..parent import Geometry from ..constants import log - from ..exchange.binvox import export_binvox +from ..parent import Geometry +from . import morphology, ops, transforms +from .encoding import DenseEncoding, Encoding class VoxelGrid(Geometry): @@ -40,7 +34,7 @@ def __init__(self, encoding, transform=None, metadata=None): self._cache = caching.Cache( id_function=self._data.__hash__) - self.metadata = dict() + self.metadata = {} # update the mesh metadata with passed metadata if isinstance(metadata, dict): self.metadata.update(metadata) diff --git a/trimesh/voxel/creation.py b/trimesh/voxel/creation.py index de87523a3..55a67e8ee 100644 --- a/trimesh/voxel/creation.py +++ b/trimesh/voxel/creation.py @@ -1,11 +1,8 @@ import numpy as np -from ..constants import log_time -from .. import remesh -from .. import grouping -from .. import util +from .. import grouping, remesh, util from .. import transformations as tr - +from ..constants import log_time from . import base from . import encoding as enc diff --git a/trimesh/voxel/encoding.py b/trimesh/voxel/encoding.py index 02c5134e3..723ea8642 100644 --- a/trimesh/voxel/encoding.py +++ b/trimesh/voxel/encoding.py @@ -1,11 +1,11 @@ """OO interfaces to encodings for ND arrays which caching.""" +import abc + import numpy as np -import abc +from .. import caching from ..util import ABC, log - from . import runlength -from .. 
import caching try: from scipy import sparse as sp @@ -201,7 +201,7 @@ def __init__(self, data): if not isinstance(data, np.ndarray): raise ValueError('DenseEncoding data must be a numpy array') data = caching.tracked_array(data) - super(DenseEncoding, self).__init__(data=data) + super().__init__(data=data) @property def dtype(self): @@ -292,7 +292,7 @@ def __init__(self, indices, values, shape=None): + 1 is used. """ data = caching.DataStore() - super(SparseEncoding, self).__init__(data) + super().__init__(data) data['indices'] = indices data['values'] = values indices = data['indices'] @@ -301,8 +301,7 @@ def __init__(self, indices, values, shape=None): 'indices must be 2D, got shaped %s' % str(indices.shape)) if data['values'].shape != (indices.shape[0],): raise ValueError( - 'values and indices shapes inconsistent: %s and %s' - % (data['values'], data['indices'])) + 'values and indices shapes inconsistent: {} and {}'.format(data['values'], data['indices'])) if shape is None: self._shape = tuple(data['indices'].max(axis=0) + 1) else: @@ -446,7 +445,7 @@ def __init__(self, data, dtype=None): dtype: dtype of encoded data. Each second value of data is cast will be cast to this dtype if provided. """ - super(RunLengthEncoding, self).__init__( + super().__init__( data=caching.tracked_array(data)) if dtype is None: dtype = self._data.dtype @@ -595,7 +594,7 @@ def __init__(self, data): ------------ data: binary run length encoded data. """ - super(BinaryRunLengthEncoding, self).__init__(data=data, dtype=bool) + super().__init__(data=data, dtype=bool) @caching.cache_decorator def is_empty(self): @@ -783,7 +782,7 @@ def __init__(self, encoding, shape): encoding = encoding.flat else: raise ValueError('encoding must be an Encoding') - super(ShapedEncoding, self).__init__(data=encoding) + super().__init__(data=encoding) self._shape = tuple(shape) nn = self._shape.count(-1) size = np.prod(self._shape) @@ -844,7 +843,7 @@ def __init__(self, base_encoding, perm): raise ValueError( 'base_encoding has %d ndims - cannot transpose with perm %s' % (base_encoding.ndims, str(perm))) - super(TransposedEncoding, self).__init__(base_encoding) + super().__init__(base_encoding) perm = np.array(perm, dtype=np.int64) if not all(i in perm for i in range(base_encoding.ndims)): raise ValueError('perm %s is not a valid permutation' % str(perm)) @@ -921,7 +920,7 @@ def __init__(self, encoding, axes): if len(set(self._axes)) != len(self._axes): raise ValueError( "Axes cannot contain duplicates, got %s" % str(self._axes)) - super(FlippedEncoding, self).__init__(encoding) + super().__init__(encoding) if not all(0 <= a < self._data.ndims for a in axes): raise ValueError( 'Invalid axes %s for %d-d encoding' diff --git a/trimesh/voxel/morphology.py b/trimesh/voxel/morphology.py index fa61587c6..fc3cf41c5 100644 --- a/trimesh/voxel/morphology.py +++ b/trimesh/voxel/morphology.py @@ -1,13 +1,11 @@ """Basic morphology operations that create new encodings.""" import numpy as np +from .. import util +from ..constants import log_time from . import encoding as enc from . import ops -from ..constants import log_time -from .. 
import util - - try: from scipy import ndimage except BaseException as E: diff --git a/trimesh/voxel/ops.py b/trimesh/voxel/ops.py index 2187fb875..af69ec453 100644 --- a/trimesh/voxel/ops.py +++ b/trimesh/voxel/ops.py @@ -114,6 +114,7 @@ def matrix_to_marching_cubes(matrix, pitch=1.0): the marching cubes algorithm in skimage """ from skimage import measure + from ..base import Trimesh matrix = np.asanyarray(matrix, dtype=bool) diff --git a/trimesh/voxel/runlength.py b/trimesh/voxel/runlength.py index 6b911a378..ae8a0abd6 100644 --- a/trimesh/voxel/runlength.py +++ b/trimesh/voxel/runlength.py @@ -41,6 +41,7 @@ This module contains implementations of various RLE/BRLE operations. """ import functools + import numpy as np diff --git a/trimesh/voxel/transforms.py b/trimesh/voxel/transforms.py index 03e6f8b25..f2b50a7cb 100644 --- a/trimesh/voxel/transforms.py +++ b/trimesh/voxel/transforms.py @@ -1,12 +1,10 @@ import numpy as np -from .. import util -from .. import caching - +from .. import caching, util from .. import transformations as tr -class Transform(object): +class Transform: """ Class for caching metadata associated with 4x4 transformations. From 63affb21cc8faa1ff85157470a441d8ade88ed26 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Wed, 23 Aug 2023 16:33:32 -0400 Subject: [PATCH 002/144] passing ruff with upgrades --- MANIFEST.in | 3 - pyproject.toml | 95 +++++++++++++- setup.py | 249 ------------------------------------- tests/test_copy.py | 8 +- tests/test_mutate.py | 144 ++++++++++----------- tests/test_paths.py | 2 +- trimesh/version.py | 5 +- trimesh/viewer/notebook.py | 3 +- trimesh/visual/color.py | 6 +- 9 files changed, 180 insertions(+), 335 deletions(-) delete mode 100644 MANIFEST.in delete mode 100644 setup.py diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index dee9803ed..000000000 --- a/MANIFEST.in +++ /dev/null @@ -1,3 +0,0 @@ -include LICENSE.md -include README.md -include trimesh/resources diff --git a/pyproject.toml b/pyproject.toml index f8cc629bb..ecb079149 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,98 @@ [build-system] build-backend = "setuptools.build_meta" -requires = ["setuptools >= 40.8", "wheel"] +requires = ["setuptools >= 60", "wheel"] + +[project] +name = "trimesh" +version = "3.23.3" +authors = [{name = "Michael Dawson-Haggerty", email = "mikedh@kerfed.com"}] +license = {text = "MIT"} +description = "Import, export, process, analyze and view triangular meshes." 
+keywords = ["graphics", "mesh", "geometry", "3D"] +classifiers = [ + "Development Status :: 4 - Beta", + "License :: OSI Approved :: MIT License", + "Programming Language :: Python", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Natural Language :: English", + "Topic :: Scientific/Engineering", +] +urls = {Homepage = "https://github.com/mikedh/trimesh"} +dependencies = ["numpy"] + +[project.readme] +file = "README.md" +content-type = "text/markdown" + +[project.optional-dependencies] +test = [ + "pytest-cov", + "coveralls", + "pytest", + "pymeshlab", + "pyinstrument", + "ruff", + "ezdxf", + "autopep8<2", +] +easy = [ + "colorlog", + "mapbox-earcut", + "chardet", + "lxml", + "jsonschema", + "networkx", + "svg.path", + "pycollada", + "setuptools", + "shapely", + "xxhash", + "rtree", + "requests", + "scipy", + "embreex", + "pillow", +] +all = [ + "colorlog", + "mapbox-earcut", + "lxml", + "networkx", + "svg.path", + "pycollada", + "shapely", + "xatlas", + "python-fcl", + "scipy", + "embreex", + "chardet", + "pyglet<2", + "jsonschema", + "setuptools", + "xxhash", + "scikit-image", + "rtree", + "psutil", + "requests", + "pillow", +] +recommends = ["glooey", "sympy", "meshio"] + +[tool.setuptools.packages.find] +include = ["trimesh"] + +[tool.setuptools.package-data] +trimesh = [ + "resources/templates/*", + "resources/*.json", + "resources/schema/*", + "resources/schema/primitive/*.json", + "resources/*.zip", +] + [tool.ruff] # See https://github.com/charliermarsh/ruff#rules for error code definitions. @@ -21,6 +113,7 @@ ignore = [ "N802", # Function name should be lowercase "N806", # Variable in function should be lowercase "E501", # Line too long ({width} > {limit} characters) + "B904", # raise ... 
from err "B905", # zip() without an explicit strict= parameter ] line-length = 90 diff --git a/setup.py b/setup.py deleted file mode 100644 index 617ca81c5..000000000 --- a/setup.py +++ /dev/null @@ -1,249 +0,0 @@ -#!/usr/bin/env python -# ruff: noqa -import os -import sys -from setuptools import setup - -# load __version__ without importing anything -_version_file = os.path.join(os.path.dirname(__file__), "trimesh", "version.py") - -if os.path.isfile(_version_file): - with open(_version_file, "r") as f: - _version_raw = f.read() - # use eval to get a clean string of version from file - __version__ = eval( - next( - line.strip().split("=")[-1] - for line in str.splitlines(_version_raw) - if "_version_" in line - ) - ) -else: - __version__ = None - -# load README.md as long_description -long_description = "" -if os.path.exists("README.md"): - with open("README.md", "r") as f: - long_description = f.read() - -# minimal requirements for installing trimesh -requirements_default = set(["numpy"]) - -# "easy" requirements should install without compiling -# anything on Windows, Linux, and Mac, for Python >= 3.6 -requirements_easy = set( - [ - "scipy", # provide convex hulls, fast graph ops, etc - "networkx", # provide slow graph ops with a nice API - "lxml", # handle XML better and faster than built- in XML - "shapely", # handle 2D polygons robustly - "rtree", # create N-dimension trees for broad-phase queries - "svg.path", # handle SVG format path strings - "pillow", # load images - "embreex", # Intel's Embree ray check engine with wheels - "requests", # do network requests - "xxhash", # hash ndarrays faster than built-in MD5/CRC - "setuptools", # do setuptools stuff - "jsonschema", # validate JSON schemas like GLTF - "pycollada", # parse collada/dae/zae files - "chardet", # figure out if someone used UTF-16 - "mapbox-earcut", # fast 2D triangulations of polygons - "colorlog", - ] -) # log in pretty colors - -# "all" requirements only need to be installable -# through some mechanism on Linux with Python 3.5+ -# and are allowed to compile code -requirements_all = requirements_easy.union( - [ - "python-fcl", # do fast 3D collision queries - "psutil", # figure out how much memory we have - "scikit-image", # marching cubes and other nice stuff - "xatlas", # texture unwrapping - "pyglet<2", # render preview windows nicely : note pyglet 2.0 is basically a re-write - ] -) -# requirements for running unit tests -requirements_test = set( - [ - "pytest", # run all unit tests - "pytest-cov", # coverage plugin - "pyinstrument", # profile code - "coveralls", # report coverage stats - "autopep8<2", # check and autoformat - "ruff", # static code analysis - "pymeshlab", # used as a validator for exports - "ezdxf", - ] -) # used as a validator for exports - -# things that are used implicitly -requirements_recommends = set(["meshio", "sympy", "glooey"]) - -# Python 2.7 and 3.4 support has been dropped from packages -# version lock those packages here so install succeeds -current = (sys.version_info.major, sys.version_info.minor) - -# packages that no longer support old Python -lock = [ - ((3, 4), "lxml", "4.3.5"), - ((3, 4), "shapely", "1.6.4"), - ((3, 4), "pyglet", "1.4.10"), - ((3, 5), "sympy", None), - ((3, 6), "pyglet<2", None), - ((3, 6), "autopep8", None), - ((3, 6), "ruff", None), - ((3, 7), "pymeshlab", None), - ((3, 5), "embreex", None), - ((3, 6), "svg.path", "4.1"), -] -for max_python, name, version in lock: - if current <= max_python: - # remove version-free requirements - 
requirements_easy.discard(name) - requirements_test.discard(name) - - # if version is None drop that package - if version is not None: - # add working version locked requirements - requirements_easy.add("{}=={}".format(name, version)) - - -def format_all(): - """ - A shortcut to run automatic formatting and complaining - on all of the trimesh subdirectories. - """ - import subprocess - - def run_on(target): - # words that codespell hates - # note that it always checks against the lower case - word_skip = "datas,coo,nd,files',filetests,ba,childs,whats" - # files to skip spelling on - file_skip = "*.pyc,*.zip,.DS_Store,*.js,./trimesh/resources" - spell = [ - "codespell", - "-i", - "3", - "--skip=" + file_skip, - "-L", - word_skip, - "-w", - target, - ] - print("Running: \n {} \n\n\n".format(" ".join(spell))) - subprocess.check_call(spell) - - formatter = [ - "autopep8", - "--recursive", - "--verbose", - "--in-place", - "--aggressive", - target, - ] - print("Running: \n {} \n\n\n".format(" ".join(formatter))) - subprocess.check_call(formatter) - - flake = ["flake8", target] - print("Running: \n {} \n\n\n".format(" ".join(flake))) - subprocess.check_call(flake) - - # run on our target locations - for t in ["trimesh", "tests", "examples"]: - run_on(t) - - -# if someone wants to output a requirements file -# `python setup.py --list-all > requirements.txt` -if "--list-all" in sys.argv: - # will not include default requirements (numpy) - print("\n".join(requirements_all)) - exit() -elif "--list-easy" in sys.argv: - # again will not include numpy+setuptools - print("\n".join(requirements_easy)) - exit() -elif "--list-test" in sys.argv: - # again will not include numpy+setuptools - print("\n".join(requirements_test)) - exit() -elif "--format" in sys.argv: - format_all() - exit() -elif "--bump" in sys.argv: - # bump the version number - # convert current version to integers - bumped = [int(i) for i in __version__.split(".")] - # increment the last field by one - bumped[-1] += 1 - # re-combine into a version string - version_new = ".".join(str(i) for i in bumped) - print("version bump `{}` => `{}`".format(__version__, version_new)) - # write back the original version file with - # just the value replaced with the new one - raw_new = _version_raw.replace(__version__, version_new) - with open(_version_file, "w") as f: - f.write(raw_new) - exit() - - -# call the magical setuptools setup -setup( - name="trimesh", - version=__version__, - description="Import, export, process, analyze and view triangular meshes.", - long_description=long_description, - long_description_content_type="text/markdown", - author="Michael Dawson-Haggerty", - author_email="mikedh@kerfed.com", - license="MIT", - url="https://github.com/mikedh/trimesh", - keywords="graphics mesh geometry 3D", - classifiers=[ - "Development Status :: 4 - Beta", - "License :: OSI Approved :: MIT License", - "Programming Language :: Python", - "Programming Language :: Python :: 2.7", - "Programming Language :: Python :: 3.5", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Natural Language :: English", - "Topic :: Scientific/Engineering", - ], - packages=[ - "trimesh", - "trimesh.ray", - "trimesh.path", - "trimesh.path.exchange", - "trimesh.scene", - "trimesh.voxel", - "trimesh.visual", - "trimesh.viewer", - "trimesh.exchange", - "trimesh.resources", - "trimesh.interfaces", - ], - 
package_data={ - "trimesh": [ - "resources/templates/*", - "resources/*.json", - "resources/schema/*", - "resources/schema/primitive/*.json", - "resources/*.zip", - ] - }, - install_requires=list(requirements_default), - extras_require={ - "test": list(requirements_test), - "easy": list(requirements_easy), - "all": list(requirements_all), - "recommends": list(requirments_recommends), - }, -) diff --git a/tests/test_copy.py b/tests/test_copy.py index ae8c566f0..9e6bde033 100644 --- a/tests/test_copy.py +++ b/tests/test_copy.py @@ -16,10 +16,10 @@ def test_copy(self): start = hash(mesh) # make sure some stuff is populated - mesh.kdtree - mesh.triangles_tree - mesh.face_adjacency_angles - mesh.facets + _ = mesh.kdtree + _ = mesh.triangles_tree + _ = mesh.face_adjacency_angles + _ = mesh.facets assert 'triangles_tree' in mesh._cache assert len(mesh._cache) > 0 diff --git a/tests/test_mutate.py b/tests/test_mutate.py index 71d3ff8d9..139b3a8fb 100644 --- a/tests/test_mutate.py +++ b/tests/test_mutate.py @@ -68,78 +68,78 @@ def _test_not_mutated(self, mesh, verts, faces): faces = g.np.copy(faces) lo, hi = mesh.bounds - mesh.faces_sparse - mesh.face_normals - mesh.vertex_normals - mesh.extents - mesh.scale - mesh.centroid - mesh.center_mass - mesh.density - mesh.volume - mesh.mass - mesh.moment_inertia - mesh.principal_inertia_components - mesh.principal_inertia_vectors - mesh.principal_inertia_transform - mesh.symmetry - mesh.symmetry_axis - mesh.symmetry_section - mesh.triangles - mesh.triangles_tree - mesh.triangles_center - mesh.triangles_cross - mesh.edges - mesh.edges_face - mesh.edges_unique - mesh.edges_unique_length - mesh.edges_unique_inverse - mesh.edges_sorted - mesh.edges_sparse - mesh.body_count - mesh.faces_unique_edges - mesh.euler_number - mesh.referenced_vertices - mesh.units - mesh.face_adjacency - mesh.face_adjacency_edges - mesh.face_adjacency_angles - mesh.face_adjacency_projections - mesh.face_adjacency_convex - mesh.face_adjacency_unshared - mesh.face_adjacency_radius - mesh.face_adjacency_span - mesh.vertex_adjacency_graph - mesh.vertex_neighbors - mesh.is_winding_consistent - mesh.is_watertight - mesh.is_volume - mesh.is_empty - mesh.is_convex - mesh.kdtree - mesh.facets - mesh.facets_area - mesh.facets_normal - mesh.facets_origin - mesh.facets_boundary - mesh.facets_on_hull - mesh.visual - mesh.convex_hull - mesh.sample(500, False) - mesh.voxelized((hi[0] - lo[0]) / 100.0) - mesh.outline() - mesh.area - mesh.area_faces - mesh.mass_properties - mesh.scene() - mesh.identifier - mesh.identifier_hash - mesh.to_dict() - mesh.face_angles - mesh.face_angles_sparse - mesh.vertex_defects - mesh.face_adjacency_tree - mesh.copy() + _ = mesh.faces_sparse + _ = mesh.face_normals + _ = mesh.vertex_normals + _ = mesh.extents + _ = mesh.scale + _ = mesh.centroid + _ = mesh.center_mass + _ = mesh.density + _ = mesh.volume + _ = mesh.mass + _ = mesh.moment_inertia + _ = mesh.principal_inertia_components + _ = mesh.principal_inertia_vectors + _ = mesh.principal_inertia_transform + _ = mesh.symmetry + _ = mesh.symmetry_axis + _ = mesh.symmetry_section + _ = mesh.triangles + _ = mesh.triangles_tree + _ = mesh.triangles_center + _ = mesh.triangles_cross + _ = mesh.edges + _ = mesh.edges_face + _ = mesh.edges_unique + _ = mesh.edges_unique_length + _ = mesh.edges_unique_inverse + _ = mesh.edges_sorted + _ = mesh.edges_sparse + _ = mesh.body_count + _ = mesh.faces_unique_edges + _ = mesh.euler_number + _ = mesh.referenced_vertices + _ = mesh.units + _ = mesh.face_adjacency + _ = 
mesh.face_adjacency_edges + _ = mesh.face_adjacency_angles + _ = mesh.face_adjacency_projections + _ = mesh.face_adjacency_convex + _ = mesh.face_adjacency_unshared + _ = mesh.face_adjacency_radius + _ = mesh.face_adjacency_span + _ = mesh.vertex_adjacency_graph + _ = mesh.vertex_neighbors + _ = mesh.is_winding_consistent + _ = mesh.is_watertight + _ = mesh.is_volume + _ = mesh.is_empty + _ = mesh.is_convex + _ = mesh.kdtree + _ = mesh.facets + _ = mesh.facets_area + _ = mesh.facets_normal + _ = mesh.facets_origin + _ = mesh.facets_boundary + _ = mesh.facets_on_hull + _ = mesh.visual + _ = mesh.convex_hull + _ = mesh.sample(500, False) + _ = mesh.voxelized((hi[0] - lo[0]) / 100.0) + _ = mesh.outline() + _ = mesh.area + _ = mesh.area_faces + _ = mesh.mass_properties + _ = mesh.scene() + _ = mesh.identifier + _ = mesh.identifier_hash + _ = mesh.to_dict() + _ = mesh.face_angles + _ = mesh.face_angles_sparse + _ = mesh.vertex_defects + _ = mesh.face_adjacency_tree + _ = mesh.copy() # ray.intersects_id centre = mesh.vertices.mean(axis=0) diff --git a/tests/test_paths.py b/tests/test_paths.py index 80a27541b..0c511f382 100644 --- a/tests/test_paths.py +++ b/tests/test_paths.py @@ -88,7 +88,7 @@ def test_discrete(self): d.metadata['file_name'], len(split)) for body in split: - body.identifier + _ = body.identifier if len(d.root) == 1: d.apply_obb() diff --git a/trimesh/version.py b/trimesh/version.py index 85b1ed9b8..5a80da940 100644 --- a/trimesh/version.py +++ b/trimesh/version.py @@ -1,4 +1,7 @@ -__version__ = '3.23.3' +from importlib.metadata import version + +# will get the version the package was installed with +__version__ = version('trimesh') if __name__ == '__main__': # print version if run directly i.e. in a CI script diff --git a/trimesh/viewer/notebook.py b/trimesh/viewer/notebook.py index 451471b1e..ddda0e69a 100644 --- a/trimesh/viewer/notebook.py +++ b/trimesh/viewer/notebook.py @@ -32,7 +32,8 @@ def scene_to_html(scene): base = util.decompress( resources.get('templates/viewer.zip', decode=False), file_type='zip')['viewer.html.template'].read().decode('utf-8') - scene.camera + # make sure scene has camera populated before export + _ = scene.camera # get export as bytes data = scene.export(file_type='glb') # encode as base64 string diff --git a/trimesh/visual/color.py b/trimesh/visual/color.py index 7ac684569..114e00210 100644 --- a/trimesh/visual/color.py +++ b/trimesh/visual/color.py @@ -146,9 +146,9 @@ def copy(self): Contains the same information as self """ copied = ColorVisuals() - # call the literally insane generators - self.face_colors - self.vertex_colors + # call the literally insane generators to validate + self.face_colors # noqa + self.vertex_colors # noqa # copy anything that's actually data copied._data.data = copy.deepcopy(self._data.data) From 37039c081e5c64aec30e26bafd0d9e7b206a670d Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 24 Aug 2023 21:13:02 -0400 Subject: [PATCH 003/144] use manual setuptools discovery --- pyproject.toml | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5e2d7c1a4..9fcab084b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,18 +33,28 @@ file = "README.md" content-type = "text/markdown" [tool.setuptools] -include-package-data = true - -[tool.setuptools.packages.find] -where = ["trimesh"] +packages = [ + "trimesh", + "trimesh.ray", + "trimesh.path", + "trimesh.path.exchange", + "trimesh.scene", + "trimesh.voxel", + 
"trimesh.visual", + "trimesh.viewer", + "trimesh.exchange", + "trimesh.resources", + "trimesh.interfaces", +] +include-package-data = false [tool.setuptools.package-data] -"trimesh.resources" = [ - "templates/*", - "*.json", - "schema/*", - "schema/primitive/*.json", - "*.zip", +trimesh = [ + "resources/templates/*", + "resources/*.json", + "resources/schema/*", + "resources/schema/primitive/*.json", + "resources/*.zip", ] [project.optional-dependencies] From 831d769f58d84b1d0d55159303749097fbc9ce92 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 24 Aug 2023 21:19:44 -0400 Subject: [PATCH 004/144] better error message for test_minimal --- tests/test_minimal.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/test_minimal.py b/tests/test_minimal.py index a3c38ddc5..8aa1347a1 100644 --- a/tests/test_minimal.py +++ b/tests/test_minimal.py @@ -108,13 +108,14 @@ def test_load_wrap(self): try: get_mesh('cycloidal.3DXML') except BaseException as E: - exc = str(E) + exc = str(E).lower() # should have raised assert exc is not None # error message should have been useful - assert 'lxml' in exc + if 'lxml' not in exc: + raise ValueError(exc) if __name__ == '__main__': From 42815ec56c47408e357a5c861a471e3f1e6b8779 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 24 Aug 2023 21:28:06 -0400 Subject: [PATCH 005/144] edit test_minimal --- tests/test_minimal.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/test_minimal.py b/tests/test_minimal.py index 8aa1347a1..4cd4941df 100644 --- a/tests/test_minimal.py +++ b/tests/test_minimal.py @@ -114,7 +114,8 @@ def test_load_wrap(self): assert exc is not None # error message should have been useful - if 'lxml' not in exc: + # containing which module the user was missing + if not any(m in exc for m in ('lxml', 'networkx')): raise ValueError(exc) From 2e077ff7a42b9f3a7ffd4667c584cb672332bde1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=96=B9=E6=AD=A6=E5=8D=93?= Date: Mon, 28 Aug 2023 09:59:10 +0800 Subject: [PATCH 006/144] prevent division by zero --- trimesh/ray/ray_triangle.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/trimesh/ray/ray_triangle.py b/trimesh/ray/ray_triangle.py index b502ad6f3..2743ba024 100644 --- a/trimesh/ray/ray_triangle.py +++ b/trimesh/ray/ray_triangle.py @@ -391,6 +391,9 @@ def ray_bounds(ray_origins, axis_dir = np.array([ray_directions[i][a] for i, a in enumerate(axis)]).reshape((-1, 1)) + # prevent division by zero + axis_dir[axis_dir == 0] = tol.zero + # parametric equation of a line # point = direction*t + origin # p = dt + o From e4d51e5ba2b4f4a42816e5e9d188c40994fe0450 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 28 Aug 2023 12:52:37 -0400 Subject: [PATCH 007/144] add prerelease warning to readme and test on 3.6 --- .github/workflows/test.yml | 2 +- README.md | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 56f58e3bb..f33a280d7 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -24,7 +24,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - python-version: ["3.11"] + python-version: ["3.6", "3.11"] os: [ubuntu-latest, windows-latest, macos-latest] steps: - uses: actions/checkout@v3 diff --git a/README.md b/README.md index afc64497c..021cf995a 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,12 @@ -Trimesh is a pure Python (2.7-3.5+) library for loading and using [triangular 
meshes](https://en.wikipedia.org/wiki/Triangle_mesh) with an emphasis on watertight surfaces. The goal of the library is to provide a full featured and well tested Trimesh object which allows for easy manipulation and analysis, in the style of the Polygon object in the [Shapely library](https://github.com/Toblerity/Shapely). +| :warning: WARNING | +|:---------------------------| +| trimesh 4.0.0 which makes the minimum Python version 3.7 is in pre-release and will be released soon, you may want to test your stack with: `pip install --pre trimesh` | + + +Trimesh is a pure Python 3.7+ library for loading and using [triangular meshes](https://en.wikipedia.org/wiki/Triangle_mesh) with an emphasis on watertight surfaces. The goal of the library is to provide a full featured and well tested Trimesh object which allows for easy manipulation and analysis, in the style of the Polygon object in the [Shapely library](https://github.com/Toblerity/Shapely). The API is mostly stable, but this should not be relied on and is not guaranteed: install a specific version if you plan on deploying something using trimesh. From b3acf7c6534ce38c5c239494ea7f4b3959bc57e0 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 28 Aug 2023 12:58:33 -0400 Subject: [PATCH 008/144] try old python on 20.04 --- .github/workflows/test.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index f33a280d7..740f7a390 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -26,6 +26,12 @@ jobs: matrix: python-version: ["3.6", "3.11"] os: [ubuntu-latest, windows-latest, macos-latest] + exclude: + - os: ubuntu-latest + python-version: 3.6 + include: + - os: ubuntu-20.04 + python-version: 3.6 steps: - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} From 2cd4d8fecc62dc80a6aa4421eb469bc96c0f58ce Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 28 Aug 2023 12:59:49 -0400 Subject: [PATCH 009/144] try last 3.6 version of setuptools --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 9fcab084b..405be8b1b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [build-system] build-backend = "setuptools.build_meta" -requires = ["setuptools >= 60", "wheel"] +requires = ["setuptools >= 59.6", "wheel"] [project] name = "trimesh" From d3fc18ec7cb3f59d4506cc4feb8d3eaa407d373b Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 28 Aug 2023 13:00:08 -0400 Subject: [PATCH 010/144] see if 3.6 can work --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 405be8b1b..c036b45f4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ requires = ["setuptools >= 59.6", "wheel"] [project] name = "trimesh" -requires-python = ">=3.7" +requires-python = ">=3.6" version = "4.0.0.rc0" authors = [{name = "Michael Dawson-Haggerty", email = "mikedh@kerfed.com"}] license = {text = "MIT"} From 88a688c24de4354dab558c72961e8a76505a081b Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 28 Aug 2023 13:11:17 -0400 Subject: [PATCH 011/144] setuptools version --- .github/workflows/test.yml | 8 +------- pyproject.toml | 4 ++-- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 740f7a390..07a1d6308 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -24,14 
14 +24,8 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        python-version: ["3.6", "3.11"]
+        python-version: ["3.7", "3.11"]
         os: [ubuntu-latest, windows-latest, macos-latest]
-        exclude:
-          - os: ubuntu-latest
-            python-version: 3.6
-        include:
-          - os: ubuntu-20.04
-            python-version: 3.6
     steps:
     - uses: actions/checkout@v3
     - name: Set up Python ${{ matrix.python-version }}
diff --git a/pyproject.toml b/pyproject.toml
index c036b45f4..504ca9aae 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,10 +1,10 @@
 [build-system]
 build-backend = "setuptools.build_meta"
-requires = ["setuptools >= 59.6", "wheel"]
+requires = ["setuptools >= 61.0", "wheel"]
 
 [project]
 name = "trimesh"
-requires-python = ">=3.6"
+requires-python = ">=3.7"
 version = "4.0.0.rc0"
 authors = [{name = "Michael Dawson-Haggerty", email = "mikedh@kerfed.com"}]
 license = {text = "MIT"}

From a3c4edf0924639d8ed2fc8ae07be048a059e3722 Mon Sep 17 00:00:00 2001
From: Michael Dawson-Haggerty
Date: Mon, 28 Aug 2023 13:20:46 -0400
Subject: [PATCH 012/144] use pkg_resources on older python

---
 trimesh/__init__.py |  6 +-----
 trimesh/version.py  | 13 +++++++++----
 2 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/trimesh/__init__.py b/trimesh/__init__.py
index 591a78225..09aca216c 100644
--- a/trimesh/__init__.py
+++ b/trimesh/__init__.py
@@ -7,8 +7,6 @@
 provide a fully featured Trimesh object which allows for
 easy manipulation and analysis, in the style of the Polygon
 object in the Shapely library.
 """
-
-# current version
 # avoid a circular import in trimesh.base
 from . import bounds, collision, nsphere, primitives, smoothing, voxel
@@ -36,9 +34,7 @@
     from .exceptions import ExceptionWrapper
     path = ExceptionWrapper(E)
 
-# explicitly list imports in __all__
-# as otherwise flake8 gets mad
-__all__ = [__version__,
+__all__ = ["__version__",
            'Trimesh',
            'PointCloud',
            'Scene',
diff --git a/trimesh/version.py b/trimesh/version.py
index 5a80da940..032e7c9d5 100644
--- a/trimesh/version.py
+++ b/trimesh/version.py
@@ -1,7 +1,12 @@
-from importlib.metadata import version
-
-# will get the version the package was installed with
-__version__ = version('trimesh')
+# get the version trimesh was installed with from metadata
+try:
+    # Python >= 3.8
+    from importlib.metadata import version
+    __version__ = version('trimesh')
+except BaseException:
+    # Python < 3.8
+    from pkg_resources import get_distribution
+    __version__ = get_distribution('trimesh').version
 
 if __name__ == '__main__':
     # print version if run directly i.e.
in a CI script From 6675fee485ee309807bb4a3fd885ae1afff2bb05 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 28 Aug 2023 14:00:39 -0400 Subject: [PATCH 013/144] use target-version for ruff --- pyproject.toml | 1 + trimesh/exchange/binvox.py | 4 ++-- trimesh/exchange/dae.py | 2 +- trimesh/exchange/export.py | 2 +- trimesh/exchange/gltf.py | 8 ++++---- trimesh/exchange/load.py | 7 ++++--- trimesh/exchange/ply.py | 2 +- trimesh/interfaces/generic.py | 4 ++-- trimesh/interfaces/scad.py | 2 +- trimesh/path/exchange/svg_io.py | 12 ++++++------ trimesh/resolvers.py | 2 +- trimesh/scene/scene.py | 4 ++-- trimesh/util.py | 8 ++++---- trimesh/viewer/windowed.py | 32 ++++++++++++++++---------------- trimesh/voxel/encoding.py | 3 ++- 15 files changed, 48 insertions(+), 45 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 504ca9aae..956b15621 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -100,6 +100,7 @@ recommend = [ [tool.ruff] +target-version = "py37" # See https://github.com/charliermarsh/ruff#rules for error code definitions. select = [ # "ANN", # annotations diff --git a/trimesh/exchange/binvox.py b/trimesh/exchange/binvox.py index 54b9342fa..df5924113 100644 --- a/trimesh/exchange/binvox.py +++ b/trimesh/exchange/binvox.py @@ -430,7 +430,7 @@ def __init__( 'Maximum dimension using exact is 1024, got %d' % dimension) if file_type not in Binvoxer.SUPPORTED_OUTPUT_TYPES: raise ValueError( - 'file_type {} not in set of supported output types {}'.format(file_type, str(Binvoxer.SUPPORTED_OUTPUT_TYPES))) + f'file_type {file_type} not in set of supported output types {str(Binvoxer.SUPPORTED_OUTPUT_TYPES)}') args = [encoder, '-d', str(dimension), '-t', file_type] if exact: args.append('-e') @@ -515,7 +515,7 @@ def __call__(self, path, overwrite=False): ext = ext[1:].lower() if ext not in Binvoxer.SUPPORTED_INPUT_TYPES: raise ValueError( - 'file_type {} not in set of supported input types {}'.format(ext, str(Binvoxer.SUPPORTED_INPUT_TYPES))) + f'file_type {ext} not in set of supported input types {str(Binvoxer.SUPPORTED_INPUT_TYPES)}') out_path = f'{head}.{self._file_type}' if os.path.isfile(out_path) and not overwrite: raise OSError('Attempted to voxelize object at existing path') diff --git a/trimesh/exchange/dae.py b/trimesh/exchange/dae.py index 95356bdb9..dbf10b2fb 100644 --- a/trimesh/exchange/dae.py +++ b/trimesh/exchange/dae.py @@ -99,7 +99,7 @@ def export_collada(mesh, **kwargs): import collada meshes = mesh - if not isinstance(mesh, list | tuple | set | np.ndarray): + if not isinstance(mesh, (list, tuple, set, np.ndarray)): meshes = [mesh] c = collada.Collada() diff --git a/trimesh/exchange/export.py b/trimesh/exchange/export.py index d65bd2da0..7c5fadce7 100644 --- a/trimesh/exchange/export.py +++ b/trimesh/exchange/export.py @@ -67,7 +67,7 @@ def export_mesh(mesh, if file_type not in _mesh_exporters: raise ValueError('%s exporter not available!', file_type) - if isinstance(mesh, list | tuple | set | np.ndarray): + if isinstance(mesh, (list, tuple, set, np.ndarray)): faces = 0 for m in mesh: faces += len(m.faces) diff --git a/trimesh/exchange/gltf.py b/trimesh/exchange/gltf.py index 4c155510b..25748ae43 100644 --- a/trimesh/exchange/gltf.py +++ b/trimesh/exchange/gltf.py @@ -242,9 +242,9 @@ def export_glb( dtype=" 0 and v is not None - and len(v) > 0) + for k, v in bag.items() + if len(k) > 0 and v is not None + and len(v) > 0) def _encode(stuff): @@ -654,7 +654,7 @@ def _deep_same(original, other): # ndarrays will be converted to lists # but otherwise 
types should be identical if isinstance(original, np.ndarray): - assert isinstance(other, list | np.ndarray) + assert isinstance(other, (list, np.ndarray)) elif util.is_string(original): # handle python 2+3 unicode vs str assert util.is_string(other) @@ -662,11 +662,11 @@ def _deep_same(original, other): # otherwise they should be the same type assert isinstance(original, type(other)) - if isinstance(original, str | bytes): + if isinstance(original, (str, bytes)): # string and bytes should just be identical assert original == other return - elif isinstance(original, float | int | np.ndarray): + elif isinstance(original, (float, int, np.ndarray)): # for numeric classes use numpy magic comparison # which includes an epsilon for floating point assert np.allclose(original, other) diff --git a/trimesh/resolvers.py b/trimesh/resolvers.py index 532c0c67d..982469a59 100644 --- a/trimesh/resolvers.py +++ b/trimesh/resolvers.py @@ -257,7 +257,7 @@ def get(self, name): # get the stored data obj = archive[name] # if the dict is storing data as bytes just return - if isinstance(obj, bytes | str): + if isinstance(obj, (bytes, str)): return obj # otherwise get it as a file object # read file object from beginning diff --git a/trimesh/scene/scene.py b/trimesh/scene/scene.py index 392f30e96..28ca909cb 100644 --- a/trimesh/scene/scene.py +++ b/trimesh/scene/scene.py @@ -888,7 +888,7 @@ def subscene(self, node): graph.from_edgelist(edges) geometry_names = {e[2]['geometry'] for e in edges - if 'geometry' in e[2]} + if 'geometry' in e[2]} geometry = {k: self.geometry[k] for k in geometry_names} result = Scene(geometry=geometry, graph=graph) return result @@ -1092,7 +1092,7 @@ def scaled(self, scale): """ # convert 2D geometries to 3D for 3D scaling factors scale_is_3D = isinstance( - scale, list | tuple | np.ndarray) and len(scale) == 3 + scale, (list, tuple, np.ndarray)) and len(scale) == 3 if scale_is_3D and np.all(np.asarray(scale) == scale[0]): # scale is uniform diff --git a/trimesh/util.py b/trimesh/util.py index 872317031..a33127a81 100644 --- a/trimesh/util.py +++ b/trimesh/util.py @@ -2072,10 +2072,10 @@ def triangle_fans_to_faces(fans): """ faces = [np.transpose([ - fan[0]*np.ones(len(fan) - 2, dtype=int), - fan[1:-1], - fan[2:] - ]) for fan in fans] + fan[0] * np.ones(len(fan) - 2, dtype=int), + fan[1:-1], + fan[2:] + ]) for fan in fans] return np.concatenate(faces, axis=1) diff --git a/trimesh/viewer/windowed.py b/trimesh/viewer/windowed.py index b728d53cc..6d7a19dfc 100644 --- a/trimesh/viewer/windowed.py +++ b/trimesh/viewer/windowed.py @@ -170,27 +170,27 @@ def __init__(self, depth_size=24, double_buffer=True) super().__init__(config=conf, - visible=visible, - resizable=True, - width=resolution[0], - height=resolution[1], - caption=caption) + visible=visible, + resizable=True, + width=resolution[0], + height=resolution[1], + caption=caption) except pyglet.window.NoSuchConfigException: conf = gl.Config(double_buffer=True) super().__init__(config=conf, - resizable=True, - visible=visible, - width=resolution[0], - height=resolution[1], - caption=caption) + resizable=True, + visible=visible, + width=resolution[0], + height=resolution[1], + caption=caption) else: # window config was manually passed super().__init__(config=window_conf, - resizable=True, - visible=visible, - width=resolution[0], - height=resolution[1], - caption=caption) + resizable=True, + visible=visible, + width=resolution[0], + height=resolution[1], + caption=caption) # add scene geometry to viewer geometry 
self._update_vertex_list() @@ -284,7 +284,7 @@ def cleanup_geometries(self): graph = self.scene.graph # which parts of the graph still have geometry geom_keep = {graph[node][1] for - node in graph.nodes_geometry} + node in graph.nodes_geometry} # which geometries no longer need to be kept geom_delete = [geom for geom in self.vertex_list if geom not in geom_keep] diff --git a/trimesh/voxel/encoding.py b/trimesh/voxel/encoding.py index 723ea8642..8ec3d7981 100644 --- a/trimesh/voxel/encoding.py +++ b/trimesh/voxel/encoding.py @@ -301,7 +301,8 @@ def __init__(self, indices, values, shape=None): 'indices must be 2D, got shaped %s' % str(indices.shape)) if data['values'].shape != (indices.shape[0],): raise ValueError( - 'values and indices shapes inconsistent: {} and {}'.format(data['values'], data['indices'])) + 'values and indices shapes inconsistent: {} and {}'.format( + data['values'], data['indices'])) if shape is None: self._shape = tuple(data['indices'].max(axis=0) + 1) else: From 917c8b59791e487d0af51f05309c9d4642ba535f Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 28 Aug 2023 14:16:03 -0400 Subject: [PATCH 014/144] deprecate hash entry points --- tests/test_creation.py | 2 +- trimesh/caching.py | 66 ------------------------------------------ trimesh/parent.py | 55 ----------------------------------- 3 files changed, 1 insertion(+), 122 deletions(-) diff --git a/tests/test_creation.py b/tests/test_creation.py index 2781f4d23..5599c9810 100644 --- a/tests/test_creation.py +++ b/tests/test_creation.py @@ -126,7 +126,7 @@ def test_camera_marker(self): assert isinstance(meshes, list) # all meshes should be viewable type for mesh in meshes: - assert isinstance(mesh, g.trimesh.Trimesh | g.trimesh.path.Path3D) + assert isinstance(mesh, (g.trimesh.Trimesh, g.trimesh.path.Path3D)) def test_axis(self): # specify the size of the origin radius diff --git a/trimesh/caching.py b/trimesh/caching.py index ce429507f..9e71eebeb 100644 --- a/trimesh/caching.py +++ b/trimesh/caching.py @@ -21,7 +21,6 @@ """ import os import time -import warnings from functools import wraps import numpy as np @@ -203,30 +202,6 @@ def mutable(self): def mutable(self, value): self.flags.writeable = value - def hash(self): - warnings.warn( - '`array.hash()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `array.__hash__()` or `hash(array)`', - category=DeprecationWarning, stacklevel=2) - return self.__hash__() - - def crc(self): - warnings.warn( - '`array.crc()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `array.__hash__()` or `hash(array)`', - category=DeprecationWarning, stacklevel=2) - return self.__hash__() - - def md5(self): - warnings.warn( - '`array.md5()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `array.__hash__()` or `hash(array)`', - category=DeprecationWarning, stacklevel=2) - return self.__hash__() - def __hash__(self): """ Return a fast hash of the contents of the array. @@ -723,44 +698,3 @@ def __hash__(self): if v is not None and (not hasattr(v, '__len__') or len(v) > 0)], dtype=np.int64).tobytes()) - - def crc(self): - """ - Get a CRC reflecting everything in the DataStore. 
- - Returns - ---------- - crc : int - CRC of data - """ - warnings.warn( - '`array.crc()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `array.__hash__()` or `hash(array)`', - category=DeprecationWarning, stacklevel=2) - return self.__hash__() - - def fast_hash(self): - """ - Get a CRC32 or xxhash.xxh64 reflecting the DataStore. - - Returns - ------------ - hashed : int - Checksum of data - """ - warnings.warn( - '`array.fast_hash()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `array.__hash__()` or `hash(array)`', - category=DeprecationWarning, stacklevel=2) - return self.__hash__() - - def hash(self): - warnings.warn( - '`array.hash()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `array.__hash__()` or `hash(array)`', - category=DeprecationWarning, stacklevel=2) - - return self.__hash__() diff --git a/trimesh/parent.py b/trimesh/parent.py index 5f63b7eb9..e8a70b7d3 100644 --- a/trimesh/parent.py +++ b/trimesh/parent.py @@ -5,7 +5,6 @@ The base class for Trimesh, PointCloud, and Scene objects """ import abc -import warnings import numpy as np @@ -40,60 +39,6 @@ def apply_transform(self, matrix): def is_empty(self): pass - def crc(self): - """ - DEPRECATED OCTOBER 2023 : Use `hash(geometry)` - - Get a hash of the current geometry. - - Returns - --------- - hash : int - Hash of current graph and geometry. - """ - warnings.warn( - '`geometry.crc()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `geometry.__hash__()` or `hash(geometry)`', - category=DeprecationWarning, stacklevel=2) - return self.__hash__() - - def hash(self): - """ - DEPRECATED OCTOBER 2023 : Use `hash(geometry)` - - Get a hash of the current geometry. - - Returns - --------- - hash : int - Hash of current graph and geometry. - """ - warnings.warn( - '`geometry.hash()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `geometry.__hash__()` or `hash(geometry)`', - category=DeprecationWarning, stacklevel=2) - return self.__hash__() - - def md5(self): - """ - DEPRECATED OCTOBER 2023 : Use `hash(geometry)` - - Get a hash of the current geometry. - - Returns - --------- - hash : int - Hash of current graph and geometry. - """ - warnings.warn( - '`geometry.md5()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `geometry.__hash__()` or `hash(geometry)`', - category=DeprecationWarning, stacklevel=2) - return self.__hash__() - def __hash__(self): """ Get a hash of the current geometry. From 098ebd46568bd0d8397930c3c0782c560b5b3f79 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 28 Aug 2023 14:53:17 -0400 Subject: [PATCH 015/144] fix test_repair --- tests/test_depr.py | 39 ------------------------ tests/test_repair.py | 71 ++++++++++++++++++++------------------------ 2 files changed, 33 insertions(+), 77 deletions(-) delete mode 100644 tests/test_depr.py diff --git a/tests/test_depr.py b/tests/test_depr.py deleted file mode 100644 index 992cebf8d..000000000 --- a/tests/test_depr.py +++ /dev/null @@ -1,39 +0,0 @@ -try: - from . 
import generic as g -except BaseException: - import generic as g - - -class DepTest(g.unittest.TestCase): - - def test_deprecated(self): - - tests = [g.get_mesh('2D/wrench.dxf'), - g.trimesh.creation.box()] - - # todo : properly hash transform trees - # so that copies of scenes hash the same - # g.get_mesh('cycloidal.3DXML')] - - for m in tests: - copy = m.copy() - # the modern cool way of hashing - assert hash(m) == hash(copy) - assert m.__hash__() == copy.__hash__() - assert m.identifier_hash == copy.identifier_hash - - # october 2023 deprecated ways of hashing - # geometries - assert m.md5() == copy.md5() - assert m.crc() == copy.crc() - assert m.hash() == copy.hash() - assert m.identifier_md5 == copy.identifier_md5 - # trackedarray - assert m.vertices.md5() == copy.vertices.md5() - assert m.vertices.hash() == copy.vertices.hash() - assert m.vertices.crc() == copy.vertices.crc() - - -if __name__ == '__main__': - g.trimesh.util.attach_to_log() - g.unittest.main() diff --git a/tests/test_repair.py b/tests/test_repair.py index f02d300da..a49beda09 100644 --- a/tests/test_repair.py +++ b/tests/test_repair.py @@ -5,18 +5,18 @@ class RepairTests(g.unittest.TestCase): - def test_fill_holes(self): - for mesh_name in ['unit_cube.STL', - 'machinist.XAML', - 'round.stl', - 'sphere.ply', - 'teapot.stl', - 'soup.stl', - 'featuretype.STL', - 'angle_block.STL', - 'quadknot.obj']: - + for mesh_name in [ + "unit_cube.STL", + "machinist.XAML", + "round.stl", + "sphere.ply", + "teapot.stl", + "soup.stl", + "featuretype.STL", + "angle_block.STL", + "quadknot.obj", + ]: mesh = g.get_mesh(mesh_name) if not mesh.is_watertight: # output of fill_holes should match watertight status @@ -24,21 +24,16 @@ def test_fill_holes(self): assert returned == mesh.is_watertight continue - hashes = [{mesh._data.__hash__(), - mesh._data.__hash__(), - mesh._data.fast_hash()}] + hashes = [{mesh._data.__hash__(), hash(mesh)}] mesh.faces = mesh.faces[1:-1] assert not mesh.is_watertight assert not mesh.is_volume # color some faces - g.trimesh.repair.broken_faces( - mesh, color=[255, 0, 0, 255]) + g.trimesh.repair.broken_faces(mesh, color=[255, 0, 0, 255]) - hashes.append({mesh._data.__hash__(), - mesh._data.__hash__(), - mesh._data.fast_hash()}) + hashes.append({mesh._data.__hash__(), hash(mesh)}) assert hashes[0] != hashes[1] @@ -49,14 +44,11 @@ def test_fill_holes(self): assert mesh.is_watertight assert mesh.is_winding_consistent - hashes.append({mesh._data.__hash__(), - mesh._data.__hash__(), - mesh._data.fast_hash()}) + hashes.append({mesh._data.__hash__(), hash(mesh)}) assert hashes[1] != hashes[2] # try broken faces on a watertight mesh - g.trimesh.repair.broken_faces( - mesh, color=[255, 255, 0, 255]) + g.trimesh.repair.broken_faces(mesh, color=[255, 255, 0, 255]) def test_fix_normals(self): for mesh in g.get_meshes(5): @@ -68,16 +60,20 @@ def test_winding(self): them back. 
""" - meshes = [g.get_mesh(i) for i in - ['unit_cube.STL', - 'machinist.XAML', - 'round.stl', - 'quadknot.obj', - 'soup.stl']] + meshes = [ + g.get_mesh(i) + for i in [ + "unit_cube.STL", + "machinist.XAML", + "round.stl", + "quadknot.obj", + "soup.stl", + ] + ] for i, mesh in enumerate(meshes): # turn scenes into multibody meshes - if g.trimesh.util.is_instance_named(mesh, 'Scene'): + if g.trimesh.util.is_instance_named(mesh, "Scene"): meta = mesh.metadata meshes[i] = mesh.dump().sum() meshes[i].metadata = meta @@ -100,7 +96,7 @@ def test_winding(self): assert mesh.is_winding_consistent == winding # save timings - timing[mesh.metadata['file_name']] = g.time.time() - tic + timing[mesh.metadata["file_name"]] = g.time.time() - tic # print timings as a warning g.log.warning(g.json.dumps(timing, indent=4)) @@ -124,7 +120,7 @@ def test_multi(self): Try repairing a multibody geometry """ # create a multibody mesh with two cubes - a = g.get_mesh('unit_cube.STL') + a = g.get_mesh("unit_cube.STL") b = a.copy() b.apply_translation([2, 0, 0]) m = a + b @@ -169,7 +165,6 @@ def test_flip(self): assert g.np.isclose(m.volume, a.volume * 2.0) def test_fan(self): - # start by creating an icosphere and removing # all faces that include a single vertex to make # a nice hole in the mesh @@ -193,16 +188,16 @@ def test_fan(self): # should be an (n, 3) int assert len(stitch.shape) == 2 assert stitch.shape[1] == 3 - assert stitch.dtype.kind == 'i' + assert stitch.dtype.kind == "i" # now check our stitch to see if it handled the hole repair = g.trimesh.Trimesh( - vertices=m.vertices.copy(), - faces=g.np.vstack((m.faces, stitch))) + vertices=m.vertices.copy(), faces=g.np.vstack((m.faces, stitch)) + ) assert repair.is_watertight assert repair.is_winding_consistent -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() From fe7ceec717a9728c42ea04ef50373feb9e6970bf Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 28 Aug 2023 16:44:56 -0400 Subject: [PATCH 016/144] convert trimesh.transformations to be numpy-only --- tests/test_transformations.py | 56 +++--- trimesh/transformations.py | 341 ++++++++++++++++++---------------- 2 files changed, 202 insertions(+), 195 deletions(-) diff --git a/tests/test_transformations.py b/tests/test_transformations.py index 672fc3954..c31bdf9ab 100644 --- a/tests/test_transformations.py +++ b/tests/test_transformations.py @@ -5,7 +5,6 @@ class TransformTest(g.unittest.TestCase): - def test_doctest(self): """ Run doctests on transformations, which checks docstrings @@ -29,10 +28,13 @@ def test_doctest(self): # search for interactive sessions in docstrings and verify they work # they are super unreliable and depend on janky string formatting - results = doctest.testmod(trimesh.transformations, - verbose=False, - raise_on_error=True) - g.log.info(f'transformations {str(results)}') + results = doctest.testmod( + trimesh.transformations, verbose=False, raise_on_error=False + ) + + if results.failed > 0: + raise ValueError(str(results)) + g.log.debug(str(results)) def test_downstream(self): """ @@ -70,9 +72,8 @@ def test_around(self): for i, p in enumerate(points): offset = g.random(2) matrix = g.trimesh.transformations.planar_matrix( - theta=g.random() + .1, - offset=offset, - point=p) + theta=g.random() + 0.1, offset=offset, point=p + ) # apply the matrix check = g.trimesh.transform_points(points, matrix) @@ -103,9 +104,7 @@ def test_rotation(self): rotation_matrix = g.trimesh.transformations.rotation_matrix R = 
rotation_matrix(g.np.pi / 2, [0, 0, 1], [1, 0, 0]) - assert g.np.allclose(g.np.dot(R, - [0, 0, 0, 1]), - [1, -1, 0, 1]) + assert g.np.allclose(g.np.dot(R, [0, 0, 0, 1]), [1, -1, 0, 1]) angle = (g.random() - 0.5) * (2 * g.np.pi) direc = g.random(3) - 0.5 @@ -121,23 +120,16 @@ def test_rotation(self): I = g.np.identity(4, g.np.float64) # NOQA assert g.np.allclose(I, rotation_matrix(g.np.pi * 2, direc)) - assert g.np.allclose( - 2, - g.np.trace(rotation_matrix(g.np.pi / 2, - direc, point))) + assert g.np.allclose(2, g.np.trace(rotation_matrix(g.np.pi / 2, direc, point))) # test symbolic if g.sp is not None: - angle = g.sp.Symbol('angle') + angle = g.sp.Symbol("angle") Rs = rotation_matrix(angle, [0, 0, 1], [1, 0, 0]) - R = g.np.array(Rs.subs( - angle, - g.np.pi / 2.0).evalf()).astype(g.np.float64) + R = g.np.array(Rs.subs(angle, g.np.pi / 2.0).evalf()).astype(g.np.float64) - assert g.np.allclose( - g.np.dot(R, [0, 0, 0, 1]), - [1, -1, 0, 1]) + assert g.np.allclose(g.np.dot(R, [0, 0, 0, 1]), [1, -1, 0, 1]) def test_tiny(self): """ @@ -145,15 +137,13 @@ def test_tiny(self): very small triangles. """ for validate in [False, True]: - m = g.get_mesh('ADIS16480.STL', validate=validate) - m.apply_scale(.001) + m = g.get_mesh("ADIS16480.STL", validate=validate) + m.apply_scale(0.001) m._cache.clear() - g.np.nonzero(g.np.linalg.norm( - m.face_normals, - axis=1) < 1e-3) + g.np.nonzero(g.np.linalg.norm(m.face_normals, axis=1) < 1e-3) m.apply_transform( - g.trimesh.transformations.rotation_matrix( - g.np.pi / 4, [0, 0, 1])) + g.trimesh.transformations.rotation_matrix(g.np.pi / 4, [0, 0, 1]) + ) def test_quat(self): """ @@ -184,11 +174,11 @@ def test_quat(self): # all random matrices should be rigid transforms assert all(is_rigid(T) for T in random_matrix(num=100)) # random quaternions should all be unit vector - assert g.np.allclose(g.np.linalg.norm(random_quat(num=100), - axis=1), - 1.0, atol=1e-6) + assert g.np.allclose( + g.np.linalg.norm(random_quat(num=100), axis=1), 1.0, atol=1e-6 + ) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/trimesh/transformations.py b/trimesh/transformations.py index 0cfc67198..fc8efed44 100644 --- a/trimesh/transformations.py +++ b/trimesh/transformations.py @@ -181,7 +181,7 @@ True >>> np.allclose(trans, [1, 2, 3]) True ->>> np.allclose(shear, [0, math.tan(beta), 0]) +>>> np.allclose(shear, [0, np.tan(beta), 0]) True >>> is_same_transform(R, euler_matrix(axes='sxyz', *angles)) True @@ -196,17 +196,10 @@ """ - -import math - import numpy as np -__version__ = '2017.02.17' -__docformat__ = 'restructuredtext en' -__all__ = () - _IDENTITY = np.eye(4) -_IDENTITY.flags['WRITEABLE'] = False +_IDENTITY.flags["WRITEABLE"] = False def identity_matrix(): @@ -330,14 +323,14 @@ def rotation_matrix(angle, direction, point=None): Examples ------------- - >>> R = rotation_matrix(math.pi/2, [0, 0, 1], [1, 0, 0]) + >>> R = rotation_matrix(np.pi/2, [0, 0, 1], [1, 0, 0]) >>> np.allclose(np.dot(R, [0, 0, 0, 1]), [1, -1, 0, 1]) True - >>> angle = (random.random() - 0.5) * (2*math.pi) + >>> angle = (random.random() - 0.5) * (2*np.pi) >>> direc = np.random.random(3) - 0.5 >>> point = np.random.random(3) - 0.5 >>> R0 = rotation_matrix(angle, direc, point) - >>> R1 = rotation_matrix(angle-2*math.pi, direc, point) + >>> R1 = rotation_matrix(angle-2*np.pi, direc, point) >>> is_same_transform(R0, R1) True >>> R0 = rotation_matrix(angle, direc, point) @@ -345,22 +338,23 @@ def rotation_matrix(angle, direction, point=None): >>> 
is_same_transform(R0, R1) True >>> I = np.identity(4, np.float64) - >>> np.allclose(I, rotation_matrix(math.pi*2, direc)) + >>> np.allclose(I, rotation_matrix(np.pi*2, direc)) True - >>> np.allclose(2, np.trace(rotation_matrix(math.pi/2,direc,point))) + >>> np.allclose(2, np.trace(rotation_matrix(np.pi/2,direc,point))) True """ - if type(angle).__name__ == 'Symbol': + if type(angle).__name__ == "Symbol": # special case sympy symbolic angles import sympy as sp + symbolic = True sina = sp.sin(angle) cosa = sp.cos(angle) else: symbolic = False - sina = math.sin(angle) - cosa = math.cos(angle) + sina = np.sin(angle) + cosa = np.cos(angle) direction = unit_vector(direction[:3]) # rotation matrix around unit vector @@ -368,9 +362,13 @@ def rotation_matrix(angle, direction, point=None): M[:3, :3] += np.outer(direction, direction) * (1.0 - cosa) direction = direction * sina - M[:3, :3] += np.array([[0.0, -direction[2], direction[1]], - [direction[2], 0.0, -direction[0]], - [-direction[1], direction[0], 0.0]]) + M[:3, :3] += np.array( + [ + [0.0, -direction[2], direction[1]], + [direction[2], 0.0, -direction[0]], + [-direction[1], direction[0], 0.0], + ] + ) # if point is specified, rotation is not around origin if point is not None: @@ -387,7 +385,7 @@ def rotation_matrix(angle, direction, point=None): def rotation_from_matrix(matrix): """Return rotation angle and axis from rotation matrix. - >>> angle = (random.random() - 0.5) * (2*math.pi) + >>> angle = (random.random() - 0.5) * (2*np.pi) >>> direc = np.random.random(3) - 0.5 >>> point = np.random.random(3) - 0.5 >>> R0 = rotation_matrix(angle, direc, point) @@ -415,15 +413,12 @@ def rotation_from_matrix(matrix): # rotation angle depending on direction cosa = (np.trace(R33) - 1.0) / 2.0 if abs(direction[2]) > 1e-8: - sina = (R[1, 0] + (cosa - 1.0) * direction[0] - * direction[1]) / direction[2] + sina = (R[1, 0] + (cosa - 1.0) * direction[0] * direction[1]) / direction[2] elif abs(direction[1]) > 1e-8: - sina = (R[0, 2] + (cosa - 1.0) * direction[0] - * direction[2]) / direction[1] + sina = (R[0, 2] + (cosa - 1.0) * direction[0] * direction[2]) / direction[1] else: - sina = (R[2, 1] + (cosa - 1.0) * direction[1] - * direction[2]) / direction[0] - angle = math.atan2(sina, cosa) + sina = (R[2, 1] + (cosa - 1.0) * direction[1] * direction[2]) / direction[0] + angle = np.arctan2(sina, cosa) return angle, direction, point @@ -502,8 +497,7 @@ def scale_from_matrix(matrix): return factor, origin, direction -def projection_matrix(point, normal, direction=None, - perspective=None, pseudo=False): +def projection_matrix(point, normal, direction=None, perspective=None, pseudo=False): """Return matrix to project onto plane defined by point and normal. Using either perspective point, projection direction, or none of both. 
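Since this commit swaps the `math` module for numpy equivalents throughout
`transformations.py`, it may be worth noting that the scalar results are
unchanged; a quick sanity check, assuming only stock `math` and `numpy`:

import math

import numpy as np

theta = 0.37
# the numpy scalar functions agree with the math module on scalar input
assert np.isclose(np.sin(theta), math.sin(theta))
assert np.isclose(np.arctan2(1.0, 2.0), math.atan2(1.0, 2.0))
# np.arccos stands in for math.acos, np.sqrt for math.sqrt, and so on
assert np.isclose(np.arccos(0.5), math.acos(0.5))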
@@ -539,8 +533,7 @@ def projection_matrix(point, normal, direction=None, normal = unit_vector(normal[:3]) if perspective is not None: # perspective projection - perspective = np.array(perspective[:3], dtype=np.float64, - copy=False) + perspective = np.array(perspective[:3], dtype=np.float64, copy=False) M[0, 0] = M[1, 1] = M[2, 2] = np.dot(perspective - point, normal) M[:3, :3] -= np.outer(perspective, normal) if pseudo: @@ -626,11 +619,10 @@ def projection_from_matrix(matrix, pseudo=False): # perspective projection i = np.where(abs(np.real(w)) > 1e-8)[0] if not len(i): - raise ValueError( - "no eigenvector not corresponding to eigenvalue 0") + raise ValueError("no eigenvector not corresponding to eigenvalue 0") point = np.real(V[:, i[-1]]).squeeze() point /= point[3] - normal = - M[3, :3] + normal = -M[3, :3] perspective = M[:3, 3] / np.dot(point[:3], normal) if pseudo: perspective -= normal @@ -681,15 +673,19 @@ def clip_matrix(left, right, bottom, top, near, far, perspective=False): if near <= _EPS: raise ValueError("invalid frustum: near <= 0") t = 2.0 * near - M = [[t / (left - right), 0.0, (right + left) / (right - left), 0.0], - [0.0, t / (bottom - top), (top + bottom) / (top - bottom), 0.0], - [0.0, 0.0, (far + near) / (near - far), t * far / (far - near)], - [0.0, 0.0, -1.0, 0.0]] + M = [ + [t / (left - right), 0.0, (right + left) / (right - left), 0.0], + [0.0, t / (bottom - top), (top + bottom) / (top - bottom), 0.0], + [0.0, 0.0, (far + near) / (near - far), t * far / (far - near)], + [0.0, 0.0, -1.0, 0.0], + ] else: - M = [[2.0 / (right - left), 0.0, 0.0, (right + left) / (left - right)], - [0.0, 2.0 / (top - bottom), 0.0, (top + bottom) / (bottom - top)], - [0.0, 0.0, 2.0 / (far - near), (far + near) / (near - far)], - [0.0, 0.0, 0.0, 1.0]] + M = [ + [2.0 / (right - left), 0.0, 0.0, (right + left) / (left - right)], + [0.0, 2.0 / (top - bottom), 0.0, (top + bottom) / (bottom - top)], + [0.0, 0.0, 2.0 / (far - near), (far + near) / (near - far)], + [0.0, 0.0, 0.0, 1.0], + ] return np.array(M) @@ -704,7 +700,7 @@ def shear_matrix(angle, direction, point, normal): given by the angle of P-P'-P", where P' is the orthogonal projection of P onto the shear plane. 
- >>> angle = (random.random() - 0.5) * 4*math.pi + >>> angle = (random.random() - 0.5) * 4*np.pi >>> direct = np.random.random(3) - 0.5 >>> point = np.random.random(3) - 0.5 >>> normal = np.cross(direct, np.random.random(3)) @@ -717,7 +713,7 @@ def shear_matrix(angle, direction, point, normal): direction = unit_vector(direction[:3]) if abs(np.dot(normal, direction)) > 1e-6: raise ValueError("direction and normal vectors are not orthogonal") - angle = math.tan(angle) + angle = np.tan(angle) M = np.identity(4) M[:3, :3] += angle * np.outer(direction, normal) M[:3, 3] = -angle * np.dot(point[:3], normal) * direction @@ -759,7 +755,7 @@ def shear_from_matrix(matrix): direction = np.dot(M33 - np.identity(3), normal) angle = vector_norm(direction) direction /= angle - angle = math.atan(angle) + angle = np.arctan(angle) # point: eigenvector corresponding to eigenvalue 1 w, V = np.linalg.eig(M) @@ -811,7 +807,7 @@ def decompose_matrix(matrix): if not np.linalg.det(P): raise ValueError("matrix is singular") - scale = np.zeros((3, )) + scale = np.zeros((3,)) shear = [0.0, 0.0, 0.0] angles = [0.0, 0.0, 0.0] @@ -844,19 +840,20 @@ def decompose_matrix(matrix): np.negative(scale, scale) np.negative(row, row) - angles[1] = math.asin(-row[0, 2]) - if math.cos(angles[1]): - angles[0] = math.atan2(row[1, 2], row[2, 2]) - angles[2] = math.atan2(row[0, 1], row[0, 0]) + angles[1] = np.arcsin(-row[0, 2]) + if np.cos(angles[1]): + angles[0] = np.arctan2(row[1, 2], row[2, 2]) + angles[2] = np.arctan2(row[0, 1], row[0, 0]) else: - angles[0] = math.atan2(-row[2, 1], row[1, 1]) + angles[0] = np.arctan2(-row[2, 1], row[1, 1]) angles[2] = 0.0 return scale, shear, angles, translate, perspective -def compose_matrix(scale=None, shear=None, angles=None, translate=None, - perspective=None): +def compose_matrix( + scale=None, shear=None, angles=None, translate=None, perspective=None +): """Return transformation matrix from sequence of transformations. This is the inverse of the decompose_matrix function. 
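For reference, `compose_matrix` and `decompose_matrix` are round-trip safe;
a short check in the spirit of the doctests above, assuming a trimesh
checkout where both functions are importable:

import numpy as np

from trimesh import transformations as tf

# build a transform from random Euler angles and a translation
angles = (np.random.random(3) - 0.5) * 2.0 * np.pi
translate = np.random.random(3) - 0.5
matrix = tf.compose_matrix(angles=angles, translate=translate)

# decomposition should recover an equivalent rotation and translation
scale, shear, found_angles, found_translate, perspective = tf.decompose_matrix(matrix)
assert np.allclose(found_translate, translate)
assert np.allclose(tf.euler_matrix(*found_angles), tf.euler_matrix(*angles))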
@@ -870,7 +867,7 @@ def compose_matrix(scale=None, shear=None, angles=None, translate=None, >>> scale = np.random.random(3) - 0.5 >>> shear = np.random.random(3) - 0.5 - >>> angles = (np.random.random(3) - 0.5) * (2*math.pi) + >>> angles = (np.random.random(3) - 0.5) * (2*np.pi) >>> trans = np.random.random(3) - 0.5 >>> persp = np.random.random(4) - 0.5 >>> M0 = compose_matrix(scale, shear, angles, trans, persp) @@ -890,7 +887,7 @@ def compose_matrix(scale=None, shear=None, angles=None, translate=None, T[:3, 3] = translate[:3] M = np.dot(M, T) if angles is not None: - R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz') + R = euler_matrix(angles[0], angles[1], angles[2], "sxyz") M = np.dot(M, R) if shear is not None: Z = np.identity(4) @@ -928,11 +925,14 @@ def orthogonalization_matrix(lengths, angles): sina, sinb, _ = np.sin(angles) cosa, cosb, cosg = np.cos(angles) co = (cosa * cosb - cosg) / (sina * sinb) - return np.array([ - [a * sinb * math.sqrt(1.0 - co * co), 0.0, 0.0, 0.0], - [-a * sinb * co, b * sina, 0.0, 0.0], - [a * cosb, b * cosa, c, 0.0], - [0.0, 0.0, 0.0, 1.0]]) + return np.array( + [ + [a * sinb * np.sqrt(1.0 - co * co), 0.0, 0.0, 0.0], + [-a * sinb * co, b * sina, 0.0, 0.0], + [a * cosb, b * cosa, c, 0.0], + [0.0, 0.0, 0.0, 1.0], + ] + ) def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True): @@ -995,7 +995,7 @@ def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True): u, s, vh = np.linalg.svd(A.T) vh = vh[:ndims].T B = vh[:ndims] - C = vh[ndims:2 * ndims] + C = vh[ndims : 2 * ndims] t = np.dot(C, np.linalg.pinv(B)) t = np.concatenate((t, np.zeros((ndims, 1))), axis=1) M = np.vstack((t, ((0.0,) * ndims) + (1.0,))) @@ -1017,10 +1017,12 @@ def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True): xx, yy, zz = np.sum(v0 * v1, axis=1) xy, yz, zx = np.sum(v0 * np.roll(v1, -1, axis=0), axis=1) xz, yx, zy = np.sum(v0 * np.roll(v1, -2, axis=0), axis=1) - N = [[xx + yy + zz, 0.0, 0.0, 0.0], - [yz - zy, xx - yy - zz, 0.0, 0.0], - [zx - xz, xy + yx, yy - xx - zz, 0.0], - [xy - yx, zx + xz, yz + zy, zz - xx - yy]] + N = [ + [xx + yy + zz, 0.0, 0.0, 0.0], + [yz - zy, xx - yy - zz, 0.0, 0.0], + [zx - xz, xy + yx, yy - xx - zz, 0.0], + [xy - yx, zx + xz, yz + zy, zz - xx - yy], + ] # quaternion: eigenvector corresponding to most positive eigenvalue w, V = np.linalg.eigh(N) q = V[:, np.argmax(w)] @@ -1032,7 +1034,7 @@ def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True): # Affine transformation; scale is ratio of RMS deviations from centroid v0 *= v0 v1 *= v1 - M[:ndims, :ndims] *= math.sqrt(np.sum(v1) / np.sum(v0)) + M[:ndims, :ndims] *= np.sqrt(np.sum(v1) / np.sum(v0)) # move centroids back M = np.dot(np.linalg.inv(M1), np.dot(M, M0)) @@ -1087,11 +1089,10 @@ def superimposition_matrix(v0, v1, scale=False, usesvd=True): """ v0 = np.array(v0, dtype=np.float64, copy=False)[:3] v1 = np.array(v1, dtype=np.float64, copy=False)[:3] - return affine_matrix_from_points(v0, v1, shear=False, - scale=scale, usesvd=usesvd) + return affine_matrix_from_points(v0, v1, shear=False, scale=scale, usesvd=usesvd) -def euler_matrix(ai, aj, ak, axes='sxyz'): +def euler_matrix(ai, aj, ak, axes="sxyz"): """Return homogeneous rotation matrix from Euler angles and axis sequence. 
ai, aj, ak : Euler's roll, pitch and yaw angles @@ -1103,7 +1104,7 @@ def euler_matrix(ai, aj, ak, axes='sxyz'): >>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1)) >>> np.allclose(np.sum(R[0]), -0.383436184) True - >>> ai, aj, ak = (4*math.pi) * (np.random.random(3) - 0.5) + >>> ai, aj, ak = (4*np.pi) * (np.random.random(3) - 0.5) >>> for axes in _AXES2TUPLE.keys(): ... R = euler_matrix(ai, aj, ak, axes) >>> for axes in _TUPLE2AXES.keys(): @@ -1125,8 +1126,8 @@ def euler_matrix(ai, aj, ak, axes='sxyz'): if parity: ai, aj, ak = -ai, -aj, -ak - si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak) - ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak) + si, sj, sk = np.sin(ai), np.sin(aj), np.sin(ak) + ci, cj, ck = np.cos(ai), np.cos(aj), np.cos(ak) cc, cs = ci * ck, ci * sk sc, ss = si * ck, si * sk @@ -1154,7 +1155,7 @@ def euler_matrix(ai, aj, ak, axes='sxyz'): return M -def euler_from_matrix(matrix, axes='sxyz'): +def euler_from_matrix(matrix, axes="sxyz"): """Return Euler angles from rotation matrix for specified axis sequence. axes : One of 24 axis sequences as string or encoded tuple @@ -1166,7 +1167,7 @@ def euler_from_matrix(matrix, axes='sxyz'): >>> R1 = euler_matrix(al, be, ga, 'syxz') >>> np.allclose(R0, R1) True - >>> angles = (4*math.pi) * (np.random.random(3) - 0.5) + >>> angles = (4*np.pi) * (np.random.random(3) - 0.5) >>> for axes in _AXES2TUPLE.keys(): ... R0 = euler_matrix(axes=axes, *angles) ... R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes)) @@ -1185,24 +1186,24 @@ def euler_from_matrix(matrix, axes='sxyz'): M = np.array(matrix, dtype=np.float64, copy=False)[:3, :3] if repetition: - sy = math.sqrt(M[i, j] * M[i, j] + M[i, k] * M[i, k]) + sy = np.sqrt(M[i, j] * M[i, j] + M[i, k] * M[i, k]) if sy > _EPS: - ax = math.atan2(M[i, j], M[i, k]) - ay = math.atan2(sy, M[i, i]) - az = math.atan2(M[j, i], -M[k, i]) + ax = np.arctan2(M[i, j], M[i, k]) + ay = np.arctan2(sy, M[i, i]) + az = np.arctan2(M[j, i], -M[k, i]) else: - ax = math.atan2(-M[j, k], M[j, j]) - ay = math.atan2(sy, M[i, i]) + ax = np.arctan2(-M[j, k], M[j, j]) + ay = np.arctan2(sy, M[i, i]) az = 0.0 else: - cy = math.sqrt(M[i, i] * M[i, i] + M[j, i] * M[j, i]) + cy = np.sqrt(M[i, i] * M[i, i] + M[j, i] * M[j, i]) if cy > _EPS: - ax = math.atan2(M[k, j], M[k, k]) - ay = math.atan2(-M[k, i], cy) - az = math.atan2(M[j, i], M[i, i]) + ax = np.arctan2(M[k, j], M[k, k]) + ay = np.arctan2(-M[k, i], cy) + az = np.arctan2(M[j, i], M[i, i]) else: - ax = math.atan2(-M[j, k], M[j, j]) - ay = math.atan2(-M[k, i], cy) + ax = np.arctan2(-M[j, k], M[j, j]) + ay = np.arctan2(-M[k, i], cy) az = 0.0 if parity: @@ -1212,7 +1213,7 @@ def euler_from_matrix(matrix, axes='sxyz'): return ax, ay, az -def euler_from_quaternion(quaternion, axes='sxyz'): +def euler_from_quaternion(quaternion, axes="sxyz"): """Return Euler angles from quaternion for specified axis sequence. >>> angles = euler_from_quaternion([0.99810947, 0.06146124, 0, 0]) @@ -1223,7 +1224,7 @@ def euler_from_quaternion(quaternion, axes='sxyz'): return euler_from_matrix(quaternion_matrix(quaternion), axes) -def quaternion_from_euler(ai, aj, ak, axes='sxyz'): +def quaternion_from_euler(ai, aj, ak, axes="sxyz"): """Return quaternion from Euler angles and axis sequence. 
ai, aj, ak : Euler's roll, pitch and yaw angles @@ -1252,18 +1253,18 @@ def quaternion_from_euler(ai, aj, ak, axes='sxyz'): ai /= 2.0 aj /= 2.0 ak /= 2.0 - ci = math.cos(ai) - si = math.sin(ai) - cj = math.cos(aj) - sj = math.sin(aj) - ck = math.cos(ak) - sk = math.sin(ak) + ci = np.cos(ai) + si = np.sin(ai) + cj = np.cos(aj) + sj = np.sin(aj) + ck = np.cos(ak) + sk = np.sin(ak) cc = ci * ck cs = ci * sk sc = si * ck ss = si * sk - q = np.empty((4, )) + q = np.empty((4,)) if repetition: q[0] = cj * (cc - ss) q[i] = cj * (cs + sc) @@ -1291,8 +1292,8 @@ def quaternion_about_axis(angle, axis): q = np.array([0.0, axis[0], axis[1], axis[2]]) qlen = vector_norm(q) if qlen > _EPS: - q *= math.sin(angle / 2.0) / qlen - q[0] = math.cos(angle / 2.0) + q *= np.sin(angle / 2.0) / qlen + q[0] = np.cos(angle / 2.0) return q @@ -1315,15 +1316,13 @@ def quaternion_matrix(quaternion): """ - q = np.array(quaternion, - dtype=np.float64, - copy=True).reshape((-1, 4)) - n = np.einsum('ij,ij->i', q, q) + q = np.array(quaternion, dtype=np.float64, copy=True).reshape((-1, 4)) + n = np.einsum("ij,ij->i", q, q) # how many entries do we have num_qs = len(n) identities = n < _EPS q[~identities, :] *= np.sqrt(2.0 / n[~identities, None]) - q = np.einsum('ij,ik->ikj', q, q) + q = np.einsum("ij,ik->ikj", q, q) # store the result ret = np.zeros((num_qs, 4, 4)) @@ -1386,7 +1385,7 @@ def quaternion_from_matrix(matrix, isprecise=False): """ M = np.array(matrix, dtype=np.float64, copy=False)[:4, :4] if isprecise: - q = np.empty((4, )) + q = np.empty((4,)) t = np.trace(M) if t > M[3, 3]: q[0] = t @@ -1405,7 +1404,7 @@ def quaternion_from_matrix(matrix, isprecise=False): q[k] = M[k, i] + M[i, k] q[3] = M[k, j] - M[j, k] q = q[[3, 0, 1, 2]] - q *= 0.5 / math.sqrt(t * M[3, 3]) + q *= 0.5 / np.sqrt(t * M[3, 3]) else: m00 = M[0, 0] m01 = M[0, 1] @@ -1417,10 +1416,14 @@ def quaternion_from_matrix(matrix, isprecise=False): m21 = M[2, 1] m22 = M[2, 2] # symmetric matrix K - K = np.array([[m00 - m11 - m22, 0.0, 0.0, 0.0], - [m01 + m10, m11 - m00 - m22, 0.0, 0.0], - [m02 + m20, m12 + m21, m22 - m00 - m11, 0.0], - [m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22]]) + K = np.array( + [ + [m00 - m11 - m22, 0.0, 0.0, 0.0], + [m01 + m10, m11 - m00 - m22, 0.0, 0.0], + [m02 + m20, m12 + m21, m22 - m00 - m11, 0.0], + [m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22], + ] + ) K /= 3.0 # quaternion is eigenvector of K that corresponds to largest eigenvalue w, V = np.linalg.eigh(K) @@ -1440,10 +1443,15 @@ def quaternion_multiply(quaternion1, quaternion0): """ w0, x0, y0, z0 = quaternion0 w1, x1, y1, z1 = quaternion1 - return np.array([-x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0, - x1 * w0 + y1 * z0 - z1 * y0 + w1 * x0, - -x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0, - x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0], dtype=np.float64) + return np.array( + [ + -x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0, + x1 * w0 + y1 * z0 - z1 * y0 + w1 * x0, + -x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0, + x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0, + ], + dtype=np.float64, + ) def quaternion_conjugate(quaternion): @@ -1506,9 +1514,9 @@ def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True): >>> np.allclose(q, q1) True >>> q = quaternion_slerp(q0, q1, 0.5) - >>> angle = math.acos(np.dot(q0, q)) - >>> np.allclose(2, math.acos(np.dot(q0, q1)) / angle) or \ - np.allclose(2, math.acos(-np.dot(q0, q1)) / angle) + >>> angle = np.arccos(np.dot(q0, q)) + >>> np.allclose(2, np.arccos(np.dot(q0, q1)) / angle) or \ + np.allclose(2, np.arccos(-np.dot(q0, q1)) / angle) True """ @@ -1525,12 
+1533,12 @@ def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True): # invert rotation d = -d np.negative(q1, q1) - angle = math.acos(d) + spin * math.pi + angle = np.arccos(d) + spin * np.pi if abs(angle) < _EPS: return q0 - isin = 1.0 / math.sin(angle) - q0 *= math.sin((1.0 - fraction) * angle) * isin - q1 *= math.sin(fraction * angle) * isin + isin = 1.0 / np.sin(angle) + q0 *= np.sin((1.0 - fraction) * angle) * isin + q1 *= np.sin(fraction * angle) * isin q0 += q1 return q0 @@ -1559,11 +1567,12 @@ def random_quaternion(rand=None, num=1): assert rand.shape[0] == 3 r1 = np.sqrt(1.0 - rand[0]) r2 = np.sqrt(rand[0]) - pi2 = math.pi * 2.0 + pi2 = np.pi * 2.0 t1 = pi2 * rand[1] t2 = pi2 * rand[2] - return np.array([np.cos(t2) * r2, np.sin(t1) * r1, - np.cos(t1) * r1, np.sin(t2) * r2]).T.squeeze() + return np.array( + [np.cos(t2) * r2, np.sin(t1) * r1, np.cos(t1) * r1, np.sin(t2) * r2] + ).T.squeeze() def random_rotation_matrix(rand=None, num=1, translate=False): @@ -1581,8 +1590,7 @@ def random_rotation_matrix(rand=None, num=1, translate=False): True """ - matrix = quaternion_matrix( - random_quaternion(rand=rand, num=num)) + matrix = quaternion_matrix(random_quaternion(rand=rand, num=num)) if translate: scale = float(translate) matrix[:3, 3] = (np.random.random(3) - 0.5) * scale @@ -1631,7 +1639,7 @@ def __init__(self, initial=None): initial = np.array(initial, dtype=np.float64) if initial.shape == (4, 4): self._qdown = quaternion_from_matrix(initial) - elif initial.shape == (4, ): + elif initial.shape == (4,): initial /= vector_norm(initial) self._qdown = initial else: @@ -1708,10 +1716,10 @@ def arcball_map_to_sphere(point, center, radius): n = v0 * v0 + v1 * v1 if n > 1.0: # position outside of sphere - n = math.sqrt(n) + n = np.sqrt(n) return np.array([v0 / n, v1 / n, 0.0]) else: - return np.array([v0, v1, math.sqrt(1.0 - n)]) + return np.array([v0, v1, np.sqrt(1.0 - n)]) def arcball_constrain_to_axis(point, axis): @@ -1751,14 +1759,31 @@ def arcball_nearest_axis(point, axes): # map axes strings to/from tuples of inner axis, parity, repetition, frame _AXES2TUPLE = { - 'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0), - 'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0), - 'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0), - 'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0), - 'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1), - 'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1), - 'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1), - 'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)} + "sxyz": (0, 0, 0, 0), + "sxyx": (0, 0, 1, 0), + "sxzy": (0, 1, 0, 0), + "sxzx": (0, 1, 1, 0), + "syzx": (1, 0, 0, 0), + "syzy": (1, 0, 1, 0), + "syxz": (1, 1, 0, 0), + "syxy": (1, 1, 1, 0), + "szxy": (2, 0, 0, 0), + "szxz": (2, 0, 1, 0), + "szyx": (2, 1, 0, 0), + "szyz": (2, 1, 1, 0), + "rzyx": (0, 0, 0, 1), + "rxyx": (0, 0, 1, 1), + "ryzx": (0, 1, 0, 1), + "rxzx": (0, 1, 1, 1), + "rxzy": (1, 0, 0, 1), + "ryzy": (1, 0, 1, 1), + "rzxy": (1, 1, 0, 1), + "ryxy": (1, 1, 1, 1), + "ryxz": (2, 0, 0, 1), + "rzxz": (2, 0, 1, 1), + "rxyz": (2, 1, 0, 1), + "rzyz": (2, 1, 1, 1), +} _TUPLE2AXES = {v: k for k, v in _AXES2TUPLE.items()} @@ -1791,7 +1816,7 @@ def vector_norm(data, axis=None, out=None): data = np.array(data, dtype=np.float64, copy=True) if out is None: if data.ndim == 1: - return math.sqrt(np.dot(data, data)) + return np.sqrt(np.dot(data, data)) data *= data out = 
np.atleast_1d(np.sum(data, axis=axis)) np.sqrt(out, out) @@ -1831,7 +1856,7 @@ def unit_vector(data, axis=None, out=None): if out is None: data = np.array(data, dtype=np.float64, copy=True) if data.ndim == 1: - data /= math.sqrt(np.dot(data, data)) + data /= np.sqrt(np.dot(data, data)) return data else: if out is not data: @@ -1889,7 +1914,7 @@ def angle_between_vectors(v0, v1, directed=True, axis=0): i.e. the maximum angle is pi/2. >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3]) - >>> np.allclose(a, math.pi) + >>> np.allclose(a, np.pi) True >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3], directed=False) >>> np.allclose(a, 0) @@ -1985,9 +2010,8 @@ def transform_around(matrix, point): point = np.asanyarray(point) matrix = np.asanyarray(matrix) dim = len(point) - if matrix.shape != (dim + 1, - dim + 1): - raise ValueError('matrix must be (d+1, d+1)') + if matrix.shape != (dim + 1, dim + 1): + raise ValueError("matrix must be (d+1, d+1)") translate = np.eye(dim + 1) translate[:dim, dim] = -point @@ -1998,10 +2022,7 @@ def transform_around(matrix, point): return result -def planar_matrix(offset=None, - theta=None, - point=None, - scale=None): +def planar_matrix(offset=None, theta=None, point=None, scale=None): """ 2D homogeonous transformation matrix. @@ -2028,9 +2049,9 @@ def planar_matrix(offset=None, offset = np.asanyarray(offset, dtype=np.float64) theta = float(theta) if not np.isfinite(theta): - raise ValueError('theta must be finite angle!') + raise ValueError("theta must be finite angle!") if offset.shape != (2,): - raise ValueError('offset must be length 2!') + raise ValueError("offset must be length 2!") T = np.eye(3, dtype=np.float64) s = np.sin(theta) @@ -2067,7 +2088,7 @@ def planar_matrix_to_3D(matrix_2D): matrix_2D = np.asanyarray(matrix_2D, dtype=np.float64) if matrix_2D.shape != (3, 3): - raise ValueError('Homogenous 2D transformation matrix required!') + raise ValueError("Homogenous 2D transformation matrix required!") matrix_3D = np.eye(4) # translation @@ -2078,7 +2099,7 @@ def planar_matrix_to_3D(matrix_2D): return matrix_3D -def spherical_matrix(theta, phi, axes='sxyz'): +def spherical_matrix(theta, phi, axes="sxyz"): """ Give a spherical coordinate vector, find the rotation that will transform a [0,0,1] vector to those coordinates @@ -2100,9 +2121,7 @@ def spherical_matrix(theta, phi, axes='sxyz'): return result -def transform_points(points, - matrix, - translate=True): +def transform_points(points, matrix, translate=True): """ Returns points rotated by a homogeneous transformation matrix. 
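`transform_points` is the hot path touched by the next hunk; basic usage,
as a sketch assuming a standard trimesh install where `translation_matrix`
is available as in upstream transformations.py:

import numpy as np

import trimesh.transformations as tf

points = np.array([[0.0, 0.0, 0.0],
                   [1.0, 0.0, 0.0]])
# translate one unit along +Z
matrix = tf.translation_matrix([0.0, 0.0, 1.0])
moved = tf.transform_points(points, matrix)
assert np.allclose(moved, points + [0.0, 0.0, 1.0])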
@@ -2134,7 +2153,7 @@ def transform_points(points, count, dim = points.shape # quickly check to see if we've been passed an identity matrix - if np.abs(matrix - _IDENTITY[:dim + 1, :dim + 1]).max() < 1e-8: + if np.abs(matrix - _IDENTITY[: dim + 1, : dim + 1]).max() < 1e-8: return np.ascontiguousarray(points.copy()) if translate: @@ -2166,8 +2185,9 @@ def fix_rigid(matrix, max_deviance=1e-5): Repaired homogeneous transformation matrix """ dim = matrix.shape[0] - 1 - check = np.abs(np.dot(matrix[:dim, :dim], matrix[:dim, :dim].T) - - _IDENTITY[:dim, :dim]).max() + check = np.abs( + np.dot(matrix[:dim, :dim], matrix[:dim, :dim].T) - _IDENTITY[:dim, :dim] + ).max() # if the matrix differs by more than float-zero and less # than the threshold try to repair the matrix with SVD if check > 1e-13 and check < max_deviance: @@ -2211,8 +2231,7 @@ def is_rigid(matrix, epsilon=1e-8): return False # check dot product of rotation against transpose - check = np.dot(matrix[:3, :3], - matrix[:3, :3].T) - _IDENTITY[:3, :3] + check = np.dot(matrix[:3, :3], matrix[:3, :3].T) - _IDENTITY[:3, :3] return check.ptp() < epsilon @@ -2267,14 +2286,12 @@ def flips_winding(matrix): vectors = np.diff(triangles, axis=1) cross = np.cross(vectors[:, 0], vectors[:, 1]) # rotate the original normals to match - cross[:count] = np.dot(matrix[:3, :3], - cross[:count].T).T + cross[:count] = np.dot(matrix[:3, :3], cross[:count].T).T # unitize normals norm = np.sqrt(np.dot(cross * cross, [1, 1, 1])).reshape((-1, 1)) cross = cross / norm # find the projection of the two normals - projection = np.dot(cross[:count] * cross[count:], - [1.0] * 3) + projection = np.dot(cross[:count] * cross[count:], [1.0] * 3) # if the winding was flipped but not the normal # the projection will be negative, and since we're # checking a few triangles check against the mean From 76be3a9988673b5f52f38edc6f2fe9129f2d3d2b Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 28 Aug 2023 17:11:45 -0400 Subject: [PATCH 017/144] fix docs --- README.md | 7 ++-- docs/_static/custom.css | 19 ----------- docs/conf.py | 73 +++++++++++++++++++---------------------- docs/requirements.txt | 15 +++++---- 4 files changed, 46 insertions(+), 68 deletions(-) diff --git a/README.md b/README.md index 021cf995a..02bcd3ec4 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,14 @@ [![trimesh](https://trimsh.org/images/logotype-a.svg)](http://trimsh.org) ----------- -[![Github Actions](https://github.com/mikedh/trimesh/workflows/Release%20Trimesh/badge.svg)](https://github.com/mikedh/trimesh/actions) [![PyPI version](https://badge.fury.io/py/trimesh.svg)](https://badge.fury.io/py/trimesh) [![codecov](https://codecov.io/gh/mikedh/trimesh/branch/main/graph/badge.svg?token=4PVRQXyl2h)](https://codecov.io/gh/mikedh/trimesh) [![Docker Image Version (latest by date)](https://img.shields.io/docker/v/trimesh/trimesh?label=docker&sort=semver)](https://hub.docker.com/r/trimesh/trimesh/tags) +[![Github Actions](https://github.com/mikedh/trimesh/workflows/Release%20Trimesh/badge.svg)](https://github.com/mikedh/trimesh/actions) [![codecov](https://codecov.io/gh/mikedh/trimesh/branch/main/graph/badge.svg?token=4PVRQXyl2h)](https://codecov.io/gh/mikedh/trimesh) [![Docker Image Version (latest by date)](https://img.shields.io/docker/v/trimesh/trimesh?label=docker&sort=semver)](https://hub.docker.com/r/trimesh/trimesh/tags) [![PyPI version](https://badge.fury.io/py/trimesh.svg)](https://badge.fury.io/py/trimesh) | :warning: WARNING | -|:---------------------------| -| trimesh 4.0.0 
which makes the minimum Python version 3.7 is in pre-release and will be released soon, you may want to test your stack with: `pip install --pre trimesh` |
+|---------------------------|
+| `trimesh >= 4.0.0` raises the minimum supported Python to 3.7; it is in pre-release and will be released soon |
+| You can test your stack with `pip install --pre trimesh`, or if you are on an older Python you should pin `trimesh<4` |
 
 Trimesh is a pure Python 3.7+ library for loading and using [triangular meshes](https://en.wikipedia.org/wiki/Triangle_mesh) with an emphasis on watertight surfaces. The goal of the library is to provide a full featured and well tested Trimesh object which allows for easy manipulation and analysis, in the style of the Polygon object in the [Shapely library](https://github.com/Toblerity/Shapely).
 
diff --git a/docs/_static/custom.css b/docs/_static/custom.css
index 82ec91f36..e69de29bb 100644
--- a/docs/_static/custom.css
+++ b/docs/_static/custom.css
@@ -1,19 +0,0 @@
-/* override table width restrictions */
- .wy-table-responsive table td {
-   /* !important prevents the common CSS stylesheets from overriding
-      this as on RTD they are loaded after this stylesheet */
-    white-space: normal !important;
- }
-
- .wy-table-responsive {
-    overflow: visible !important;
- }
-
-
-dl.py.property {
-    display: unset;
-}
-
-.wy-nav-content {
-    max-width: 70em;
-}
diff --git a/docs/conf.py b/docs/conf.py
index 5f13e6801..4c3174b3b 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -20,46 +20,43 @@ def abspath(rel):
     """
     # current working directory
-    cwd = os.path.dirname(os.path.abspath(
-        inspect.getfile(inspect.currentframe())))
+    cwd = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
     return os.path.abspath(os.path.join(cwd, rel))
 
 
-extensions = ['sphinx.ext.napoleon',  # numpy-style docstring
-              'myst_parser']  # allows markdown
+extensions = [
+    "sphinx.ext.napoleon",  # numpy-style docstring
+    "myst_parser",
+]  # allows markdown
 
 myst_all_links_external = True
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix(es) of source filenames.
 source_suffix = {
-    '.rst': 'restructuredtext',
-    '.txt': 'markdown',
-    '.md': 'markdown',
+    ".rst": "restructuredtext",
+    ".txt": "markdown",
+    ".md": "markdown",
 }
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # General information about the project.
-project = 'trimesh'
-copyright = '2022, Michael Dawson-Haggerty'
-author = 'Michael Dawson-Haggerty'
+project = "trimesh"
+copyright = "2022, Michael Dawson-Haggerty"
+author = "Michael Dawson-Haggerty"
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 
 # get version from trimesh without installing
-with open(abspath('../trimesh/version.py')) as f:
-    _version_raw = f.read()
-version = eval(next(
-    line.strip().split('=')[-1]
-    for line in str.splitlines(_version_raw)
-    if '_version_' in line))
+import trimesh
+
 # The full version, including alpha/beta/rc tags.
-release = version
+release = trimesh.__version__
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -70,10 +67,10 @@ def abspath(rel):
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files. 
# This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False @@ -81,44 +78,42 @@ def abspath(rel): # -- Options for HTML output -------------------------------------- # The theme to use for HTML and HTML Help pages -html_theme = 'furo' +html_theme = "furo" # options for rtd-theme html_theme_options = { - 'analytics_id': 'UA-161434837-1', - 'display_version': True, - 'prev_next_buttons_location': 'bottom', - 'style_external_links': False, + "display_version": True, + "prev_next_buttons_location": "bottom", + "style_external_links": False, # toc options - 'collapse_navigation': True, - 'sticky_navigation': True, - 'navigation_depth': 4, - 'includehidden': True, - 'titles_only': False, - + "collapse_navigation": True, + "sticky_navigation": True, + "navigation_depth": 4, + "includehidden": True, + "titles_only": False, } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] +html_logo = "images/trimesh-logo.png" # custom css -html_css_files = ['custom.css'] +html_css_files = ["custom.css"] html_context = { "display_github": True, "github_user": "mikedh", "github_repo": "trimesh", "github_version": "main", - "conf_py_path": "/docs/" + "conf_py_path": "/docs/", } # Output file base name for HTML help builder. -htmlhelp_basename = 'trimeshdoc' +htmlhelp_basename = "trimeshdoc" -# -- Extensions configuration ---------------------------------- autodoc_default_options = { - 'autosummary': True, - 'special-members': '__init__', + "autosummary": True, + "special-members": "__init__", } diff --git a/docs/requirements.txt b/docs/requirements.txt index 3bbfa4165..2121f623c 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,12 +1,13 @@ pypandoc==1.11 recommonmark==0.7.1 -sphinx==6.1.3 jupyter==1.0.0 -sphinx_rtd_theme==1.2.0 -myst-parser==1.0.0 -pyopenssl==23.1.1 -autodocsumm==0.2.10 + +# get sphinx version range from furo install +furo==2023.8.19 +myst-parser==2.0.0 +pyopenssl==23.2.0 +autodocsumm==0.2.11 jinja2==3.1.2 -matplotlib==3.7.1 -nbconvert==7.3.1 +matplotlib==3.7.2 +nbconvert==7.7.4 From 473a2f7992d2fe5061434a06c81269e145486357 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 28 Aug 2023 17:48:18 -0400 Subject: [PATCH 018/144] ruff --- docs/conf.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 4c3174b3b..62aa3a568 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -2,6 +2,9 @@ import inspect import os +# get version from trimesh without installing +import trimesh + def abspath(rel): """ @@ -51,10 +54,6 @@ def abspath(rel): # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. - -# get version from trimesh without installing -import trimesh - # The full version, including alpha/beta/rc tags. 
release = trimesh.__version__ From ac9d093ccc26554bc4cc5944e72d682ea65ae924 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=96=B9=E6=AD=A6=E5=8D=93?= Date: Wed, 30 Aug 2023 10:24:21 +0800 Subject: [PATCH 019/144] force np.inf in t to zero --- trimesh/ray/ray_triangle.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/trimesh/ray/ray_triangle.py b/trimesh/ray/ray_triangle.py index 2743ba024..74280def8 100644 --- a/trimesh/ray/ray_triangle.py +++ b/trimesh/ray/ray_triangle.py @@ -391,14 +391,13 @@ def ray_bounds(ray_origins, axis_dir = np.array([ray_directions[i][a] for i, a in enumerate(axis)]).reshape((-1, 1)) - # prevent division by zero - axis_dir[axis_dir == 0] = tol.zero - # parametric equation of a line # point = direction*t + origin # p = dt + o # t = (p-o)/d t = (axis_bound - axis_ori) / axis_dir + # prevent np.inf by division by zero + t[axis_dir == 0.0] = 0 # prevent the bounding box from including triangles # behind the ray origin From 3dce9cee09ede6eaeed95ebef84633fd2820c27f Mon Sep 17 00:00:00 2001 From: Mathias Parger Date: Wed, 30 Aug 2023 11:05:53 +0200 Subject: [PATCH 020/144] fusion of multiple pbr meshes --- trimesh/visual/material.py | 106 ++++++++++++++++++++++++++++++++++--- 1 file changed, 99 insertions(+), 7 deletions(-) diff --git a/trimesh/visual/material.py b/trimesh/visual/material.py index a8099665d..9c5b9d22c 100644 --- a/trimesh/visual/material.py +++ b/trimesh/visual/material.py @@ -659,7 +659,7 @@ def pack(materials, uvs, deduplicate=True): from ..path import packing import collections - def material_to_img(mat): + def get_base_color_texture(mat): """ Logic for extracting a simple image from each material. """ @@ -687,6 +687,60 @@ def material_to_img(mat): # make sure we're always returning in RGBA mode return img.convert('RGBA') + def get_metallic_roughness_texture(mat): + """ + Logic for extracting a simple image from each material. + """ + # extract an image for each material + img = None + if isinstance(mat, PBRMaterial): + if mat.metallicRoughnessTexture is not None: + img = mat.metallicRoughnessTexture + img_arr = np.array(img) + if len(img_arr.shape) == 2: + img_arr = img_arr[...,None] + + if img_arr.shape[-1] == 1: + img_arr = np.concatenate([img_arr, img_arr], axis=-1) + + if img_arr.shape[-1] == 2: + # we must use RGB here, because 0 alpha does not work for PIL scale later + img_arr = np.concatenate([img_arr, np.ones_like(img_arr) * 255], axis=-1) + img = Image.fromarray(img_arr) + else: + if mat.metallicFactor is not None: + metallic = mat.metallicFactor + else: + metallic = 0.0 + if mat.roughnessFactor is not None: + roughness = mat.roughnessFactor + else: + roughness = 1.0 + + metallic_roughnesss = np.round(np.array([roughness, metallic, 1.0], dtype=np.float64)*255) + img = Image.fromarray(metallic_roughnesss[None,None].astype(np.uint8)) + return img + + def get_emissive_texture(mat): + """ + Logic for extracting a simple image from each material. 
+ """ + # extract an image for each material + img = None + if isinstance(mat, PBRMaterial): + if mat.emissiveTexture is not None: + img = mat.emissiveTexture + elif mat.emissiveFactor is not None: + c = color.to_rgba(mat.emissiveFactor) + assert c.shape == (3,) + assert c.dtype == np.uint8 + img = Image.fromarray(c.reshape((1, 1, -1))) + else: + img = Image.fromarray(np.reshape( + [0, 0, 0], (1, 1, 3)).astype(np.uint8)) + # make sure we're always returning in RGBA mode + return img.convert('RGB') + if deduplicate: # start by collecting a list of indexes for each material hash unique_idx = collections.defaultdict(list) @@ -701,12 +755,35 @@ def material_to_img(mat): assert set(np.concatenate(mat_idx).ravel()) == set(range(len(uvs))) assert len(uvs) == len(materials) + use_pbr = any(isinstance(m, PBRMaterial) for m in materials) + # collect the images from the materials - images = [material_to_img(materials[g[0]]) for g in mat_idx] + images = [get_base_color_texture(materials[g[0]]) for g in mat_idx] # pack the multiple images into a single large image final, offsets = packing.images(images, power_resize=True) + if use_pbr: + metallic_roughness = [get_metallic_roughness_texture(materials[g[0]]) for g in mat_idx] + + # ensure that we use the same image size as for the base color, otherwise the UV coordinates might be wrong + metallic_roughness = [metallic_roughness[img_idx].resize(images[img_idx].size) for img_idx in range(len(images))] + + final_metallic_roughness, offsets_metallic_roughness = packing.images(metallic_roughness, power_resize=True) + + # we only need the first two channels + final_metallic_roughness = Image.fromarray(np.array(final_metallic_roughness)[...,:2]) + + emissive = [get_emissive_texture(materials[g[0]]) for g in mat_idx] + if all(np.array(x).max() == 0 for x in emissive): + emissive = None + final_emissive = None + else: + # ensure that we use the same image size as for the base color, otherwise the UV coordinates might be wrong + emissive = [emissive[img_idx].resize(images[img_idx].size) for img_idx in range(len(images))] + final_emissive, offsets_emissive = packing.images(emissive, power_resize=True) + + # the size of the final texture image final_size = np.array(final.size, dtype=np.float64) # collect scaled new UV coordinates by material index @@ -718,8 +795,20 @@ def material_to_img(mat): xy_off = off / final_size # scale and translate each of the new UV coordinates # also make sure they are in 0.0-1.0 using modulus (i.e. wrap) - new_uv.update({g: ((uvs[g] % 1.0) * scale) + xy_off - for g in group}) + group_uvs = {} + for g in group: + g_uvs = uvs[g] + # only wrap pixels that are outside of 0.0-1.0. 
+ # use a small leeway of half a pixel for floating point inaccuracies and the case of uv==1.0 + half_pixel_width = 1.0 / (2 * img.size[0]) + half_pixel_height = 1.0 / (2 * img.size[1]) + wrap_mask_u = (g_uvs[:,0] <= -half_pixel_width) | (g_uvs[:,0] >= (1.0 + half_pixel_width)) + wrap_mask_v = (g_uvs[:,1] <= -half_pixel_height) | (g_uvs[:,1] >= (1.0 + half_pixel_height)) + wrap_mask = np.stack([wrap_mask_u, wrap_mask_v], axis=-1) + g_uvs[wrap_mask] = g_uvs[wrap_mask] % 1.0 + group_uvs[g] = (g_uvs * scale) + xy_off + + new_uv.update(group_uvs) # stack the new UV coordinates in the original order stacked = np.vstack([new_uv[i] for i in range(len(uvs))]) @@ -731,8 +820,8 @@ def material_to_img(mat): check = [] for uv, mat in zip(uvs, materials): # get the image from the material and whether or not - # it had to fill in with default dataa - img = material_to_img(mat) + # it had to fill in with default data + img = get_base_color_texture(mat) current = color.uv_to_color(image=img, uv=(uv % 1)) check.append(current) @@ -745,4 +834,7 @@ def material_to_img(mat): # interpolation on complicated stuff can break this assert (compare == check_flat).all() - return SimpleMaterial(image=final), stacked + if use_pbr: + return PBRMaterial(baseColorTexture=final, metallicRoughnessTexture=final_metallic_roughness, emissiveTexture=final_emissive), stacked + else: + return SimpleMaterial(image=final), stacked \ No newline at end of file From 2b8548c78eba5c0a8d6dcd26578701a8244e2bb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=96=B9=E6=AD=A6=E5=8D=93?= Date: Wed, 30 Aug 2023 17:22:39 +0800 Subject: [PATCH 021/144] nonzero mask --- trimesh/ray/ray_triangle.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/trimesh/ray/ray_triangle.py b/trimesh/ray/ray_triangle.py index 74280def8..16a64306d 100644 --- a/trimesh/ray/ray_triangle.py +++ b/trimesh/ray/ray_triangle.py @@ -395,9 +395,9 @@ def ray_bounds(ray_origins, # point = direction*t + origin # p = dt + o # t = (p-o)/d - t = (axis_bound - axis_ori) / axis_dir - # prevent np.inf by division by zero - t[axis_dir == 0.0] = 0 + nonzero = (axis_dir != 0.0).reshape(-1) + t = np.zeros_like(axis_bound) + t[nonzero] = (axis_bound[nonzero] - axis_ori[nonzero]) / axis_dir[nonzero] # prevent the bounding box from including triangles # behind the ray origin From 4100f95105bb364740a80812daf43176513ef829 Mon Sep 17 00:00:00 2001 From: Mathias Parger Date: Wed, 30 Aug 2023 15:30:29 +0200 Subject: [PATCH 022/144] added padding to fused textures to prevent interpolation between textures --- trimesh/visual/material.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/trimesh/visual/material.py b/trimesh/visual/material.py index 9c5b9d22c..fdbcebdf9 100644 --- a/trimesh/visual/material.py +++ b/trimesh/visual/material.py @@ -740,6 +740,19 @@ def get_emissive_texture(mat): [0, 0, 0], (1, 1, 3)).astype(np.uint8)) # make sure we're always returning in RGBA mode return img.convert('RGB') + + def pad_image(src, padding=1): + if isinstance(padding, int): + padding = (padding, padding) + # uses replication padding + x, y = np.meshgrid(np.arange(src.shape[0] + 2*padding[0]), np.arange(src.shape[1] + 2*padding[1])) + x -= padding[0] + y -= padding[1] + x = np.clip(x, 0, src.shape[0] - 1) + y = np.clip(y, 0, src.shape[1] - 1) + + result = src[y, x] + return result if deduplicate: # start by collecting a list of indexes for each material hash @@ -755,10 +768,13 @@ def get_emissive_texture(mat): assert 
set(np.concatenate(mat_idx).ravel()) == set(range(len(uvs))) assert len(uvs) == len(materials) + padding = 1 + use_pbr = any(isinstance(m, PBRMaterial) for m in materials) # collect the images from the materials images = [get_base_color_texture(materials[g[0]]) for g in mat_idx] + images = [Image.fromarray(pad_image(np.array(img), padding)) for img in images] # pack the multiple images into a single large image final, offsets = packing.images(images, power_resize=True) @@ -768,7 +784,7 @@ def get_emissive_texture(mat): # ensure that we use the same image size as for the base color, otherwise the UV coordinates might be wrong metallic_roughness = [metallic_roughness[img_idx].resize(images[img_idx].size) for img_idx in range(len(images))] - + metallic_roughness = [Image.fromarray(pad_image(np.array(img), padding)) for img in metallic_roughness] final_metallic_roughness, offsets_metallic_roughness = packing.images(metallic_roughness, power_resize=True) # we only need the first two channels @@ -781,6 +797,7 @@ def get_emissive_texture(mat): else: # ensure that we use the same image size as for the base color, otherwise the UV coordinates might be wrong emissive = [emissive[img_idx].resize(images[img_idx].size) for img_idx in range(len(images))] + emissive = [Image.fromarray(pad_image(np.array(img), padding)) for img in emissive] final_emissive, offsets_emissive = packing.images(emissive, power_resize=True) @@ -790,9 +807,9 @@ def get_emissive_texture(mat): new_uv = {} for group, img, off in zip(mat_idx, images, offsets): # how big was the original image - scale = img.size / final_size + scale = (np.array(img.size) - 2 * padding) / final_size # what is the offset in fractions of final image - xy_off = off / final_size + xy_off = (off + padding) / final_size # scale and translate each of the new UV coordinates # also make sure they are in 0.0-1.0 using modulus (i.e. wrap) group_uvs = {} From ab837ce5027489e23793342200669923ae28eb8c Mon Sep 17 00:00:00 2001 From: Mathias Parger Date: Wed, 30 Aug 2023 11:05:53 +0200 Subject: [PATCH 023/144] fusion of multiple pbr meshes --- trimesh/visual/material.py | 106 ++++++++++++++++++++++++++++++++++--- 1 file changed, 99 insertions(+), 7 deletions(-) diff --git a/trimesh/visual/material.py b/trimesh/visual/material.py index 4dc745b30..cd2f290c3 100644 --- a/trimesh/visual/material.py +++ b/trimesh/visual/material.py @@ -751,7 +751,7 @@ def pack(materials, uvs, deduplicate=True): from ..path import packing import collections - def material_to_img(mat): + def get_base_color_texture(mat): """ Logic for extracting a simple image from each material. """ @@ -779,6 +779,60 @@ def material_to_img(mat): # make sure we're always returning in RGBA mode return img.convert('RGBA') + def get_metallic_roughness_texture(mat): + """ + Logic for extracting a simple image from each material. 
+ """ + # extract an image for each material + img = None + if isinstance(mat, PBRMaterial): + if mat.metallicRoughnessTexture is not None: + img = mat.metallicRoughnessTexture + img_arr = np.array(img) + if len(img_arr.shape) == 2: + img_arr = img_arr[...,None] + + if img_arr.shape[-1] == 1: + img_arr = np.concatenate([img_arr, img_arr], axis=-1) + + if img_arr.shape[-1] == 2: + # we must use RGB here, because 0 alpha does not work for PIL scale later + img_arr = np.concatenate([img_arr, np.ones_like(img_arr) * 255], axis=-1) + img = Image.fromarray(img_arr) + else: + if mat.metallicFactor is not None: + metallic = mat.metallicFactor + else: + metallic = 0.0 + if mat.roughnessFactor is not None: + roughness = mat.roughnessFactor + else: + roughness = 1.0 + + metallic_roughnesss = np.round(np.array([roughness, metallic, 1.0], dtype=np.float64)*255) + img = Image.fromarray(metallic_roughnesss[None,None].astype(np.uint8)) + return img + + def get_emissive_texture(mat): + """ + Logic for extracting a simple image from each material. + """ + # extract an image for each material + img = None + if isinstance(mat, PBRMaterial): + if mat.emissiveTexture is not None: + img = mat.emissiveTexture + elif mat.emissiveFactor is not None: + c = color.to_rgba(mat.emissiveFactor) + assert c.shape == (3,) + assert c.dtype == np.uint8 + img = Image.fromarray(c.reshape((1, 1, -1))) + else: + img = Image.fromarray(np.reshape( + [0, 0, 0], (1, 1, 3)).astype(np.uint8)) + # make sure we're always returning in RGBA mode + return img.convert('RGB') + if deduplicate: # start by collecting a list of indexes for each material hash unique_idx = collections.defaultdict(list) @@ -793,12 +847,35 @@ def material_to_img(mat): assert set(np.concatenate(mat_idx).ravel()) == set(range(len(uvs))) assert len(uvs) == len(materials) + use_pbr = any(isinstance(m, PBRMaterial) for m in materials) + # collect the images from the materials - images = [material_to_img(materials[g[0]]) for g in mat_idx] + images = [get_base_color_texture(materials[g[0]]) for g in mat_idx] # pack the multiple images into a single large image final, offsets = packing.images(images, power_resize=True) + if use_pbr: + metallic_roughness = [get_metallic_roughness_texture(materials[g[0]]) for g in mat_idx] + + # ensure that we use the same image size as for the base color, otherwise the UV coordinates might be wrong + metallic_roughness = [metallic_roughness[img_idx].resize(images[img_idx].size) for img_idx in range(len(images))] + + final_metallic_roughness, offsets_metallic_roughness = packing.images(metallic_roughness, power_resize=True) + + # we only need the first two channels + final_metallic_roughness = Image.fromarray(np.array(final_metallic_roughness)[...,:2]) + + emissive = [get_emissive_texture(materials[g[0]]) for g in mat_idx] + if all(np.array(x).max() == 0 for x in emissive): + emissive = None + final_emissive = None + else: + # ensure that we use the same image size as for the base color, otherwise the UV coordinates might be wrong + emissive = [emissive[img_idx].resize(images[img_idx].size) for img_idx in range(len(images))] + final_emissive, offsets_emissive = packing.images(emissive, power_resize=True) + + # the size of the final texture image final_size = np.array(final.size, dtype=np.float64) # collect scaled new UV coordinates by material index @@ -810,8 +887,20 @@ def material_to_img(mat): xy_off = off / final_size # scale and translate each of the new UV coordinates # also make sure they are in 0.0-1.0 using modulus (i.e. 
wrap) - new_uv.update({g: ((uvs[g] % 1.0) * scale) + xy_off - for g in group}) + group_uvs = {} + for g in group: + g_uvs = uvs[g] + # only wrap pixels that are outside of 0.0-1.0. + # use a small leeway of half a pixel for floating point inaccuracies and the case of uv==1.0 + half_pixel_width = 1.0 / (2 * img.size[0]) + half_pixel_height = 1.0 / (2 * img.size[1]) + wrap_mask_u = (g_uvs[:,0] <= -half_pixel_width) | (g_uvs[:,0] >= (1.0 + half_pixel_width)) + wrap_mask_v = (g_uvs[:,1] <= -half_pixel_height) | (g_uvs[:,1] >= (1.0 + half_pixel_height)) + wrap_mask = np.stack([wrap_mask_u, wrap_mask_v], axis=-1) + g_uvs[wrap_mask] = g_uvs[wrap_mask] % 1.0 + group_uvs[g] = (g_uvs * scale) + xy_off + + new_uv.update(group_uvs) # stack the new UV coordinates in the original order stacked = np.vstack([new_uv[i] for i in range(len(uvs))]) @@ -823,8 +912,8 @@ def material_to_img(mat): check = [] for uv, mat in zip(uvs, materials): # get the image from the material and whether or not - # it had to fill in with default dataa - img = material_to_img(mat) + # it had to fill in with default data + img = get_base_color_texture(mat) current = color.uv_to_color(image=img, uv=(uv % 1)) check.append(current) @@ -837,4 +926,7 @@ def material_to_img(mat): # interpolation on complicated stuff can break this assert (compare == check_flat).all() - return SimpleMaterial(image=final), stacked + if use_pbr: + return PBRMaterial(baseColorTexture=final, metallicRoughnessTexture=final_metallic_roughness, emissiveTexture=final_emissive), stacked + else: + return SimpleMaterial(image=final), stacked \ No newline at end of file From 6aa73ba3d565429e05a7155d9872ef0f875a8246 Mon Sep 17 00:00:00 2001 From: Mathias Parger Date: Wed, 30 Aug 2023 15:30:29 +0200 Subject: [PATCH 024/144] added padding to fused textures to prevent interpolation between textures --- trimesh/visual/material.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/trimesh/visual/material.py b/trimesh/visual/material.py index cd2f290c3..9d3b57683 100644 --- a/trimesh/visual/material.py +++ b/trimesh/visual/material.py @@ -832,6 +832,19 @@ def get_emissive_texture(mat): [0, 0, 0], (1, 1, 3)).astype(np.uint8)) # make sure we're always returning in RGBA mode return img.convert('RGB') + + def pad_image(src, padding=1): + if isinstance(padding, int): + padding = (padding, padding) + # uses replication padding + x, y = np.meshgrid(np.arange(src.shape[0] + 2*padding[0]), np.arange(src.shape[1] + 2*padding[1])) + x -= padding[0] + y -= padding[1] + x = np.clip(x, 0, src.shape[0] - 1) + y = np.clip(y, 0, src.shape[1] - 1) + + result = src[y, x] + return result if deduplicate: # start by collecting a list of indexes for each material hash @@ -847,10 +860,13 @@ def get_emissive_texture(mat): assert set(np.concatenate(mat_idx).ravel()) == set(range(len(uvs))) assert len(uvs) == len(materials) + padding = 1 + use_pbr = any(isinstance(m, PBRMaterial) for m in materials) # collect the images from the materials images = [get_base_color_texture(materials[g[0]]) for g in mat_idx] + images = [Image.fromarray(pad_image(np.array(img), padding)) for img in images] # pack the multiple images into a single large image final, offsets = packing.images(images, power_resize=True) @@ -860,7 +876,7 @@ def get_emissive_texture(mat): # ensure that we use the same image size as for the base color, otherwise the UV coordinates might be wrong metallic_roughness = [metallic_roughness[img_idx].resize(images[img_idx].size) for img_idx in 
range(len(images))] - + metallic_roughness = [Image.fromarray(pad_image(np.array(img), padding)) for img in metallic_roughness] final_metallic_roughness, offsets_metallic_roughness = packing.images(metallic_roughness, power_resize=True) # we only need the first two channels @@ -873,6 +889,7 @@ def get_emissive_texture(mat): else: # ensure that we use the same image size as for the base color, otherwise the UV coordinates might be wrong emissive = [emissive[img_idx].resize(images[img_idx].size) for img_idx in range(len(images))] + emissive = [Image.fromarray(pad_image(np.array(img), padding)) for img in emissive] final_emissive, offsets_emissive = packing.images(emissive, power_resize=True) @@ -882,9 +899,9 @@ def get_emissive_texture(mat): new_uv = {} for group, img, off in zip(mat_idx, images, offsets): # how big was the original image - scale = img.size / final_size + scale = (np.array(img.size) - 2 * padding) / final_size # what is the offset in fractions of final image - xy_off = off / final_size + xy_off = (off + padding) / final_size # scale and translate each of the new UV coordinates # also make sure they are in 0.0-1.0 using modulus (i.e. wrap) group_uvs = {} From d52b6dab01954e252ac2030aded9d32cc0f5c75e Mon Sep 17 00:00:00 2001 From: Mathias Parger Date: Wed, 30 Aug 2023 17:40:22 +0200 Subject: [PATCH 025/144] fixed perceived brightness calculation --- trimesh/visual/gloss.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trimesh/visual/gloss.py b/trimesh/visual/gloss.py index 7ab55d0d3..771453522 100644 --- a/trimesh/visual/gloss.py +++ b/trimesh/visual/gloss.py @@ -83,7 +83,7 @@ def solve_metallic(diffuse, specular, one_minus_specular_strength): return metallic def get_perceived_brightness(rgb): - return np.dot(rgb[..., :3], [0.299, 0.587, 0.114]) + return np.sqrt(np.dot(rgb[..., :3]**2, [0.299, 0.587, 0.114])) def toPIL(img): if isinstance(img, Image): From f27e8ed2c36240f59aaf9dac91a8bc1edf57f5c0 Mon Sep 17 00:00:00 2001 From: Mathias Parger Date: Wed, 30 Aug 2023 17:48:17 +0200 Subject: [PATCH 026/144] keep vertex normals when fusing meshes --- trimesh/util.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/trimesh/util.py b/trimesh/util.py index 0be3003fd..a143c53ef 100644 --- a/trimesh/util.py +++ b/trimesh/util.py @@ -1503,6 +1503,10 @@ def concatenate(a, b=None): if all('face_normals' in m._cache for m in is_mesh): face_normals = np.vstack( [m.face_normals for m in is_mesh]) + + # always save vertex normals + vertex_normals = vstack_empty( + [m.vertex_normals.copy() for m in is_mesh]) try: # concatenate visuals @@ -1516,6 +1520,7 @@ def concatenate(a, b=None): return trimesh_type(vertices=vertices, faces=faces, face_normals=face_normals, + vertex_normals=vertex_normals, visual=visual, process=False) From b15b2162e295316f922fc319f56e6181b6df9003 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Wed, 30 Aug 2023 14:16:46 -0400 Subject: [PATCH 027/144] ci release changes --- .github/workflows/release.yml | 11 ++++------- .github/workflows/test.yml | 9 ++++++--- LICENSE.md | 2 +- codecov.yml | 8 -------- pyproject.toml | 3 +++ 5 files changed, 14 insertions(+), 19 deletions(-) delete mode 100644 codecov.yml diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index a3096fe9c..76251f2f8 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -26,13 +26,10 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - python-version: [3.6, 3.7, 3.8, 3.9, "3.10", "3.11"] - os: [ubuntu-20.04, 
macos-latest, windows-latest] + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] + os: [ubuntu-latest, macos-latest, windows-latest] exclude: - # windows runners have gotten very flaky - # exclude all windows test runs except for one - - os: windows-latest - python-version: 3.6 + # windows runners have gotten flaky - os: windows-latest python-version: 3.8 - os: windows-latest @@ -63,7 +60,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.x' + python-version: '3.11' - name: Install publishing dependencies run: | python -m pip install --upgrade pip diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 07a1d6308..992b7f9d1 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -15,10 +15,13 @@ jobs: uses: actions/setup-python@v4 with: python-version: "3.11" - - name: Install Formatting - run: pip install ruff - - name: Check Formatting + - name: Install + run: pip install ruff black + - name: Run Ruff run: ruff . +# - name: Run Black +# run: black --check . + tests: name: Run Unit Tests runs-on: ${{ matrix.os }} diff --git a/LICENSE.md b/LICENSE.md index d80b18bf3..d0571124d 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2019 Michael Dawson-Haggerty +Copyright (c) 2023 Michael Dawson-Haggerty Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/codecov.yml b/codecov.yml deleted file mode 100644 index cf9c95b91..000000000 --- a/codecov.yml +++ /dev/null @@ -1,8 +0,0 @@ -coverage: - status: - project: - default: - # basic - target: 75% - threshold: 10% - patch: off diff --git a/pyproject.toml b/pyproject.toml index 956b15621..ad709dab6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -121,6 +121,9 @@ ignore = [ "E501", # Line too long ({width} > {limit} characters) "B904", # raise ... from err "B905", # zip() without an explicit strict= parameter + "ANN101", # type hint for `self` + "ANN002", # type hint for *args + "ANN003", # type hint for **kwargs ] line-length = 90 From d57c974c251633619eaf9d4b705c2919391c9613 Mon Sep 17 00:00:00 2001 From: Clemens Eppner Date: Wed, 30 Aug 2023 17:24:37 -0700 Subject: [PATCH 028/144] Add torus --- trimesh/creation.py | 64 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) diff --git a/trimesh/creation.py b/trimesh/creation.py index 4d7897912..1c9b0fc6c 100644 --- a/trimesh/creation.py +++ b/trimesh/creation.py @@ -1272,3 +1272,67 @@ def truncated_prisms(tris, origin=None, normal=None): mesh = Trimesh(vertices=vertices, faces=faces, process=False) return mesh + + +def torus(major_radius, + minor_radius, + major_sections=32, + minor_sections=32, + transform=None, + **kwargs): + """Create a mesh of a torus around Z centered at the origin. + + Parameters + ------------ + major_radius: (float) + Radius from the center of the torus to the center of the tube. + minor_radius: (float) + Radius of the tube. 
+ major_sections: int + Number of sections around major radius result should have + If not specified default is 32 per revolution + minor_sections: int + Number of sections around minor radius result should have + If not specified default is 32 per revolution + transform: (4, 4) float + Transformation matrix + **kwargs: + passed to Trimesh to create torus + + Returns + ------------ + geometry : trimesh.Trimesh + Mesh of a torus + """ + vertices = [] + faces = [] + + for i in range(major_sections): + theta = 2 * np.pi * i / major_sections + for j in range(minor_sections): + phi = 2 * np.pi * j / minor_sections + + x = (major_radius + minor_radius * np.cos(phi)) * np.cos(theta) + y = (major_radius + minor_radius * np.cos(phi)) * np.sin(theta) + z = minor_radius * np.sin(phi) + + vertices.append([x, y, z]) + + # Create faces + a = i * minor_sections + j + b = ((i + 1) % major_sections) * minor_sections + j + c = ((i + 1) % major_sections) * minor_sections + (j + 1) % minor_sections + d = i * minor_sections + (j + 1) % minor_sections + + faces.append([a, b, c]) + faces.append([a, c, d]) + + torus = Trimesh(vertices=vertices, + faces=faces, + process=False, + **kwargs) + + if transform is not None: + torus.apply_transform(transform) + + return torus From f21d3adba6406b95d30ab03e20f91cd742eaa833 Mon Sep 17 00:00:00 2001 From: Clemens Eppner Date: Wed, 30 Aug 2023 17:28:11 -0700 Subject: [PATCH 029/144] Add test for torus --- tests/test_creation.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/test_creation.py b/tests/test_creation.py index f6511fa5f..1736939b2 100644 --- a/tests/test_creation.py +++ b/tests/test_creation.py @@ -326,6 +326,15 @@ def check_triangulation(v, f, true_area): assert g.np.isclose(area, true_area) +def test_torus(self): + torus = g.trimesh.creation.torus + + m = torus(major_radius=1.0, minor_radius=0.2) + + extents = g.np.array([1.4, 1.4, 0.4]) + assert g.np.allclose(m.extents, extents) + assert g.np.allclose(m.bounds, [-extents / 2.0, extents / 2.0]) + if __name__ == '__main__': g.trimesh.util.attach_to_log() g.unittest.main() From 2b549f0ac3c660595c907dfa2396f168f48b1c0c Mon Sep 17 00:00:00 2001 From: Clemens Eppner Date: Thu, 31 Aug 2023 19:38:46 -0700 Subject: [PATCH 030/144] Fix test --- tests/test_creation.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/test_creation.py b/tests/test_creation.py index 1736939b2..4f654f0a1 100644 --- a/tests/test_creation.py +++ b/tests/test_creation.py @@ -326,12 +326,16 @@ def check_triangulation(v, f, true_area): assert g.np.isclose(area, true_area) -def test_torus(self): +def test_torus(): torus = g.trimesh.creation.torus - m = torus(major_radius=1.0, minor_radius=0.2) + major_radius = 1.0 + minor_radius = 0.2 + m = torus(major_radius=major_radius, minor_radius=minor_radius) - extents = g.np.array([1.4, 1.4, 0.4]) + extents = g.np.array([2 * major_radius + 2 * minor_radius, + 2 * major_radius + 2 * minor_radius, + 2 * minor_radius]) assert g.np.allclose(m.extents, extents) assert g.np.allclose(m.bounds, [-extents / 2.0, extents / 2.0]) From f8652b5174d911252732bbce6806f893646f2580 Mon Sep 17 00:00:00 2001 From: Clemens Eppner Date: Thu, 31 Aug 2023 19:39:46 -0700 Subject: [PATCH 031/144] Vectorize code --- trimesh/creation.py | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/trimesh/creation.py b/trimesh/creation.py index 1c9b0fc6c..409f26fa1 100644 --- a/trimesh/creation.py +++ b/trimesh/creation.py @@ -1287,7 
+1287,7 @@ def torus(major_radius, major_radius: (float) Radius from the center of the torus to the center of the tube. minor_radius: (float) - Radius of the tube. + Radius of the tube. major_sections: int Number of sections around major radius result should have If not specified default is 32 per revolution @@ -1298,41 +1298,41 @@ def torus(major_radius, Transformation matrix **kwargs: passed to Trimesh to create torus - + Returns ------------ geometry : trimesh.Trimesh Mesh of a torus """ - vertices = [] - faces = [] + # Calculate vertex coordinates + theta = np.linspace(0, 2 * np.pi, major_sections, endpoint=False).repeat(minor_sections) + phi = np.tile(np.linspace(0, 2 * np.pi, minor_sections, endpoint=False), major_sections) - for i in range(major_sections): - theta = 2 * np.pi * i / major_sections - for j in range(minor_sections): - phi = 2 * np.pi * j / minor_sections + x = (major_radius + minor_radius * np.cos(phi)) * np.cos(theta) + y = (major_radius + minor_radius * np.cos(phi)) * np.sin(theta) + z = minor_radius * np.sin(phi) - x = (major_radius + minor_radius * np.cos(phi)) * np.cos(theta) - y = (major_radius + minor_radius * np.cos(phi)) * np.sin(theta) - z = minor_radius * np.sin(phi) + vertices = np.stack((x, y, z), axis=-1).reshape(-1, 3) - vertices.append([x, y, z]) + # Calculate faces + i_range = np.arange(minor_sections) + j_range = np.arange(major_sections) - # Create faces - a = i * minor_sections + j - b = ((i + 1) % major_sections) * minor_sections + j - c = ((i + 1) % major_sections) * minor_sections + (j + 1) % minor_sections - d = i * minor_sections + (j + 1) % minor_sections + i_grid, j_grid = np.meshgrid(i_range, j_range, indexing='ij') - faces.append([a, b, c]) - faces.append([a, c, d]) + a = (i_grid * minor_sections + j_grid).ravel() + b = (((i_grid + 1) % major_sections) * minor_sections + j_grid).ravel() + c = (((i_grid + 1) % major_sections) * minor_sections + (j_grid + 1) % minor_sections).ravel() + d = (i_grid * minor_sections + (j_grid + 1) % minor_sections).ravel() + + faces = np.column_stack((a, b, c, a, c, d)).reshape(-1, 3) torus = Trimesh(vertices=vertices, faces=faces, process=False, **kwargs) - + if transform is not None: torus.apply_transform(transform) - + return torus From e4f4db3f6a695534e5a9a14856067fa14d23d82e Mon Sep 17 00:00:00 2001 From: Clemens Eppner Date: Thu, 31 Aug 2023 19:43:56 -0700 Subject: [PATCH 032/144] Fix formatting issues --- trimesh/creation.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/trimesh/creation.py b/trimesh/creation.py index 409f26fa1..1b97b8c36 100644 --- a/trimesh/creation.py +++ b/trimesh/creation.py @@ -1305,9 +1305,11 @@ def torus(major_radius, Mesh of a torus """ # Calculate vertex coordinates - theta = np.linspace(0, 2 * np.pi, major_sections, endpoint=False).repeat(minor_sections) - phi = np.tile(np.linspace(0, 2 * np.pi, minor_sections, endpoint=False), major_sections) - + theta = np.linspace(0, 2 * np.pi, major_sections, + endpoint=False).repeat(minor_sections) + phi = np.tile(np.linspace(0, 2 * np.pi, minor_sections, + endpoint=False), major_sections) + x = (major_radius + minor_radius * np.cos(phi)) * np.cos(theta) y = (major_radius + minor_radius * np.cos(phi)) * np.sin(theta) z = minor_radius * np.sin(phi) @@ -1322,7 +1324,8 @@ def torus(major_radius, a = (i_grid * minor_sections + j_grid).ravel() b = (((i_grid + 1) % major_sections) * minor_sections + j_grid).ravel() - c = (((i_grid + 1) % major_sections) * minor_sections + (j_grid + 1) % minor_sections).ravel() + c 
= (((i_grid + 1) % major_sections) * minor_sections + (j_grid + 1) + % minor_sections).ravel() d = (i_grid * minor_sections + (j_grid + 1) % minor_sections).ravel() faces = np.column_stack((a, b, c, a, c, d)).reshape(-1, 3) From 3ce5b080af2ba1f172d38841de449d4bf17b51e0 Mon Sep 17 00:00:00 2001 From: Mathias Parger Date: Fri, 1 Sep 2023 11:01:02 +0200 Subject: [PATCH 033/144] many small bug fixes for pbr material fusion --- trimesh/visual/material.py | 125 ++++++++++++++++++++++--------------- 1 file changed, 74 insertions(+), 51 deletions(-) diff --git a/trimesh/visual/material.py b/trimesh/visual/material.py index 9d3b57683..070769d9c 100644 --- a/trimesh/visual/material.py +++ b/trimesh/visual/material.py @@ -525,7 +525,7 @@ def normalTexture(self): Normal texture. """ return self._data.get('normalTexture') - + @normalTexture.setter def normalTexture(self, value): if value is None: @@ -545,7 +545,7 @@ def emissiveTexture(self): Emissive texture. """ return self._data.get('emissiveTexture') - + @emissiveTexture.setter def emissiveTexture(self, value): if value is None: @@ -565,7 +565,7 @@ def occlusionTexture(self): Occlusion texture. """ return self._data.get('occlusionTexture') - + @occlusionTexture.setter def occlusionTexture(self, value): if value is None: @@ -606,7 +606,7 @@ def metallicRoughnessTexture(self): Metallic-roughness texture. """ return self._data.get('metallicRoughnessTexture') - + @metallicRoughnessTexture.setter def metallicRoughnessTexture(self, value): if value is None: @@ -618,7 +618,7 @@ def metallicRoughnessTexture(self, value): @property def name(self): return self._data.get('name') - + @name.setter def name(self, value): if value is None: @@ -787,19 +787,8 @@ def get_metallic_roughness_texture(mat): img = None if isinstance(mat, PBRMaterial): if mat.metallicRoughnessTexture is not None: - img = mat.metallicRoughnessTexture - img_arr = np.array(img) - if len(img_arr.shape) == 2: - img_arr = img_arr[...,None] - - if img_arr.shape[-1] == 1: - img_arr = np.concatenate([img_arr, img_arr], axis=-1) - - if img_arr.shape[-1] == 2: - # we must use RGB here, because 0 alpha does not work for PIL scale later - img_arr = np.concatenate([img_arr, np.ones_like(img_arr) * 255], axis=-1) - img = Image.fromarray(img_arr) - else: + img = mat.metallicRoughnessTexture.convert('RGB') + else: if mat.metallicFactor is not None: metallic = mat.metallicFactor else: @@ -808,9 +797,11 @@ def get_metallic_roughness_texture(mat): roughness = mat.roughnessFactor else: roughness = 1.0 - - metallic_roughnesss = np.round(np.array([roughness, metallic, 1.0], dtype=np.float64)*255) - img = Image.fromarray(metallic_roughnesss[None,None].astype(np.uint8)) + + metallic_roughnesss = np.round( + np.array([metallic, roughness, 1.0], dtype=np.float64) * 255) + img = Image.fromarray( + metallic_roughnesss[None, None].astype(np.uint8), mode='RGB') return img def get_emissive_texture(mat): @@ -832,16 +823,17 @@ def get_emissive_texture(mat): [0, 0, 0], (1, 1, 3)).astype(np.uint8)) # make sure we're always returning in RGBA mode return img.convert('RGB') - + def pad_image(src, padding=1): if isinstance(padding, int): padding = (padding, padding) # uses replication padding - x, y = np.meshgrid(np.arange(src.shape[0] + 2*padding[0]), np.arange(src.shape[1] + 2*padding[1])) + x, y = np.meshgrid(np.arange( + src.shape[1] + 2 * padding[0]), np.arange(src.shape[0] + 2 * padding[1])) x -= padding[0] y -= padding[1] - x = np.clip(x, 0, src.shape[0] - 1) - y = np.clip(y, 0, src.shape[1] - 1) + x = np.clip(x, 0, 
src.shape[1] - 1)
+        y = np.clip(y, 0, src.shape[0] - 1)

         result = src[y, x]
         return result
@@ -860,64 +852,94 @@ def pad_image(src, padding=1):
     assert set(np.concatenate(mat_idx).ravel()) == set(range(len(uvs)))
     assert len(uvs) == len(materials)

-    padding = 1
-
     use_pbr = any(isinstance(m, PBRMaterial) for m in materials)

     # collect the images from the materials
     images = [get_base_color_texture(materials[g[0]]) for g in mat_idx]
+    unpadded_sizes = [np.array(img.size) for img in images]
+
+    if len(images) <= 1:
+        # padding has the downside that it often results in greatly larger images,
+        # because of the jump to the next power of two
+        padding = 0
+    else:
+        # without padding, we might interpolate between trimsheet islands
+        padding = 1
+
     images = [Image.fromarray(pad_image(np.array(img), padding)) for img in images]

     # pack the multiple images into a single large image
     final, offsets = packing.images(images, power_resize=True)

     if use_pbr:
-        metallic_roughness = [get_metallic_roughness_texture(materials[g[0]]) for g in mat_idx]
-
-        # ensure that we use the same image size as for the base color, otherwise the UV coordinates might be wrong
-        metallic_roughness = [metallic_roughness[img_idx].resize(images[img_idx].size) for img_idx in range(len(images))]
-        metallic_roughness = [Image.fromarray(pad_image(np.array(img), padding)) for img in metallic_roughness]
-        final_metallic_roughness, offsets_metallic_roughness = packing.images(metallic_roughness, power_resize=True)
+        metallic_roughness = [get_metallic_roughness_texture(
+            materials[g[0]]) for g in mat_idx]
+
+        # ensure that we use the same image size as for the base color, otherwise
+        # the UV coordinates might be wrong
+        metallic_roughness = [
+            metallic_roughness[img_idx].resize(
+                unpadded_sizes[img_idx]) for img_idx in range(
+                len(images))]
+        metallic_roughness = [
+            Image.fromarray(
+                pad_image(
+                    np.array(img),
+                    padding)) for img in metallic_roughness]
+        final_metallic_roughness, offsets_metallic_roughness = packing.images(
+            metallic_roughness, power_resize=True)

         # we only need the first two channels
-        final_metallic_roughness = Image.fromarray(np.array(final_metallic_roughness)[...,:2])
+        final_metallic_roughness = Image.fromarray(
+            np.flip(np.array(final_metallic_roughness)[..., :2], axis=-1))

         emissive = [get_emissive_texture(materials[g[0]]) for g in mat_idx]
         if all(np.array(x).max() == 0 for x in emissive):
             emissive = None
             final_emissive = None
         else:
-            # ensure that we use the same image size as for the base color, otherwise the UV coordinates might be wrong
-            emissive = [emissive[img_idx].resize(images[img_idx].size) for img_idx in range(len(images))]
-            emissive = [Image.fromarray(pad_image(np.array(img), padding)) for img in emissive]
+            # ensure that we use the same image size as for the base color, otherwise
+            # the UV coordinates might be wrong
+            emissive = [
+                emissive[img_idx].resize(
+                    unpadded_sizes[img_idx]) for img_idx in range(
+                    len(images))]
+            emissive = [
+                Image.fromarray(
+                    pad_image(
+                        np.array(img),
+                        padding),
+                    mode="RGB") for img in emissive]
             final_emissive, offsets_emissive = packing.images(emissive, power_resize=True)

     # the size of the final texture image
     final_size = np.array(final.size, dtype=np.float64)
     # collect scaled new UV coordinates by material index
     new_uv = {}
     for group, img, off in zip(mat_idx, images, offsets):
         # how big was the original image
-        scale = (np.array(img.size) - 2 * padding) / final_size
+        scale = (np.array(img.size) - 1 - 2 * padding) / (final_size - 1)
         # what is the offset in
fractions of final image
-        xy_off = (off + padding) / final_size
+        xy_off = (off + padding) / (final_size - 1)
         # scale and translate each of the new UV coordinates
         # also make sure they are in 0.0-1.0 using modulus (i.e. wrap)
-        group_uvs = {}
         for g in group:
-            g_uvs = uvs[g]
-            # only wrap pixels that are outside of 0.0-1.0.
-            # use a small leeway of half a pixel for floating point inaccuracies and the case of uv==1.0
+            g_uvs = uvs[g].copy()
+            # only wrap pixels that are outside of 0.0-1.0.
+            # use a small leeway of half a pixel for floating point inaccuracies and
+            # the case of uv==1.0
             half_pixel_width = 1.0 / (2 * img.size[0])
             half_pixel_height = 1.0 / (2 * img.size[1])
-            wrap_mask_u = (g_uvs[:,0] <= -half_pixel_width) | (g_uvs[:,0] >= (1.0 + half_pixel_width))
-            wrap_mask_v = (g_uvs[:,1] <= -half_pixel_height) | (g_uvs[:,1] >= (1.0 + half_pixel_height))
+            wrap_mask_u = (g_uvs[:,
+                           0] <= -half_pixel_width) | (g_uvs[:,
+                                                             0] >= (1.0 + half_pixel_width))
+            wrap_mask_v = (g_uvs[:,
+                           1] <= -half_pixel_height) | (g_uvs[:,
+                                                              1] >= (1.0 + half_pixel_height))
             wrap_mask = np.stack([wrap_mask_u, wrap_mask_v], axis=-1)
-            g_uvs[wrap_mask] = g_uvs[wrap_mask] % 1.0
-            group_uvs[g] = (g_uvs * scale) + xy_off
-
-            new_uv.update(group_uvs)
+
+            g_uvs[wrap_mask] = g_uvs[wrap_mask] % 1.0
+            new_uv[g] = (g_uvs * scale) + xy_off

     # stack the new UV coordinates in the original order
     stacked = np.vstack([new_uv[i] for i in range(len(uvs))])

@@ -944,6 +966,7 @@ def pad_image(src, padding=1):
     assert (compare == check_flat).all()

     if use_pbr:
-        return PBRMaterial(baseColorTexture=final, metallicRoughnessTexture=final_metallic_roughness, emissiveTexture=final_emissive), stacked
+        return PBRMaterial(baseColorTexture=final, metallicRoughnessTexture=final_metallic_roughness,
+                           emissiveTexture=final_emissive), stacked
     else:
-        return SimpleMaterial(image=final), stacked
\ No newline at end of file
+        return SimpleMaterial(image=final), stacked

From dc86c681b8041fafd13742d6d4fca8c030b128f9 Mon Sep 17 00:00:00 2001
From: Mathias Parger
Date: Fri, 1 Sep 2023 11:21:21 +0200
Subject: [PATCH 034/144] fix formatting

---
 trimesh/visual/material.py | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/trimesh/visual/material.py b/trimesh/visual/material.py
index 070769d9c..72f42343a 100644
--- a/trimesh/visual/material.py
+++ b/trimesh/visual/material.py
@@ -886,7 +886,7 @@ def pad_image(src, padding=1):
                 pad_image(
                     np.array(img),
                     padding)) for img in metallic_roughness]
-        final_metallic_roughness, offsets_metallic_roughness = packing.images(
+        final_metallic_roughness, _ = packing.images(
             metallic_roughness, power_resize=True)

         # we only need the first two channels
@@ -910,7 +910,7 @@ def pad_image(src, padding=1):
                         np.array(img),
                         padding),
                     mode="RGB") for img in emissive]
-            final_emissive, offsets_emissive = packing.images(emissive, power_resize=True)
+            final_emissive, _ = packing.images(emissive, power_resize=True)

     # the size of the final texture image
     final_size = np.array(final.size, dtype=np.float64)
@@ -930,12 +930,10 @@ def pad_image(src, padding=1):
             # the case of uv==1.0
             half_pixel_width = 1.0 / (2 * img.size[0])
             half_pixel_height = 1.0 / (2 * img.size[1])
-            wrap_mask_u = (g_uvs[:,
-                           0] <= -half_pixel_width) | (g_uvs[:,
-                                                             0] >= (1.0 + half_pixel_width))
-            wrap_mask_v = (g_uvs[:,
-                           1] <= -half_pixel_height) | (g_uvs[:,
-                                                              1] >= (1.0 + half_pixel_height))
+            wrap_mask_u = ((g_uvs[:, 0] <= -half_pixel_width) |
+                           (g_uvs[:, 0] >= (1.0 + half_pixel_width)))
+            wrap_mask_v = ((g_uvs[:, 1] <=
-half_pixel_height) | + (g_uvs[:, 1] >= (1.0 + half_pixel_height))) wrap_mask = np.stack([wrap_mask_u, wrap_mask_v], axis=-1) g_uvs[wrap_mask] = g_uvs[wrap_mask] % 1.0 @@ -966,7 +964,12 @@ def pad_image(src, padding=1): assert (compare == check_flat).all() if use_pbr: - return PBRMaterial(baseColorTexture=final, metallicRoughnessTexture=final_metallic_roughness, - emissiveTexture=final_emissive), stacked + return ( + PBRMaterial( + baseColorTexture=final, + metallicRoughnessTexture=final_metallic_roughness, + emissiveTexture=final_emissive + ), + stacked) else: return SimpleMaterial(image=final), stacked From b85f2db1a4fa53512bc35e1bbc2b50c85f71fd1b Mon Sep 17 00:00:00 2001 From: Mathias Parger Date: Fri, 1 Sep 2023 11:26:29 +0200 Subject: [PATCH 035/144] fixed unused rotate parameter resulting in weird errors --- trimesh/path/packing.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/trimesh/path/packing.py b/trimesh/path/packing.py index a72849e13..018340142 100644 --- a/trimesh/path/packing.py +++ b/trimesh/path/packing.py @@ -459,7 +459,11 @@ def rectangles(extents, # run a single insertion order # don't shuffle the first run, shuffle subsequent runs bounds, insert = rectangles_single( - extents=extents, size=size, shuffle=(i != 0)) + extents=extents, + size=size, + shuffle=(i != 0), + rotate=rotate + ) count = insert.sum() extents_all = bounds.reshape((-1, dim)).ptp(axis=0) From aa99d92bb190d19fac58a2b05e4b05aa3b00c215 Mon Sep 17 00:00:00 2001 From: munahaf Date: Fri, 1 Sep 2023 18:46:23 +0000 Subject: [PATCH 036/144] Comment: Updated a module name to allow import without type errors. --- trimesh/interfaces/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trimesh/interfaces/__init__.py b/trimesh/interfaces/__init__.py index d9b3aa506..4c2bb091e 100644 --- a/trimesh/interfaces/__init__.py +++ b/trimesh/interfaces/__init__.py @@ -6,4 +6,4 @@ from . 
import vhacd # add to __all__ as per pep8 -__all__ = [scad, blender, vhacd] +__all__ = ['scad', 'blender', 'vhacd'] From 0c1afc95aae326800686ea5e941c350830fe441f Mon Sep 17 00:00:00 2001 From: Clemens Eppner Date: Fri, 1 Sep 2023 12:26:22 -0700 Subject: [PATCH 037/144] Create torus using revolve() --- trimesh/creation.py | 48 ++++++++++++++------------------------------- 1 file changed, 15 insertions(+), 33 deletions(-) diff --git a/trimesh/creation.py b/trimesh/creation.py index 1b97b8c36..1a8983245 100644 --- a/trimesh/creation.py +++ b/trimesh/creation.py @@ -1304,38 +1304,20 @@ def torus(major_radius, geometry : trimesh.Trimesh Mesh of a torus """ - # Calculate vertex coordinates - theta = np.linspace(0, 2 * np.pi, major_sections, - endpoint=False).repeat(minor_sections) - phi = np.tile(np.linspace(0, 2 * np.pi, minor_sections, - endpoint=False), major_sections) + phi = np.linspace(0, 2 * np.pi, minor_sections, endpoint=False) + linestring = np.column_stack((minor_radius * np.cos(phi), + minor_radius * np.sin(phi))) \ + + [major_radius, 0] - x = (major_radius + minor_radius * np.cos(phi)) * np.cos(theta) - y = (major_radius + minor_radius * np.cos(phi)) * np.sin(theta) - z = minor_radius * np.sin(phi) - - vertices = np.stack((x, y, z), axis=-1).reshape(-1, 3) - - # Calculate faces - i_range = np.arange(minor_sections) - j_range = np.arange(major_sections) - - i_grid, j_grid = np.meshgrid(i_range, j_range, indexing='ij') - - a = (i_grid * minor_sections + j_grid).ravel() - b = (((i_grid + 1) % major_sections) * minor_sections + j_grid).ravel() - c = (((i_grid + 1) % major_sections) * minor_sections + (j_grid + 1) - % minor_sections).ravel() - d = (i_grid * minor_sections + (j_grid + 1) % minor_sections).ravel() - - faces = np.column_stack((a, b, c, a, c, d)).reshape(-1, 3) - - torus = Trimesh(vertices=vertices, - faces=faces, - process=False, - **kwargs) - - if transform is not None: - torus.apply_transform(transform) + if 'metadata' not in kwargs: + kwargs['metadata'] = dict() + kwargs['metadata'].update( + {'shape': 'torus', + 'major_radius': major_radius, + 'minor_radius': minor_radius}) - return torus + # generate torus through simple revolution + return revolve(linestring=linestring, + sections=major_sections, + transform=transform, + **kwargs) From 69c2d9517f93b1127f9cc37b6c1300c97946bde1 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Sat, 2 Sep 2023 01:21:23 -0400 Subject: [PATCH 038/144] ruff --- tests/test_creation.py | 2 +- trimesh/creation.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_creation.py b/tests/test_creation.py index 7777e27e5..fa60f9293 100644 --- a/tests/test_creation.py +++ b/tests/test_creation.py @@ -330,7 +330,7 @@ def test_torus(): major_radius = 1.0 minor_radius = 0.2 m = torus(major_radius=major_radius, minor_radius=minor_radius) - + extents = g.np.array([2 * major_radius + 2 * minor_radius, 2 * major_radius + 2 * minor_radius, 2 * minor_radius]) diff --git a/trimesh/creation.py b/trimesh/creation.py index e3c435bcc..ba49f378d 100644 --- a/trimesh/creation.py +++ b/trimesh/creation.py @@ -1304,7 +1304,7 @@ def torus(major_radius, + [major_radius, 0] if 'metadata' not in kwargs: - kwargs['metadata'] = dict() + kwargs['metadata'] = {} kwargs['metadata'].update( {'shape': 'torus', 'major_radius': major_radius, From 9a6e5f980ae5ea94d24aa64e8dbcc0dbad4e8dba Mon Sep 17 00:00:00 2001 From: Oliver Lengwinat Date: Mon, 4 Sep 2023 14:50:53 +0200 Subject: [PATCH 039/144] Set gmsh options before opening the file Ensure 
gmsh options are set before opening the file. This allows hiding Terminal
output during loading (e.g. "Info: Label...", "Info: Color...") etc.
---
 trimesh/interfaces/gmsh.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/trimesh/interfaces/gmsh.py b/trimesh/interfaces/gmsh.py
index ebabcee0c..e94569819 100644
--- a/trimesh/interfaces/gmsh.py
+++ b/trimesh/interfaces/gmsh.py
@@ -67,6 +67,10 @@ def load_gmsh(file_name, gmsh_args=None):
     gmsh.initialize()
     gmsh.option.setNumber("General.Terminal", 1)
     gmsh.model.add('Surface_Mesh_Generation')
+    # loop through our numbered args which do things, stuff
+    for arg in args:
+        gmsh.option.setNumber(*arg)
+
     gmsh.open(file_name)

     # create a temporary file for the results
@@ -78,9 +82,6 @@ def load_gmsh(file_name, gmsh_args=None):
     if any(file_name.lower().endswith(e)
            for e in ['.brep', '.stp', '.step', '.igs', '.iges']):
         gmsh.model.geo.synchronize()
-        # loop through our numbered args which do things, stuff
-        for arg in args:
-            gmsh.option.setNumber(*arg)
         # generate the mesh
         gmsh.model.mesh.generate(2)
         # write to the temporary file

From 729dfbf2a6321ded463beda6b1dbfcd3b899f260 Mon Sep 17 00:00:00 2001
From: Michael Dawson-Haggerty
Date: Tue, 5 Sep 2023 15:00:23 -0400
Subject: [PATCH 040/144] type hints

---
 README.md                  |   5 +-
 trimesh/base.py            | 948 +++++++++++++++++++------------------
 trimesh/interfaces/gmsh.py |   2 +-
 trimesh/primitives.py      |  28 +-
 4 files changed, 494 insertions(+), 489 deletions(-)

diff --git a/README.md b/README.md
index 02bcd3ec4..032d58588 100644
--- a/README.md
+++ b/README.md
@@ -7,8 +7,9 @@

 | :warning: WARNING |
 |---------------------------|
-| `trimesh >= 4.0.0` makes the minimum Python 3.7 and is in pre-release and will be released soon |
-| You can test your stack with: `pip install --pre trimesh` or if you are on older Python you should lock `trimesh<4`|
+| `trimesh >= 4.0.0` which is now on `main` makes the minimum Python 3.7 and is in pre-release |
+| Testing the prerelease with `pip install --pre trimesh` would be much appreciated! |
+| For projects that support Python < 3.7 you should update your dependency to `trimesh<4` |

 Trimesh is a pure Python 3.7+ library for loading and using [triangular meshes](https://en.wikipedia.org/wiki/Triangle_mesh) with an emphasis on watertight surfaces. The goal of the library is to provide a full featured and well tested Trimesh object which allows for easy manipulation and analysis, in the style of the Polygon object in the [Shapely library](https://github.com/Toblerity/Shapely).

diff --git a/trimesh/base.py b/trimesh/base.py
index 41b25ce2c..c699870a2 100644
--- a/trimesh/base.py
+++ b/trimesh/base.py
@@ -7,8 +7,19 @@

 import copy
 import warnings
+from io import BufferedRandom
+from typing import Any, Dict, List, Optional, Tuple, Union

 import numpy as np
+import scipy.spatial._ckdtree
+from networkx.classes.graph import Graph
+from numpy import float64, int64, ndarray
+from numpy.typing import ArrayLike
+from rtree.index import Index
+from scipy.sparse._coo import coo_matrix
+
+from trimesh.caching import TrackedArray
+from trimesh.path.path import Path3D

 from .
import ( boolean, @@ -40,29 +51,30 @@ from .exchange.export import export_mesh from .parent import Geometry3D from .scene import Scene -from .visual import TextureVisuals, create_visual +from .visual import ColorVisuals, TextureVisuals, create_visual +from numpy.typing import NDArray class Trimesh(Geometry3D): - - def __init__(self, - vertices=None, - faces=None, - face_normals=None, - vertex_normals=None, - face_colors=None, - vertex_colors=None, - face_attributes=None, - vertex_attributes=None, - metadata=None, - process=True, - validate=False, - merge_tex=None, - merge_norm=None, - use_embree=True, - initial_cache=None, - visual=None, - **kwargs): + def __init__( + self, + vertices: Optional[NDArray[float64]] = None, + faces: Optional[NDArray[int64]] = None, + face_normals: Optional[NDArray[float64]] = None, + vertex_normals: Optional[NDArray[float64]] = None, + face_colors: Optional[NDArray[float64]] = None, + vertex_colors: Optional[NDArray[float64]] = None, + face_attributes: Optional[Dict[str, NDArray]] = None, + vertex_attributes: Optional[Dict[str, NDArray]] = None, + metadata: Optional[Dict[str, Any]] = None, + process: bool = True, + validate: bool = False, + merge_tex: Optional[bool] = None, + merge_norm: Optional[bool] = None, + use_embree: bool = True, + initial_cache: Optional[Dict[str, ndarray]] = None, + visual: Optional[Union[ColorVisuals, TextureVisuals]] = None, + ) -> None: """ A Trimesh object contains a triangular 3D mesh. @@ -113,8 +125,8 @@ def __init__(self, # In order to maintain consistency # the cache is cleared when self._data.__hash__() changes self._cache = caching.Cache( - id_function=self._data.__hash__, - force_immutable=True) + id_function=self._data.__hash__, force_immutable=True + ) self._cache.update(initial_cache) # check for None only to avoid warning messages in subclasses @@ -128,9 +140,8 @@ def __init__(self, # hold visual information about the mesh (vertex and face colors) if visual is None: self.visual = create_visual( - face_colors=face_colors, - vertex_colors=vertex_colors, - mesh=self) + face_colors=face_colors, vertex_colors=vertex_colors, mesh=self + ) else: self.visual = visual @@ -169,7 +180,8 @@ def __init__(self, self.metadata.update(metadata) elif metadata is not None: raise ValueError( - 'metadata should be a dict or None, got %s' % str(metadata)) + "metadata should be a dict or None, got %s" % str(metadata) + ) # store per-face and per-vertex attributes which will # be updated when an update_faces call is made @@ -184,17 +196,14 @@ def __init__(self, # process will remove NaN and Inf values and merge vertices # if validate, will remove degenerate and duplicate faces if process or validate: - self.process(validate=validate, - merge_tex=merge_tex, - merge_norm=merge_norm) + self.process(validate=validate, merge_tex=merge_tex, merge_norm=merge_norm) - # save reference to kwargs - self._kwargs = kwargs - - def process(self, - validate=False, - merge_tex=None, - merge_norm=None): + def process( + self, + validate: bool = False, + merge_tex: Optional[bool] = None, + merge_norm: Optional[bool] = None, + ) -> "Trimesh": """ Do processing to make a mesh useful. 
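
A minimal sketch of the newly annotated `process` API in use (the one-triangle
mesh below is illustrative, not taken from the patch):

    import trimesh
    m = trimesh.Trimesh(vertices=[[0, 0, 0], [0, 0, 1], [0, 1, 0]],
                        faces=[[0, 1, 2]], process=False)
    m = m.process(validate=True)  # returns self, so calls can be chained
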
@@ -238,12 +247,10 @@ def process(self, # if faces or vertices have been removed, normals are validated before # being returned so there is no danger of inconsistent dimensions self.remove_infinite_values() - self.merge_vertices(merge_tex=merge_tex, - merge_norm=merge_norm) - self._cache.clear(exclude={'face_normals', - 'vertex_normals'}) + self.merge_vertices(merge_tex=merge_tex, merge_norm=merge_norm) + self._cache.clear(exclude={"face_normals", "vertex_normals"}) - self.metadata['processed'] = True + self.metadata["processed"] = True return self @property @@ -261,8 +268,7 @@ def faces(self): faces : (n, 3) int64 References for `self.vertices` for triangles. """ - return self._data.get( - 'faces', np.empty(shape=(0, 3), dtype=np.int64)) + return self._data.get("faces", np.empty(shape=(0, 3), dtype=np.int64)) @faces.setter def faces(self, values): @@ -275,18 +281,18 @@ def faces(self, values): Indexes of self.vertices """ if values is None or len(values) == 0: - return self._data.data.pop('faces', None) + return self._data.data.pop("faces", None) if not (isinstance(values, np.ndarray) and values.dtype == np.int64): values = np.asanyarray(values, dtype=np.int64) # automatically triangulate quad faces if len(values.shape) == 2 and values.shape[1] != 3: - log.info('triangulating faces') + log.info("triangulating faces") values = geometry.triangulate_quads(values) - self._data['faces'] = values + self._data["faces"] = values @caching.cache_decorator - def faces_sparse(self): + def faces_sparse(self) -> coo_matrix: """ A sparse matrix representation of the faces. @@ -297,9 +303,7 @@ def faces_sparse(self): dtype : bool shape : (len(self.vertices), len(self.faces)) """ - sparse = geometry.index_sparse( - columns=len(self.vertices), - indices=self.faces) + sparse = geometry.index_sparse(columns=len(self.vertices), indices=self.faces) return sparse @property @@ -316,10 +320,10 @@ def face_normals(self): Normal vectors of each face """ # check shape of cached normals - cached = self._cache['face_normals'] + cached = self._cache["face_normals"] # get faces from datastore - if 'faces' in self._data: - faces = self._data.data['faces'] + if "faces" in self._data: + faces = self._data.data["faces"] else: faces = None @@ -336,22 +340,21 @@ def face_normals(self): # will be zero or an arbitrary vector if the inputs had # a cross product below machine epsilon normals, valid = triangles.normals( - triangles=self.triangles, - crosses=self.triangles_cross) + triangles=self.triangles, crosses=self.triangles_cross + ) # if all triangles are valid shape is correct if valid.all(): # put calculated face normals into cache manually - self._cache['face_normals'] = normals + self._cache["face_normals"] = normals return normals # make a padded list of normals for correct shape - padded = np.zeros((len(self.triangles), 3), - dtype=np.float64) + padded = np.zeros((len(self.triangles), 3), dtype=np.float64) padded[valid] = normals # put calculated face normals into cache manually - self._cache['face_normals'] = padded + self._cache["face_normals"] = padded return padded @@ -369,32 +372,32 @@ def face_normals(self, values): if values is None: return # make sure candidate face normals are C-contiguous float - values = np.asanyarray( - values, order='C', dtype=np.float64) + values = np.asanyarray(values, order="C", dtype=np.float64) # face normals need to correspond to faces if len(values) == 0 or values.shape != self.faces.shape: - log.debug('face_normals incorrect shape, ignoring!') + log.debug("face_normals incorrect 
shape, ignoring!") return # check if any values are larger than tol.merge # don't set the normals if they are all zero ptp = values.ptp() if not np.isfinite(ptp): - log.debug('face_normals contain NaN, ignoring!') + log.debug("face_normals contain NaN, ignoring!") return if ptp < tol.merge: - log.debug('face_normals all zero, ignoring!') + log.debug("face_normals all zero, ignoring!") return # make sure the first few normals match the first few triangles check, valid = triangles.normals( - self.vertices.view(np.ndarray)[self.faces[:20]]) + self.vertices.view(np.ndarray)[self.faces[:20]] + ) compare = np.zeros((len(valid), 3)) compare[valid] = check if not np.allclose(compare, values[:20]): log.debug("face_normals didn't match triangles, ignoring!") return # otherwise store face normals - self._cache['face_normals'] = values + self._cache["face_normals"] = values @property def vertices(self): @@ -411,8 +414,7 @@ def vertices(self): vertices : (n, 3) float Points in cartesian space referenced by self.faces """ - return self._data.get('vertices', np.empty( - shape=(0, 3), dtype=np.float64)) + return self._data.get("vertices", np.empty(shape=(0, 3), dtype=np.float64)) @vertices.setter def vertices(self, values): @@ -424,8 +426,7 @@ def vertices(self, values): values : (n, 3) float Points in space """ - self._data['vertices'] = np.asanyarray( - values, order='C', dtype=np.float64) + self._data["vertices"] = np.asanyarray(values, order="C", dtype=np.float64) @caching.cache_decorator def vertex_normals(self): @@ -444,12 +445,13 @@ def vertex_normals(self): Where n == len(self.vertices) """ # make sure we have faces_sparse - assert hasattr(self.faces_sparse, 'dot') + assert hasattr(self.faces_sparse, "dot") vertex_normals = geometry.weighted_vertex_normals( vertex_count=len(self.vertices), faces=self.faces, face_normals=self.face_normals, - face_angles=self.face_angles) + face_angles=self.face_angles, + ) return vertex_normals @vertex_normals.setter @@ -463,17 +465,15 @@ def vertex_normals(self, values): Unit normal vectors for each vertex """ if values is not None: - values = np.asanyarray(values, - order='C', - dtype=np.float64) + values = np.asanyarray(values, order="C", dtype=np.float64) if values.shape == self.vertices.shape: # check to see if they assigned all zeros if values.ptp() < tol.merge: - log.debug('vertex_normals are all zero!') - self._cache['vertex_normals'] = values + log.debug("vertex_normals are all zero!") + self._cache["vertex_normals"] = values @caching.cache_decorator - def vertex_faces(self): + def vertex_faces(self) -> ndarray: """ A representation of the face indices that correspond to each vertex. @@ -487,11 +487,12 @@ def vertex_faces(self): vertex_faces = geometry.vertex_face_indices( vertex_count=len(self.vertices), faces=self.faces, - faces_sparse=self.faces_sparse) + faces_sparse=self.faces_sparse, + ) return vertex_faces @caching.cache_decorator - def bounds(self): + def bounds(self) -> ndarray: """ The axis aligned bounds of the faces of the mesh. @@ -507,11 +508,10 @@ def bounds(self): if len(in_mesh) == 0: return None # get mesh bounds with min and max - return np.array([in_mesh.min(axis=0), - in_mesh.max(axis=0)]) + return np.array([in_mesh.min(axis=0), in_mesh.max(axis=0)]) @caching.cache_decorator - def extents(self): + def extents(self) -> ndarray: """ The length, width, and height of the axis aligned bounding box of the mesh. 
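
A hedged sketch of how the properties in the surrounding hunks relate:
`extents` is the edge lengths of the axis aligned bounding box and `scale`
(next hunk) is the length of its diagonal; the numbers here are made up:

    import numpy as np
    bounds = np.array([[0.0, 0.0, 0.0],
                       [2.0, 1.0, 1.0]])        # [min, max] corners
    extents = bounds[1] - bounds[0]             # -> [2.0, 1.0, 1.0]
    scale = float((extents ** 2).sum() ** 0.5)  # diagonal length, sqrt(6)
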
@@ -530,7 +530,7 @@ def extents(self): return extents @caching.cache_decorator - def scale(self): + def scale(self) -> float: """ A metric for the overall scale of the mesh, the length of the diagonal of the axis aligned bounding box of the mesh. @@ -544,11 +544,11 @@ def scale(self): if self.extents is None: return 1.0 # make sure we are returning python floats - scale = float((self.extents ** 2).sum() ** .5) + scale = float((self.extents**2).sum() ** 0.5) return scale @caching.cache_decorator - def centroid(self): + def centroid(self) -> ndarray: """ The point in space which is the average of the triangle centroids weighted by the area of each triangle. @@ -565,9 +565,9 @@ def centroid(self): # use the centroid of each triangle weighted by # the area of the triangle to find the overall centroid try: - centroid = np.average(self.triangles_center, - weights=self.area_faces, - axis=0) + centroid = np.average( + self.triangles_center, weights=self.area_faces, axis=0 + ) except BaseException: # if all triangles are zero-area weights will not work centroid = self.triangles_center.mean(axis=0) @@ -597,7 +597,7 @@ def center_mass(self, value): """ value = np.array(value, dtype=np.float64) if value.shape != (3,): - raise ValueError('shape must be (3,) float!') + raise ValueError("shape must be (3,) float!") self._data["center_mass"] = value self._cache.delete("mass_properties") @@ -629,7 +629,7 @@ def density(self, value): self._cache.delete("mass_properties") @property - def volume(self): + def volume(self) -> float64: """ Volume of the current mesh calculated using a surface integral. If the current mesh isn't watertight this is @@ -640,11 +640,11 @@ def volume(self): volume : float Volume of the current mesh """ - volume = self.mass_properties['volume'] + volume = self.mass_properties["volume"] return volume @property - def mass(self): + def mass(self) -> float64: """ Mass of the current mesh, based on specified density and volume. If the current mesh isn't watertight this is garbage. @@ -654,11 +654,11 @@ def mass(self): mass : float Mass of the current mesh """ - mass = self.mass_properties['mass'] + mass = self.mass_properties["mass"] return mass @property - def moment_inertia(self): + def moment_inertia(self) -> ndarray: """ Return the moment of inertia matrix of the current mesh. If mesh isn't watertight this is garbage. The returned @@ -673,10 +673,10 @@ def moment_inertia(self): Moment of inertia of the current mesh at the center of mass and aligned with the cartesian axis. 
""" - inertia = self.mass_properties['inertia'] + inertia = self.mass_properties["inertia"] return inertia - def moment_inertia_frame(self, transform): + def moment_inertia_frame(self, transform: ndarray) -> ndarray: """ Get the moment of inertia of this mesh with respect to an arbitrary frame, versus with respect to the center @@ -704,17 +704,18 @@ def moment_inertia_frame(self, transform): # so we want to offset our requested translation by that # center of mass offset = np.eye(4) - offset[:3, 3] = -props['center_mass'] + offset[:3, 3] = -props["center_mass"] # apply the parallel axis theorum to get the new inertia return inertia.transform_inertia( - inertia_tensor=props['inertia'], + inertia_tensor=props["inertia"], transform=np.dot(offset, transform), - mass=props['mass'], - parallel_axis=True) + mass=props["mass"], + parallel_axis=True, + ) @caching.cache_decorator - def principal_inertia_components(self): + def principal_inertia_components(self) -> ndarray: """ Return the principal components of inertia @@ -728,12 +729,12 @@ def principal_inertia_components(self): # both components and vectors from inertia matrix components, vectors = inertia.principal_axis(self.moment_inertia) # store vectors in cache for later - self._cache['principal_inertia_vectors'] = vectors + self._cache["principal_inertia_vectors"] = vectors return components @property - def principal_inertia_vectors(self): + def principal_inertia_vectors(self) -> ndarray: """ Return the principal axis of inertia as unit vectors. The order corresponds to `mesh.principal_inertia_components`. @@ -745,10 +746,10 @@ def principal_inertia_vectors(self): principal axis of inertia directions """ _ = self.principal_inertia_components - return self._cache['principal_inertia_vectors'] + return self._cache["principal_inertia_vectors"] @caching.cache_decorator - def principal_inertia_transform(self): + def principal_inertia_transform(self) -> ndarray: """ A transform which moves the current mesh so the principal inertia vectors are on the X,Y, and Z axis, and the centroid is @@ -766,14 +767,14 @@ def principal_inertia_transform(self): transform = np.eye(4) transform[:3, :3] = vectors transform = transformations.transform_around( - matrix=transform, - point=self.centroid) + matrix=transform, point=self.centroid + ) transform[:3, 3] -= self.centroid return transform @caching.cache_decorator - def symmetry(self): + def symmetry(self) -> Optional[str]: """ Check whether a mesh has rotational symmetry around an axis (radial) or point (spherical). @@ -784,12 +785,12 @@ def symmetry(self): What kind of symmetry does the mesh have. """ symmetry, axis, section = inertia.radial_symmetry(self) - self._cache['symmetry_axis'] = axis - self._cache['symmetry_section'] = section + self._cache["symmetry_axis"] = axis + self._cache["symmetry_section"] = section return symmetry @property - def symmetry_axis(self): + def symmetry_axis(self) -> ndarray: """ If a mesh has rotational symmetry, return the axis. @@ -799,10 +800,10 @@ def symmetry_axis(self): Axis around which a 2D profile was revolved to create this mesh. """ if self.symmetry is not None: - return self._cache['symmetry_axis'] + return self._cache["symmetry_axis"] @property - def symmetry_section(self): + def symmetry_section(self) -> ndarray: """ If a mesh has rotational symmetry return the two vectors which make up a section coordinate frame. 
@@ -813,10 +814,10 @@ def symmetry_section(self): Vectors to take a section along """ if self.symmetry is not None: - return self._cache['symmetry_section'] + return self._cache["symmetry_section"] @caching.cache_decorator - def triangles(self): + def triangles(self) -> ndarray: """ Actual triangles of the mesh (points, not indexes) @@ -833,7 +834,7 @@ def triangles(self): return triangles @caching.cache_decorator - def triangles_tree(self): + def triangles_tree(self) -> Index: """ An R-tree containing each face of the mesh. @@ -846,7 +847,7 @@ def triangles_tree(self): return tree @caching.cache_decorator - def triangles_center(self): + def triangles_center(self) -> ndarray: """ The center of each triangle (barycentric [1/3, 1/3, 1/3]) @@ -859,7 +860,7 @@ def triangles_center(self): return triangles_center @caching.cache_decorator - def triangles_cross(self): + def triangles_cross(self) -> ndarray: """ The cross product of two edges of each triangle. @@ -872,7 +873,7 @@ def triangles_cross(self): return crosses @caching.cache_decorator - def edges(self): + def edges(self) -> ndarray: """ Edges of the mesh (derived from faces). @@ -881,9 +882,10 @@ def edges(self): edges : (n, 2) int List of vertex indices making up edges """ - edges, index = geometry.faces_to_edges(self.faces.view(np.ndarray), - return_index=True) - self._cache['edges_face'] = index + edges, index = geometry.faces_to_edges( + self.faces.view(np.ndarray), return_index=True + ) + self._cache["edges_face"] = index return edges @caching.cache_decorator @@ -897,10 +899,10 @@ def edges_face(self): Index of self.faces """ _ = self.edges - return self._cache['edges_face'] + return self._cache["edges_face"] @caching.cache_decorator - def edges_unique(self): + def edges_unique(self) -> ndarray: """ The unique edges of the mesh. @@ -913,12 +915,12 @@ def edges_unique(self): edges_unique = self.edges_sorted[unique] # edges_unique will be added automatically by the decorator # additional terms generated need to be added to the cache manually - self._cache['edges_unique_idx'] = unique - self._cache['edges_unique_inverse'] = inverse + self._cache["edges_unique_idx"] = unique + self._cache["edges_unique_inverse"] = inverse return edges_unique @caching.cache_decorator - def edges_unique_length(self): + def edges_unique_length(self) -> TrackedArray: """ How long is each unique edge. @@ -946,10 +948,10 @@ def edges_unique_inverse(self): Indexes of self.edges_unique """ _ = self.edges_unique - return self._cache['edges_unique_inverse'] + return self._cache["edges_unique_inverse"] @caching.cache_decorator - def edges_sorted(self): + def edges_sorted(self) -> ndarray: """ Edges sorted along axis 1 @@ -962,7 +964,7 @@ def edges_sorted(self): return edges_sorted @caching.cache_decorator - def edges_sorted_tree(self): + def edges_sorted_tree(self) -> scipy.spatial._ckdtree.cKDTree: """ A KDTree for mapping edges back to edge index. @@ -973,10 +975,11 @@ def edges_sorted_tree(self): their index in mesh.edges_sorted """ from scipy.spatial import cKDTree + return cKDTree(self.edges_sorted) @caching.cache_decorator - def edges_sparse(self): + def edges_sparse(self) -> coo_matrix: """ Edges in sparse bool COO graph format where connected vertices are True. 
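
The COO layout named in the `edges_sparse` docstring can be reproduced with
plain scipy; a minimal sketch using a hypothetical three-edge graph:

    import numpy as np
    from scipy.sparse import coo_matrix
    edges = np.array([[0, 1], [1, 2], [2, 0]])  # (n, 2) vertex index pairs
    count = int(edges.max()) + 1
    sparse = coo_matrix(
        (np.ones(len(edges), dtype=bool), (edges[:, 0], edges[:, 1])),
        shape=(count, count))
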
@@ -986,12 +989,11 @@ def edges_sparse(self): sparse: (len(self.vertices), len(self.vertices)) bool Sparse graph in COO format """ - sparse = graph.edges_to_coo(self.edges, - count=len(self.vertices)) + sparse = graph.edges_to_coo(self.edges, count=len(self.vertices)) return sparse @caching.cache_decorator - def body_count(self): + def body_count(self) -> int: """ How many connected groups of vertices exist in this mesh. Note that this number may differ from result in mesh.split, @@ -1004,14 +1006,13 @@ def body_count(self): """ # labels are (len(vertices), int) OB count, labels = graph.csgraph.connected_components( - self.edges_sparse, - directed=False, - return_labels=True) - self._cache['vertices_component_label'] = labels + self.edges_sparse, directed=False, return_labels=True + ) + self._cache["vertices_component_label"] = labels return count @caching.cache_decorator - def faces_unique_edges(self): + def faces_unique_edges(self) -> ndarray: """ For each face return which indexes in mesh.unique_edges constructs that face. @@ -1041,11 +1042,11 @@ def faces_unique_edges(self): # make sure we have populated unique edges _ = self.edges_unique # we are relying on the fact that edges are stacked in triplets - result = self._cache['edges_unique_inverse'].reshape((-1, 3)) + result = self._cache["edges_unique_inverse"].reshape((-1, 3)) return result @caching.cache_decorator - def euler_number(self): + def euler_number(self) -> int: """ Return the Euler characteristic (a topological invariant) for the mesh In order to guarantee correctness, this should be called after @@ -1056,13 +1057,13 @@ def euler_number(self): euler_number : int Topological invariant """ - euler = int(self.referenced_vertices.sum() - - len(self.edges_unique) + - len(self.faces)) + euler = int( + self.referenced_vertices.sum() - len(self.edges_unique) + len(self.faces) + ) return euler @caching.cache_decorator - def referenced_vertices(self): + def referenced_vertices(self) -> ndarray: """ Which vertices in the current mesh are referenced by a face. @@ -1085,15 +1086,15 @@ def units(self): units : str Unit system mesh is in, or None if not defined """ - if 'units' in self.metadata: - return self.metadata['units'] + if "units" in self.metadata: + return self.metadata["units"] else: return None @units.setter def units(self, value): value = str(value).lower() - self.metadata['units'] = value + self.metadata["units"] = value def convert_units(self, desired, guess=False): """ @@ -1111,12 +1112,13 @@ def convert_units(self, desired, guess=False): return self def merge_vertices( - self, - merge_tex=None, - merge_norm=None, - digits_vertex=None, - digits_norm=None, - digits_uv=None): + self, + merge_tex: Optional[bool] = None, + merge_norm: Optional[bool] = None, + digits_vertex: None = None, + digits_norm: None = None, + digits_uv: None = None, + ) -> None: """ Removes duplicate vertices grouped by position and optionally texture coordinate and normal. @@ -1144,9 +1146,14 @@ def merge_vertices( merge_norm=merge_norm, digits_vertex=digits_vertex, digits_norm=digits_norm, - digits_uv=digits_uv) + digits_uv=digits_uv, + ) - def update_vertices(self, mask, inverse=None): + def update_vertices( + self, + mask: NDArray, + inverse: Optional[NDArray] = None, + ) -> None: """ Update vertices with a mask. 
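As a hedged aside tying the topology properties above together: for a watertight genus-0 mesh the Euler characteristic V - E + F should be 2, with a single vertex-connected body. A sketch:

import trimesh

m = trimesh.creation.icosphere()
assert m.euler_number == 2  # V - E + F for a topological sphere
assert m.body_count == 1    # one connected group of vertices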
@@ -1165,17 +1172,20 @@ def update_vertices(self, mask, inverse=None):

         # make sure mask is a numpy array
         mask = np.asanyarray(mask)

-        if ((mask.dtype.name == 'bool' and mask.all()) or
-                len(mask) == 0 or self.is_empty):
+        if (
+            (mask.dtype.name == "bool" and mask.all())
+            or len(mask) == 0
+            or self.is_empty
+        ):
             # mask doesn't remove any vertices so exit early
             return

         # create the inverse mask if not passed
         if inverse is None:
             inverse = np.zeros(len(self.vertices), dtype=np.int64)
-            if mask.dtype.kind == 'b':
+            if mask.dtype.kind == "b":
                 inverse[mask] = np.arange(mask.sum())
-            elif mask.dtype.kind == 'i':
+            elif mask.dtype.kind == "i":
                 inverse[mask] = np.arange(len(mask))
             else:
                 inverse = None
@@ -1187,7 +1197,7 @@ def update_vertices(self, mask, inverse=None):
         # update the visual object with our mask
         self.visual.update_vertices(mask)
         # get the normals from cache before dumping
-        cached_normals = self._cache['vertex_normals']
+        cached_normals = self._cache["vertex_normals"]

         # apply to face_attributes
         count = len(self.vertices)
@@ -1211,7 +1221,7 @@ def update_vertices(self, mask, inverse=None):
         except BaseException:
             pass

-    def update_faces(self, mask):
+    def update_faces(self, mask: NDArray) -> None:
         """
         In many cases, we will want to remove specific faces.
         However, there is additional bookkeeping to do this cleanly.
@@ -1228,18 +1238,18 @@ def update_faces(self, mask):
             return

         mask = np.asanyarray(mask)
-        if mask.dtype.name == 'bool' and mask.all():
+        if mask.dtype.name == "bool" and mask.all():
             # mask removes no faces so exit early
             return

         # try to save face normals before dumping cache
-        cached_normals = self._cache['face_normals']
+        cached_normals = self._cache["face_normals"]

-        faces = self._data['faces']
+        faces = self._data["faces"]
         # if Trimesh has been subclassed and faces have been moved
         # from data to cache, get faces from cache.
         if not util.is_shape(faces, (-1, 3)):
-            faces = self._cache['faces']
+            faces = self._cache["faces"]

         # apply to face_attributes
         count = len(self.faces)
@@ -1249,7 +1259,7 @@ def update_faces(self, mask):
             if len(value) != count:
                 raise TypeError()
         except TypeError:
             continue
             # apply the mask to the attribute
             self.face_attributes[key] = value[mask]
@@ -1264,7 +1273,7 @@ def update_faces(self, mask):
         if util.is_shape(cached_normals, (-1, 3)):
             self.face_normals = cached_normals[mask]

-    def remove_infinite_values(self):
+    def remove_infinite_values(self) -> None:
         """
         Ensure that every vertex and face consists of finite numbers.
         This will remove vertices or faces containing np.nan and np.inf
@@ -1294,16 +1303,18 @@ def unique_faces(self):
         mask[grouping.unique_rows(np.sort(self.faces, axis=1))[0]] = True
         return mask

-    def remove_duplicate_faces(self):
+    def remove_duplicate_faces(self) -> None:
         """
         DEPRECATED MARCH 2024 REPLACE WITH:
         `mesh.update_faces(mesh.unique_faces())`
         """
         warnings.warn(
-            '`remove_duplicate_faces` is deprecated ' +
-            'and will be removed in March 2024: ' +
-            'replace with `mesh.update_faces(mesh.unique_faces())`',
-            category=DeprecationWarning, stacklevel=2)
+            "`remove_duplicate_faces` is deprecated "
+            + "and will be removed in March 2024: "
+            + "replace with `mesh.update_faces(mesh.unique_faces())`",
+            category=DeprecationWarning,
+            stacklevel=2,
+        )
         self.update_faces(self.unique_faces())

     def rezero(self):
@@ -1335,7 +1346,7 @@ def split(self, **kwargs):
         return graph.split(self, **kwargs)

     @caching.cache_decorator
-    def face_adjacency(self):
+    def face_adjacency(self) -> NDArray[int64]:
         """
         Find faces that share an edge i.e.
'adjacent' faces.
@@ -1370,13 +1381,12 @@ def face_adjacency(self):

         In [6]: groups = nx.connected_components(graph)
         """
-        adjacency, edges = graph.face_adjacency(
-            mesh=self, return_edges=True)
-        self._cache['face_adjacency_edges'] = edges
+        adjacency, edges = graph.face_adjacency(mesh=self, return_edges=True)
+        self._cache["face_adjacency_edges"] = edges
         return adjacency

     @caching.cache_decorator
-    def face_neighborhood(self):
+    def face_neighborhood(self) -> NDArray[int64]:
         """
         Find faces that share a vertex i.e. 'neighbors' faces.

@@ -1388,7 +1398,7 @@ def face_neighborhood(self):
         return graph.face_neighborhood(self)

     @caching.cache_decorator
-    def face_adjacency_edges(self):
+    def face_adjacency_edges(self) -> NDArray[int64]:
         """
         Returns the edges that are shared by the adjacent faces.

@@ -1399,10 +1409,10 @@ def face_adjacency_edges(self):
         """
         # this value is calculated as a byproduct of the face adjacency
         _ = self.face_adjacency
-        return self._cache['face_adjacency_edges']
+        return self._cache["face_adjacency_edges"]

     @caching.cache_decorator
-    def face_adjacency_edges_tree(self):
+    def face_adjacency_edges_tree(self) -> scipy.spatial._ckdtree.cKDTree:
         """
         A KDTree for mapping edges back to face adjacency index.

@@ -1413,10 +1423,11 @@ def face_adjacency_edges_tree(self):
           their index in mesh.face_adjacency
         """
         from scipy.spatial import cKDTree
+
         return cKDTree(self.face_adjacency_edges)

     @caching.cache_decorator
-    def face_adjacency_angles(self):
+    def face_adjacency_angles(self) -> NDArray[float64]:
         """
         Return the angle between adjacent faces

@@ -1433,7 +1444,7 @@ def face_adjacency_angles(self):
         return angles

     @caching.cache_decorator
-    def face_adjacency_projections(self):
+    def face_adjacency_projections(self) -> NDArray[float64]:
         """
         The projection of the non-shared vertex of a triangle onto
         its adjacent face

@@ -1448,7 +1459,7 @@ def face_adjacency_projections(self):
         return projections

     @caching.cache_decorator
-    def face_adjacency_convex(self):
+    def face_adjacency_convex(self) -> NDArray[bool]:
         """
         Return faces which are adjacent and locally convex.

@@ -1461,11 +1472,10 @@ def face_adjacency_convex(self):
         are_convex : (len(self.face_adjacency), ) bool
           Face pairs that are locally convex
         """
-        are_convex = self.face_adjacency_projections < tol.merge
-        return are_convex
+        return self.face_adjacency_projections < tol.merge

     @caching.cache_decorator
-    def face_adjacency_unshared(self):
+    def face_adjacency_unshared(self) -> NDArray[int64]:
         """
         Return the vertex index of the two vertices not in the shared
         edge between two adjacent faces

@@ -1475,11 +1485,10 @@ def face_adjacency_unshared(self):
         vid_unshared : (len(mesh.face_adjacency), 2) int
           Indexes of mesh.vertices
         """
-        vid_unshared = graph.face_adjacency_unshared(self)
-        return vid_unshared
+        return graph.face_adjacency_unshared(self)

     @caching.cache_decorator
-    def face_adjacency_radius(self):
+    def face_adjacency_radius(self) -> NDArray[float64]:
         """
         The approximate radius of a cylinder that fits inside adjacent faces.
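A short illustrative sketch (not part of the patch): the adjacency angles align row-for-row with `face_adjacency` and `face_adjacency_edges`, so selecting "sharp" edges is a single mask; the 30 degree threshold is an arbitrary example value:

import numpy as np
import trimesh

m = trimesh.creation.box()
# edges whose adjacent faces meet at more than 30 degrees
sharp = m.face_adjacency_edges[m.face_adjacency_angles > np.radians(30)]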
@@ -1488,12 +1497,11 @@ def face_adjacency_radius(self): radii : (len(self.face_adjacency), ) float Approximate radius formed by triangle pair """ - radii, span = graph.face_adjacency_radius(mesh=self) - self._cache['face_adjacency_span'] = span + radii, self._cache["face_adjacency_span"] = graph.face_adjacency_radius(mesh=self) return radii @caching.cache_decorator - def face_adjacency_span(self): + def face_adjacency_span(self) -> NDArray[float64]: """ The approximate perpendicular projection of the non-shared vertices in a pair of adjacent faces onto the shared edge of @@ -1505,10 +1513,10 @@ def face_adjacency_span(self): Approximate span between the non-shared vertices """ _ = self.face_adjacency_radius - return self._cache['face_adjacency_span'] + return self._cache["face_adjacency_span"] @caching.cache_decorator - def integral_mean_curvature(self): + def integral_mean_curvature(self) -> float64: """ The integral mean curvature, or the surface integral of the mean curvature. @@ -1517,13 +1525,13 @@ def integral_mean_curvature(self): area : float Integral mean curvature of mesh """ - edges_length = np.linalg.norm(np.subtract( - *self.vertices[self.face_adjacency_edges.T]), axis=1) - imc = (self.face_adjacency_angles * edges_length).sum() * 0.5 - return imc + edges_length = np.linalg.norm( + np.subtract(*self.vertices[self.face_adjacency_edges.T]), axis=1 + ) + return (self.face_adjacency_angles * edges_length).sum() * 0.5 @caching.cache_decorator - def vertex_adjacency_graph(self): + def vertex_adjacency_graph(self) -> Graph: """ Returns a networkx graph representing the vertices and their connections in the mesh. @@ -1549,7 +1557,7 @@ def vertex_adjacency_graph(self): return adjacency_g @caching.cache_decorator - def vertex_neighbors(self): + def vertex_neighbors(self) -> List[List[int64]]: """ The vertex neighbors of each vertex of the mesh, determined from the cached vertex_adjacency_graph, if already existent. @@ -1569,12 +1577,10 @@ def vertex_neighbors(self): >>> mesh.vertex_neighbors[0] [1, 2, 3, 4] """ - return graph.neighbors( - edges=self.edges_unique, - max_index=len(self.vertices)) + return graph.neighbors(edges=self.edges_unique, max_index=len(self.vertices)) @caching.cache_decorator - def is_winding_consistent(self): + def is_winding_consistent(self) -> bool: """ Does the mesh have consistent winding or not. A mesh with consistent winding has each shared edge @@ -1589,10 +1595,10 @@ def is_winding_consistent(self): return False # consistent winding check is populated into the cache by is_watertight _ = self.is_watertight - return self._cache['is_winding_consistent'] + return self._cache["is_winding_consistent"] @caching.cache_decorator - def is_watertight(self): + def is_watertight(self) -> bool: """ Check if a mesh is watertight by making sure every edge is included in two faces. @@ -1605,12 +1611,13 @@ def is_watertight(self): if self.is_empty: return False watertight, winding = graph.is_watertight( - edges=self.edges, edges_sorted=self.edges_sorted) - self._cache['is_winding_consistent'] = winding + edges=self.edges, edges_sorted=self.edges_sorted + ) + self._cache["is_winding_consistent"] = winding return watertight @caching.cache_decorator - def is_volume(self): + def is_volume(self) -> bool: """ Check if a mesh has all the properties required to represent a valid volume, rather than just a surface. 
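A minimal sketch (not part of the patch) of the checks above, on a primitive known to be a valid volume:

import trimesh

m = trimesh.creation.box()
assert m.is_watertight
assert m.is_winding_consistent
assert m.is_volume and m.volume > 0.0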
@@ -1623,14 +1630,16 @@ def is_volume(self):
         valid : bool
           Does the mesh represent a volume
         """
-        valid = bool(self.is_watertight and
-                     self.is_winding_consistent and
-                     np.isfinite(self.center_mass).all() and
-                     self.volume > 0.0)
+        valid = bool(
+            self.is_watertight
+            and self.is_winding_consistent
+            and np.isfinite(self.center_mass).all()
+            and self.volume > 0.0
+        )
         return valid

     @property
-    def is_empty(self):
+    def is_empty(self) -> bool:
         """
         Does the current mesh have data defined.

@@ -1642,7 +1651,7 @@ def is_empty(self):
         return self._data.is_empty()

     @caching.cache_decorator
-    def is_convex(self):
+    def is_convex(self) -> bool:
         """
         Check if a mesh is convex or not.

@@ -1658,7 +1667,7 @@ def is_convex(self):
         return is_convex

     @caching.cache_decorator
-    def kdtree(self):
+    def kdtree(self) -> scipy.spatial._ckdtree.cKDTree:
         """
         Return a scipy.spatial.cKDTree of the vertices of the mesh.
         Not cached as this led to observed memory issues and segfaults.

@@ -1670,22 +1679,25 @@ def kdtree(self):
         """

         from scipy.spatial import cKDTree
+
         tree = cKDTree(self.vertices.view(np.ndarray))
         return tree

-    def remove_degenerate_faces(self, height=tol.merge):
+    def remove_degenerate_faces(self, height: float = tol.merge) -> None:
         """
         DEPRECATED MARCH 2024 REPLACE WITH:
         `self.update_faces(self.nondegenerate_faces(height=height))`
         """
         warnings.warn(
-            '`remove_degenerate_faces` is deprecated ' +
-            'and will be removed in March 2024: replace with ' +
-            '`self.update_faces(self.nondegenerate_faces(height=height))`',
-            category=DeprecationWarning, stacklevel=2)
+            "`remove_degenerate_faces` is deprecated "
+            + "and will be removed in March 2024: replace with "
+            + "`self.update_faces(self.nondegenerate_faces(height=height))`",
+            category=DeprecationWarning,
+            stacklevel=2,
+        )
         self.update_faces(self.nondegenerate_faces(height=height))

-    def nondegenerate_faces(self, height=tol.merge):
+    def nondegenerate_faces(self, height=tol.merge) -> NDArray[bool]:
         """
         Remove degenerate faces (faces without 3 unique vertex indices)
         from the current mesh.

@@ -1707,12 +1719,11 @@ def nondegenerate_faces(self, height=tol.merge):
           Mask used to remove faces
         """
         return triangles.nondegenerate(
-            self.triangles,
-            areas=self.area_faces,
-            height=height)
+            self.triangles, areas=self.area_faces, height=height
+        )

     @caching.cache_decorator
-    def facets(self):
+    def facets(self) -> List[NDArray[int64]]:
         """
         Return a list of face indices for coplanar adjacent faces.

@@ -1725,7 +1736,7 @@ def facets(self):
         return facets

     @caching.cache_decorator
-    def facets_area(self):
+    def facets_area(self) -> NDArray[float64]:
         """
         Return an array containing the area of each facet.
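A usage sketch (not part of the patch) for the vertex tree described above; `query` is the standard `scipy` cKDTree call:

import trimesh

m = trimesh.creation.icosphere()
# distance to, and index of, the vertex nearest an arbitrary point
distance, vertex_id = m.kdtree.query([0.0, 0.0, 1.0])
closest = m.vertices[vertex_id]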
@@ -1740,13 +1751,11 @@ def facets_area(self): # use native python sum in tight loop as opposed to array.sum() # as in this case the lower function call overhead of # native sum provides roughly a 50% speedup - areas = np.array([sum(area_faces[i]) - for i in self.facets], - dtype=np.float64) + areas = np.array([sum(area_faces[i]) for i in self.facets], dtype=np.float64) return areas @caching.cache_decorator - def facets_normal(self): + def facets_normal(self) -> NDArray[float64]: """ Return the normal of each facet @@ -1761,19 +1770,18 @@ def facets_normal(self): area_faces = self.area_faces # the face index of the largest face in each facet - index = np.array([i[area_faces[i].argmax()] - for i in self.facets]) + index = np.array([i[area_faces[i].argmax()] for i in self.facets]) # (n, 3) float, unit normal vectors of facet plane normals = self.face_normals[index] # (n, 3) float, points on facet plane origins = self.vertices[self.faces[:, 0][index]] # save origins in cache - self._cache['facets_origin'] = origins + self._cache["facets_origin"] = origins return normals @caching.cache_decorator - def facets_origin(self): + def facets_origin(self) -> NDArray[float64]: """ Return a point on the facet plane. @@ -1783,10 +1791,10 @@ def facets_origin(self): A point on each facet plane """ _ = self.facets_normal - return self._cache['facets_origin'] + return self._cache["facets_origin"] @caching.cache_decorator - def facets_boundary(self): + def facets_boundary(self) -> List[NDArray[int64]]: """ Return the edges which represent the boundary of each facet @@ -1799,12 +1807,13 @@ def facets_boundary(self): edges = self.edges_sorted.reshape((-1, 6)) # get the edges for each facet edges_facet = [edges[i].reshape((-1, 2)) for i in self.facets] - edges_boundary = [i[grouping.group_rows(i, require_count=1)] - for i in edges_facet] + edges_boundary = [ + i[grouping.group_rows(i, require_count=1)] for i in edges_facet + ] return edges_boundary @caching.cache_decorator - def facets_on_hull(self): + def facets_on_hull(self) -> ndarray: """ Find which facets of the mesh are on the convex hull. @@ -1856,7 +1865,7 @@ def fix_normals(self, multibody=None): multibody = self.body_count > 1 repair.fix_normals(self, multibody=multibody) - def fill_holes(self): + def fill_holes(self) -> bool: """ Fill single triangle and single quad holes in the current mesh. @@ -1895,17 +1904,12 @@ def register(self, other, **kwargs): cost : float Average square distance per point """ - mesh_to_other, cost = registration.mesh_other( - mesh=self, - other=other, - **kwargs) + mesh_to_other, cost = registration.mesh_other(mesh=self, other=other, **kwargs) return mesh_to_other, cost - def compute_stable_poses(self, - center_mass=None, - sigma=0.0, - n_samples=1, - threshold=0.0): + def compute_stable_poses( + self, center_mass=None, sigma=0.0, n_samples=1, threshold=0.0 + ): """ Computes stable orientations of a mesh and their quasi-static probabilities. @@ -1947,13 +1951,15 @@ def compute_stable_poses(self, probs : (n, ) float A probability ranging from 0.0 to 1.0 for each pose """ - return poses.compute_stable_poses(mesh=self, - center_mass=center_mass, - sigma=sigma, - n_samples=n_samples, - threshold=threshold) + return poses.compute_stable_poses( + mesh=self, + center_mass=center_mass, + sigma=sigma, + n_samples=n_samples, + threshold=threshold, + ) - def subdivide(self, face_index=None): + def subdivide(self, face_index: None = None) -> "Trimesh": """ Subdivide a mesh, with each subdivided face replaced with four smaller faces. 
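A hedged sketch (not part of the patch): with no `face_index` every face is subdivided, so one pass should quadruple the face count:

import trimesh

m = trimesh.creation.box()
s = m.subdivide()
assert len(s.faces) == 4 * len(m.faces)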
@@ -1972,15 +1978,17 @@ def subdivide(self, face_index=None): # subdivide vertex attributes vertex_attributes = {} visual = None - if (hasattr(self.visual, 'uv') and - np.shape(self.visual.uv) == (len(self.vertices), 2)): - + if hasattr(self.visual, "uv") and np.shape(self.visual.uv) == ( + len(self.vertices), + 2, + ): # uv coords divided along with vertices vertices, faces, attr = remesh.subdivide( vertices=np.hstack((self.vertices, self.visual.uv)), faces=self.faces, face_index=face_index, - vertex_attributes=vertex_attributes) + vertex_attributes=vertex_attributes, + ) # get a copy of the current visuals visual = self.visual.copy() @@ -1994,7 +2002,8 @@ def subdivide(self, face_index=None): vertices=self.vertices, faces=self.faces, face_index=face_index, - vertex_attributes=vertex_attributes) + vertex_attributes=vertex_attributes, + ) # create a new mesh result = Trimesh( @@ -2002,7 +2011,8 @@ def subdivide(self, face_index=None): faces=faces, visual=visual, vertex_attributes=attr, - process=False) + process=False, + ) return result def subdivide_to_size(self, max_edge, max_iter=10, return_index=False): @@ -2023,16 +2033,18 @@ def subdivide_to_size(self, max_edge, max_iter=10, return_index=False): """ # subdivide vertex attributes visual = None - if (hasattr(self.visual, 'uv') and - np.shape(self.visual.uv) == (len(self.vertices), 2)): - + if hasattr(self.visual, "uv") and np.shape(self.visual.uv) == ( + len(self.vertices), + 2, + ): # uv coords divided along with vertices vertices_faces = remesh.subdivide_to_size( vertices=np.hstack((self.vertices, self.visual.uv)), faces=self.faces, max_edge=max_edge, max_iter=max_iter, - return_index=return_index) + return_index=return_index, + ) # unpack result if return_index: vertices, faces, final_index = vertices_faces @@ -2052,7 +2064,8 @@ def subdivide_to_size(self, max_edge, max_iter=10, return_index=False): faces=self.faces, max_edge=max_edge, max_iter=max_iter, - return_index=return_index) + return_index=return_index, + ) # unpack result if return_index: vertices, faces, final_index = vertices_faces @@ -2060,11 +2073,7 @@ def subdivide_to_size(self, max_edge, max_iter=10, return_index=False): vertices, faces = vertices_faces # create a new mesh - result = Trimesh( - vertices=vertices, - faces=faces, - visual=visual, - process=False) + result = Trimesh(vertices=vertices, faces=faces, visual=visual, process=False) if return_index: return result, final_index @@ -2088,14 +2097,10 @@ def subdivide_loop(self, iterations=None): """ # perform subdivision for one mesh new_vertices, new_faces = remesh.subdivide_loop( - vertices=self.vertices, - faces=self.faces, - iterations=iterations) + vertices=self.vertices, faces=self.faces, iterations=iterations + ) # create new mesh - result = Trimesh( - vertices=new_vertices, - faces=new_faces, - process=False) + result = Trimesh(vertices=new_vertices, faces=new_faces, process=False) return result @log_time @@ -2123,13 +2128,12 @@ def smoothed(self, **kwargs): # smooth should be recomputed if visuals change self.visual._verify_hash() - cached = self.visual._cache['smoothed'] + cached = self.visual._cache["smoothed"] if cached is not None: return cached # run smoothing - smoothed = graph.smoothed( - self, **kwargs) - self.visual._cache['smoothed'] = smoothed + smoothed = graph.smoothed(self, **kwargs) + self.visual._cache["smoothed"] = smoothed return smoothed @property @@ -2142,7 +2146,7 @@ def visual(self): visual : ColorVisuals or TextureVisuals Contains visual information about the mesh """ - if 
hasattr(self, '_visual'): + if hasattr(self, "_visual"): return self._visual return None @@ -2160,10 +2164,9 @@ def visual(self, value): value.mesh = self self._visual = value - def section(self, - plane_normal, - plane_origin, - **kwargs): + def section( + self, plane_normal: List[int], plane_origin: List[int], **kwargs + ) -> Path3D: """ Returns a 3D cross section of the current mesh and a plane defined by origin and normal. @@ -2189,7 +2192,8 @@ def section(self, plane_normal=plane_normal, plane_origin=plane_origin, return_faces=True, - **kwargs) + **kwargs, + ) # if the section didn't hit the mesh return None if len(lines) == 0: @@ -2199,14 +2203,11 @@ def section(self, path = load_path(lines) # add the face index info into metadata - path.metadata['face_index'] = face_index + path.metadata["face_index"] = face_index return path - def section_multiplane(self, - plane_origin, - plane_normal, - heights): + def section_multiplane(self, plane_origin, plane_normal, heights): """ Return multiple parallel cross sections of the current mesh in 2D. @@ -2230,32 +2231,31 @@ def section_multiplane(self, """ # turn line segments into Path2D/Path3D objects from .exchange.load import load_path + # do a multiplane intersection lines, transforms, faces = intersections.mesh_multiplane( mesh=self, plane_normal=plane_normal, plane_origin=plane_origin, - heights=heights) + heights=heights, + ) # turn the line segments into Path2D objects paths = [None] * len(lines) - for i, f, segments, T in zip(range(len(lines)), - faces, - lines, - transforms): + for i, f, segments, T in zip(range(len(lines)), faces, lines, transforms): if len(segments) > 0: - paths[i] = load_path( - segments, - metadata={'to_3D': T, 'face_index': f}) + paths[i] = load_path(segments, metadata={"to_3D": T, "face_index": f}) return paths - def slice_plane(self, - plane_origin, - plane_normal, - cap=False, - face_index=None, - cached_dots=None, - **kwargs): + def slice_plane( + self, + plane_origin, + plane_normal, + cap=False, + face_index=None, + cached_dots=None, + **kwargs, + ): """ Slice the mesh with a plane, returning a new mesh that is the portion of the original mesh to the positive normal side of the plane @@ -2288,7 +2288,8 @@ def slice_plane(self, cap=cap, face_index=face_index, cached_dots=cached_dots, - **kwargs) + **kwargs, + ) return new_mesh @@ -2313,13 +2314,14 @@ def unwrap(self, image=None): """ import xatlas - vmap, faces, uv = xatlas.parametrize( - self.vertices, self.faces) + vmap, faces, uv = xatlas.parametrize(self.vertices, self.faces) - result = Trimesh(vertices=self.vertices[vmap], - faces=faces, - visual=TextureVisuals(uv=uv, image=image), - process=False) + result = Trimesh( + vertices=self.vertices[vmap], + faces=faces, + visual=TextureVisuals(uv=uv, image=image), + process=False, + ) # run additional checks for unwrapping if tol.strict: @@ -2330,22 +2332,22 @@ def unwrap(self, image=None): assert np.allclose(result.vertices, self.vertices[vmap]) # check to make sure indices are still the # same order after we've exported to OBJ - export = result.export(file_type='obj') - uv_recon = np.array([L[3:].split() for L in - str.splitlines(export) if - L.startswith('vt ')], - dtype=np.float64) + export = result.export(file_type="obj") + uv_recon = np.array( + [L[3:].split() for L in str.splitlines(export) if L.startswith("vt ")], + dtype=np.float64, + ) assert np.allclose(uv_recon, uv) - v_recon = np.array([L[2:].split() for L in - str.splitlines(export) if - L.startswith('v ')], - dtype=np.float64) + v_recon = 
np.array(
+            [L[2:].split() for L in str.splitlines(export) if L.startswith("v ")],
+            dtype=np.float64,
+        )
             assert np.allclose(v_recon, self.vertices[vmap])

         return result

     @caching.cache_decorator
-    def convex_hull(self):
+    def convex_hull(self) -> "Trimesh":
         """
         Returns a Trimesh object representing the convex hull of the
         current mesh.

@@ -2382,12 +2384,13 @@ def sample(self, count, return_index=False, face_weight=None):
           Index of self.faces
         """
         samples, index = sample.sample_surface(
-            mesh=self, count=count, face_weight=face_weight)
+            mesh=self, count=count, face_weight=face_weight
+        )
         if return_index:
             return samples, index
         return samples

-    def remove_unreferenced_vertices(self):
+    def remove_unreferenced_vertices(self) -> None:
         """
         Remove all vertices in the current mesh which are not
         referenced by a face.

@@ -2400,14 +2403,13 @@ def remove_unreferenced_vertices(self):

         self.update_vertices(mask=referenced, inverse=inverse)

-    def unmerge_vertices(self):
+    def unmerge_vertices(self) -> None:
         """
         Removes all face references so that every face contains
         three unique vertex indices and no faces are adjacent.
         """
         # new faces are incrementing so every vertex is unique
-        faces = np.arange(len(self.faces) * 3,
-                          dtype=np.int64).reshape((-1, 3))
+        faces = np.arange(len(self.faces) * 3, dtype=np.int64).reshape((-1, 3))

         # use update_vertices to apply mask to
         # all properties that are per-vertex
@@ -2415,9 +2417,9 @@ def unmerge_vertices(self):
         # set faces to incrementing indexes
         self.faces = faces
         # keep face normals as they haven't changed
-        self._cache.clear(exclude=['face_normals'])
+        self._cache.clear(exclude=["face_normals"])

-    def apply_transform(self, matrix):
+    def apply_transform(self, matrix: NDArray[float64]) -> "Trimesh":
         """
         Transform mesh by a homogeneous transformation matrix.
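A minimal sketch (not part of the patch) of the transform path documented above; when the matrix has a rotation part, cached normals are rotated in place rather than recomputed:

import numpy as np
import trimesh

m = trimesh.creation.box()
_ = m.face_normals  # populate the cache
m.apply_transform(trimesh.transformations.rotation_matrix(np.pi / 4.0, [0, 0, 1]))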
@@ -2431,12 +2433,11 @@ def apply_transform(self, matrix): Homogeneous transformation matrix """ # get c-order float64 matrix - matrix = np.asanyarray( - matrix, order='C', dtype=np.float64) + matrix = np.asanyarray(matrix, order="C", dtype=np.float64) # only support homogeneous transformations if matrix.shape != (4, 4): - raise ValueError('Transformation matrix must be (4, 4)!') + raise ValueError("Transformation matrix must be (4, 4)!") # exit early if we've been passed an identity matrix # np.allclose is surprisingly slow so do this test @@ -2444,74 +2445,78 @@ def apply_transform(self, matrix): return self # new vertex positions - new_vertices = transformations.transform_points( - self.vertices, - matrix=matrix) + new_vertices = transformations.transform_points(self.vertices, matrix=matrix) # check to see if the matrix has rotation # rather than just translation - has_rotation = not util.allclose( - matrix[:3, :3], np.eye(3), atol=1e-6) + has_rotation = not util.allclose(matrix[:3, :3], np.eye(3), atol=1e-6) # transform overridden center of mass - if 'center_mass' in self._data: - center_mass = self._data['center_mass'] + if "center_mass" in self._data: + center_mass = self._data["center_mass"] self.center_mass = transformations.transform_points( - np.array([center_mass,]), - matrix)[0] + np.array( + [ + center_mass, + ] + ), + matrix, + )[0] # preserve face normals if we have them stored - if has_rotation and 'face_normals' in self._cache: + if has_rotation and "face_normals" in self._cache: # transform face normals by rotation component - self._cache.cache['face_normals'] = util.unitize( + self._cache.cache["face_normals"] = util.unitize( transformations.transform_points( - self.face_normals, - matrix=matrix, - translate=False)) + self.face_normals, matrix=matrix, translate=False + ) + ) # preserve vertex normals if we have them stored - if has_rotation and 'vertex_normals' in self._cache: - self._cache.cache['vertex_normals'] = util.unitize( + if has_rotation and "vertex_normals" in self._cache: + self._cache.cache["vertex_normals"] = util.unitize( transformations.transform_points( - self.vertex_normals, - matrix=matrix, - translate=False)) + self.vertex_normals, matrix=matrix, translate=False + ) + ) # if transformation flips winding of triangles if has_rotation and transformations.flips_winding(matrix): - log.debug('transform flips winding') + log.debug("transform flips winding") # fliplr will make array non C contiguous # which will cause hashes to be more # expensive than necessary so wrap - self.faces = np.ascontiguousarray( - np.fliplr(self.faces)) + self.faces = np.ascontiguousarray(np.fliplr(self.faces)) # assign the new values self.vertices = new_vertices # preserve normals and topology in cache # while dumping everything else - self._cache.clear(exclude={ - 'face_normals', # transformed by us - 'vertex_normals', # also transformed by us - 'face_adjacency', # topological - 'face_adjacency_edges', - 'face_adjacency_unshared', - 'edges', - 'edges_face', - 'edges_sorted', - 'edges_unique', - 'edges_unique_idx', - 'edges_unique_inverse', - 'edges_sparse', - 'body_count', - 'faces_unique_edges', - 'euler_number'}) + self._cache.clear( + exclude={ + "face_normals", # transformed by us + "vertex_normals", # also transformed by us + "face_adjacency", # topological + "face_adjacency_edges", + "face_adjacency_unshared", + "edges", + "edges_face", + "edges_sorted", + "edges_unique", + "edges_unique_idx", + "edges_unique_inverse", + "edges_sparse", + "body_count", + 
"faces_unique_edges", + "euler_number", + } + ) # set the cache ID with the current hash value self._cache.id_set() return self - def voxelized(self, pitch, method='subdivide', **kwargs): + def voxelized(self, pitch, method="subdivide", **kwargs): """ Return a VoxelGrid object representing the current mesh discretized into voxels at the specified pitch @@ -2529,8 +2534,8 @@ def voxelized(self, pitch, method='subdivide', **kwargs): Representing the current mesh """ from .voxel import creation - return creation.voxelize( - mesh=self, pitch=pitch, method=method, **kwargs) + + return creation.voxelize(mesh=self, pitch=pitch, method=method, **kwargs) @caching.cache_decorator def as_open3d(self): @@ -2544,12 +2549,12 @@ def as_open3d(self): Current mesh as an open3d object. """ import open3d + # create from numpy arrays return open3d.geometry.TriangleMesh( - vertices=open3d.utility.Vector3dVector( - self.vertices.copy()), - triangles=open3d.utility.Vector3iVector( - self.faces.copy())) + vertices=open3d.utility.Vector3dVector(self.vertices.copy()), + triangles=open3d.utility.Vector3iVector(self.faces.copy()), + ) def simplify_quadratic_decimation(self, *args, **kwargs): """ @@ -2557,13 +2562,15 @@ def simplify_quadratic_decimation(self, *args, **kwargs): `mesh.simplify_quadric_decimation` """ warnings.warn( - '`simplify_quadratic_decimation` is deprecated ' + - 'as it was a typo and will be removed in March 2024: ' + - 'replace with `simplify_quadric_decimation`', - category=DeprecationWarning, stacklevel=2) + "`simplify_quadratic_decimation` is deprecated " + + "as it was a typo and will be removed in March 2024: " + + "replace with `simplify_quadric_decimation`", + category=DeprecationWarning, + stacklevel=2, + ) return self.simplify_quadric_decimation(*args, **kwargs) - def simplify_quadric_decimation(self, face_count): + def simplify_quadric_decimation(self, face_count: int): """ A thin wrapper around the `open3d` implementation of this: `open3d.geometry.TriangleMesh.simplify_quadric_decimation` @@ -2578,11 +2585,10 @@ def simplify_quadric_decimation(self, face_count): simple : trimesh.Trimesh Simplified version of mesh. """ - simple = self.as_open3d.simplify_quadric_decimation( - int(face_count)) + simple = self.as_open3d.simplify_quadric_decimation(int(face_count)) return Trimesh(vertices=simple.vertices, faces=simple.triangles) - def outline(self, face_ids=None, **kwargs): + def outline(self, face_ids: Optional[NDArray[int64]]=None, **kwargs): """ Given a list of face indexes find the outline of those faces and return it as a Path3D. @@ -2607,12 +2613,10 @@ def outline(self, face_ids=None, **kwargs): """ from .path import Path3D from .path.exchange.misc import faces_to_path - return Path3D(**faces_to_path( - self, face_ids, **kwargs)) - def projected(self, - normal, - **kwargs): + return Path3D(**faces_to_path(self, face_ids, **kwargs)) + + def projected(self, normal, **kwargs): """ Project a mesh onto a plane and then extract the polygon that outlines the mesh projection on that @@ -2646,14 +2650,13 @@ def projected(self, from .path import Path2D from .path.polygons import projected - projection = projected( - mesh=self, normal=normal, **kwargs) + projection = projected(mesh=self, normal=normal, **kwargs) if projection is None: return Path2D() return load_path(projection) @caching.cache_decorator - def area(self): + def area(self) -> float64: """ Summed area of all triangles in the current mesh. 
@@ -2666,7 +2669,7 @@ def area(self): return area @caching.cache_decorator - def area_faces(self): + def area_faces(self) -> ndarray: """ The area of each face in the mesh. @@ -2675,13 +2678,11 @@ def area_faces(self): area_faces : (n, ) float Area of each face """ - area_faces = triangles.area( - crosses=self.triangles_cross, - sum=False) + area_faces = triangles.area(crosses=self.triangles_cross, sum=False) return area_faces @caching.cache_decorator - def mass_properties(self): + def mass_properties(self) -> MassProperties: """ Returns the mass properties of the current mesh. @@ -2700,18 +2701,19 @@ def mass_properties(self): 'center_mass' : Center of mass location, in global coordinate system """ # if the density or center of mass was overridden they will be put into data - density = self._data.data.get('density', [None])[0] - center_mass = self._data.data.get('center_mass', None) + density = self._data.data.get("density", [None])[0] + center_mass = self._data.data.get("center_mass", None) mass = triangles.mass_properties( triangles=self.triangles, crosses=self.triangles_cross, density=density, center_mass=center_mass, - skip_inertia=False) + skip_inertia=False, + ) return mass - def invert(self): + def invert(self) -> None: """ Invert the mesh in-place by reversing the winding of every face and negating normals without dumping the cache. @@ -2720,18 +2722,16 @@ def invert(self): `self.face_normals` and `self.vertex_normals`. """ with self._cache: - if 'face_normals' in self._cache: - self.face_normals = self._cache['face_normals'] * -1.0 - if 'vertex_normals' in self._cache: - self.vertex_normals = self._cache['vertex_normals'] * -1.0 + if "face_normals" in self._cache: + self.face_normals = self._cache["face_normals"] * -1.0 + if "vertex_normals" in self._cache: + self.vertex_normals = self._cache["vertex_normals"] * -1.0 # fliplr makes array non-contiguous so cache checks slow - self.faces = np.ascontiguousarray( - np.fliplr(self.faces)) + self.faces = np.ascontiguousarray(np.fliplr(self.faces)) # save our normals - self._cache.clear(exclude=['face_normals', - 'vertex_normals']) + self._cache.clear(exclude=["face_normals", "vertex_normals"]) - def scene(self, **kwargs): + def scene(self, **kwargs) -> Scene: """ Returns a Scene object containing the current mesh. @@ -2760,7 +2760,9 @@ def show(self, **kwargs): scene = self.scene() return scene.show(**kwargs) - def submesh(self, faces_sequence, **kwargs): + def submesh( + self, faces_sequence: List[NDArray[int64]], **kwargs + ) -> Union["Trimesh", List["Trimesh"]: """ Return a subset of the mesh. @@ -2779,13 +2781,10 @@ def submesh(self, faces_sequence, **kwargs): submesh : Trimesh or (n,) Trimesh Single mesh if `append` or list of submeshes """ - return util.submesh( - mesh=self, - faces_sequence=faces_sequence, - **kwargs) + return util.submesh(mesh=self, faces_sequence=faces_sequence, **kwargs) @caching.cache_decorator - def identifier(self): + def identifier(self) -> ndarray: """ Return a float vector which is unique to the mesh and is robust to rotation and translation. @@ -2798,7 +2797,7 @@ def identifier(self): return comparison.identifier_simple(self) @caching.cache_decorator - def identifier_hash(self): + def identifier_hash(self) -> str: """ A hash of the rotation invariant identifier vector. 
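As a hedged aside on the mass properties above: with the default density of 1.0 the reported mass should equal the enclosed volume for a watertight mesh. A sketch:

import trimesh

m = trimesh.creation.box(extents=[1.0, 1.0, 2.0])
props = m.mass_properties
assert abs(props["mass"] - m.volume) < 1e-9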
@@ -2810,16 +2809,17 @@ def identifier_hash(self): """ return comparison.identifier_hash(self.identifier) - @property - def identifier_md5(self): - warnings.warn( - '`geom.identifier_md5` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `geom.identifier_hash`', - category=DeprecationWarning, stacklevel=2) - return self.identifier_hash - - def export(self, file_obj=None, file_type=None, **kwargs): + def export( + self, + file_obj: Optional[Union[str, BufferedRandom]] = None, + file_type: Optional[str] = None, + **kwargs, + ) -> Union[ + Dict[str, Union[Dict[str, str], List[List[int]], List[List[float]]]], + str, + bytes, + Dict[str, Union[Dict[str, str], Dict[str, Union[str, Tuple[int, int]]]]], + ]: """ Export the current mesh to a file object. If file_obj is a filename, file will be written there. @@ -2836,13 +2836,9 @@ def export(self, file_obj=None, file_type=None, **kwargs): Which file type to export as, if `file_name` is passed this is not required. """ - return export_mesh( - mesh=self, - file_obj=file_obj, - file_type=file_type, - **kwargs) + return export_mesh(mesh=self, file_obj=file_obj, file_type=file_type, **kwargs) - def to_dict(self): + def to_dict(self) -> Dict[str, Union[str, List[List[float]], List[List[int]]]]: """ Return a dictionary representation of the current mesh with keys that can be used as the kwargs for the @@ -2854,9 +2850,11 @@ def to_dict(self): result : dict Matches schema and Trimesh constructor. """ - return {'kind': 'trimesh', - 'vertices': self.vertices.tolist(), - 'faces': self.faces.tolist()} + return { + "kind": "trimesh", + "vertices": self.vertices.tolist(), + "faces": self.faces.tolist(), + } def convex_decomposition(self, maxhulls=20, **kwargs): """ @@ -2896,12 +2894,12 @@ def convex_decomposition(self, maxhulls=20, **kwargs): meshes : list of trimesh.Trimesh List of convex meshes that approximate the original """ - result = decomposition.convex_decomposition(self, - maxhulls=maxhulls, - **kwargs) + result = decomposition.convex_decomposition(self, maxhulls=maxhulls, **kwargs) return result - def union(self, other, engine=None, **kwargs): + def union( + self, other: "Trimesh", engine: Optional[str] = None, **kwargs + ) -> "Trimesh": """ Boolean union between this mesh and n other meshes @@ -2917,13 +2915,12 @@ def union(self, other, engine=None, **kwargs): union : trimesh.Trimesh Union of self and other Trimesh objects """ - result = boolean.union( - meshes=np.append(self, other), - engine=engine, - **kwargs) + result = boolean.union(meshes=np.append(self, other), engine=engine, **kwargs) return result - def difference(self, other, engine=None, **kwargs): + def difference( + self, other: "Trimesh", engine: Optional[str] = None, **kwargs + ) -> "Trimesh": """ Boolean difference between this mesh and n other meshes @@ -2937,11 +2934,14 @@ def difference(self, other, engine=None, **kwargs): difference : trimesh.Trimesh Difference between self and other Trimesh objects """ - result = boolean.difference(meshes=np.append(self, other), - engine=engine, **kwargs) + result = boolean.difference( + meshes=np.append(self, other), engine=engine, **kwargs + ) return result - def intersection(self, other, engine=None, **kwargs): + def intersection( + self, other: "Trimesh", engine: Optional[str] = None, **kwargs + ) -> "Trimesh": """ Boolean intersection between this mesh and n other meshes @@ -2955,11 +2955,12 @@ def intersection(self, other, engine=None, **kwargs): intersection : trimesh.Trimesh Mesh of the volume contained by all 
passed meshes """ - result = boolean.intersection(meshes=np.append(self, other), - engine=engine, **kwargs) + result = boolean.intersection( + meshes=np.append(self, other), engine=engine, **kwargs + ) return result - def contains(self, points): + def contains(self, points: TrackedArray) -> ndarray: """ Given an array of points determine whether or not they are inside the mesh. This raises an error if called on a @@ -2978,7 +2979,7 @@ def contains(self, points): return self.ray.contains_points(points) @caching.cache_decorator - def face_angles(self): + def face_angles(self) -> ndarray: """ Returns the angle at each vertex of a face. @@ -2991,7 +2992,7 @@ def face_angles(self): return angles @caching.cache_decorator - def face_angles_sparse(self): + def face_angles_sparse(self) -> coo_matrix: """ A sparse matrix representation of the face angles. @@ -3005,7 +3006,7 @@ def face_angles_sparse(self): return angles @caching.cache_decorator - def vertex_defects(self): + def vertex_defects(self) -> ndarray: """ Return the vertex defects, or (2*pi) minus the sum of the angles of every face that includes that vertex. @@ -3023,7 +3024,7 @@ def vertex_defects(self): return defects @caching.cache_decorator - def vertex_degree(self): + def vertex_degree(self) -> ndarray: """ Return the number of faces each vertex is included in. @@ -3037,7 +3038,7 @@ def vertex_degree(self): return degree @caching.cache_decorator - def face_adjacency_tree(self): + def face_adjacency_tree(self) -> Index: """ An R-tree of face adjacencies. @@ -3048,13 +3049,16 @@ def face_adjacency_tree(self): rectangular cell """ # the (n,6) interleaved bounding box for every line segment - segment_bounds = np.column_stack(( - self.vertices[self.face_adjacency_edges].min(axis=1), - self.vertices[self.face_adjacency_edges].max(axis=1))) + segment_bounds = np.column_stack( + ( + self.vertices[self.face_adjacency_edges].min(axis=1), + self.vertices[self.face_adjacency_edges].max(axis=1), + ) + ) tree = util.bounds_tree(segment_bounds) return tree - def copy(self, include_cache=False): + def copy(self, include_cache: bool = False) -> "Trimesh": """ Safely return a copy of the current mesh. @@ -3096,11 +3100,11 @@ def copy(self, include_cache=False): return copied - def __deepcopy__(self, *args): + def __deepcopy__(self, *args) -> "Trimesh": # interpret deep copy as "get rid of cached data" return self.copy(include_cache=False) - def __copy__(self, *args): + def __copy__(self, *args) -> "Trimesh": # interpret shallow copy as "keep cached data" return self.copy(include_cache=True) @@ -3127,8 +3131,8 @@ def eval_cached(self, statement, *args): """ statement = str(statement) - key = 'eval_cached_' + statement - key += '_'.join(str(i) for i in args) + key = "eval_cached_" + statement + key += "_".join(str(i) for i in args) if key in self._cache: return self._cache[key] @@ -3137,7 +3141,7 @@ def eval_cached(self, statement, *args): self._cache[key] = result return result - def __add__(self, other): + def __add__(self, other: "Trimesh") -> "Trimesh": """ Concatenate the mesh with another mesh. 
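A short sketch (not part of the patch) contrasting `+` with the boolean operations above: addition is a plain concatenation, so it never resolves overlapping volume and needs no boolean engine:

import trimesh

a = trimesh.creation.box()
b = trimesh.creation.icosphere()
combined = a + b
assert len(combined.faces) == len(a.faces) + len(b.faces)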
diff --git a/trimesh/interfaces/gmsh.py b/trimesh/interfaces/gmsh.py index c3ecffd8e..0a2f23f10 100644 --- a/trimesh/interfaces/gmsh.py +++ b/trimesh/interfaces/gmsh.py @@ -71,7 +71,7 @@ def load_gmsh(file_name, gmsh_args=None): # loop through our numbered args which do things, stuff for arg in args: gmsh.option.setNumber(*arg) - + gmsh.open(file_name) # create a temporary file for the results diff --git a/trimesh/primitives.py b/trimesh/primitives.py index 33768741e..70828b9d9 100644 --- a/trimesh/primitives.py +++ b/trimesh/primitives.py @@ -21,9 +21,9 @@ _IDENTITY.flags.writeable = False -class _Primitive(Trimesh): +class Primitive(Trimesh): """ - Geometric _Primitives which are a subclass of Trimesh. + Geometric Primitives which are a subclass of Trimesh. Mesh is generated lazily when vertices or faces are requested. """ @@ -219,7 +219,7 @@ def _create_mesh(self): raise ValueError('Primitive doesn\'t define mesh creation!') -class _PrimitiveAttributes: +class PrimitiveAttributes: """ Hold the mutable data which defines a primitive. """ @@ -230,7 +230,7 @@ def __init__(self, parent, defaults, kwargs, mutable=True): Parameters ------------ - parent : _Primitive + parent : Primitive Parent object reference. defaults : dict The default values for this primitive type. @@ -318,7 +318,7 @@ def __dir__(self): return result -class Cylinder(_Primitive): +class Cylinder(Primitive): def __init__(self, radius=1.0, @@ -348,7 +348,7 @@ def __init__(self, 'radius': 1.0, 'transform': np.eye(4), 'sections': 32} - self.primitive = _PrimitiveAttributes( + self.primitive = PrimitiveAttributes( self, defaults=defaults, kwargs={'height': height, @@ -472,7 +472,7 @@ def _create_mesh(self): self._cache['face_normals'] = mesh.face_normals -class Capsule(_Primitive): +class Capsule(Primitive): def __init__(self, radius=1.0, @@ -502,7 +502,7 @@ def __init__(self, 'radius': 1.0, 'transform': np.eye(4), 'sections': 32} - self.primitive = _PrimitiveAttributes( + self.primitive = PrimitiveAttributes( self, defaults=defaults, kwargs={'height': height, @@ -557,7 +557,7 @@ def _create_mesh(self): self._cache['face_normals'] = mesh.face_normals -class Sphere(_Primitive): +class Sphere(Primitive): def __init__(self, radius=1.0, @@ -602,7 +602,7 @@ def __init__(self, constructor['transform'] = transform # create the attributes object - self.primitive = _PrimitiveAttributes( + self.primitive = PrimitiveAttributes( self, defaults=defaults, kwargs=constructor, mutable=mutable) @property @@ -697,7 +697,7 @@ def _create_mesh(self): self._cache['face_normals'] = unit.face_normals -class Box(_Primitive): +class Box(Primitive): def __init__(self, extents=None, transform=None, @@ -736,7 +736,7 @@ def __init__(self, transform = np.eye(4) transform[:3, 3] = bounds[0] + extents / 2.0 - self.primitive = _PrimitiveAttributes( + self.primitive = PrimitiveAttributes( self, defaults=defaults, kwargs={'extents': extents, @@ -871,7 +871,7 @@ def as_outline(self): transform=self.primitive.transform) -class Extrusion(_Primitive): +class Extrusion(Primitive): def __init__(self, polygon=None, transform=None, @@ -902,7 +902,7 @@ def __init__(self, 'transform': np.eye(4), 'height': 1.0} - self.primitive = _PrimitiveAttributes( + self.primitive = PrimitiveAttributes( self, defaults=defaults, kwargs={'transform': transform, From 1a19958cff25217aba0a9dc513a9a60a868b5464 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 5 Sep 2023 17:25:16 -0400 Subject: [PATCH 041/144] base importing with type hints --- trimesh/base.py | 25 
++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/trimesh/base.py b/trimesh/base.py index c699870a2..c12de3464 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -55,6 +55,7 @@ from numpy.typing import NDArray + class Trimesh(Geometry3D): def __init__( self, @@ -1497,7 +1498,9 @@ def face_adjacency_radius(self) -> NDArray[float64]: radii : (len(self.face_adjacency), ) float Approximate radius formed by triangle pair """ - radii, self._cache["face_adjacency_span"] = graph.face_adjacency_radius(mesh=self) + radii, self._cache["face_adjacency_span"] = graph.face_adjacency_radius( + mesh=self + ) return radii @caching.cache_decorator @@ -2570,7 +2573,7 @@ def simplify_quadratic_decimation(self, *args, **kwargs): ) return self.simplify_quadric_decimation(*args, **kwargs) - def simplify_quadric_decimation(self, face_count: int): + def simplify_quadric_decimation(self, face_count: int) -> "Trimesh": """ A thin wrapper around the `open3d` implementation of this: `open3d.geometry.TriangleMesh.simplify_quadric_decimation` @@ -2588,7 +2591,9 @@ def simplify_quadric_decimation(self, face_count: int): simple = self.as_open3d.simplify_quadric_decimation(int(face_count)) return Trimesh(vertices=simple.vertices, faces=simple.triangles) - def outline(self, face_ids: Optional[NDArray[int64]]=None, **kwargs): + def outline( + self, face_ids: Optional[NDArray[int64]] = None, **kwargs + ) -> "trimesh.path.Path3D": """ Given a list of face indexes find the outline of those faces and return it as a Path3D. @@ -2616,7 +2621,7 @@ def outline(self, face_ids: Optional[NDArray[int64]]=None, **kwargs): return Path3D(**faces_to_path(self, face_ids, **kwargs)) - def projected(self, normal, **kwargs): + def projected(self, normal, **kwargs) -> "trimesh.path.Path2D": """ Project a mesh onto a plane and then extract the polygon that outlines the mesh projection on that @@ -2669,7 +2674,7 @@ def area(self) -> float64: return area @caching.cache_decorator - def area_faces(self) -> ndarray: + def area_faces(self) -> NDArray[float64]: """ The area of each face in the mesh. @@ -2682,7 +2687,7 @@ def area_faces(self) -> ndarray: return area_faces @caching.cache_decorator - def mass_properties(self) -> MassProperties: + def mass_properties(self) -> Dict: """ Returns the mass properties of the current mesh. @@ -2760,9 +2765,7 @@ def show(self, **kwargs): scene = self.scene() return scene.show(**kwargs) - def submesh( - self, faces_sequence: List[NDArray[int64]], **kwargs - ) -> Union["Trimesh", List["Trimesh"]: + def submesh(self, faces_sequence: List[NDArray[int64]], **kwargs): """ Return a subset of the mesh. @@ -2784,7 +2787,7 @@ def submesh( return util.submesh(mesh=self, faces_sequence=faces_sequence, **kwargs) @caching.cache_decorator - def identifier(self) -> ndarray: + def identifier(self) -> NDArray[float64]: """ Return a float vector which is unique to the mesh and is robust to rotation and translation. 
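A hedged sketch (not part of the patch) of the invariance claimed above; equal hashes are expected for rigidly transformed copies, within the rounding built into the identifier:

import trimesh

a = trimesh.creation.icosphere()
b = a.copy()
b.apply_transform(trimesh.transformations.random_rotation_matrix())
assert a.identifier_hash == b.identifier_hash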
@@ -2811,7 +2814,7 @@ def identifier_hash(self) -> str: def export( self, - file_obj: Optional[Union[str, BufferedRandom]] = None, + file_obj=None, file_type: Optional[str] = None, **kwargs, ) -> Union[ From 5c3d0b1376567052d8b67fc30d1ae8b57f88ea53 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 5 Sep 2023 17:34:36 -0400 Subject: [PATCH 042/144] sigh take kwargs again --- trimesh/base.py | 1 + 1 file changed, 1 insertion(+) diff --git a/trimesh/base.py b/trimesh/base.py index c12de3464..a83247d73 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -75,6 +75,7 @@ def __init__( use_embree: bool = True, initial_cache: Optional[Dict[str, ndarray]] = None, visual: Optional[Union[ColorVisuals, TextureVisuals]] = None, + **kwargs, ) -> None: """ A Trimesh object contains a triangular 3D mesh. From bd33e2e52f2c5ba273d3df6db538b7bc719463ee Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 5 Sep 2023 20:11:05 -0400 Subject: [PATCH 043/144] use string annotations --- trimesh/base.py | 70 +++++++++++++++++++++---------------------------- 1 file changed, 30 insertions(+), 40 deletions(-) diff --git a/trimesh/base.py b/trimesh/base.py index a83247d73..b241dd273 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -7,19 +7,13 @@ import copy import warnings -from io import BufferedRandom from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np -import scipy.spatial._ckdtree -from networkx.classes.graph import Graph from numpy import float64, int64, ndarray -from numpy.typing import ArrayLike -from rtree.index import Index -from scipy.sparse._coo import coo_matrix +from numpy.typing import NDArray from trimesh.caching import TrackedArray -from trimesh.path.path import Path3D from . import ( boolean, @@ -53,8 +47,6 @@ from .scene import Scene from .visual import ColorVisuals, TextureVisuals, create_visual -from numpy.typing import NDArray - class Trimesh(Geometry3D): def __init__( @@ -792,7 +784,7 @@ def symmetry(self) -> Optional[str]: return symmetry @property - def symmetry_axis(self) -> ndarray: + def symmetry_axis(self) -> NDArray[float64]: """ If a mesh has rotational symmetry, return the axis. @@ -805,7 +797,7 @@ def symmetry_axis(self) -> ndarray: return self._cache["symmetry_axis"] @property - def symmetry_section(self) -> ndarray: + def symmetry_section(self) -> NDArray[float64]: """ If a mesh has rotational symmetry return the two vectors which make up a section coordinate frame. @@ -836,7 +828,7 @@ def triangles(self) -> ndarray: return triangles @caching.cache_decorator - def triangles_tree(self) -> Index: + def triangles_tree(self) -> "rtree.Index": """ An R-tree containing each face of the mesh. @@ -849,7 +841,7 @@ def triangles_tree(self) -> Index: return tree @caching.cache_decorator - def triangles_center(self) -> ndarray: + def triangles_center(self) -> NDArray[float64]: """ The center of each triangle (barycentric [1/3, 1/3, 1/3]) @@ -862,7 +854,7 @@ def triangles_center(self) -> ndarray: return triangles_center @caching.cache_decorator - def triangles_cross(self) -> ndarray: + def triangles_cross(self) -> NDArray[float64]: """ The cross product of two edges of each triangle. @@ -875,7 +867,7 @@ def triangles_cross(self) -> ndarray: return crosses @caching.cache_decorator - def edges(self) -> ndarray: + def edges(self) -> NDArray[int64]: """ Edges of the mesh (derived from faces). 
@@ -891,7 +883,7 @@ def edges(self) -> ndarray: return edges @caching.cache_decorator - def edges_face(self): + def edges_face(self) -> NDArray[int64]: """ Which face does each edge belong to. @@ -904,7 +896,7 @@ def edges_face(self): return self._cache["edges_face"] @caching.cache_decorator - def edges_unique(self) -> ndarray: + def edges_unique(self) -> NDArray[int64]: """ The unique edges of the mesh. @@ -922,7 +914,7 @@ def edges_unique(self) -> ndarray: return edges_unique @caching.cache_decorator - def edges_unique_length(self) -> TrackedArray: + def edges_unique_length(self) -> NDArray[float64]: """ How long is each unique edge. @@ -936,7 +928,7 @@ def edges_unique_length(self) -> TrackedArray: return length @caching.cache_decorator - def edges_unique_inverse(self): + def edges_unique_inverse(self) -> NDArray[int64]: """ Return the inverse required to reproduce self.edges_sorted from self.edges_unique. @@ -953,7 +945,7 @@ def edges_unique_inverse(self): return self._cache["edges_unique_inverse"] @caching.cache_decorator - def edges_sorted(self) -> ndarray: + def edges_sorted(self) -> NDArray[int64]: """ Edges sorted along axis 1 @@ -966,7 +958,7 @@ def edges_sorted(self) -> ndarray: return edges_sorted @caching.cache_decorator - def edges_sorted_tree(self) -> scipy.spatial._ckdtree.cKDTree: + def edges_sorted_tree(self) -> "scipy.spatial.cKDTree": """ A KDTree for mapping edges back to edge index. @@ -981,7 +973,7 @@ def edges_sorted_tree(self) -> scipy.spatial._ckdtree.cKDTree: return cKDTree(self.edges_sorted) @caching.cache_decorator - def edges_sparse(self) -> coo_matrix: + def edges_sparse(self) -> "scipy.sparse.coo_matrix": """ Edges in sparse bool COO graph format where connected vertices are True. @@ -1014,7 +1006,7 @@ def body_count(self) -> int: return count @caching.cache_decorator - def faces_unique_edges(self) -> ndarray: + def faces_unique_edges(self) -> NDArray[int64]: """ For each face return which indexes in mesh.unique_edges constructs that face. @@ -1065,7 +1057,7 @@ def euler_number(self) -> int: return euler @caching.cache_decorator - def referenced_vertices(self) -> ndarray: + def referenced_vertices(self) -> NDArray[bool]: """ Which vertices in the current mesh are referenced by a face. @@ -1079,7 +1071,7 @@ def referenced_vertices(self) -> ndarray: return referenced @property - def units(self): + def units(self) -> Optional[str]: """ Definition of units for the mesh. @@ -1094,11 +1086,11 @@ def units(self): return None @units.setter - def units(self, value): + def units(self, value: str) -> None: value = str(value).lower() self.metadata["units"] = value - def convert_units(self, desired, guess=False): + def convert_units(self, desired: str, guess: bool=False) -> "Trimesh": """ Convert the units of the mesh into a specified unit. @@ -1153,7 +1145,7 @@ def merge_vertices( def update_vertices( self, - mask: NDArray, + mask: NDArray[bool], inverse: Optional[NDArray] = None, ) -> None: """ @@ -1223,7 +1215,7 @@ def update_vertices( except BaseException: pass - def update_faces(self, mask: NDArray) -> None: + def update_faces(self, mask: NDArray[bool]) -> None: """ In many cases, we will want to remove specific faces. However, there is additional bookkeeping to do this cleanly. @@ -1292,7 +1284,7 @@ def remove_infinite_values(self) -> None: vertex_mask = np.isfinite(self.vertices).all(axis=1) self.update_vertices(vertex_mask) - def unique_faces(self): + def unique_faces(self) -> NDArray[bool]: """ On the current mesh find which faces are unique. 
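A minimal sketch (not part of the patch) of the mask-based replacements the deprecation shims point to:

import trimesh

m = trimesh.creation.box()
m.update_faces(m.unique_faces())         # replaces remove_duplicate_faces
m.update_faces(m.nondegenerate_faces())  # replaces remove_degenerate_faces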
@@ -1327,8 +1319,7 @@ def rezero(self): """ self.apply_translation(self.bounds[0] * -1.0) - @log_time - def split(self, **kwargs): + def split(self, **kwargs) -> List["Trimesh"]: """ Returns a list of Trimesh objects, based on face connectivity. Splits into individual components, sometimes referred to as 'bodies' @@ -1414,7 +1405,7 @@ def face_adjacency_edges(self) -> NDArray[int64]: return self._cache["face_adjacency_edges"] @caching.cache_decorator - def face_adjacency_edges_tree(self) -> scipy.spatial._ckdtree.cKDTree: + def face_adjacency_edges_tree(self) -> "scipy.spatial.cKDTree": """ A KDTree for mapping edges back face adjacency index. @@ -1535,7 +1526,7 @@ def integral_mean_curvature(self) -> float64: return (self.face_adjacency_angles * edges_length).sum() * 0.5 @caching.cache_decorator - def vertex_adjacency_graph(self) -> Graph: + def vertex_adjacency_graph(self) -> "networkx.Graph": """ Returns a networkx graph representing the vertices and their connections in the mesh. @@ -1671,7 +1662,7 @@ def is_convex(self) -> bool: return is_convex @caching.cache_decorator - def kdtree(self) -> scipy.spatial._ckdtree.cKDTree: + def kdtree(self) -> "scipy.spatial.cKDTree": """ Return a scipy.spatial.cKDTree of the vertices of the mesh. Not cached as this lead to observed memory issues and segfaults. @@ -1817,7 +1808,7 @@ def facets_boundary(self) -> List[NDArray[int64]]: return edges_boundary @caching.cache_decorator - def facets_on_hull(self) -> ndarray: + def facets_on_hull(self) -> NDArray[bool]: """ Find which facets of the mesh are on the convex hull. @@ -1849,8 +1840,7 @@ def facets_on_hull(self) -> ndarray: return on_hull - @log_time - def fix_normals(self, multibody=None): + def fix_normals(self, multibody: Optional[bool]=None): """ Find and fix problems with self.face_normals and self.faces winding direction. @@ -2170,7 +2160,7 @@ def visual(self, value): def section( self, plane_normal: List[int], plane_origin: List[int], **kwargs - ) -> Path3D: + ) -> "trimesh.path.Path3D": """ Returns a 3D cross section of the current mesh and a plane defined by origin and normal. @@ -3042,7 +3032,7 @@ def vertex_degree(self) -> ndarray: return degree @caching.cache_decorator - def face_adjacency_tree(self) -> Index: + def face_adjacency_tree(self) -> "rtree.Index": """ An R-tree of face adjacencies. 
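A note on the string annotations introduced in the patch above: quoting a return type such as `-> "rtree.Index"` stores the annotation as a plain string that is never evaluated at runtime, so the heavy optional dependency does not need to be importable just to define the class. A minimal sketch of the idea, assuming scipy as the optional dependency (the names here are illustrative, not trimesh API):

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # evaluated only by static type checkers, never at runtime
        from scipy.spatial import cKDTree

    def build_tree(points) -> "cKDTree":
        # the quoted return annotation is just a string at runtime,
        # so scipy is only imported when the function is called
        from scipy.spatial import cKDTree
        return cKDTree(points)
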
From 0796b7ac16c4bff689a04a704844edd92d39c571 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 5 Sep 2023 22:56:26 -0400 Subject: [PATCH 044/144] import in base --- trimesh/base.py | 102 +++++++++++++++++++++++++--------------------- trimesh/parent.py | 2 +- trimesh/py.typed | 0 3 files changed, 57 insertions(+), 47 deletions(-) create mode 100644 trimesh/py.typed diff --git a/trimesh/base.py b/trimesh/base.py index b241dd273..ee1080518 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -42,11 +42,34 @@ util, ) from .constants import log, log_time, tol +from .exceptions import ExceptionWrapper from .exchange.export import export_mesh from .parent import Geometry3D from .scene import Scene from .visual import ColorVisuals, TextureVisuals, create_visual +try: + from scipy.sparse import coo_matrix + from scipy.spatial import cKDTree +except BaseException as E: + cKDTree = ExceptionWrapper(E) + coo_matrix = ExceptionWrapper(E) +try: + from networkx import Graph +except BaseException as E: + Graph = ExceptionWrapper(E) + +try: + from rtree import Index +except BaseException as E: + Index = ExceptionWrapper(E) + +try: + from .path import Path2D, Path3D +except BaseException as E: + Path2D = ExceptionWrapper(E) + Path3D = ExceptionWrapper(E) + class Trimesh(Geometry3D): def __init__( @@ -248,7 +271,7 @@ def process( return self @property - def faces(self): + def faces(self) -> NDArray[int64]: """ The faces of the mesh. @@ -265,7 +288,7 @@ def faces(self): return self._data.get("faces", np.empty(shape=(0, 3), dtype=np.int64)) @faces.setter - def faces(self, values): + def faces(self, values: Union[List[List[int]], NDArray[int64]]): """ Set the vertex indexes that make up triangular faces. @@ -449,7 +472,7 @@ def vertex_normals(self): return vertex_normals @vertex_normals.setter - def vertex_normals(self, values): + def vertex_normals(self, values: NDArray[float64]): """ Assign values to vertex normals. @@ -467,7 +490,7 @@ def vertex_normals(self, values): self._cache["vertex_normals"] = values @caching.cache_decorator - def vertex_faces(self) -> ndarray: + def vertex_faces(self) -> NDArray[int64]: """ A representation of the face indices that correspond to each vertex. @@ -486,7 +509,7 @@ def vertex_faces(self) -> ndarray: return vertex_faces @caching.cache_decorator - def bounds(self) -> ndarray: + def bounds(self) -> NDArray[float64]: """ The axis aligned bounds of the faces of the mesh. @@ -505,7 +528,7 @@ def bounds(self) -> ndarray: return np.array([in_mesh.min(axis=0), in_mesh.max(axis=0)]) @caching.cache_decorator - def extents(self) -> ndarray: + def extents(self) -> NDArray[float64]: """ The length, width, and height of the axis aligned bounding box of the mesh. @@ -542,7 +565,7 @@ def scale(self) -> float: return scale @caching.cache_decorator - def centroid(self) -> ndarray: + def centroid(self) -> NDArray[float64]: """ The point in space which is the average of the triangle centroids weighted by the area of each triangle. @@ -652,7 +675,7 @@ def mass(self) -> float64: return mass @property - def moment_inertia(self) -> ndarray: + def moment_inertia(self) -> NDArray[float64]: """ Return the moment of inertia matrix of the current mesh. If mesh isn't watertight this is garbage. 
The returned @@ -670,7 +693,7 @@ def moment_inertia(self) -> ndarray: inertia = self.mass_properties["inertia"] return inertia - def moment_inertia_frame(self, transform: ndarray) -> ndarray: + def moment_inertia_frame(self, transform: NDArray[float64]) -> NDArray[float64]: """ Get the moment of inertia of this mesh with respect to an arbitrary frame, versus with respect to the center @@ -709,7 +732,7 @@ def moment_inertia_frame(self, transform: ndarray) -> ndarray: ) @caching.cache_decorator - def principal_inertia_components(self) -> ndarray: + def principal_inertia_components(self) -> NDArray[float64]: """ Return the principal components of inertia @@ -728,7 +751,7 @@ def principal_inertia_components(self) -> ndarray: return components @property - def principal_inertia_vectors(self) -> ndarray: + def principal_inertia_vectors(self) -> NDArray[float64]: """ Return the principal axis of inertia as unit vectors. The order corresponds to `mesh.principal_inertia_components`. @@ -743,7 +766,7 @@ def principal_inertia_vectors(self) -> ndarray: return self._cache["principal_inertia_vectors"] @caching.cache_decorator - def principal_inertia_transform(self) -> ndarray: + def principal_inertia_transform(self) -> NDArray[float64]: """ A transform which moves the current mesh so the principal inertia vectors are on the X,Y, and Z axis, and the centroid is @@ -811,7 +834,7 @@ def symmetry_section(self) -> NDArray[float64]: return self._cache["symmetry_section"] @caching.cache_decorator - def triangles(self) -> ndarray: + def triangles(self) -> NDArray[float64]: """ Actual triangles of the mesh (points, not indexes) @@ -828,7 +851,7 @@ def triangles(self) -> ndarray: return triangles @caching.cache_decorator - def triangles_tree(self) -> "rtree.Index": + def triangles_tree(self) -> Index: """ An R-tree containing each face of the mesh. @@ -958,7 +981,7 @@ def edges_sorted(self) -> NDArray[int64]: return edges_sorted @caching.cache_decorator - def edges_sorted_tree(self) -> "scipy.spatial.cKDTree": + def edges_sorted_tree(self) -> cKDTree: """ A KDTree for mapping edges back to edge index. @@ -968,12 +991,10 @@ def edges_sorted_tree(self) -> "scipy.spatial.cKDTree": Tree when queried with edges will return their index in mesh.edges_sorted """ - from scipy.spatial import cKDTree - return cKDTree(self.edges_sorted) @caching.cache_decorator - def edges_sparse(self) -> "scipy.sparse.coo_matrix": + def edges_sparse(self) -> coo_matrix: """ Edges in sparse bool COO graph format where connected vertices are True. @@ -1090,7 +1111,7 @@ def units(self, value: str) -> None: value = str(value).lower() self.metadata["units"] = value - def convert_units(self, desired: str, guess: bool=False) -> "Trimesh": + def convert_units(self, desired: str, guess: bool = False) -> "Trimesh": """ Convert the units of the mesh into a specified unit. @@ -1405,7 +1426,7 @@ def face_adjacency_edges(self) -> NDArray[int64]: return self._cache["face_adjacency_edges"] @caching.cache_decorator - def face_adjacency_edges_tree(self) -> "scipy.spatial.cKDTree": + def face_adjacency_edges_tree(self) -> cKDTree: """ A KDTree for mapping edges back face adjacency index. 
@@ -1415,8 +1436,6 @@ def face_adjacency_edges_tree(self) -> "scipy.spatial.cKDTree": Tree when queried with SORTED edges will return their index in mesh.face_adjacency """ - from scipy.spatial import cKDTree - return cKDTree(self.face_adjacency_edges) @caching.cache_decorator @@ -1526,7 +1545,7 @@ def integral_mean_curvature(self) -> float64: return (self.face_adjacency_angles * edges_length).sum() * 0.5 @caching.cache_decorator - def vertex_adjacency_graph(self) -> "networkx.Graph": + def vertex_adjacency_graph(self) -> Graph: """ Returns a networkx graph representing the vertices and their connections in the mesh. @@ -1662,7 +1681,7 @@ def is_convex(self) -> bool: return is_convex @caching.cache_decorator - def kdtree(self) -> "scipy.spatial.cKDTree": + def kdtree(self) -> cKDTree: """ Return a scipy.spatial.cKDTree of the vertices of the mesh. Not cached as this lead to observed memory issues and segfaults. @@ -1672,11 +1691,7 @@ def kdtree(self) -> "scipy.spatial.cKDTree": tree : scipy.spatial.cKDTree Contains mesh.vertices """ - - from scipy.spatial import cKDTree - - tree = cKDTree(self.vertices.view(np.ndarray)) - return tree + return cKDTree(self.vertices.view(np.ndarray)) def remove_degenerate_faces(self, height: float = tol.merge) -> None: """ @@ -1840,7 +1855,7 @@ def facets_on_hull(self) -> NDArray[bool]: return on_hull - def fix_normals(self, multibody: Optional[bool]=None): + def fix_normals(self, multibody: Optional[bool] = None): """ Find and fix problems with self.face_normals and self.faces winding direction. @@ -2160,7 +2175,7 @@ def visual(self, value): def section( self, plane_normal: List[int], plane_origin: List[int], **kwargs - ) -> "trimesh.path.Path3D": + ) -> Path3D: """ Returns a 3D cross section of the current mesh and a plane defined by origin and normal. @@ -2582,9 +2597,7 @@ def simplify_quadric_decimation(self, face_count: int) -> "Trimesh": simple = self.as_open3d.simplify_quadric_decimation(int(face_count)) return Trimesh(vertices=simple.vertices, faces=simple.triangles) - def outline( - self, face_ids: Optional[NDArray[int64]] = None, **kwargs - ) -> "trimesh.path.Path3D": + def outline(self, face_ids: Optional[NDArray[int64]] = None, **kwargs) -> Path3D: """ Given a list of face indexes find the outline of those faces and return it as a Path3D. 
@@ -2607,12 +2620,11 @@ def outline( path : Path3D Curve in 3D of the outline """ - from .path import Path3D from .path.exchange.misc import faces_to_path return Path3D(**faces_to_path(self, face_ids, **kwargs)) - def projected(self, normal, **kwargs) -> "trimesh.path.Path2D": + def projected(self, normal, **kwargs) -> Path2D: """ Project a mesh onto a plane and then extract the polygon that outlines the mesh projection on that @@ -2891,8 +2903,7 @@ def convex_decomposition(self, maxhulls=20, **kwargs): result = decomposition.convex_decomposition(self, maxhulls=maxhulls, **kwargs) return result - def union( - self, other: "Trimesh", engine: Optional[str] = None, **kwargs + def union(self, other: "Trimesh", engine: Optional[str] = None, **kwargs ) -> "Trimesh": """ Boolean union between this mesh and n other meshes @@ -2929,7 +2940,7 @@ def difference( Difference between self and other Trimesh objects """ result = boolean.difference( - meshes=np.append(self, other), engine=engine, **kwargs + meshes=[self, other], engine=engine, **kwargs ) return result @@ -2954,7 +2965,7 @@ def intersection( ) return result - def contains(self, points: TrackedArray) -> ndarray: + def contains(self, points: TrackedArray) -> NDArray[bool]: """ Given an array of points determine whether or not they are inside the mesh. This raises an error if called on a @@ -2973,7 +2984,7 @@ def contains(self, points: TrackedArray) -> ndarray: return self.ray.contains_points(points) @caching.cache_decorator - def face_angles(self) -> ndarray: + def face_angles(self) -> NDArray[float64]: """ Returns the angle at each vertex of a face. @@ -2982,8 +2993,7 @@ def face_angles(self) -> ndarray: angles : (len(self.faces), 3) float Angle at each vertex of a face """ - angles = triangles.angles(self.triangles) - return angles + return triangles.angles(self.triangles) @caching.cache_decorator def face_angles_sparse(self) -> coo_matrix: @@ -3000,7 +3010,7 @@ def face_angles_sparse(self) -> coo_matrix: return angles @caching.cache_decorator - def vertex_defects(self) -> ndarray: + def vertex_defects(self) -> NDArray[float64]: """ Return the vertex defects, or (2*pi) minus the sum of the angles of every face that includes that vertex. @@ -3018,7 +3028,7 @@ def vertex_defects(self) -> ndarray: return defects @caching.cache_decorator - def vertex_degree(self) -> ndarray: + def vertex_degree(self) -> NDArray[int64]: """ Return the number of faces each vertex is included in. @@ -3032,7 +3042,7 @@ def vertex_degree(self) -> ndarray: return degree @caching.cache_decorator - def face_adjacency_tree(self) -> "rtree.Index": + def face_adjacency_tree(self) -> Index: """ An R-tree of face adjacencies. 
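The try/except import blocks this patch adds at the top of `base.py` lean on `ExceptionWrapper` to defer failures: rather than crashing `import trimesh` when an optional dependency like scipy or rtree is missing, the placeholder stores the original exception and re-raises it the first time the symbol is actually used. Roughly, the pattern looks like this simplified sketch (the real implementation lives in `trimesh/exceptions.py` and may differ in detail):

    class ExceptionWrapper:
        # stand-in for a name that failed to import
        def __init__(self, exception):
            self.exception = exception

        def __getattr__(self, name):
            # any attribute access re-raises the original import error
            raise self.exception

        def __call__(self, *args, **kwargs):
            # so does trying to call the placeholder directly
            raise self.exception

    try:
        from scipy.spatial import cKDTree
    except BaseException as E:
        # `mesh.kdtree` now fails lazily with the original error
        cKDTree = ExceptionWrapper(E)
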
diff --git a/trimesh/parent.py b/trimesh/parent.py index e8a70b7d3..b0b0b4c40 100644 --- a/trimesh/parent.py +++ b/trimesh/parent.py @@ -36,7 +36,7 @@ def apply_transform(self, matrix): pass @abc.abstractmethod - def is_empty(self): + def is_empty(self) -> bool: pass def __hash__(self): diff --git a/trimesh/py.typed b/trimesh/py.typed new file mode 100644 index 000000000..e69de29bb From d99d1afcba1354c3c52156d8b4a5a3e22056f5bd Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 7 Sep 2023 14:54:53 -0400 Subject: [PATCH 045/144] play with generics --- trimesh/base.py | 6 ++---- trimesh/typed.py | 14 ++++++++++++++ 2 files changed, 16 insertions(+), 4 deletions(-) create mode 100644 trimesh/typed.py diff --git a/trimesh/base.py b/trimesh/base.py index ee1080518..5c692f7ba 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -11,9 +11,6 @@ import numpy as np from numpy import float64, int64, ndarray -from numpy.typing import NDArray - -from trimesh.caching import TrackedArray from . import ( boolean, @@ -41,6 +38,7 @@ units, util, ) +from .typed import NDArray, ArrayLike from .constants import log, log_time, tol from .exceptions import ExceptionWrapper from .exchange.export import export_mesh @@ -2965,7 +2963,7 @@ def intersection( ) return result - def contains(self, points: TrackedArray) -> NDArray[bool]: + def contains(self, points: ArrayLike[float64]) -> NDArray[bool]: """ Given an array of points determine whether or not they are inside the mesh. This raises an error if called on a diff --git a/trimesh/typed.py b/trimesh/typed.py new file mode 100644 index 000000000..8ae83e7ab --- /dev/null +++ b/trimesh/typed.py @@ -0,0 +1,14 @@ +from typing import Any, Union, List, TypeAlias, Sequence +from numpy import ndarray, float64, int64 + +import numpy as np + + +#NDArray: TypeAlias = ndarray +ArrayLike: TypeAlias = Union[Sequence, ndarray] + +from numpy.typing import NDArray + +def _check(values: ArrayLike[float64]) -> NDArray[int64]: + return (np.array(values, dtype=float64) * 100).astype(int64) + From 8e7eacf92774e52191246bbf6d25cb08613f6a33 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 7 Sep 2023 15:11:55 -0400 Subject: [PATCH 046/144] add base typed --- trimesh/base.py | 31 +++++++++++++++++++++++++++---- trimesh/typed.py | 11 +++++++---- 2 files changed, 34 insertions(+), 8 deletions(-) diff --git a/trimesh/base.py b/trimesh/base.py index 5c692f7ba..f62fa583d 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -268,6 +268,30 @@ def process( self.metadata["processed"] = True return self + @property + def mutable(self) -> bool: + """ + Is the current mesh allowed to be altered in-place? + + Returns + ------------- + mutable + If data is allowed to be set for the mesh. + """ + return self._data.mutable + + @mutable.setter + def mutable(self, value: bool): + """ + Set the mutability of the current mesh. + + Parameters + ---------- + value + Change whether the current mesh is allowed to be altered in-place. 
+ """ + self._data.mutable = value + @property def faces(self) -> NDArray[int64]: """ @@ -2901,7 +2925,8 @@ def convex_decomposition(self, maxhulls=20, **kwargs): result = decomposition.convex_decomposition(self, maxhulls=maxhulls, **kwargs) return result - def union(self, other: "Trimesh", engine: Optional[str] = None, **kwargs + def union( + self, other: "Trimesh", engine: Optional[str] = None, **kwargs ) -> "Trimesh": """ Boolean union between this mesh and n other meshes @@ -2937,9 +2962,7 @@ def difference( difference : trimesh.Trimesh Difference between self and other Trimesh objects """ - result = boolean.difference( - meshes=[self, other], engine=engine, **kwargs - ) + result = boolean.difference(meshes=[self, other], engine=engine, **kwargs) return result def intersection( diff --git a/trimesh/typed.py b/trimesh/typed.py index 8ae83e7ab..89822d880 100644 --- a/trimesh/typed.py +++ b/trimesh/typed.py @@ -3,12 +3,15 @@ import numpy as np - -#NDArray: TypeAlias = ndarray -ArrayLike: TypeAlias = Union[Sequence, ndarray] +# NDArray: TypeAlias = ndarray +# ArrayLike: TypeAlias = Union[Sequence, ndarray] from numpy.typing import NDArray +# todo make this a generic List|ndarray +ArrayLike = NDArray + + +# this should pass mypy def _check(values: ArrayLike[float64]) -> NDArray[int64]: return (np.array(values, dtype=float64) * 100).astype(int64) - From 2e0c841d34ae68c6c8bff50d199793ffe4d9d679 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Fri, 8 Sep 2023 14:22:37 -0400 Subject: [PATCH 047/144] ruff --- trimesh/base.py | 2 +- trimesh/exchange/gltf.py | 3 ++- trimesh/typed.py | 14 +++++++++----- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/trimesh/base.py b/trimesh/base.py index f62fa583d..32fb01078 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -38,12 +38,12 @@ units, util, ) -from .typed import NDArray, ArrayLike from .constants import log, log_time, tol from .exceptions import ExceptionWrapper from .exchange.export import export_mesh from .parent import Geometry3D from .scene import Scene +from .typed import ArrayLike, NDArray from .visual import ColorVisuals, TextureVisuals, create_visual try: diff --git a/trimesh/exchange/gltf.py b/trimesh/exchange/gltf.py index 25748ae43..1cc737aea 100644 --- a/trimesh/exchange/gltf.py +++ b/trimesh/exchange/gltf.py @@ -1764,6 +1764,8 @@ def _append_image(img, tree, buffer_items): # for everything else just use PNG save_as = 'png' + from IPython import embed + embed() # get the image data into a bytes object with util.BytesIO() as f: img.save(f, format=save_as) @@ -1779,7 +1781,6 @@ def _append_image(img, tree, buffer_items): # index is length minus one return len(tree['images']) - 1 - def _append_material(mat, tree, buffer_items, mat_hashes): """ Add passed PBRMaterial as GLTF 2.0 specification JSON diff --git a/trimesh/typed.py b/trimesh/typed.py index 89822d880..bb573470d 100644 --- a/trimesh/typed.py +++ b/trimesh/typed.py @@ -1,17 +1,21 @@ -from typing import Any, Union, List, TypeAlias, Sequence -from numpy import ndarray, float64, int64 +from typing import Any import numpy as np +from numpy import float64, int64 # NDArray: TypeAlias = ndarray # ArrayLike: TypeAlias = Union[Sequence, ndarray] -from numpy.typing import NDArray +try: + from numpy.typing import NDArray +except BaseException: + NDArray = Any # todo make this a generic List|ndarray ArrayLike = NDArray - -# this should pass mypy +# this should pass mypy eventually def _check(values: ArrayLike[float64]) -> NDArray[int64]: return (np.array(values, 
dtype=float64) * 100).astype(int64) + +__all__ = ['NDArray', 'ArrayLike'] From 891dbcdb943b169cd6c004b2f7cb5dfde9a74b28 Mon Sep 17 00:00:00 2001 From: xiaoxiae Date: Sat, 9 Sep 2023 16:01:16 +0200 Subject: [PATCH 048/144] support for glTF WebP extension --- trimesh/exchange/gltf.py | 76 +++++++++++++++++++++++++++++++--------- 1 file changed, 59 insertions(+), 17 deletions(-) diff --git a/trimesh/exchange/gltf.py b/trimesh/exchange/gltf.py index 822989db8..cc1f7c3b1 100644 --- a/trimesh/exchange/gltf.py +++ b/trimesh/exchange/gltf.py @@ -77,7 +77,8 @@ def export_gltf(scene, merge_buffers=False, unitize_normals=False, tree_postprocessor=None, - embed_buffers=False): + embed_buffers=False, + extension_webp=False): """ Export a scene object as a GLTF directory. @@ -98,6 +99,8 @@ def export_gltf(scene, Run this on the header tree before exiting. embed_buffers : bool Embed the buffer into JSON file as a base64 string in the URI + extension_webp : bool + Export textures to WebP (using glTF's EXT_texture_webp extension). Returns ---------- @@ -113,7 +116,8 @@ def export_gltf(scene, tree, buffer_items = _create_gltf_structure( scene=scene, unitize_normals=unitize_normals, - include_normals=include_normals) + include_normals=include_normals, + extension_webp=extension_webp) # allow custom postprocessing if tree_postprocessor is not None: @@ -171,7 +175,8 @@ def export_glb( include_normals=None, unitize_normals=False, tree_postprocessor=None, - buffer_postprocessor=None): + buffer_postprocessor=None, + extension_webp=False): """ Export a scene as a binary GLTF (GLB) file. @@ -186,6 +191,8 @@ def export_glb( tree_postprocessor : func Custom function to (in-place) post-process the tree before exporting. + extension_webp : bool + Export textures to WebP (using glTF's EXT_texture_webp extension). Returns ---------- @@ -201,7 +208,9 @@ def export_glb( tree, buffer_items = _create_gltf_structure( scene=scene, unitize_normals=unitize_normals, - include_normals=include_normals, buffer_postprocessor=buffer_postprocessor) + include_normals=include_normals, + buffer_postprocessor=buffer_postprocessor, + extension_webp=extension_webp) # allow custom postprocessing if tree_postprocessor is not None: @@ -604,7 +613,8 @@ def _create_gltf_structure(scene, include_normals=None, include_metadata=True, unitize_normals=None, - buffer_postprocessor=None): + buffer_postprocessor=None, + extension_webp=False): """ Generate a GLTF header. @@ -618,6 +628,8 @@ def _create_gltf_structure(scene, Include vertex normals in output file? unitize_normals : bool Unitize all exported normals so as to pass GLTF validation + extension_webp : bool + Export textures to WebP (using glTF's EXT_texture_webp extension). Returns --------------- @@ -676,7 +688,8 @@ def _create_gltf_structure(scene, buffer_items=buffer_items, include_normals=include_normals, unitize_normals=unitize_normals, - mat_hashes=mat_hashes) + mat_hashes=mat_hashes, + extension_webp=extension_webp) elif util.is_instance_named(geometry, "Path"): # add Path2D and Path3D objects _append_path( @@ -739,7 +752,8 @@ def _append_mesh(mesh, buffer_items, include_normals, unitize_normals, - mat_hashes): + mat_hashes, + extension_webp): """ Append a mesh to the scene structure and put the data into buffer_items. @@ -762,6 +776,8 @@ def _append_mesh(mesh, mat_hashes : dict Which materials have already been added + extension_webp : bool + Export textures to WebP (using glTF's EXT_texture_webp extension). 
""" # return early from empty meshes to avoid crashing later if len(mesh.faces) == 0 or len(mesh.vertices) == 0: @@ -844,7 +860,8 @@ def _append_mesh(mesh, mat=mesh.visual.material, tree=tree, buffer_items=buffer_items, - mat_hashes=mat_hashes) + mat_hashes=mat_hashes, + extension_webp=extension_webp) # if mesh has UV coordinates defined export them has_uv = (hasattr(mesh.visual, 'uv') and @@ -1233,8 +1250,21 @@ def parse_values_and_textures(input_dict): result[k] = v elif "index" in v: # get the index of image for texture + try: - idx = header["textures"][v["index"]]["source"] + texture = header["textures"][v["index"]] + + # extensions + if "extensions" in texture: + if "EXT_texture_webp" in texture["extensions"]: + idx = texture["extensions"]["EXT_texture_webp"]["source"] + else: + raise ValueError("unsupported texture extension" + "in {texture['extensions']}!") + else: + # fallback (or primary, if extensions are not present) + idx = texture["source"] + # store the actual image as the value result[k] = images[idx] except BaseException: @@ -1743,7 +1773,7 @@ def _convert_camera(camera): return result -def _append_image(img, tree, buffer_items): +def _append_image(img, tree, buffer_items, extension_webp): """ Append a PIL image to a GLTF2.0 tree. @@ -1755,6 +1785,8 @@ def _append_image(img, tree, buffer_items): GLTF 2.0 format tree buffer_items : (n,) bytes Binary blobs containing data + extension_webp : bool + Export textures to WebP (using glTF's EXT_texture_webp extension). Returns ----------- @@ -1766,9 +1798,11 @@ def _append_image(img, tree, buffer_items): if not hasattr(img, 'format'): return None - # don't re-encode JPEGs - if img.format == 'JPEG': - # no need to mangle JPEGs + if extension_webp: + # support WebP if extension is specified + save_as = 'WEBP' + elif img.format == 'JPEG': + # don't re-encode JPEGs save_as = 'JPEG' else: # for everything else just use PNG @@ -1790,7 +1824,7 @@ def _append_image(img, tree, buffer_items): return len(tree['images']) - 1 -def _append_material(mat, tree, buffer_items, mat_hashes): +def _append_material(mat, tree, buffer_items, mat_hashes, extension_webp): """ Add passed PBRMaterial as GLTF 2.0 specification JSON serializable data: @@ -1809,6 +1843,8 @@ def _append_material(mat, tree, buffer_items, mat_hashes): mat_hashes : dict Which materials have already been added Stored as { hashed : material index } + extension_webp : bool + Export textures to WebP (using glTF's EXT_texture_webp extension). 
Returns ------------- @@ -1879,14 +1915,20 @@ def _append_material(mat, tree, buffer_items, mat_hashes): index = _append_image( img=img, tree=tree, - buffer_items=buffer_items) + buffer_items=buffer_items, + extension_webp=extension_webp) # if the image was added successfully it will return index # if it failed for any reason, it will return None if index is not None: # add a reference to the base color texture result[key] = {'index': len(tree['textures'])} - # add an object for the texture - tree['textures'].append({'source': index}) + + # add an object for the texture (possibly according to the WebP extension) + if extension_webp: + tree['textures'].append({'extensions': {'EXT_texture_webp': + {'source': index}}}) + else: + tree['textures'].append({'source': index}) # for our PBRMaterial object we flatten all keys # however GLTF would like some of them under the From 548a9e091811f20a3060c1d7ddbd175ce84696da Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Sun, 10 Sep 2023 17:39:02 -0400 Subject: [PATCH 049/144] change polygon_hash to identifier --- README.md | 6 +- trimesh/path/path.py | 2 +- trimesh/path/polygons.py | 218 ++++++++++++++++++--------------------- trimesh/typed.py | 19 +++- 4 files changed, 117 insertions(+), 128 deletions(-) diff --git a/README.md b/README.md index 032d58588..6aaa40602 100644 --- a/README.md +++ b/README.md @@ -7,9 +7,9 @@ | :warning: WARNING | |---------------------------| -| `trimesh >= 4.0.0` which is now on `main` makes the minimum Python 3.7 and is in pre-release | -| Testing the prerelease with `pip install --pre trimesh` would be much appriciated! | -| For projects that support Python < 3.7 you should update your dependency to `trimesh<4` | +| `trimesh >= 4.0.0` on `main` makes minimum Python 3.7 and is in pre-release | +| Testing with `pip install --pre trimesh` would be much appreciated! | +| Projects that support Python < 3.7 should update requirement to `trimesh<4` | Trimesh is a pure Python 3.7+ library for loading and using [triangular meshes](https://en.wikipedia.org/wiki/Triangle_mesh) with an emphasis on watertight surfaces. The goal of the library is to provide a full featured and well tested Trimesh object which allows for easy manipulation and analysis, in the style of the Polygon object in the [Shapely library](https://github.com/Toblerity/Shapely). 
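For context on the rename in the diffs below: `polygons.polygon_hash` becomes `polygons.identifier`, and the returned vector grows from six shape measures to ten by appending the polygon bounds. A hypothetical usage sketch (the unit triangle is made up purely for illustration):

    from shapely.geometry import Polygon

    from trimesh.path import polygons

    # six scalar measures plus the four bounds values
    vector = polygons.identifier(Polygon([(0, 0), (1, 0), (1, 1)]))
    assert vector.shape == (10,)
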
diff --git a/trimesh/path/path.py b/trimesh/path/path.py index 4ba418698..20c15b488 100644 --- a/trimesh/path/path.py +++ b/trimesh/path/path.py @@ -1438,7 +1438,7 @@ def identifier(self): identifier : (5,) float Unique identifier """ - hasher = polygons.polygon_hash + hasher = polygons.identifier target = self.polygons_full if len(target) == 1: return hasher(self.polygons_full[0]) diff --git a/trimesh/path/polygons.py b/trimesh/path/polygons.py index 96315502a..03174e46d 100644 --- a/trimesh/path/polygons.py +++ b/trimesh/path/polygons.py @@ -9,18 +9,22 @@ from .simplify import fit_circle_check from .traversal import resample_path +from ..typed import NDArray, float64 + try: import networkx as nx except BaseException as E: # create a dummy module which will raise the ImportError # or other exception only when someone tries to use networkx from ..exceptions import ExceptionWrapper + nx = ExceptionWrapper(E) try: from rtree import Rtree except BaseException as E: # create a dummy module which will raise the ImportError from ..exceptions import ExceptionWrapper + Rtree = ExceptionWrapper(E) @@ -65,7 +69,7 @@ def enclosure_tree(polygons): # we first query for bounding box intersections from the R-tree for j in tree.intersection(polygon.bounds): # if we are checking a polygon against itself continue - if (i == j): + if i == j: continue # do a more accurate polygon in polygon test # for the enclosure tree information @@ -93,8 +97,7 @@ def enclosure_tree(polygons): # find edges of subgraph for each root and children for root in roots: children = indexes[degrees == degree[root] + 1] - edges.extend(contains.subgraph( - np.append(children, root)).edges()) + edges.extend(contains.subgraph(np.append(children, root)).edges()) # stack edges into new directed graph contains = nx.from_edgelist(edges, nx.DiGraph()) # if roots have no children add them anyway @@ -126,12 +129,12 @@ def edges_to_polygons(edges, vertices): # create closed polygon objects polygons = [] # loop through a sequence of ordered traversals - for dfs in graph.traversals(edges, mode='dfs'): + for dfs in graph.traversals(edges, mode="dfs"): try: # try to recover polygons before they are more complicated repaired = repair_invalid(Polygon(vertices[dfs])) # if it returned a multipolygon extend into a flat list - if hasattr(repaired, 'geoms'): + if hasattr(repaired, "geoms"): polygons.extend(repaired.geoms) else: polygons.append(repaired) @@ -151,8 +154,7 @@ def edges_to_polygons(edges, vertices): interior = list(tree[root].keys()) shell = polygons[root].exterior.coords holes = [polygons[i].exterior.coords for i in interior] - complete.append(Polygon(shell=shell, - holes=holes)) + complete.append(Polygon(shell=shell, holes=holes)) return complete @@ -187,12 +189,12 @@ def polygon_obb(polygon): extents : (2,) float Extents of transformed polygon """ - if hasattr(polygon, 'exterior'): + if hasattr(polygon, "exterior"): points = np.asanyarray(polygon.exterior.coords) elif isinstance(polygon, np.ndarray): points = polygon else: - raise ValueError('polygon or points must be provided') + raise ValueError("polygon or points must be provided") transform, extents = bounds.oriented_bounds_2D(points) @@ -222,17 +224,15 @@ def transform_polygon(polygon, matrix): """ matrix = np.asanyarray(matrix, dtype=np.float64) - if hasattr(polygon, 'geoms'): - result = [transform_polygon(p, t) - for p, t in zip(polygon, matrix)] + if hasattr(polygon, "geoms"): + result = [transform_polygon(p, t) for p, t in zip(polygon, matrix)] return result # transform the outer 
shell - shell = transform_points(np.array(polygon.exterior.coords), - matrix)[:, :2] + shell = transform_points(np.array(polygon.exterior.coords), matrix)[:, :2] # transform the interiors - holes = [transform_points(np.array(i.coords), - matrix)[:, :2] - for i in polygon.interiors] + holes = [ + transform_points(np.array(i.coords), matrix)[:, :2] for i in polygon.interiors + ] # create a new polygon with the result result = Polygon(shell=shell, holes=holes) return result @@ -258,13 +258,12 @@ def polygon_bounds(polygon, matrix=None): if matrix is not None: assert matrix.shape == (3, 3) points = transform_points( - points=np.array(polygon.exterior.coords), - matrix=matrix) + points=np.array(polygon.exterior.coords), matrix=matrix + ) else: points = np.array(polygon.exterior.coords) - bounds = np.array([points.min(axis=0), - points.max(axis=0)]) + bounds = np.array([points.min(axis=0), points.max(axis=0)]) assert bounds.shape == (2, 2) return bounds @@ -288,14 +287,15 @@ def plot_single(single): axes.plot(*single.exterior.xy, **kwargs) for interior in single.interiors: axes.plot(*interior.xy, **kwargs) + # make aspect ratio non-stupid if axes is None: axes = plt.axes() - axes.set_aspect('equal', 'datalim') + axes.set_aspect("equal", "datalim") - if polygon.__class__.__name__ == 'MultiPolygon': + if polygon.__class__.__name__ == "MultiPolygon": [plot_single(i) for i in polygon.geoms] - elif hasattr(polygon, '__iter__'): + elif hasattr(polygon, "__iter__"): [plot_single(i) for i in polygon] elif polygon is not None: plot_single(polygon) @@ -308,7 +308,7 @@ def plot_single(single): def resample_boundaries(polygon, resolution, clip=None): """ - Return a version of a polygon with boundaries resampled + Return a version of a polygon with boundaries re-sampled to a specified resolution. 
Parameters @@ -326,19 +326,20 @@ def resample_boundaries(polygon, resolution, clip=None): kwargs : dict Keyword args for a Polygon constructor `Polygon(**kwargs)` """ + def resample_boundary(boundary): # add a polygon.exterior or polygon.interior to # the deque after resampling based on our resolution count = boundary.length / resolution count = int(np.clip(count, *clip)) return resample_path(boundary.coords, count=count) + if clip is None: clip = [8, 200] # create a sequence of [(n,2)] points - kwargs = {'shell': resample_boundary(polygon.exterior), - 'holes': []} + kwargs = {"shell": resample_boundary(polygon.exterior), "holes": []} for interior in polygon.interiors: - kwargs['holes'].append(resample_boundary(interior)) + kwargs["holes"].append(resample_boundary(interior)) return kwargs @@ -358,16 +359,13 @@ def stack_boundaries(boundaries): stacked : (n, 2) float Stacked vertices """ - if len(boundaries['holes']) == 0: - return boundaries['shell'] - result = np.vstack((boundaries['shell'], - np.vstack(boundaries['holes']))) + if len(boundaries["holes"]) == 0: + return boundaries["shell"] + result = np.vstack((boundaries["shell"], np.vstack(boundaries["holes"]))) return result -def medial_axis(polygon, - resolution=None, - clip=None): +def medial_axis(polygon, resolution=None, clip=None): """ Given a shapely polygon, find the approximate medial axis using a voronoi diagram of evenly spaced points on the @@ -395,17 +393,15 @@ def medial_axis(polygon, # what is the approximate scale of the polygon scale = np.reshape(polygon.bounds, (2, 2)).ptp(axis=0).max() # a (center, radius, error) tuple - fit = fit_circle_check( - polygon.exterior.coords, scale=scale) + fit = fit_circle_check(polygon.exterior.coords, scale=scale) # is this polygon in fact a circle if fit is not None: # return an edge that has the center as the midpoint - epsilon = np.clip( - fit['radius'] / 500, 1e-5, np.inf) + epsilon = np.clip(fit["radius"] / 500, 1e-5, np.inf) vertices = np.array( - [fit['center'] + [0, epsilon], - fit['center'] - [0, epsilon]], - dtype=np.float64) + [fit["center"] + [0, epsilon], fit["center"] - [0, epsilon]], + dtype=np.float64, + ) # return a single edge to avoid consumers needing to special case edges = np.array([[0, 1]], dtype=np.int64) return edges, vertices @@ -414,13 +410,10 @@ def medial_axis(polygon, from shapely import vectorized if resolution is None: - resolution = np.reshape( - polygon.bounds, (2, 2)).ptp(axis=0).max() / 100 + resolution = np.reshape(polygon.bounds, (2, 2)).ptp(axis=0).max() / 100 # get evenly spaced points on the polygons boundaries - samples = resample_boundaries(polygon=polygon, - resolution=resolution, - clip=clip) + samples = resample_boundaries(polygon=polygon, resolution=resolution, clip=clip) # stack the boundary into a (m,2) float array samples = stack_boundaries(samples) # create the voronoi diagram on 2D points @@ -446,15 +439,14 @@ def medial_axis(polygon, if tol.strict: # make sure we didn't screw up indexes - assert (vertices[edges_final] - - voronoi.vertices[edges]).ptp() < 1e-5 + assert (vertices[edges_final] - voronoi.vertices[edges]).ptp() < 1e-5 return edges_final, vertices -def polygon_hash(polygon): +def identifier(polygon: Polygon) -> NDArray[float64]: """ - Return a vector containing values representitive of + Return a vector containing values representative of a particular polygon. 
Parameters @@ -464,18 +456,19 @@ def polygon_hash(polygon): Returns --------- - hashed: (6), float - Representitive values representing input polygon - """ - result = np.array( - [len(polygon.interiors), - polygon.convex_hull.area, - polygon.convex_hull.length, - polygon.area, - polygon.length, - polygon.exterior.length], - dtype=np.float64) - return result + hashed : (10), + Some values that should be unique for this polygon. + """ + result = [ + len(polygon.interiors), + polygon.convex_hull.area, + polygon.convex_hull.length, + polygon.area, + polygon.length, + polygon.exterior.length, + ] + result.extend(polygon.bounds) + return np.array(result, dtype=np.float64) def random_polygon(segments=8, radius=1.0): @@ -494,14 +487,12 @@ def random_polygon(segments=8, radius=1.0): polygon : shapely.geometry.Polygon Geometry object with random exterior and no interiors. """ - angles = np.sort(np.cumsum(np.random.random( - segments) * np.pi * 2) % (np.pi * 2)) + angles = np.sort(np.cumsum(np.random.random(segments) * np.pi * 2) % (np.pi * 2)) radii = np.random.random(segments) * radius - points = np.column_stack( - (np.cos(angles), np.sin(angles))) * radii.reshape((-1, 1)) + points = np.column_stack((np.cos(angles), np.sin(angles))) * radii.reshape((-1, 1)) points = np.vstack((points, points[0])) polygon = Polygon(points).buffer(0.0) - if hasattr(polygon, 'geoms'): + if hasattr(polygon, "geoms"): return polygon.geoms[0] return polygon @@ -521,7 +512,7 @@ def polygon_scale(polygon): Length of AABB diagonal """ extents = np.reshape(polygon.bounds, (2, 2)).ptp(axis=0) - scale = (extents ** 2).sum() ** .5 + scale = (extents**2).sum() ** 0.5 return scale @@ -535,7 +526,7 @@ def paths_to_polygons(paths, scale=None): ----------- paths : (n,) sequence Of (m, 2) float closed paths - scale: float + scale : float Approximate scale of drawing for precision Returns @@ -557,7 +548,7 @@ def paths_to_polygons(paths, scale=None): # raised if a polygon is unrecoverable continue except BaseException: - log.error('unrecoverable polygon', exc_info=True) + log.error("unrecoverable polygon", exc_info=True) polygons = np.array(polygons) return polygons @@ -625,7 +616,7 @@ def sample(polygon, count, factor=1.5, max_iter=10): return hit -def repair_invalid(polygon, scale=None, rtol=.5): +def repair_invalid(polygon, scale=None, rtol=0.5): """ Given a shapely.geometry.Polygon, attempt to return a valid version of the polygon through buffering tricks. @@ -649,25 +640,22 @@ def repair_invalid(polygon, scale=None, rtol=.5): ValueError If polygon can't be repaired """ - if hasattr(polygon, 'is_valid') and polygon.is_valid: + if hasattr(polygon, "is_valid") and polygon.is_valid: return polygon # basic repair involves buffering the polygon outwards # this will fix a subset of problems. 
basic = polygon.buffer(tol.zero) # if it returned multiple polygons check the largest - if hasattr(basic, 'geoms'): + if hasattr(basic, "geoms"): basic = basic.geoms[np.argmax([i.area for i in basic.geoms])] # check perimeter of result against original perimeter - if basic.is_valid and np.isclose(basic.length, - polygon.length, - rtol=rtol): + if basic.is_valid and np.isclose(basic.length, polygon.length, rtol=rtol): return basic if scale is None: - distance = 0.002 * np.reshape( - polygon.bounds, (2, 2)).ptp(axis=0).mean() + distance = 0.002 * np.reshape(polygon.bounds, (2, 2)).ptp(axis=0).mean() else: distance = 0.002 * scale @@ -681,9 +669,7 @@ def repair_invalid(polygon, scale=None, rtol=.5): # reconstruct a single polygon from the interior ring recon = Polygon(shell=rings[0]).buffer(distance) # check perimeter of result against original perimeter - if recon.is_valid and np.isclose(recon.length, - polygon.length, - rtol=rtol): + if recon.is_valid and np.isclose(recon.length, polygon.length, rtol=rtol): return recon # try de-deuplicating the outside ring @@ -691,41 +677,40 @@ def repair_invalid(polygon, scale=None, rtol=.5): # remove any segments shorter than tol.merge # this is a little risky as if it was discretized more # finely than 1-e8 it may remove detail - unique = np.append(True, (np.diff(points, axis=0)**2).sum( - axis=1)**.5 > 1e-8) + unique = np.append( + True, (np.diff(points, axis=0) ** 2).sum(axis=1) ** 0.5 > 1e-8 + ) # make a new polygon with result dedupe = Polygon(shell=points[unique]) # check result - if dedupe.is_valid and np.isclose(dedupe.length, - polygon.length, - rtol=rtol): + if dedupe.is_valid and np.isclose(dedupe.length, polygon.length, rtol=rtol): return dedupe # buffer and unbuffer the whole polygon buffered = polygon.buffer(distance).buffer(-distance) # if it returned multiple polygons check the largest - if hasattr(buffered, 'geoms'): + if hasattr(buffered, "geoms"): areas = np.array([b.area for b in buffered.geoms]) return buffered.geoms[areas.argmax()] # check perimeter of result against original perimeter - if buffered.is_valid and np.isclose(buffered.length, - polygon.length, - rtol=rtol): - log.debug('Recovered invalid polygon through double buffering') + if buffered.is_valid and np.isclose(buffered.length, polygon.length, rtol=rtol): + log.debug("Recovered invalid polygon through double buffering") return buffered - raise ValueError('unable to recover polygon!') + raise ValueError("unable to recover polygon!") -def projected(mesh, - normal, - origin=None, - ignore_sign=True, - rpad=1e-5, - apad=None, - tol_dot=0.01, - max_regions=200): +def projected( + mesh, + normal, + origin=None, + ignore_sign=True, + rpad=1e-5, + apad=None, + tol_dot=0.01, + max_regions=200, +): """ Project a mesh onto a plane and then extract the polygon that outlines the mesh projection on that plane. 
@@ -814,22 +799,19 @@ def projected(mesh, adjacency = mesh.face_adjacency[adjacency_check] # a sequence of face indexes that are connected - face_groups = graph.connected_components( - adjacency, nodes=np.nonzero(side)[0]) + face_groups = graph.connected_components(adjacency, nodes=np.nonzero(side)[0]) # if something is goofy we may end up with thousands of # regions that do nothing except hang for an hour then segfault if len(face_groups) > max_regions: - raise ValueError('too many disconnected groups!') + raise ValueError("too many disconnected groups!") # reshape edges into shape length of faces for indexing edges = mesh.edges_sorted.reshape((-1, 6)) # transform from the mesh frame in 3D to the XY plane - to_2D = geometry.plane_transform( - origin=origin, normal=normal) + to_2D = geometry.plane_transform(origin=origin, normal=normal) # transform mesh vertices to 2D and clip the zero Z - vertices_2D = transform_points( - mesh.vertices, to_2D)[:, :2] + vertices_2D = transform_points(mesh.vertices, to_2D)[:, :2] polygons = [] for faces in face_groups: @@ -838,8 +820,7 @@ def projected(mesh, # edges that occur only once are on the boundary group = grouping.group_rows(edge, require_count=1) # turn each region into polygons - polygons.extend(edges_to_polygons( - edges=edge[group], vertices=vertices_2D)) + polygons.extend(edges_to_polygons(edges=edge[group], vertices=vertices_2D)) padding = 0.0 if apad is not None: @@ -855,7 +836,7 @@ def projected(mesh, # regions and the union will take forever to fail # so exit here early if len(polygons) > max_regions: - raise ValueError('too many disconnected groups!') + raise ValueError("too many disconnected groups!") # if there is only one region we don't need to run a union elif len(polygons) == 1: @@ -873,9 +854,9 @@ def projected(mesh, # join_style=2, # mitre_limit=1.5) # for p in polygons]).buffer(-padding) - polygon = ops.unary_union( - [p.buffer(padding) - for p in polygons]).buffer(-padding) + polygon = ops.unary_union([p.buffer(padding) for p in polygons]).buffer( + -padding + ) return polygon @@ -911,7 +892,7 @@ def second_moments(polygon, return_centered=False): transform = np.eye(3) if return_centered: # calculate centroid and move polygon - transform[:2, 2] = - np.array(polygon.centroid.coords) + transform[:2, 2] = -np.array(polygon.centroid.coords) polygon = transform_polygon(polygon, transform) # start with the exterior @@ -934,8 +915,7 @@ def second_moments(polygon, return_centered=False): v = x1 * y2 - x2 * y1 Ixx -= np.sum(v * (y1 * y1 + y1 * y2 + y2 * y2)) / 12.0 Iyy -= np.sum(v * (x1 * x1 + x1 * x2 + x2 * x2)) / 12.0 - Ixy -= np.sum(v * (x1 * y2 + 2 * x1 * y1 + - 2 * x2 * y2 + x2 * y1)) / 24.0 + Ixy -= np.sum(v * (x1 * y2 + 2 * x1 * y1 + 2 * x2 * y2 + x2 * y1)) / 24.0 moments = [Ixx, Iyy, Ixy] @@ -943,7 +923,7 @@ def second_moments(polygon, return_centered=False): return moments # get the principal moments - root = np.sqrt(((Iyy - Ixx) / 2.0)**2 + Ixy**2) + root = np.sqrt(((Iyy - Ixx) / 2.0) ** 2 + Ixy**2) Imax = (Ixx + Iyy) / 2.0 + root Imin = (Ixx + Iyy) / 2.0 - root principal_moments = [Imax, Imin] @@ -963,7 +943,7 @@ def second_moments(polygon, return_centered=False): transform[0, 0] = cos_alpha transform[1, 1] = cos_alpha - transform[0, 1] = - sin_alpha + transform[0, 1] = -sin_alpha transform[1, 0] = sin_alpha return moments, principal_moments, alpha, transform diff --git a/trimesh/typed.py b/trimesh/typed.py index bb573470d..7c82181da 100644 --- a/trimesh/typed.py +++ b/trimesh/typed.py @@ -1,4 +1,4 @@ -from typing import Any 
+from typing import Any, Sequence, Union import numpy as np from numpy import float64, int64 @@ -9,13 +9,22 @@ try: from numpy.typing import NDArray except BaseException: - NDArray = Any + # NDArray = ndarray + pass -# todo make this a generic List|ndarray -ArrayLike = NDArray +# for input arrays we want to say "list[int], ndarray[int64], etc" +IntLike = Union[int, np.int64] +FloatLike = Union[float, np.float64] +BoolLike = Union[bool, np.bool_] +ArrayLike = Sequence # this should pass mypy eventually -def _check(values: ArrayLike[float64]) -> NDArray[int64]: + +def _check(values: ArrayLike[FloatLike]) -> NDArray[int64]: return (np.array(values, dtype=float64) * 100).astype(int64) +def _run() -> NDArray[int64]: + return _check(values=[1, 2]) + + __all__ = ['NDArray', 'ArrayLike'] From 3fe80c5fd8d8b604177cbeebb6efb38e30f4f47f Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 11 Sep 2023 14:37:50 -0400 Subject: [PATCH 050/144] apply deprecation --- trimesh/caching.py | 2 +- trimesh/creation.py | 15 ++++++++++++--- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/trimesh/caching.py b/trimesh/caching.py index 9e71eebeb..a41396a9c 100644 --- a/trimesh/caching.py +++ b/trimesh/caching.py @@ -369,7 +369,7 @@ def __init__(self, id_function, force_immutable=False): # for stored numpy arrays set `flags.writable = False` self.force_immutable = bool(force_immutable) # call the id function for initial value - self.id_current = self._id_function() + self.id_current = None # a counter for locks self._lock = 0 # actual store for data diff --git a/trimesh/creation.py b/trimesh/creation.py index ba49f378d..73d53c9a1 100644 --- a/trimesh/creation.py +++ b/trimesh/creation.py @@ -85,6 +85,7 @@ def revolve(linestring, if sections is None: # default to 32 sections for a full revolution sections = int(angle / (np.pi * 2) * 32) + # change to face count sections += 1 # create equally spaced angles @@ -107,8 +108,11 @@ def revolve(linestring, if closed: # should be a duplicate set of vertices - assert np.allclose(vertices[:per], - vertices[-per:]) + if tol.strict: + assert util.allclose(vertices[:per], + vertices[-per:], + atol=1e-8) + # chop off duplicate vertices vertices = vertices[:-per] @@ -130,7 +134,7 @@ def revolve(linestring, # remove any zero-area triangle # this covers many cases without having to think too much single = single[triangles.area(vertices[single]) > tol.merge] - + # how much to offset each slice # note arange multiplied by vertex stride # but tiled by the number of faces we actually have @@ -148,6 +152,11 @@ def revolve(linestring, # offset stacked and wrap vertices faces = (stacked + offset) % len(vertices) + + + #if 'process' not in kwargs: + # kwargs['process'] = False + # create the mesh from our vertices and faces mesh = Trimesh(vertices=vertices, faces=faces, From 3dd27b09e11d34af2b8ea628af7c6ea9a61319a5 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 11 Sep 2023 16:12:55 -0400 Subject: [PATCH 051/144] ruff --- pyproject.toml | 5 +++-- tests/test_typed.py | 11 +++++++++++ trimesh/creation.py | 8 ++++---- trimesh/path/polygons.py | 3 +-- trimesh/typed.py | 30 ++++++++++++++++++------------ 5 files changed, 37 insertions(+), 20 deletions(-) create mode 100644 tests/test_typed.py diff --git a/pyproject.toml b/pyproject.toml index ad709dab6..1b7d87e6e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -61,12 +61,13 @@ trimesh = [ test = [ "pytest-cov", "coveralls", + "mypy", + "ezdxf", "pytest", "pymeshlab", "pyinstrument", "ruff", - 
"ezdxf", - "autopep8<2", + "black", ] easy = [ "colorlog", diff --git a/tests/test_typed.py b/tests/test_typed.py new file mode 100644 index 000000000..677bb7869 --- /dev/null +++ b/tests/test_typed.py @@ -0,0 +1,11 @@ +import numpy as np + +from trimesh.typed import ArrayLike, FloatLike, NDArray, float64, int64 + + +# see if we pass mypy +def _check(values: ArrayLike[FloatLike]) -> NDArray[int64]: + return (np.array(values, dtype=float64) * 100).astype(int64) + +def _run() -> NDArray[int64]: + return _check(values=[1, 2]) diff --git a/trimesh/creation.py b/trimesh/creation.py index 73d53c9a1..26f059aaf 100644 --- a/trimesh/creation.py +++ b/trimesh/creation.py @@ -112,7 +112,7 @@ def revolve(linestring, assert util.allclose(vertices[:per], vertices[-per:], atol=1e-8) - + # chop off duplicate vertices vertices = vertices[:-per] @@ -134,7 +134,7 @@ def revolve(linestring, # remove any zero-area triangle # this covers many cases without having to think too much single = single[triangles.area(vertices[single]) > tol.merge] - + # how much to offset each slice # note arange multiplied by vertex stride # but tiled by the number of faces we actually have @@ -152,8 +152,8 @@ def revolve(linestring, # offset stacked and wrap vertices faces = (stacked + offset) % len(vertices) - - + + #if 'process' not in kwargs: # kwargs['process'] = False diff --git a/trimesh/path/polygons.py b/trimesh/path/polygons.py index 03174e46d..bf54c339b 100644 --- a/trimesh/path/polygons.py +++ b/trimesh/path/polygons.py @@ -6,11 +6,10 @@ from ..constants import log from ..constants import tol_path as tol from ..transformations import transform_points +from ..typed import NDArray, float64 from .simplify import fit_circle_check from .traversal import resample_path -from ..typed import NDArray, float64 - try: import networkx as nx except BaseException as E: diff --git a/trimesh/typed.py b/trimesh/typed.py index 7c82181da..de46c34fb 100644 --- a/trimesh/typed.py +++ b/trimesh/typed.py @@ -1,7 +1,6 @@ -from typing import Any, Sequence, Union +from typing import Sequence, Union import numpy as np -from numpy import float64, int64 # NDArray: TypeAlias = ndarray # ArrayLike: TypeAlias = Union[Sequence, ndarray] @@ -13,18 +12,25 @@ pass # for input arrays we want to say "list[int], ndarray[int64], etc" -IntLike = Union[int, np.int64] -FloatLike = Union[float, np.float64] +# all the integer types +IntLike = Union[ + int, + np.int8, + np.int16, + np.int32, + np.int64, + np.intc, + np.intp, + np.uint8, + np.uint16, + np.uint32, + np.uint64, +] + +FloatLike = Union[float, np.float16, np.float32, np.float64, np.float128, np.float_] BoolLike = Union[bool, np.bool_] ArrayLike = Sequence -# this should pass mypy eventually -def _check(values: ArrayLike[FloatLike]) -> NDArray[int64]: - return (np.array(values, dtype=float64) * 100).astype(int64) -def _run() -> NDArray[int64]: - return _check(values=[1, 2]) - - -__all__ = ['NDArray', 'ArrayLike'] +__all__ = ["NDArray", "ArrayLike"] From fb1aa096e9ef731bb92059bfc9fe2b3ddb8391f4 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 11 Sep 2023 16:17:17 -0400 Subject: [PATCH 052/144] add back int64 --- trimesh/typed.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/trimesh/typed.py b/trimesh/typed.py index de46c34fb..2da19c524 100644 --- a/trimesh/typed.py +++ b/trimesh/typed.py @@ -2,14 +2,13 @@ import numpy as np -# NDArray: TypeAlias = ndarray -# ArrayLike: TypeAlias = Union[Sequence, ndarray] +# our default integer and floating point types +from 
numpy import float64, int64 try: from numpy.typing import NDArray except BaseException: - # NDArray = ndarray - pass + NDArray = Sequence # for input arrays we want to say "list[int], ndarray[int64], etc" # all the integer types @@ -18,7 +17,7 @@ np.int8, np.int16, np.int32, - np.int64, + int64, np.intc, np.intp, np.uint8, @@ -27,7 +26,7 @@ np.uint64, ] -FloatLike = Union[float, np.float16, np.float32, np.float64, np.float128, np.float_] +FloatLike = Union[float, np.float16, np.float32, float64, np.float128, np.float_] BoolLike = Union[bool, np.bool_] ArrayLike = Sequence From 3f93c0916944bff18fa0d5b7f763c088469302bf Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 11 Sep 2023 16:19:36 -0400 Subject: [PATCH 053/144] not everyone has float128 --- trimesh/typed.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trimesh/typed.py b/trimesh/typed.py index 2da19c524..2f2aa0e97 100644 --- a/trimesh/typed.py +++ b/trimesh/typed.py @@ -26,7 +26,7 @@ np.uint64, ] -FloatLike = Union[float, np.float16, np.float32, float64, np.float128, np.float_] +FloatLike = Union[float, np.float16, np.float32, float64, np.float_] BoolLike = Union[bool, np.bool_] ArrayLike = Sequence From 628741864c08d5dd83405d8e7077c632acf10937 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 11 Sep 2023 16:22:43 -0400 Subject: [PATCH 054/144] remove missed embed --- trimesh/exchange/gltf.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/trimesh/exchange/gltf.py b/trimesh/exchange/gltf.py index 1cc737aea..4feca33e4 100644 --- a/trimesh/exchange/gltf.py +++ b/trimesh/exchange/gltf.py @@ -1764,8 +1764,6 @@ def _append_image(img, tree, buffer_items): # for everything else just use PNG save_as = 'png' - from IPython import embed - embed() # get the image data into a bytes object with util.BytesIO() as f: img.save(f, format=save_as) From f26043916bf5dd9c8e4ad9838de5d9eea2ba226b Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 12 Sep 2023 13:14:07 -0400 Subject: [PATCH 055/144] update renamed field --- tests/test_primitives.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_primitives.py b/tests/test_primitives.py index 9ed1001b5..c6b4cc060 100644 --- a/tests/test_primitives.py +++ b/tests/test_primitives.py @@ -137,7 +137,7 @@ def test_scaling(self): m = p.to_mesh() # make sure we have the types we think we do - assert isinstance(p, g.trimesh.primitives._Primitive) + assert isinstance(p, g.trimesh.primitives.Primitive) assert isinstance(m, g.trimesh.Trimesh) assert g.np.allclose(p.extents, m.extents) From c36c586433ae4e276d92004f1ca56e571a420ddd Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 12 Sep 2023 13:17:16 -0400 Subject: [PATCH 056/144] release candidates --- .github/workflows/release.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 76251f2f8..46ab54f14 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -4,6 +4,7 @@ on: push: branches: - main + - release-candidate jobs: formatting: From 8846c650e529b975aaacbe31adf8aa4f28bd838c Mon Sep 17 00:00:00 2001 From: xiaoxiae Date: Wed, 13 Sep 2023 17:58:13 +0200 Subject: [PATCH 057/144] glTF WebP test --- tests/test_gltf.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tests/test_gltf.py b/tests/test_gltf.py index 767fefe53..2f3ef2668 100644 --- a/tests/test_gltf.py +++ b/tests/test_gltf.py @@ -1003,6 +1003,22 @@ def test_embed_buffer(self): 
         reloaded = g.trimesh.load(path)
         assert set(reloaded.geometry.keys()) == set(scene.geometry.keys())
 
+    def test_webp(self):
+        # load textured file
+        mesh = g.get_mesh('fuze.ply')
+        assert hasattr(mesh.visual, 'uv')
+
+        for extension in ["glb"]:
+            export = mesh.export(file_type=extension, extension_webp=True)
+            validate_glb(export)
+
+            # roundtrip
+            reloaded = g.trimesh.load(
+                g.trimesh.util.wrap_as_stream(export),
+                file_type=extension)
+
+            g.scene_equal(g.trimesh.Scene(mesh), reloaded)
+
 
 if __name__ == '__main__':
     g.trimesh.util.attach_to_log()

From 0b9b77eb235e43f6cd3463472d7a5aed7f70f3b9 Mon Sep 17 00:00:00 2001
From: xiaoxiae
Date: Wed, 13 Sep 2023 18:16:41 +0200
Subject: [PATCH 058/144] add WebP to used/required glTF extensions

---
 trimesh/exchange/gltf.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/trimesh/exchange/gltf.py b/trimesh/exchange/gltf.py
index cc1f7c3b1..6003338f7 100644
--- a/trimesh/exchange/gltf.py
+++ b/trimesh/exchange/gltf.py
@@ -728,9 +728,17 @@ def _create_gltf_structure(scene,
     # Add any extensions already in the tree (e.g. node extensions)
     if 'extensionsUsed' in tree:
         extensions_used = extensions_used.union(set(tree['extensionsUsed']))
+    # Add WebP if used
+    if extension_webp:
+        extensions_used.add("EXT_texture_webp")
     if len(extensions_used) > 0:
         tree['extensionsUsed'] = list(extensions_used)
 
+    # Also add WebP to required (no fallback currently implemented)
+    # 'extensionsRequired' aren't currently used so this doesn't overwrite
+    if extension_webp:
+        tree['extensionsRequired'] = ["EXT_texture_webp"]
+
     if buffer_postprocessor is not None:
         buffer_postprocessor(buffer_items, tree)

From 597928796d209a5b3eb26fc1c6bd48400f77f93a Mon Sep 17 00:00:00 2001
From: Mathias Parger
Date: Thu, 14 Sep 2023 15:23:05 +0200
Subject: [PATCH 059/144] many bug fixes: added occlusion and normal textures,
 downscaling of huge trimsheets, better documentation

---
 trimesh/visual/material.py | 240 +++++++++++++++++++++++++++----------
 1 file changed, 180 insertions(+), 60 deletions(-)

diff --git a/trimesh/visual/material.py b/trimesh/visual/material.py
index 72f42343a..8e0dba1d3 100644
--- a/trimesh/visual/material.py
+++ b/trimesh/visual/material.py
@@ -725,19 +725,30 @@ def empty_material(color=None):
     return SimpleMaterial(image=image)
 
 
-def pack(materials, uvs, deduplicate=True):
+def pack(materials, uvs, deduplicate=True, padding=1, max_tex_size_individual=8192, max_tex_size_fused=8192):
     """
     Pack multiple materials with texture into a single material.
 
     UV coordinates outside of the 0.0-1.0 range will be coerced
     into this range using a "wrap" behavior (i.e. modulus).
 
+    Alpha blending and backface culling settings are not supported!
+    Returns a material with alpha values set, but alpha blending disabled.
+
     Parameters
     -----------
     materials : (n,) Material
       List of multiple materials
     uvs : (n, m, 2) float
       Original UV coordinates
+    padding : int
+      Number of pixels to pad each image with.
+    max_tex_size_individual : int
+      Maximum size of each individual texture.
+    max_tex_size_fused : int | None
+      Maximum size of the combined texture.
+      Individual texture size will be reduced to fit.
+      Set to None to allow infinite size.
 
     Returns
     ------------
@@ -751,6 +762,16 @@ def pack(materials, uvs, deduplicate=True):
     from ..path import packing
     import collections
 
+    def multiply_factor(img, factor, mode):
+        """
+        Multiply an image by a factor.
+ """ + if factor is None: + return img.convert(mode) + img = np.array(img.convert(mode)) + img = np.round(img.astype(np.float64) * factor).astype(np.uint8) + return Image.fromarray(img, mode=mode) + def get_base_color_texture(mat): """ Logic for extracting a simple image from each material. @@ -759,7 +780,7 @@ def get_base_color_texture(mat): img = None if isinstance(mat, PBRMaterial): if mat.baseColorTexture is not None: - img = mat.baseColorTexture + img = multiply_factor(mat.baseColorTexture, mat.baseColorFactor, "RGBA") elif mat.baseColorFactor is not None: c = color.to_rgba(mat.baseColorFactor) assert c.shape == (4,) @@ -772,6 +793,17 @@ def get_base_color_texture(mat): img = Image.fromarray(np.reshape( color.to_rgba(mat.diffuse), (1, 1, 4)).astype(np.uint8)) + if mat.alphaMode != "BLEND": + # we can't handle alpha blending well, but we can bake alpha cutoff + mode = img.mode + img = np.array(img) + if mat.alphaMode == "MASK": + img[...,3] = np.where(img[...,3] > mat.alphaCutoff*255, 255, 0) + elif mat.alphaMode == "OPAQUE" or mat.alphaMode is None: + if "A" in mode: + img[...,3] = 255 + img = Image.fromarray(img, mode) + if img is None: # return a one pixel image img = Image.fromarray(np.reshape( @@ -787,19 +819,27 @@ def get_metallic_roughness_texture(mat): img = None if isinstance(mat, PBRMaterial): if mat.metallicRoughnessTexture is not None: - img = mat.metallicRoughnessTexture.convert('RGB') - else: - if mat.metallicFactor is not None: - metallic = mat.metallicFactor - else: - metallic = 0.0 - if mat.roughnessFactor is not None: - roughness = mat.roughnessFactor + if mat.metallicRoughnessTexture.format == "BGR": + img = np.array(mat.metallicRoughnessTexture.convert("RGB")) else: - roughness = 1.0 + img = np.array(mat.metallicRoughnessTexture) + + if len(img.shape) == 2 or img.shape[-1] == 1: + img = img.reshape(*img.shape[:2], 1) + img = np.concatenate([img, np.ones_like(img[..., :1])*255, np.zeros_like(img[..., :1])], axis=-1) + elif img.shape[-1] == 2: + img = np.concatenate([img, np.zeros_like(img[..., :1])], axis=-1) + if mat.metallicFactor is not None: + img[..., 0] = np.round(img[..., 0].astype(np.float64) * mat.metallicFactor).astype(np.uint8) + if mat.roughnessFactor is not None: + img[..., 1] = np.round(img[..., 1].astype(np.float64) * mat.roughnessFactor).astype(np.uint8) + img = Image.fromarray(img, mode='RGB') + else: + metallic = mat.metallicFactor if mat.metallicFactor is not None else 0.0 + roughness = mat.roughnessFactor if mat.roughnessFactor is not None else 1.0 metallic_roughnesss = np.round( - np.array([metallic, roughness, 1.0], dtype=np.float64) * 255) + np.array([metallic, roughness, 0.0], dtype=np.float64) * 255) img = Image.fromarray( metallic_roughnesss[None, None].astype(np.uint8), mode='RGB') return img @@ -812,22 +852,33 @@ def get_emissive_texture(mat): img = None if isinstance(mat, PBRMaterial): if mat.emissiveTexture is not None: - img = mat.emissiveTexture + img = multiply_factor(mat.emissiveTexture, mat.emissiveFactor, "RGB") elif mat.emissiveFactor is not None: c = color.to_rgba(mat.emissiveFactor) - assert c.shape == (3,) - assert c.dtype == np.uint8 img = Image.fromarray(c.reshape((1, 1, -1))) else: img = Image.fromarray(np.reshape( [0, 0, 0], (1, 1, 3)).astype(np.uint8)) # make sure we're always returning in RGBA mode return img.convert('RGB') + + def get_normal_texture(mat): + # there is no default normal texture + return getattr(mat, 'normalTexture', None) + + def get_occlusion_texture(mat): + occlusion_texture = getattr(mat, 
'occlusionTexture', None) + if occlusion_texture is None: + occlusion_texture = Image.fromarray(np.array([[255]], dtype=np.uint8)) + else: + occlusion_texture = occlusion_texture.convert('L') + return occlusion_texture def pad_image(src, padding=1): + # uses replication padding on all 4 sides + if isinstance(padding, int): padding = (padding, padding) - # uses replication padding x, y = np.meshgrid(np.arange( src.shape[1] + 2 * padding[0]), np.arange(src.shape[0] + 2 * padding[1])) x -= padding[0] @@ -837,6 +888,22 @@ def pad_image(src, padding=1): result = src[y, x] return result + + def resize_images(images, sizes): + resized = [] + for img, size in zip(images, sizes): + if img is None: + resized.append(None) + else: + img = img.resize(size) + resized.append(img) + return resized + + def pack_images(images, power_resize=True, random_seed=42): + # random seed needs to be identical to achieve same results + # TODO: we could alternatively reuse the offsets from the first packing call + np.random.seed(random_seed) + return packing.images(images, power_resize=power_resize) if deduplicate: # start by collecting a list of indexes for each material hash @@ -849,68 +916,116 @@ def pad_image(src, padding=1): # otherwise just use all the indexes mat_idx = np.arange(len(materials)).reshape((-1, 1)) + if len(mat_idx) == 1: + # if there is only one material we can just return it + return materials[0], np.vstack(uvs) + assert set(np.concatenate(mat_idx).ravel()) == set(range(len(uvs))) assert len(uvs) == len(materials) use_pbr = any(isinstance(m, PBRMaterial) for m in materials) - # collect the images from the materials - images = [get_base_color_texture(materials[g[0]]) for g in mat_idx] - unpadded_sizes = [np.array(img.size) for img in images] - - if len(images) <= 1: - # padding has the downside that if often result in greatly larger images, - # because of the jump to the next power of two - padding = 0 - else: - # without padding, we might interpolate between trimsheet islands - padding = 1 - - images = [Image.fromarray(pad_image(np.array(img), padding)) for img in images] - - # pack the multiple images into a single large image - final, offsets = packing.images(images, power_resize=True) + # in some cases, the fused scene results in huge trimsheets + # we can try to prevent this by downscaling the textures iteratively + down_scale_iterations = 6 + while down_scale_iterations > 0: + # collect the images from the materials + images = [get_base_color_texture(materials[g[0]]) for g in mat_idx] + + if use_pbr: + # if we have PBR materials, collect all possible textures and determine the largest size per material + metallic_roughness = [get_metallic_roughness_texture( + materials[g[0]]) for g in mat_idx] + emissive = [get_emissive_texture(materials[g[0]]) for g in mat_idx] + normals = [get_normal_texture(materials[g[0]]) for g in mat_idx] + occlusion = [get_occlusion_texture(materials[g[0]]) for g in mat_idx] + + unpadded_sizes = [] + for textures in zip(images, metallic_roughness, emissive, normals, occlusion): + textures = [tex for tex in textures if tex is not None] # remove None textures + max_tex_size = np.stack([np.array(tex.size) for tex in textures]).max(axis=0) + if max_tex_size.max() > max_tex_size_individual: + scale = max_tex_size.max() / max_tex_size_individual + max_tex_size = np.round(max_tex_size / scale).astype(np.int64) + + unpadded_sizes.append(max_tex_size) + + # use the same size for all of them to ensure that texture atlassing is identical + images = resize_images(images, 
unpadded_sizes) + metallic_roughness = resize_images(metallic_roughness, unpadded_sizes) + emissive = resize_images(emissive, unpadded_sizes) + normals = resize_images(normals, unpadded_sizes) + occlusion = resize_images(occlusion, unpadded_sizes) + else: + # for non-pbr materials, just use the original image size + unpadded_sizes = [] + for img in images: + tex_size = np.array(img.size) + if tex_size.max() > max_tex_size_individual: + scale = tex_size.max() / max_tex_size_individual + tex_size = np.round(tex_size / scale).astype(np.int64) + unpadded_sizes.append(tex_size) + + images = [Image.fromarray(pad_image(np.array(img), padding), img.mode) for img in images] + + # pack the multiple images into a single large image + final, offsets = pack_images(images) + + # if the final image is too large, reduce the maximum texture size and repeat + if max_tex_size_fused is not None and final.size[0] * final.size[1] > max_tex_size_fused**2: + down_scale_iterations -= 1 + max_tex_size_individual //= 2 + else: + break if use_pbr: - metallic_roughness = [get_metallic_roughness_texture( - materials[g[0]]) for g in mat_idx] - - # ensure that we use the same image size as for the base color, otherwise - # the UV coordinates might be wrong - metallic_roughness = [ - metallic_roughness[img_idx].resize( - unpadded_sizes[img_idx]) for img_idx in range( - len(images))] metallic_roughness = [ Image.fromarray( pad_image( np.array(img), - padding)) for img in metallic_roughness] - final_metallic_roughness, _ = packing.images( - metallic_roughness, power_resize=True) - - # we only need the first two channels - final_metallic_roughness = Image.fromarray( - np.flip(np.array(final_metallic_roughness)[..., :2], axis=-1)) - - emissive = [get_emissive_texture(materials[g[0]]) for g in mat_idx] + padding), img.mode) for img in metallic_roughness] + # even if we only need the first two channels, store RGB, because + # PIL 'LA' mode images are interpreted incorrectly in other 3D software + final_metallic_roughness, _ = pack_images(metallic_roughness) + if all(np.array(x).max() == 0 for x in emissive): + # if all emissive textures are black, don't use emissive emissive = None final_emissive = None else: - # ensure that we use the same image size as for the base color, otherwise - # the UV coordinates might be wrong - emissive = [ - emissive[img_idx].resize( - unpadded_sizes[img_idx]) for img_idx in range( - len(images))] emissive = [ Image.fromarray( pad_image( np.array(img), padding), - mode="RGB") for img in emissive] - final_emissive, _ = packing.images(emissive, power_resize=True) + mode=img.mode) for img in emissive] + final_emissive, _ = pack_images(emissive) + + if all(n is not None for n in normals): + # only use normal texture if all materials use them + # how else would you handle missing normals? 
+ normals = [ + Image.fromarray( + pad_image( + np.array(img), + padding), + mode=img.mode) for img in normals] + final_normals, _ = pack_images(normals) + else: + final_normals = None + + if any(np.array(o).min() < 255 for o in occlusion): + # only use occlusion texture if any material actually has an occlusion value + occlusion = [ + Image.fromarray( + pad_image( + np.array(img), + padding), + mode=img.mode) for img in occlusion] + final_occlusion, _ = pack_images(occlusion) + else: + final_occlusion = None + # the size of the final texture image final_size = np.array(final.size, dtype=np.float64) @@ -966,9 +1081,14 @@ def pad_image(src, padding=1): if use_pbr: return ( PBRMaterial( - baseColorTexture=final, + baseColorTexture=final, metallicRoughnessTexture=final_metallic_roughness, - emissiveTexture=final_emissive + emissiveTexture=final_emissive, + emissiveFactor=[1.0, 1.0, 1.0] if final_emissive else None, + alphaMode=None, # unfortunately, we can't handle alpha blending well + doubleSided=False, # TODO how to handle this? + normalTexture=final_normals, + occlusionTexture=final_occlusion, ), stacked) else: From 7de1b1a09f4e3c4bf5f4ad7da86c1e0fd5e1eabe Mon Sep 17 00:00:00 2001 From: Mathias Parger Date: Thu, 14 Sep 2023 15:57:50 +0200 Subject: [PATCH 060/144] fixed accessing missing variable fixed too long lines --- trimesh/visual/material.py | 60 +++++++++++++++++++++++--------------- 1 file changed, 37 insertions(+), 23 deletions(-) diff --git a/trimesh/visual/material.py b/trimesh/visual/material.py index 8e0dba1d3..1398f3f0e 100644 --- a/trimesh/visual/material.py +++ b/trimesh/visual/material.py @@ -725,7 +725,8 @@ def empty_material(color=None): return SimpleMaterial(image=image) -def pack(materials, uvs, deduplicate=True, padding=1, max_tex_size_individual=8192, max_tex_size_fused=8192): +def pack(materials, uvs, deduplicate=True, padding=1, + max_tex_size_individual=8192, max_tex_size_fused=8192): """ Pack multiple materials with texture into a single material. 
@@ -786,6 +787,17 @@ def get_base_color_texture(mat): assert c.shape == (4,) assert c.dtype == np.uint8 img = Image.fromarray(c.reshape((1, 1, -1))) + + if mat.alphaMode != "BLEND": + # we can't handle alpha blending well, but we can bake alpha cutoff + mode = img.mode + img = np.array(img) + if mat.alphaMode == "MASK": + img[...,3] = np.where(img[...,3] > mat.alphaCutoff*255, 255, 0) + elif mat.alphaMode == "OPAQUE" or mat.alphaMode is None: + if "A" in mode: + img[...,3] = 255 + img = Image.fromarray(img, mode) elif getattr(mat, 'image', None) is not None: img = mat.image elif np.shape(getattr(mat, 'diffuse', [])) == (4,): @@ -793,17 +805,6 @@ def get_base_color_texture(mat): img = Image.fromarray(np.reshape( color.to_rgba(mat.diffuse), (1, 1, 4)).astype(np.uint8)) - if mat.alphaMode != "BLEND": - # we can't handle alpha blending well, but we can bake alpha cutoff - mode = img.mode - img = np.array(img) - if mat.alphaMode == "MASK": - img[...,3] = np.where(img[...,3] > mat.alphaCutoff*255, 255, 0) - elif mat.alphaMode == "OPAQUE" or mat.alphaMode is None: - if "A" in mode: - img[...,3] = 255 - img = Image.fromarray(img, mode) - if img is None: # return a one pixel image img = Image.fromarray(np.reshape( @@ -826,18 +827,23 @@ def get_metallic_roughness_texture(mat): if len(img.shape) == 2 or img.shape[-1] == 1: img = img.reshape(*img.shape[:2], 1) - img = np.concatenate([img, np.ones_like(img[..., :1])*255, np.zeros_like(img[..., :1])], axis=-1) + img = np.concatenate([img, + np.ones_like(img[..., :1])*255, + np.zeros_like(img[..., :1])], + axis=-1) elif img.shape[-1] == 2: img = np.concatenate([img, np.zeros_like(img[..., :1])], axis=-1) if mat.metallicFactor is not None: - img[..., 0] = np.round(img[..., 0].astype(np.float64) * mat.metallicFactor).astype(np.uint8) + img[..., 0] = np.round(img[..., 0].astype(np.float64) * + mat.metallicFactor).astype(np.uint8) if mat.roughnessFactor is not None: - img[..., 1] = np.round(img[..., 1].astype(np.float64) * mat.roughnessFactor).astype(np.uint8) + img[..., 1] = np.round(img[..., 1].astype(np.float64) * + mat.roughnessFactor).astype(np.uint8) img = Image.fromarray(img, mode='RGB') else: - metallic = mat.metallicFactor if mat.metallicFactor is not None else 0.0 - roughness = mat.roughnessFactor if mat.roughnessFactor is not None else 1.0 + metallic = 0.0 if mat.metallicFactor is None else mat.metallicFactor + roughness = 1.0 if mat.roughnessFactor is None else mat.roughnessFactor metallic_roughnesss = np.round( np.array([metallic, roughness, 0.0], dtype=np.float64) * 255) img = Image.fromarray( @@ -933,7 +939,8 @@ def pack_images(images, power_resize=True, random_seed=42): images = [get_base_color_texture(materials[g[0]]) for g in mat_idx] if use_pbr: - # if we have PBR materials, collect all possible textures and determine the largest size per material + # if we have PBR materials, collect all possible textures and + # determine the largest size per material metallic_roughness = [get_metallic_roughness_texture( materials[g[0]]) for g in mat_idx] emissive = [get_emissive_texture(materials[g[0]]) for g in mat_idx] @@ -942,15 +949,18 @@ def pack_images(images, power_resize=True, random_seed=42): unpadded_sizes = [] for textures in zip(images, metallic_roughness, emissive, normals, occlusion): - textures = [tex for tex in textures if tex is not None] # remove None textures - max_tex_size = np.stack([np.array(tex.size) for tex in textures]).max(axis=0) + # remove None textures + textures = [tex for tex in textures if tex is not None] + tex_sizes = 
np.stack([np.array(tex.size) for tex in textures]) + max_tex_size = tex_sizes.max(axis=0) if max_tex_size.max() > max_tex_size_individual: scale = max_tex_size.max() / max_tex_size_individual max_tex_size = np.round(max_tex_size / scale).astype(np.int64) unpadded_sizes.append(max_tex_size) - # use the same size for all of them to ensure that texture atlassing is identical + # use the same size for all of them to ensure + # that texture atlassing is identical images = resize_images(images, unpadded_sizes) metallic_roughness = resize_images(metallic_roughness, unpadded_sizes) emissive = resize_images(emissive, unpadded_sizes) @@ -966,13 +976,17 @@ def pack_images(images, power_resize=True, random_seed=42): tex_size = np.round(tex_size / scale).astype(np.int64) unpadded_sizes.append(tex_size) - images = [Image.fromarray(pad_image(np.array(img), padding), img.mode) for img in images] + images = [ + Image.fromarray(pad_image(np.array(img), padding), img.mode) + for img in images + ] # pack the multiple images into a single large image final, offsets = pack_images(images) # if the final image is too large, reduce the maximum texture size and repeat - if max_tex_size_fused is not None and final.size[0] * final.size[1] > max_tex_size_fused**2: + if max_tex_size_fused is not None and \ + final.size[0] * final.size[1] > max_tex_size_fused**2: down_scale_iterations -= 1 max_tex_size_individual //= 2 else: From b7338a0b31e1ec83d37959d6a6cd74bbda8978c3 Mon Sep 17 00:00:00 2001 From: Mathias Parger Date: Thu, 14 Sep 2023 16:02:48 +0200 Subject: [PATCH 061/144] fixed gltf export when using paths instead of only file name --- trimesh/exchange/export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trimesh/exchange/export.py b/trimesh/exchange/export.py index b4fd127fd..edfef4884 100644 --- a/trimesh/exchange/export.py +++ b/trimesh/exchange/export.py @@ -90,7 +90,7 @@ def export_mesh(mesh, if isinstance(export, dict): # if we have a filename rename the default GLTF if file_name is not None and 'model.gltf' in export: - export[file_name] = export.pop('model.gltf') + export[os.path.basename(file_name)] = export.pop('model.gltf') # write the files if a resolver has been passed if resolver is not None: From 87ffb5d58c94ab94c351d1dfa6de7f33f7242ee0 Mon Sep 17 00:00:00 2001 From: Mathias Parger Date: Thu, 14 Sep 2023 16:18:53 +0200 Subject: [PATCH 062/144] many small fixes correct handling of texture color spaces multiplying factors onto textures ensure that RGB and BGR are not confused anywhere --- trimesh/visual/gloss.py | 86 ++++++++++++++++++++++++++++++++++------- 1 file changed, 71 insertions(+), 15 deletions(-) diff --git a/trimesh/visual/gloss.py b/trimesh/visual/gloss.py index 27eece9af..5bf119c92 100644 --- a/trimesh/visual/gloss.py +++ b/trimesh/visual/gloss.py @@ -27,8 +27,9 @@ def specular_to_pbr( Specular color values. Ignored if specularGlossinessTexture is present and defaults to [1.0, 1.0, 1.0]. glossinessFactor : float - glossiness factor in range [0, 1], Ignored if - specularGlossinessTexture is present and defaults to 1.0. + glossiness factor in range [0, 1], scaled + specularGlossinessTexture if present. + Defaults to 1.0. specularGlossinessTexture : PIL.Image Texture with 4 color channels. With [0,1,2] representing specular RGB and 3 glossiness. @@ -36,7 +37,7 @@ def specular_to_pbr( Texture with 4 color channels. With [0,1,2] representing diffuse RGB and 3 opacity. diffuseFactor: float - Diffuse RGBA color. Ignored if diffuseTexture is present + Diffuse RGBA color. 
scales diffuseTexture if present. Defaults to [1.0, 1.0, 1.0, 1.0]. Returns @@ -85,12 +86,12 @@ def solve_metallic(diffuse, specular, one_minus_specular_strength): def get_perceived_brightness(rgb): return np.sqrt(np.dot(rgb[..., :3]**2, [0.299, 0.587, 0.114])) - def toPIL(img): + def toPIL(img, mode=None): if isinstance(img, Image): return img if img.dtype == np.float32 or img.dtype == np.float64: img = (np.clip(img, 0.0, 1.0) * 255.0).astype(np.uint8) - return fromarray(img) + return fromarray(img, mode=mode) def get_float(val): if isinstance(val, float): @@ -105,7 +106,17 @@ def get_diffuse(diffuseFactor, diffuseTexture): diffuseFactor = np.array(diffuseFactor, dtype=np.float32) if diffuseTexture is not None: + if diffuseTexture.mode == 'BGR': + diffuseTexture = diffuseTexture.convert('RGB') + elif diffuseTexture.mode == 'BGRA': + diffuseTexture = diffuseTexture.convert('RGBA') + diffuse = np.array(diffuseTexture) / 255.0 + # diffuseFactor must be applied to linear scaled colors . + # Sometimes, diffuse texture is only 2 channels, how do we know + # if they are encoded sRGB or linear? + diffuse = convert_texture_srgb2lin(diffuse) + if len(diffuse.shape) == 2: diffuse = diffuse[..., None] if diffuse.shape[-1] == 1: @@ -113,7 +124,11 @@ def get_diffuse(diffuseFactor, diffuseTexture): elif diffuse.shape[-1] == 2: alpha = diffuse[..., 1:2] diffuse = diffuse[..., :1] * diffuseFactor - diffuse[..., -1:] *= alpha + if diffuseFactor.shape[-1] == 3: + # this should actually not happen, but it seems like many materials are not complying with the spec + diffuse = np.concatenate([diffuse, alpha], axis=-1) + else: + diffuse[...,-1:] *= alpha elif diffuse.shape[-1] == diffuseFactor.shape[-1]: diffuse = diffuse * diffuseFactor elif diffuse.shape[-1] == 3 and diffuseFactor.shape[-1] == 4: @@ -145,7 +160,12 @@ def get_specular_glossiness( # be multiplied with the provided factors if specularGlossinessTexture is not None: - specularGlossinessTexture = np.array(specularGlossinessTexture) + if specularGlossinessTexture.mode == 'BGR': + specularGlossinessTexture = specularGlossinessTexture.convert('RGB') + elif specularGlossinessTexture.mode == 'BGRA': + specularGlossinessTexture = specularGlossinessTexture.convert('RGBA') + + specularGlossinessTexture = np.array(specularGlossinessTexture) / 255.0 specularTexture, glossinessTexture = None, None if (len(specularGlossinessTexture.shape) == 2 or @@ -166,16 +186,15 @@ def get_specular_glossiness( glossinessTexture = specularGlossinessTexture[..., 3:] if specularTexture is not None: - # convert into [0,1] range. Does this require conversion of sRGB values? - specular = specularTexture / 255.0 - specular = specular * specularFactor + # specular texture channels are sRGB + specularTexture = convert_texture_srgb2lin(specularTexture) + specular = specularTexture * specularFactor else: specular = specularFactor if glossinessTexture is not None: - # convert into [0,1] range. Does this require conversion of sRGB values? 
-            glossiness = glossinessTexture / 255.0
-            glossiness = glossiness * glossinessFactor
+            # glossiness texture channel is linear
+            glossiness = glossinessTexture * glossinessFactor
         else:
             glossiness = glossinessFactor
 
@@ -200,6 +219,39 @@ def get_specular_glossiness(
             specularGlossinessTexture.size[1] != max_shape[1]):
         specularGlossinessTexture = specularGlossinessTexture.resize(max_shape)
 
+    def srgb2lin(s):
+        mask = s <= 0.0404482362771082
+        lin = np.empty_like(s)
+        lin[mask] = s[mask] / 12.92
+        lin[~mask] = np.power(((s[~mask] + 0.055) / 1.055), 2.4)
+        return lin
+
+    def convert_texture_srgb2lin(texture):
+        result = texture.copy()
+        color_channels = result.shape[-1]
+        # only scale the color channels, not the alpha channel
+        if color_channels == 4 or color_channels == 2:
+            color_channels -= 1
+        result[..., :color_channels] = srgb2lin(result[..., :color_channels])
+        return result
+
+    def lin2srgb(lin):
+        s = np.empty_like(lin)
+        mask = lin > 0.0031308
+        s[mask] = 1.055 * np.power(lin[mask], (1.0 / 2.4)) - 0.055
+        s[~mask] = 12.92 * lin[~mask]
+        return s
+
+    def convert_texture_lin2srgb(texture):
+        result = texture.copy()
+        color_channels = result.shape[-1]
+        # only scale the color channels, not the alpha channel
+        if color_channels == 4 or color_channels == 2:
+            color_channels -= 1
+        result[..., :color_channels] = lin2srgb(result[..., :color_channels])
+        return result
+
     diffuse = get_diffuse(diffuseFactor, diffuseTexture)
     specular, glossiness, one_minus_specular_strength = get_specular_glossiness(
         specularFactor, glossinessFactor, specularGlossinessTexture)
@@ -227,7 +279,8 @@ def get_specular_glossiness(
     result = {}
 
     if len(base_color.shape) > 1:
-        result['baseColorTexture'] = toPIL(base_color)
+        # convert back to sRGB
+        result['baseColorTexture'] = toPIL(convert_texture_lin2srgb(base_color), mode=('RGB' if base_color.shape[-1] == 3 else 'RGBA'))
     else:
         result['baseColorFactor'] = base_color.tolist()
 
@@ -237,8 +290,11 @@ def get_specular_glossiness(
         if len(metallic.shape) == 1:
             metallic = np.tile(metallic, (glossiness.shape[0], glossiness.shape[1], 1))
+        # we need to use RGB textures, because 2 channel textures can cause problems
         result['metallicRoughnessTexture'] = toPIL(
-            np.concatenate([metallic, 1.0 - glossiness], axis=-1))
+            np.concatenate([metallic, 1.0 - glossiness, np.zeros_like(metallic)], axis=-1), mode='RGB')
+        result['metallicFactor'] = 1.0
+        result['roughnessFactor'] = 1.0
     else:
         result['metallicFactor'] = get_float(metallic)
         result['roughnessFactor'] = get_float(1.0 - glossiness)

From 6a3ca482fae619ed63d937ba9f9cea7124323b38 Mon Sep 17 00:00:00 2001
From: Mathias Parger
Date: Thu, 14 Sep 2023 17:18:27 +0200
Subject: [PATCH 063/144] updated pbr conversion test case for new sRGB
 implementation added new test for materials without textures

---
 models/pbr_cubes_emissive_spec_gloss.zip | Bin 0 -> 201626 bytes
 tests/test_gltf.py                       | 35 +++++++++++++++++++++++++----------
 2 files changed, 25 insertions(+), 10 deletions(-)
 create mode 100644 models/pbr_cubes_emissive_spec_gloss.zip

diff --git a/models/pbr_cubes_emissive_spec_gloss.zip b/models/pbr_cubes_emissive_spec_gloss.zip
new file mode 100644
index 0000000000000000000000000000000000000000..d5d4e8333ee9d5d4dccf1875bcdb09fd0a102c9d
GIT binary patch
literal 201626
[binary zip data omitted]
zNy9@AeP+)lH{D_~IzC>WlMO@!$fHR`FR2l!4;ZXFi{E&qAY?4bbbpjZ(emU;8LJU7qh`F+9hhV23Ivt!|{FXiGW?eF!pKbV;#sKL9_`tvl)A=H<*TUrny4Qkd z4sv4s!ihId$|H&tl#1GrQj(Kdqjk4jqlTeYG$TW#SeI_yV|&w-NM84omV3N@QGKWp zJ(m8b9!t;Pxa7|`l}bZ0S-PY8F(Yamsj;@9Y|5{(pz}<4s}u5z_gb{+3gjSb^?DRyy?5uzjvG82ioU~=%r#DI zpl+7BfVFV%{u0^3WnJn?i!BSR9l~uV<}A~CukrfJSN>#Hb!=LO)KPCT%bst(^JEcw zRPDWB9GVx-A_y&Mjj6#=)$VP}>OqZw*o6pU#I5AC{b&t03HknTbSQMZc#!Pg3t<9G zR>$u^k8V?yZ=F_CP2zkfU|_ z)LHqGb)fE%I*n5cW9VzZK;3qCE0FwrxsWxnbYKmM6Beep91|2Pvr{^~w_admRMQoZyF?toXL zLU$P+{}wJZmG7y)fqd0ccr*bXA`tL1B*>wIsZz4dOaE`Vvu9plfGl*Qtba~UcS~{` zvy-FQ7!KE1PNsvTALR`fz50EA<*l>m>|UY}_1-7Pef;br&w+3D$p}tQ7eF0E647Rx z-wyG{P8np{rT4__JxoE?!rh9Gw_6{!?3v?I*krcOJ6j_|M1|H#w$vFSG9?1@{_OhF zCW^4Cm4>Fo4Ah5Nvi|kk#l1}wBQ0j`J8`}ZiKKlY1WBiBQBe;?vEo%imlhVPQ;k7Q7_ano~luZYI5qY#_E^F=T- z0h>+UI9(-Kz|16vg>+L1=&IpeiNLOp8O>fQZs^{>K z9UGF6=g3EW8W;*jckZn?!B^eGFKlD&b1K~}Wl`4SkFXKqJ$VS34o(O+hGqA{SKgla zbSi|`ORYvmg45*7w(8s(kf+nr?+%!m#0p<;Z=YqU{8mN^YT?QpV&`}yMLx5+Z+}TW zeBI5wKy|MWu|lh5KKB$PcxV5V)sK|4cy{a)vx$O6wsRwA>AT!oTkxuQgiBJ#*IxjU zOrPe~ZMgv*6LnyJ=z4)y0=8g_x{j2uGJw z1=k>5wK@OczL_oZ{N&-Gx9yjJK-e>(1YyGB@STmn*x;c1_l-fkQ5JJN=Xs_o_yOBD zp3^4O06n`yRw!es^=_YVLvJ|c8pEIvs(Q#movG9GU6ZfyMzqXG0i)@dP$N%Tcw;AR zJ9#Z!eZDO&*b=kcBu(8>w_Tdd3c=??#N_%3K6M9mNK;}HRIjem`l6C*U+sUqT^ouW zF|zPVR`LiH;s?#YC6TOqqrl0vAGZ@EQ#t_>hDotg8;%Uy`GhAmf{(hNJAL{zfZ)%w z1Yx;%-#c>y7E)SJxe7Dj&5 z%67o#Zm0%lhcB2sB#oqasXFwbhsZHzEv~KQDCy2R~bvP z!s$FCX=@Cd2iQ#u*%+?yKosH}bu_+9)!W5~Kmn5$_d?a2ShsDbu)W6(a^)s*%0je$ z#Z6|_Foe4QJe9{H>=yz;HhtL&0fzWLaTQ`@N*`8QWoQ#c?K=c&QsiqStM=b~jBGW%^|}+SUI(3j@LI0t@K{C#@+0lPsFx}uzcXQh56<=eCWPWl zuj@qw2y`UB@ab!FEu{Kq^3yqve$w-#GL}-1b$YGM0@31%-mqhs6tdf^k<}P>tzOT6 z7cgmYjjK4!S+2q8*I9Jo*4ILy^`}JZ{J!t+(CwVMA*Q_*BI@9(3-3Ziyz@qZwQH2L zqEO5AELp0Xvx!2D*n?haUQP={#O(DqH`tWzI}}^FUV}yix93CN`a_Tua>>s{ah0ph{+?VB!J|+AbD%eH|p^MUwK%URxXjrFIfESh_hS$?{qx8gjO_KpY(MF7jy zPVVRGlqp4u{M)f(hb}ixP5p*&v}JVUpRw=t^~X_Ny_Re4#=Y6IZQ?qQhsVo5)*A1< z<5q$1xUqnJ8fQ8_-A)~8Z}p`X$44HxA|jQTgFMdg6&V80xBH}7cShfZ3Q?7MPA?1vt1X{Xw&)b8;eG{q}u>_v-IKy}ML+q^ZRMYeaziBOs7TeRlE*VClr=S_`azp-eIVcP??3}=nuEDR{NWz-m! zDI541g}i+z;99ty)xwRHHnbKld+;Vjx{}tJf$)8Q{RBSUcGl23|Fp+{%?tjym@SZb z;&mo1h!Is<^tLyJ*anT9fj;D7XL0&=$BNl+LbKO<_h%GaNLvjrHb#&zZ~YB+pLt>5 ziTvl^FiiddDBl!p-+n3&ofIlmh}TZ0wr^kXn|5nHe(RsLZ=NjhN1N8`KAgeT^>6>q zJqzs7BuDl}4PR~W-j+pOS?~IIeD~8_N&SLG$KL%JgxfZVMJ$(|O z1j6Ub%>mN7Hl)LoS*v`d?!C$EB*!Ama{Lo!UbsSDr{4h}DrwoN#D}4b!8j#*lh_B? zjua7Ltp$b1wKYh@JzK71WsrbLkhcIyp5u92Uf4H-m`wkp06)`U{x&M5v=>~3tX5+fs!bP@TWq8ye9?2?Gfw=dJgFCxW zg3ymm_ZBtbY~~+|bhQ=Tp0$m#`cG2l_A{e&stRz9Edl8pZ?h>ih65(kRrJ1i|A9&X z7f`M-Y>j=W`T$6Q8=6J%jelQSk7Fn85BgHX?ab;1oXifae3|4YKu(bAo<|$-@k3aFv)i! 
z|GL(1OrQS8v}q@KJldkVn(^$sP)i`k;@+d{M9V z+kSi`OPYkwPM)QGEB48`pF&aOTSyS@Ad=C2GlWJb3=kwBlx1I#b?}9^VY-R{bEohF zrbpgtEH9$u-=a*94c4=w7b?g64KhzS4>0fxXzZjz3b*bg;C&=r1<*pE##E&BpZ7?u zOX}HV79r9JMbmTU1^x-z9oU>QUZ-AnNegi(Q({c?{)d%G>?pPP8afaGv!nWjX5W4} z?e4=zGH}~=PyH)y0eWN3_j7?uernDznR%gAz8N3p2P&=KXfyo}{1o`3Kmq>q^fql; zpI?X2n6aoeE4`1bdokgNy?imTt;HfW}Ix7M|Fw_2%uJSwaj(2okFWXa&MP&B9S zNLYH{TV2I@__)FW!W{$)5SGc^ddCJU-Fg5S>F@PYRfwbI7?!=kHHN8kxZ|*o)7$?v zhOKYO#(wHR*oFHrwA;IEVf`tibu(O96CNQ#jz*HR$?UqvPf*1|i5f88P>g2XLA^GpEfeU@Reo}lcT zG2_ICALd`TZryqP;9soM#gl!lF3fD17icuk{Xw(YxeShMoBqoB9;)4=zk<%wr{j86 z>i^(DLXQo8UhF|MB%viNWAr&>bUQ61k*-4ozFUa1-bSv^q6}mrM4fln7^Y*GjbZvt zl(EX+yH7WS_JFMo@LXefLcp}1vGl&~J^hw1X}!8-AUBycPa?&keAi-;2`)tNCum-< z(pRQLwX_-{Y00~A;RODWS>U3gLiu95$Htw!bM+(&L?Qx(_!1WhLG4Og3X&~ZHa9Rx z=7HXGz1Hl{+R=<>^l^h&OW2U3q7_l^x#jNVVX>5cYGSp4?V&dX1iDw zKV@Z8Rc8{_?vB^cGTW{;++Xh-%iw?Co zcbal!HIP({46VY|&atVKwow1_A=RTKcL&M*91N4DuQ5&l3Xhz$$h&@h%a$`|@_+FI zC|Vr0!sDGfH@WUslkZ6Tc$A&An)^JDO0^Y%beP(X zJrlZx{+vyFe++Hu+CR_sI2dp(KjzU~x*w0wj7?vA? zeG~p);L=H|fNa$Ke7Q{as-y@v2OO>cKa*K%Z!49H#ykwwL5OG_2Xd07ukm_q)9Z4R zKpt`-XP+!^?YaMfQY$B?f0G@vyZsMH#Bmc2YTI@jn1tK5uK<#)fALH}_XmH@dTn-7ppkRl-p1K4{Y2K3w)s zy!$lxq$XSSoQ?Yd zve5dxtOAUp8B>(qPJ~QgW7yWLtTDXUH5R>LAu^mNTEI~Vr~p~IjdSfj3|%HC=;Jk# z_Qy_sh_GmyK|)ZLi>3?JHBgM#NAV1t#(?k2O+r&*#J0hjsrX|w_cWH18ZmMfn*EDG z6SYXlw3T|tp_Py%he7Tc7iT(p3|#Wwf(Czm-;i-KKgYa{O>%D709bN0Z904QYxtRs zpQcTl=HkhdoJ}_B%toVzoBUG(KYPp4sCExdNR<+Fp1jJ?!IikLV0e^#5^T(U9uhhw zI_;*Yu#kN$VUf{Q@-jeVhH$gqNu6APxW>CE8Zl)Vh;0)tTD!GHI)-}`pkr8_OEt<| zn?*$f`(&v;59nZ=j>S-l4D(-HnqbO>*YU zId_BiGdCxom^W|l{Knb8Lu>$z1`AMG?K!61^$bV0r9*fe+5t6K{iTtgCLcCr@Fy>R z$$bUk(KGm4=l}{V?vJpi2m+x$9u?N62I}&X(WMEU8Dc;`b|Om#-;+cB#y3tBTFt93 zyw-F))uKgf^|6NSziTy?YYcPL;_o=9%@WA1{uHeU-19G1#U{C&dskNFTXP2uhTddW zYvqnbDD)5_kdxkV67sr|mMSfvC>r64l(r3Q|D)Zh4r~@5K-MC&cl+v0g8sW$Yz?-w zrES|ZD}6}b&2+RY!o*>+so`(ggv$K+d-v|m!Jm0Ka_rqZe|}?P(J0$*IzvHy1g%g0sxJqJq68!$e=>`rNgdog__Djp3!DR1`(IOH>NJvvye104ev#V-e`?#H zH4&NGg@}OJ&vlQD?cI$J7nLKmbCIwrw{2^1+jixOv^VPjlBM;QEY8$5Gha);!fP6T z@y5w-fThtz{(Og@ix(R;%9ic7hQQ)4gvMr~vFBK3R95wt5{}&sE%qkIp`xv z#eu&hN_e&}_W)Z!q`wtndjwa69smz0^mg1Ift)Y`VZ!17>Od90@kTbWoV^jB?kAoZ zq6G4sR!GbY(UPjW371YFBQw_JB&60j+gb41CZ{KN*{?p9Q;`uhnPsf;)kg&+zGY}* z*>pQ9A;h6gW;t7Dh>)XcQ}+w4pN^wuLQruI2n_!y@mP9Ndfmi`x$q29ChCz9H@12ndA!cnCZtEPPn9Hzwn!3(YNpPIBpfRM7}CM0nQg z$w+V3Ywm_vKDVm-TTT^1MUrx?F>J&7kpU;Zm&BCgQVvXc!tz&pYa zs-m7ZD%W6El~&CiP?YqxccleR#IF4h4-vO*u&*>qf>Lgg@Rte*k|D_gU=rjl9&R8c zH+LUqm=F`>uFQ+}yt1%FisgboRZzwZ^cDWU5Ga9qQPxY%E%vB|uJ)vEA*=o|?>^ zS@R@%A;PutrM$c#BegRxwDB4!x1xWMUibVxG9|j&*DP=(c9vR1#GKTK5g-yBIyDi9 z^a8b?VcJUkg$oiuQhjVHgXHa-wd?Sj2I6GNVxBn3d~DI4Jqs6Z*;1cBS+ZooU$<-_ z6nhpO%Z$+Y^1$W|$;2DF{a^}~LX;+KJti0$5eBlz6K$D7>enc1Q_ z2))VPi;(C@K6FEXo-JXK(OL3p4)S^Eo^OUYae;fi;aM-LGAAQd1UO2sc>e*Ur)vze z^Nx&#Kj0k0b{is~vWXeU)VfJMH4M2B7|>yWaELg6PxBU>1L`;lr%u1?Qlj(sB$Yf& zWv0ZXP$LV0quc-Ba}$aY^2k+c_UJMAv!HgREen#QmbXTO-I2J`Y=0SK!pjZ3rcn=| z@Wu&X$$aeCBKLFb*s;u+0maX{c~Wny&^XLn0=(={YXaavwjz6QJGVmQ`eAJI(8CBk zRABKM^IpCI&V?RHLS%PjA9i^wCf=pCd+g(9e~_U6Ylcu;N~0!KWr`^ozF3qkP)Kn) zY>i>p$Ffm~+sX>i*xrK2k7ah~920u;*%SG$BSJ)l7DFWL@3GRCyiQZ%Wh5=H(qb(@ zZ+ornE7(G@GvVIhc6r;~wt@CP_U~UB1hsRKuy2C?;P!!oP;Q|D{qf^SMPs7whjPVQ z+WJb|jvyJ7r1FqtRgyDteV(#LstL8+HI3!9)73e=E&Y)r%j#wzF7@iw%g7)6^VY3f zmu4Uq?+A>W-kpsJGzbBg=9n&ANQ8_BDS>~ihqJyFgn-Xo?z&jJJ z9jV{*zHZg;n)Zc&2+QA-NrF8~#_e7D1uzL$JCU~tNg(-%YM;F{hG8^%`#Eyt$UA`I@ZogncHcy3Twb|74exBEs*&>ZFdWEMza(`W1aBeK2PI|-;>ahdnnP_V zNuw;zdY`Uy=eIbTV&7{H8^g{*s!dOIHU!hY$0wb%4a~o513217k-mp<$ODssav>2eMW9 zatY5WJbHmXN;V1Ij*coiJAAWq$-`Kd(rXQdPp+L;u$R3utQ<$(&FVWt3&oxar 
zTU?)qO%Q3kJ=BpJWtk{cBT_Olg^B<`Xq%qCJH-r z4ch0~3`Iynm%XtD{qe`cj|du$g^*SDP1DOY5=B1 zsK`JgUJz3^ga^NA+vObrS7=GDBShFQ1VghsMRlmCVoM82%X3n@~St8IXI~0fGedo^)uk053Ou z_7X9vK~Pc?l^@d_I<$TJ_Lc5u`}RYJczyR(E%`4X>?4uWjKpFw~Ox_zJX`u1;yFA=y#FGPrE$xc=|V0CQ7LLeBS)qg_7 zH!s*6(0IMMTQ>0gJxh?Zbh{EXBEuDl{oo-0sa0Dc(#W^U6`@0c@T09dY=}uh zOISQkcZav0=sx725QV^A`au`k9;qK7+UY@n^eP!_XfZ|mmL?&ydAlG|gjkUrFi#?T z>d%J=FSY*Hyr4aQ$Lpro&EJDcEBgMeDl#Rm=8BYvokyv){{cBdfoL(J>K9;M;Ib{K zyTPu5G)Q>X%K+(r?N&t1OO-B4r+tFGWJ3@| za)4tew-9ItzZoL?x?7=@a1}4Gqh>;gICbdrx``3`dw?S4;nc}o$l;0_ z=n}OZDYtF#!QZx#^T)}LKK|xZ7Yma8p}duX-BAJe_M)U*qVkD(xj~pDtAaoo@o6=l zIC%-U02u%0^UpsYgRppM)Mq?;0x*!N?9RrE11s>dLs`J_)eEIb=I4TyT86GiMc?UE zq%i1Q>!2t0wR{c_?(-}K9xDzVtXYHm3dIMb;$;sD>b^v99&dHXEmWYj2Di=d=|bZ9 zW(emuwQFeH387!njk2b-`4B4I*|&56$o0LH;wlDQ!?3v>Gx5v|yoqli@&)^a+~3ip zD#N=+p(-N%J*>3oCIKR^nYaB+KeugwTvuHiIUXXYwHoq~oH?)k`8OYu`tZD^EmT_! zk^`aJn|E@y+bbTTWc8T@b|v6Tun91!oOEdMVZ#E$hbJE~;?op}mXV*~XVfVEbtHbO zr2rI2Q^R@mWJuD%K%=5vcIaO=L0`b3+UiNL+m+jpbTOTwJ_@y8^A8_)o6&0h@!xYd zYdSX)k0Xa~a(@JhQ1(U=h{hS$sTR;(`EczS6vtG|| zYLu>*O9JYtM~t0Lv1kgl?jiGnn!XTvp0%A$U+{WX-S2UxM9M?N6-|j-gC$E+jnHtV zH9{fs<4Oob9}(?;%$d!Hq-KIhkhWNDF-TAV-w$;-5*vIH%a_u3mRZd!0Sc2Z2O@V> zN;0vsPMi#@5(o_E=@}lM;Sc^r^5ncf11yRIjLH*}e8SjBjNlm1zh8ng;0{Jd+@8T6 zHw%UCLkxB*!fk2N2T0(;`s2-^B=JXrAZoWJ%*WT!**dZtWi5Xh>S>r7azU)ig- zhv6EVCiL38+9QPqac#wt!955NTUk&WOMgTvU9((B1}yEpVmB!h%^8<4lw9C{Lbti;vrg+B57Cg>+jeo0(m;EOzQGLZ*P088=W$&wB8 zev&1_zaKgTPz*+70E{mxAU23b*#wi$tS+;&bnjkq0mq}yTZf*;%XW+ zo+cc{t|kDE?8LkMk1AEow`<;ZZX48pufg|Pw&fxrcchrM$n6Lc<*kRI&>sh%A#vF; zjWQ0C0lN&8FN8{^BzAfRKS}l1!8|{c1W@qm$-plW8i~pwHqLsN9eTw@Y1MsntB_?Y zvIoKg>5A)zdB6SX&)IWw=71gy=PNts@0}}$0_i6Li4&|Zdt(PbVd29i9nk-)4U(`K zBEl_njJlpdWo8K0yqxu#eJd>Mp>zUnGOOJh7EN|}fu3kCF@q0m&tF^R;&n?}dM()G zp%!x3WrP};QR1Ao4bJ-eALiQyBBn?9Y!NqdHqOJ{@x*gybK?Y{l?+>rGU<`DfVr;q3Qw=Ex3xzM%f|c#IyYbId$fKB>=6`r}iE z9%$pu_hTdbu*-{YyQc|jh0PGAK_2zJ>MlcsMWOmEt_X;n^#&G2-o#gkOeG;)>U75W zO0D!IuT!t@#>c9`td5RxXYB$f<<8q49oq1yhe|E>P*D7Oh;ZA0`yW7L;T3M=pxAo9 zDbv>a*YhDrxZ3%RT?qh{0Fw5Z%epH8@qkI0KH!rCXZ!U}IH26FU8~BM|AnV#?Dot+ z9-qNa;$Hy8DqTG(+wbh8zVU^|Ft z*}jK9e?|KWVLZHZ3z}63)=HHUZ+WmVnn7yl*@FJpjz_1?NB3n+n^*g z4EH~rZ+GPA>h!ilkLcMVAhN)y9jg6NHvv#7+dm<20WgVpe1`wS>odE`@%~JIV3B}e^eMxuCrni(p9X`wG8Z&Bit3ht zme&~`NLO4x1f9?S&h~k_X3}#oZ&RQPkK){52cbg|_KH3CY#l1zPP(Q^cZ!CKx-De( zMk{&w>C*e-n<2{HVTn~ZJj!y<^{UTejdwflfubz%vpdV@tq|Jfh0am;uMf7;SC$}l zeWll*Pg)!3U6{#Bi8LkhfqgiyQ*h7N@Qm|mVx3s)|Vjrq2OIP-~8~1sB&A9X(#zz>BN-uI> zAwJK|_6Tlp{&>1mo=;f(aK-3W@@i1X(FqONuUdf66PbUXjcz3hpR0Lo4Y6Oq^Y=u! 
zMAzfk26Udh?4QCI<-^lS0p>3BhvS=fRtDOxm zDs6c|@?mSPq`#$=bVNz#Yf%vIq3EAhObQ}*l>{ceddHg#RCss>xWqpbzCF|T4A0L9 z3U~E{V8ktf*myRq0fOUpR7}jBqEr4Y+=b+^7TKybid}}s2hDPA(0&zPcRO};F`wtT zY559*5CQ~W^i>1Ss=#vRvE4al+8a4Maei!NA2Pa?yqu(SP{;sz@$P>vL(JZwu89Q* zYTnc}=$|kM12&np^U~gsV=8^6Ja{c$XG_a7C2BCsqhmU#q=g)fM$S(^34zChyHWdN zCaU;#zFk8^3ei0zg<@+FcckFJ5onpAi-%djh{;k5y?m7Ah7B-HkzTpwRsqaPZl6 zkO$z%=dFXj--HW2+^^z35Bs~&=N_Naz$H9vf28<5?>x4%H#&EUB`lL58oskNY?lc5 zGTf)3yK)Obq-Wf!wo#t&?)bt zl>MLarpdV{J>w7;{PY4i;eU!P(ep9yo}BCT$?`@8knr}S%0+PK&<3d7;Ape%LuM;z zbA<=0tA>ANy<+R2%eJLkep$~Y36G?exX)7-c(C9te}`1?uM`Lfgx56jU6(s$%OZ0N zX?DVTNrPLp>U*3!;g3%ISuYpU6(D>C;Gk_~v76j~(pLdJ8=(zN8yzx4qz=LBw~~<8 z%kc1mdz)615sq`0{CsSoyzO=ZIec(eh+HW793tNS$9Qh!u-F2TFm0jQ0=M%4=w{4Y zgoIH7djE&_O?XVhVG?zZ$21c1u@I4!Zm|olA<>b2oq_2ZNuXe#G<&3H^QyA^MBLm#bi$> z)v6x08Dx!2&cguFP-v>GpAKCU@%$cIFc|>_A;Q-8IFiqi^g&vMa2TI< z5sB8E;VzorerSF>Cuf9+|A4NpiY@v`WTS`TGiO0W^NdPLzT;(l5(gjh{4INiC zKr(d-Fj1WJJ3&x($^LV zFFI|UKvzX;2cS`7YEPDOiPyW#WbvSQect@xA4H_nKr*QvBGP|Ik;Bto z)JRc~^N;@95F|$zZl@%D9`Y6;`2vvat9K<|JY3%eu}>s3&W`ctz6EcqqeNBPBWt9H|SO*meIy zd&au4d08G2%x z<~@s+cajcYp)U)`5OtEfj@MzT%>FVa9l7mvuL;c9R)?X2iOTk+4>C|A!{@2mzNN-dDS+<)U zmJ*hE3t>h_MSm`+h>G5KGT#=b6GnSFu>jFidKe&{03304yC2$gXm#CITE_3$$nwmj z#qsbpEU&Ja_nO?`U7be1&?R4m?cH zgI{z_=m~br+9{%2ewZVn5*FD)811QC3|3f0=S*0IgQUHKSb#9gS&OAB0D9{kk|W|T zt6G!$$Wv z{->9AK1B7050d_i*#WbZFiP6bor8``E&)lbCc*^ugkBmer`8}UfW=BwjDExc|2b4;}|KIVjJGNzcP`V(NDpABlLd z!D2z5-&O)E77)=JZ}NO>z1x!Bn02VH-m;7>q(L%#Itg1WJ7~?V$)Snw<-8oReIchq zwZ2x@Ij{R+q~jH&L=EiXwr?%QrIxhMc5TA+A8|y`{bC+T8`9t&zF0NnK%yIwNL%A* zq(Nz?L~FO{Cb#LIDM^2-11)D{ra|L|Rfnq(lmiZxUcGA6=sm?d&)6sb86q*YSFfJc z=|Dp;`4hLPH&ti(X{b@L$;8fPC_z-Z?Rl}42tlawrhRmXJU0- z!-z{-D;y~?!El$P#N`QU1L;4?UyVpKwr^9A%=33Zgw2+2L^5rm+j2qT4?y!K_tAx= zkhn^(o!6e3%P{Fo-v$H9o4z&-4|8Aum6^EP*n33?5;3 zb*a`q*~mkx#hfI4+lQ2-f2oekTmo24fC(K=c2t_N50N5+ld24s?$ykBriXpBpQ+Z} z8_+yc#evZUnF?s2axggPC9uHYSafV1MnQ?!H#~Y;)g0Xw)0N3Vg@^PbSvh7iKKf>+ z+Cb>^BXg~wy9q@6c-jBTe2WQ7^hP^n$;Wq9bpN)3oV{H}k9 zmWh{8Lw=7p9XrB<+UAt#Nr|*THqja;cJ5H5|Bxalj!3^~G?o(Ku{E^6EbY1xiL^EH zb=GzZ5*Bxs5_1Wl)b`0=+I}~`9m7NhCwpe?MWE2(gs31^h|4q6X+2Xl{Pypk4j_oc zVdr{sB+0p+5RK4i<4M#GMn{RofWwK`S8OcuJ(ne83gmC%<)AtzRzD3zFv;~-Q>b_A zh5_VhBp&J&w)4IM>2#>gVF4kpVY78uCP4&}x(sI(-OYraEyTHn9<_SPG6_kGh;661 z3z4c^mr%o`dpYQhOjp!bv7AfKldl%$ zY~j(XdNuK}4UhSEKDm$$mI}HG2qqAAykw{7@g!~F0GXkAwosC`ytZj^OA`+Ejtmj9 zg^I@_7%OZSg1WACq38G5GoZ;o94XP45mDd(B=sLHD-ydGc%L$CqCftMa7Cw zKRs(!g*^xix}Fe>_RLhYJ(1DIqSlX_mw*nh(@WrEWUIn&$xMK1X{M|D?v)&rb^>Fg z+F$iyJ+D@^oAe68qwSLmonC={{u{qRzf3xvu_4G#NxmGEO-NW)(dQ<#yc-_TCeq$P zq|!xzWHdNcCxYywmPzxv=l8@(i3iQ%svLS>$8C=r?lv!?YZH7N3H2Y;5dslY8+b)d zr3qF@n6{|?P;FO`NRlr7vq{n)`*uC%l6AKqTZEnqm>^ERo4>Zqyfw&PayaQ&zWnc< ze($s=w4T|!SO3HL&k%}^vuD!}NMNijy`ZhE9e{>P$8D?~2FLOh7zHI>XR|B4Tca)4 zOAbm3@TK5%ot?x(egfV1R}nv+c!c^92@o+6(sP6U@r^a2oBi)b(i^Eh5L?KWSYivY zqPwMu=@vsWgq%0~s5CYFu?kzBqzf;G7bpXN}u#n|P@WwD{#J9@8BVF_x&S?0aWA z{YMQ17{#`APMWmSRBO^d`)k7m-z-G^GmRTUmwqaa>} zvzgFar?^GSG5kvrp7bKWhi0N7zsEfTRwgMC4J>=#Qv-m=ds=F7KD1WrA&iyMe|&oz zVubn+*Zp?x;?LhvY)fh9oLp$tn6_My_-n)Hx9AdJ#to-$*QptLE4 z^woC3g3_hu%XwxVzBOysw8tO#6DR;AczX)0;%z2n9sL!;ASx-uwX)fV<8i~gCWMn6s+ThdiK;%Y; z2*wd9a+I}5fFmNE_6iZnpV@5DN27NWvZ$q6(;-z!((h&|k^Y&I^pAggc==WflLSoY za6%`IFXcP)%lmf%7&D%kZ=Wn^@K1<@8PEU=?|OnVzW6q#Di<7E(OEG%Sj$m7Mvide zbyN@O-6-hE3X1>A=MTzrg@@CRuztAs=%1(`ae4(4Sdkyk5&ciGQz#%&!r}haqS<%Cm*T@iQ!9Rcf3v)}vUY*GAF{ea79+uS;M*=BP6gZ>z=(YixU)}!*HB$Yz z-EN3Tku&QH3DOVr;2l+Iq%Opsei6-@mv%Bpn71TJH%R0Xz%U^u|7lr#>8(rTaPr^9 z+e)mH!HF}TIa;#hLK)Ark@*b1g$seh(W4&-3SzPDL*yxh#y>t18VHVCXtYrjW4XKp 
zoLc%dW~(d0BRd#%ij-`YbFb&&K2iNHe#^BmA>R~uIK4vt8?{oD{qNwk>`O=Z`cC&o zJ=-#2IfWGxmOZ|sqFc*L4WhG7nH5Vx8xV1?PI5}r%LqEHlK81|5l`u}h2O#u#Af>Il^arOV8nV+RR&waBEM=zkGp{Wb;w6+vusD(H46T6VaKcz2E(B%~U1y$f9}Q?00f^Se0EIcwATm(IA7kA> zY;1V+meby7!+Qxl(B#Hpl;<|$X=zmtRr8A9Ar}4 zGvF^YRFWVkEF&p(W2siyKjRKXTP-3&E729FcUz1&M5zDp$JVCN+2qt+66BnW&3-{a z^4_NB6D8?@WW-2xT$oD=TbKY78aHfcLDw1D&LDmLh@wK{nG()<=5~BQK%e=~5D6L3 z{7Bc6C9S?*T6`HY6$*#YKyV;e6?${*Xcrw+OJfwIw}Z(+F9!GTi~Z&FRVW|+yqn=c zy~0>SD13O8HC?i^GMPps0HZc9iyT-Q?alyw(|846;+x>3s8dBTYY#qKr z+PN!MyQqRhc}xA4m!ty{%q4OOKsEW~2gC_-7hPvs{0EUj-sJKVjFfdQvr>QpSa+7qaX#kE;_7uNW6~fksMsQ^_Tz|uAmP;b4ITxEFe{k${X*ne8R$vZqFdxw5aa168|@~cO-$7sBK{a zDbaJ=Neg0OM`2!rPAOCym?Ebjnbf3Pnc9GxMk;c+*rFn*Qsr)TBhsXu(Vejhu7Kf2J+fabBsmMozg zQ0RI>N1zpq4WNOkgGL*#(@WrP?Mjea!5 zL;QI0v4IEjj?<59z895%ndh@wDjIw+HEN`&HRwen4GWUL zm+;!Klceh;cb-PxCBT_!$l&Cr@4qR+P~qnfkZT`2*{47OJoKX~4))WRp^ zClHShZg@F>yt_q$Q0f&1p`g17g!aE$wir8w-YK3fWW)v*9;@g)#>%%v4EB-xY@^N= zHo++|Zn#Sd9J%c*>}<7uzgij!2Qi$%+*>-@N=Za53kZ-H-0J&m3szKl6wg(5xu> zG18R4Kxi;i2@Nyac&G9G?>5Twse|L1216uXA3r z$MHaXkdOTU`KHE)FulTm`J9D*xdz?0#E)mkYdb6@EbdG~w$OOKWwD}PJnRVa)w@R$ znA9RA%Jqj%iE(bb-bOHUOO3vB*Nsk*^Wt^aliNNl;=5mWk%dHRQrmTGjcLoIol>n$ zNwt&Q=VQBW-xQ>CPHb=uATgH!DbmeMLzsAnlfrU1`J_mZAH4C5InUU48h`RXJ`5mM z$bd#zG$$Bu%MobI=%%zivEX2?y2?u+J7`d17xjlcN2z12=&no-%JXhc&vR@Y@4*0% z(=I+d|Mz<773yQ6{3dqJ(YNU5?+OUn5VZd#y^*J{JX?qfOEY?M@HKuHSa;9|M7U9R zPKlVi<&@Y`hP!gm;3i&b13o#qReZ{;<0*ZDh1I}?Hg#rLQv#8gJAiFz&Pg_{P^)F{|Xp3(tqX?2gSI; zjErV)j2$!j^#Y?F7!et6Xxy({B`*Pq*HJtM1%tNM)bZTfwaiSQ+O+*Tx98=clIQY1 z&qu9Vaz6i9_Z2KY$Zz~f0^PKrz)owhrmXB?_Pn0Wfg`&4Cm$1?eZEW%Q?pn zr{|G=ME5R$2Nxzz&r|;yHN0xDs&ZYg6&7C?MQ=p$5xtSSmJwH&p_yQz504>FUsWkm z-;ol--q)28M}cEum)bzrCVJZak7z6@jXDLX4ct7Ef7F;a5*0b?>1@KZ)wXmQx|UHQ zg-RmPTFOmK!VJbh>6TJ{^cD`hNGy^--Nh0;~9VxelnnG+O+AY zQ6on_;INPdO_>s_d^=<^o)MVdn-;s!JNWH}B zj51PyWkm0?q`*nEM!v7LT9Vr8{s)rH5&XX(M1C?vxcfz)W{Mn&ZB+@+uw<>OrTf zn0q;B`Jdg;z!qFS~ z*n3E$eL=E?IHQ}QCsNO`NhHV-DbZZuNP)wAsEOTOo6tLX?^HvibB-)&ZQ1<-5$Qih zlOw8QYa38VbhQ)xuTI-a+G*E@4?iO-jT#3}-8H$EAgL3Jwi})iWaP-{%ru_QeP{yf z$$gtYB`8DRMLXOW2~5T_uMHb!&NEGbj(yI720?+S02GJ|U}3H&4X-z&4cVTsuNs^a zxhlAC+d(C(6H-f~cvSx!#aySp5gtEFE#20g+hh4)Imh@xJjU3*G%kO;2Q1S8%Z5S!pb#;BM4`7d&`PEm%~Vjj=LaVK1YGOLTzCB z4-SZW9ieKCF~SLvBvD>u(TPZ2Eh_5H-${|~g2d^cEpN#sz#s`-0%kmXO~5u-#tk%` zAtv_{lqYy%ZxrB+01_4$1q62MI@Tu|BePaZfJizWS6F;S4+}_0Z)CzMgi+G7h2-8C8Rkfd8uH}Z-i+LKQQ*GqPg_2! 
z4cPo@9I+cYPHn(m%OPU=4>VgQ?fjTCBKu#{KdS+zv~za(P8FX1f|7LVpPfs<l>Z)VqapP%GzlZtt~sUKwip66qC) zhtrP`4>r7Nu&7sX6JY{B-n%z)hn2PIE*5|(2NeKG6z4w2YX=9I`X zLffAvo&v|^t*H%4>Q&Ywq8O2~Ga9R$J4jCBHhzNL$YHbPHBwI6N!tc93E)gprhQqS zQN7Y7VCs~bsdr^!R{S$1rS1h_l2rWNK|zpCCm;vPHFRi7FrK0F3|(igvrqmraRVAa zAv>S}vY-JP^q>judF%a}vDi)-ZOBWY=9C(W4vN?t(8PEbLq}Ix|tK4TMNpQ{>oMYYOTJ9Rg~-8l)wYwmZS(&#$7itB_zK38kIu zT_tH_d20^(Eme04I|<1jJ$v;&nELGGOofRsxlpzBmBsSPAYRauC;BV@RnZ%f zu+W*6YafZ8J)1A&wsSKj!`-*F_+_i5tro`-Dsmi?Iz%KIOSzl0UEau%BIlL7ayD_& z&Z0UVWlI`!iE9pF3A2)8GO8^z=&LjX`7p)jF%P+mVK0wJBL?;dTn*f#u0EWoW zcIJwYFwlR71~iNdV3Bq?8~^^rYj1jDI+Ee9hz+JH8Er6E{eeaowS$+}S<6{GkgXQ{ zQtHbO6dqmU7L{pgCn~`#Kj2pAF;oi1@)43v{Q;4m3GNLB<ZGw>o37=Flc|FzJRKwgMF9E_~#lx~y$vdoi_~W@7 zzq#olG2~g!C6}hXgY-NDy8b_Spl2@K7851)3c|w*Iwug%k4MLwgI2zEJ-tz8Xrwnr z`r?P$AsEC;Af>I=jB2%{HjvcT$r$O)I_XObQ_ zo?*}=eD0qm*LyBoRw(H?w~XuGVJadcc4iml?EMrQGTLBpP`xg>G?JCzcxp`<_Ey{N zcy1UT7|y9!cwN-xAB*|C5hCK%21KMbPaAxgWr`e@c1-Qm zNGWZ_rJV>8m$!T=NpuP5B1QWC17@ZHOqjbS^=0y+?F_9nPDXpoKK97yhPf`&?uXyN z^h?6&M$2u1) zGf1U9)JWxJ+QQf_)tV0yiq?>~;*#_=cDP>hXJ#&%X?RR%rjbcd%;2Q%IT@VnJU?k# zr?%T#d@?#;!xwTP8ZKYVzL=dos9wFa!JA$jI=tC$troSLx$*=n5U^liC{M|QCIJl^ zZD8!6m%ywLj`-zuW-EjTW^?J?BnMUbK)SN?Zu|{@F1YD0=|@O#WPI?2(Cc|LfJm>v zH$qP6qHj4m9Sg{%&c1+n_eLhHwxuE_k&@xA3cFVua8sx~W3iUEe$@X;L^|)VEehX9 zbRsfo6Rj^<+U45dq@6l#>w?5i+oIo6DG9Hn&*^}aAGDJ zb$;>xGHp$J73|wq6O1ok9&e>5Y+t^5u)|bB4QBn4Jv(00TQoL_t(i8)%1OQrq87 zb3k;QM6X)gD@3UOsEL1@L;JI@0TC-CoJh#oz1vGWo1@)&_z@L%e!-9T5h>U{_4QI4LYX=Js zsn_KtKtZS%uivI@h1nwv58hsvVgc!j<%8{2TGqg}u?zIiO28u_Ux_b7J&!gfPOpIf z1CHL97CdPDc(Nh9d!s4dBJ}0sL{SE%*)4v@-;mc|o6zQ$1_2%s#0cgQ?zQwjGL?!P zB67ZYkezIW#M0JP(oW?q^;&(ijj1-4Trrb?x z_v_csna${D@BiW)Xy`vf0~$a<2bxp^qgPLwJ1|objTw}$BRGCf@DfmT9DCe}*WIaa zF9$_Snlwkend~UM6ZDKiuoGtD;HBz8|3BVh>atr|42l{=|4ov15$s<70&UdN) zKqVPH_RR+i--H2}0cZZf9eGQoBrZv(u5&@@wewg^5GJZ7NnMiMAQX;Ingk#+9Tc(yVq_psnFkGe+efUend|o+L&N|F}=dz z(era1;E&&V^ znrR?T=$BQQfwF(v0gs9dP5{d(0P`%N=`(rq8t8J7mVG- z_KN2ZYxL|P{m4ub>X5I5hl>w4fO!3g-lRmQ3)-`QxRr?sdR)S4z3%@2)r!4&X(|~B z00000NkvXXu0mjfP)h>@6aWAK2mtehE>{mxfBB370RYzl0RSBU003ulWo2$(a%E$5 zVQy!2Y+-U=bY*ySb#i4caBgQDSan=e|M$Kd9nv9^64D?gIm*EZ0cntKkkJBC(j5bp zE)fp%7Pd3KH&=#WCGI!nkG4Z5pIY*Gz>HVpans74AO->$0pI}$AXG;Z1UL`@dSF0^@Yf*V zJ_vXS253Nl8ximb4A29BHVBXf0lXl<0}QBwf!iQJ2MoxAfqNjp1PrhfiUR=7gmgfF z5(t2T0Ui*b0tTLd0XYz$PN)e8a3lhF0l`2AqfhQzF2NfC~W;FyKuDSP=m%03Zzp z?hxt#0_=zYW)NUQ1l$1tP5`(M2AqiiRsirJ0>r@pBM8s{0qg{P0pK1O&;bDg03bp@ zjR+6~03k5oLr9qjFe6lwkQAXYhyYOnSO6eK2#jz`=m8?Ygb26`0Kx!Z0s`&fIbo64+ad00B>U8F$f4E0>lXW zfB`cQ;0p%C0U&?~&>exX`}&KWx{oKd zrn9q`Gpcs8v(>*#zRyN=-Nf}Z;a~Jgy6*kBjy#A;y1cWyc5tqFFCoc*x}1rgjhRzM zF=T9}!|UKXBdve)F&W>|%At=AFjJELRC@sw~u8DUYR(B<+_&Vgd5jLFdo z#-a6@wZ>-8R-$NHn*NGI;SQBB1%=JWa093po2sQfJv~JHg^$G+Wn|OSkfEjE*`udV z@!+GmYNJL6+}AThcT-JVaxS_@s5~79D|}klAD{G)NTkf`^2Lwu zAp%koPvzCx1ta_{T%Yp|l*rcQ+|LZo~D zZ&t%dN}Wt^6leXR9>X_}$iD$rfj6&-{R^p{{lAX-o7J`X;g^*Nzr7pQVU#SFQMy;X z5U!(gCvuhZ#-k7;Avl>SfhZ|bl6oDF6`YuY9P5G8>p^wr(Wj_v! 
zFtd5Fsvc~a`N`9B?V_?)IFT(gUz&_TTa|X0%N*f|*e&r~-R9lC6@bt?X+U#Txn}ht zAcy&KBQ0Z3)GwkvyZO5>2$ zW+^;a*}FaI5v|eq-sEO^mIF?zarqO4;LkIEYb{g#i}9MHhuzQO#wM$(U@weUKGuJg z|7hVM`D?HZ!hRx=HBKYC`10cK+G^KIQ{0H#6eaCkly$3!T9}89&4keDPxvFl@#AgN zw3eCb=DRiT7Ja#A+@u3Aa5!e_hW$mmHyj1&d zTMr4dGK-X;JSC5K9d)-p@QeTYpSmggrIN*TNZog?S;_Iw5T%re1V~x~SL*!BaLel|>xE;s5s(0v7T}7gie#h2#wpu!}G_<>joE_ef>MM7EBG+YxPS5@i)I$ zUvl?)+FC~ox`X$wtyS7?BjA7C>HNrU-!oi!Z0EuZ{@Ro5FOt_MZo z_|Qtb)76R{u$17z&dWMfpThQ^>9xw*k9+d6_LN>vDQupdo16@M4r@@JygaB_NFwhXt-99K2QI-i!X%#i`>l5;qKs1)7>6Izu#b^vh2!_-9(jP#3pnF;MHQ&YOJA8kJGtk`_dD#z}iJwdVaX#QfN71Mb>U8tfl zub@GX({}L01!!N@#Bg36Q5rr>y{vPWpXf}5)FC*pL!zdQqn$Vme}7(Y)0)S$ltu9GZW@}IdltK7Bd z?sV07{1P8&wfcM}PC4wBLB_N3k0E88I& z+x16HmpWamaofG7_`XP98oL~tW#Z2yFI2GC5c~403ENZC`A@&C7^W#YJXKZAo|EBU zbYDzA4jFy@HPh0?moyI~k@sh8!jn7`9ro$i2Y28toQ|;Dvy)}YeQlPl`r$xt!KdN! zNkarI$F6*0yH#niUPA8lmZXaD%itZ-f6szPsaBRcW5fQPO6NCD`J_yuw++{bkM}QD z|D*YJF|K1T-IgvNsxkhmO)jT7a#}yUonAd=Z0>@D$3V`he&Cim3T@bV4@m8qSp7TH z-?<&WJ?n_*8|-iPaYGxebJ1^x?U(=N`sj%O4l{x)eC}ex_OFdq&Lp4jS9_T6vQG+d zt*uTm#)ttiLJD$jBv}y=uZuz}V|jjr=>cGsbuPE86FWn{({10ZqLR)iQ~Kyg7e)zX z+U4N*bW+Ey{DfawNAwn8RBYTh-HLgVx z1LW=rDdaM!BVmkWBs|WBR%UTmA30*L@{GkqX#RD!FYpmp?_E<36X`_WIU!FEhepUI zteP#+X0yiprQgITQLC?Hr%c>l=Q4LhNIXT*m5}q`4vGIvThnfG6KnK!$Zp+H&|4#y zP{y%1hpgXaMV)Q5>>7c+mG-12(OWX`YX*rT4%;OpGS{dhrIb~oBc&iX$v+ON7jp)B{-zThdc5}&iuAAi%_V{$IW*3o{Pmo(MZ4WFTyb+;TYP#*aYmm@br+*NI z$@F(zezo{4>ObPhp3Z#C;f8cTT!^na?(pN`Dv|nsch{-ZO&HkkgnBq5SiFn}1O4dK zvtm-TeA%BuGFB7fX?fjw9Ma;Acv037#Ac*HcN9Va`Wsdt4#G<>1B6|)tx>WbuglQ2 ztfL@C&-ZVsKcz2e%KzlKhZ~%l*!&pT4CX;Vqyb%Pg_IjR(}9tL{ZRSblM@Effce3D zsZZ~P(6T)DU!&n{q8-3KN)rDpi;begXBBYT7w~ngc2P;V^S|-|y{8>6p?Ws^beWyB zlR!bSh|XPV8#iX69##DT_Zd|I+Xjxij~uA|GFR3Hur=MPZ;KJP8_tEjw%zJeaxe9mJd2iT;N060OYj)d`JcV z7_;t9Z>;~@AU#w%`)I6aHFIrx&`CunJHWR~&!m&vVzV>t8+SFl#78U`bH%wjeY;yA zM*STzGY-A345E z9d!(+me5nfEQPSm3BEhjkMwnNTrl?Go3nGO_fX(Sh^G(YU#8FE6wm!ro4;RuX}?Bn z*L9C&bG`BzTJ33` zpF87$JB*cPe@u3pNb8YKysHz^(wVHOb6PeB&F1|I5iMJ|dFS7&l-r=>U{^7gq(?>xwM znD36`AaT;a4@v!|=ag<$i`=p+r^wn`+4rj|;v&Y(KG5d2qDE;2Ys?sxA81T@hFmkf z*mZYg$62rruuOK(42|%Xg?=W1@(y5q-q6lng9j`OIp?B+y=B+dU+FuX-kSK)giOQ} zbv)0tW1---OxS1YKfJVAhOb@F_4}G|s zuVQ@DQ*iUQ*QZ~HcucD53(GFf1{|I=KXE19d^wLpR>{4CU=0|Kasfab-AN^R{jH!v|lNm z1kdfiP|U0wk)ZeQy~3c7@w{aA@b);@-R`Ih*$us`XY^8BS6HdPVSg2bM9~c_8 zI*pCfHDk4TcifLEi{IiUh$kL6=zYAul&wL|bGhH@3tFE^<{*QN+cD|fDgk9NNRj~i z^x_C+yzdGNnzEU8t`{+v90>{2)XF?KzBr{r1t7NISY5JrD$#`rsQp815dUUL-BcLJdC#uVNLPBBLZVMR{r~M@-QE?Gy ztwypD$qwa@Saq(rq|+KDxZ%k3*4WKEww~#G3S^7FyUC(Na&QmcAafr~A&`ke@Z+ zzcwDG(iWdpM|_KIFxV88+!lu8bAi;?m!e@ZjF&2cH%p zVM=F5RhtxtT`+dm(6kh`k@ja2gIpj461JCM=nuqAr7jz$|0t$YeD!V7lqYzKMX5Ez zDxX=KcMJ(Yf7pW_x-P`@u$kAOet3KL|CRwZ{Ga zdH#4M565CHv0CY>X3V|L@jK2*AD+$|z&r5&u&sAjys#fxAaSz|{oi04%tI>LrTO;mTW3q3Prmi}Rt` zM61v@|CAjb+HD?V7Au9pDCQuZo!6=8qz*qiRAiSb-8Q++mp~8j`_e82&ix(E1n~ha z2i&DbL^2dn6V1sRa3SF^Pq39JQ|z@;AcoFty_Z7YS1*=&!omOaask~@9+FQBjZusx zVEF?y;ztzI4`bSTe|E>14*d{f0Q`^L(Yi#@EZ2fDZ5eu!! 
zsii|Uwy|^l-ne*OzOWjWc5jk|_3gxU0#%E6e0oy$rOMt9l9SaQwJ%}O+O&55QZidK z|76i=M}hr;!BNkHAUQY-W0_w9O;d(+5a>8{YS8i$c_fWf`h$m>U4Bv1`DQ%oZpvpK z6o*?1XJ33v^?G#wXUYnVcmKX_a$)p-1 z{k8SQc{ylOn7I<^CT6+9vKBW7gNtU_H%(dXCU>d2{bkZsaNV=5swV$?Fp!7n7jA^R zGd~WaDPT0`1<d>tiUO^?^p{C0trEN1&i z!!gt^JEYihNbp$)yZ@!GbiMM?}(ziJT)#^JRY6W zZG0#2V*%80#x$1<_ts?1zAFoS$bt*b;eUZ zEvs#PKSf`}`C&wcma+`7m0k}defbIa?Bub}}ymk=;q-QBIVW5v|qA8| z5*pLjqVF5gs35eE{!T9BVGo1+G0Ty51%Du0m=W#jmNiS|%pI#;$ zDXRdne|p#p!d?Q+16P}z07r%T=)GHWZepT%Xqd>8e!5bsBX-HeO|RvE`Pke_*ww*Y z{SP5{ueWebwR+T)DHI3BTwD9!H9816K6eW5Op)3Cj8c6iEdBFQj%@M;)3$DmR|AS#;!22n1IIj)FZ8zol?*LM=-l0OxC<)weu!i$37Wi*XhEfNkqJt2cU(wN}wuE(Bg{B;_;{wBNl=iCqqp;r=51SBflM!m%V~Gk5L?}5X-@>4Vi0T20 z5_IPVUPXi4W);uw@o&?cMo57zW=XD?p6rL1*q6~e{@XD&dSrae6xC| zkXz98f_!}$N-@?NHTeuS+eC53?H}`9RPV`XP)mtj?Z8k(dKv-9(~H}8$}M`2akN& zY>xs9`voxOq^EZtm&2*#^FQY+19=$y2TV_+;F-W8Cd#kResMr+YaSowd@|B*YX*#L z9==P-fwaNiFnADLr9lS_L&B4lQkdwbW#{4==03}cdt225!!*|D^L$d+mtrC}=gsW_ z&^U1>a)6izvC6N(7<3I|rq+lZccdW(c#DmB_J)*pjHMMRCA$^IEK-jM<2+3PgXhb` zn?(O*s+GW&<+jh3I+azr`E&nIs2bNgEqkTet`g+=kY>rYI{(Y9zP;6TBIFXqVc_=r zQfF-{n@J!3#XE)CInOzr>UW0v)xMd&7QNMR-!EOzGg<$=czba@US>5N9E0l@j9I!5 zI3-SfKoqjilRw{LKMI^+l@yYGgG6&n4HN4g1AL({}omH@G-~V`1fByJ!s(y1aJI0(+ zX7?&UZOz*exNt>UTZROim&nwf^xZ6k)% zPj(?}HB(0tdEV|!I12`XDpg$7tU=&|SpJg#UQe}>z%Ey5##}VyN2C@<=&{*CAEg|b zHC&SW?!F&h$A`pC(ssXW;pFR+gPAgPv;WZWr$mkOEE-&~dSbR<@~lo|~>!2?&$b!POD$3M_ETAXHK|7vGbja>q8WH(dw(N zpQ!BNPeE1Na&vMUrc@SG3_)6EZDE6zjB@;2F7bU8nn{g|OxujMYZSfW*q#og`R(WY zGkzy3TK$Q*yco$>a=O83B=!l+UBU)HcpGET~8_s)4M%qPAMuLdERl z=D3{`w`Ir^D`kp9k!MH_P-6WkMH$*RCFVldSLD{FG#b8 zE9)G|(g<-=Z`cwjgRvGg9f-Xi(-?a;%g6FLy=CLUQED)A@j9aPm zHD`lz)09j7PS#2+QlO(C8nMlZ|7Wo^YfPs7Q?K5aP@ooDwE>nUTGP8-os>3x4@EY_ z4Nd`KvKt)r1q!m^fm1C8Tl=~8BU7dmq zPMRB5(Mp3>sA88t<<_b-UFNq`Kd=Hp>5jjj%AHW#JqVyd4f)s_Sl$sFVg1n_$*l9i z&nao_3byjK`y+>rJrQQj{HHp}03`4MpZR*`b{WLEUw1Ym+AZYTSWG0fz@;4pqrL0- zjv+H8R*X+UT#!CUn7JKhLHG?AsJ&y~!E=4%to!W0+WpQCW~nV<+Ijj$qqjYaQ4cwW zq_-h|ULb9iG@E*!6bAK^;I|4c_-t2)quEc?9+&9zD+L%D9k9QD6Zjd`ky#uNh1`uv zPlR$??z{L;o9iQn>P^k8BmTS>u|90DD(I({n2UlqF7(*Gd7-~``mE|ZzePtTKgZW< zJa`kI5~rfkM}ywCbTo8rZ)ICFeNsr&&sCT>q!mRAfUUPrM$FHi^B5HejOF_xv>8t) zE}U0C8`MID!=6}eP$n^z*lwXte!gQ4KGqkgyX_{HN<*@K;#r1?RUpv>oNRQ1dH(bN z*u9h2zie+<;7)qV*lKGW`ChY<)c*df=Wa8(mVEF+s}{2nfmYl1Z;+X9C-VAB^=Gy< zTfS2IYe=&C-)(F*BOk~MOQS02XSQt{Gc^kf{!!2HWbiSo7@N&>M;3|NOf?(Bw)bH`2tJjR~h}kcuq#(+F~La|HA)Mhu$VAO1Z|J zFM@4s0sDn}1SvK?TE}*`cY;#8VG$FNpmojr>wW$nbI>7Nd&gMNd|mJ{&+l35FL4#? 
z|FlH6nw5(KK8Q7BLE2B;gLkPGGO8&H^i_J2j=$EK_N;V!{NzgzuQ!z~m>jNPCu3#U z?oxdN8!KUae@lWvW>fg0wIy@zjf5``T+xVreW|%Mq*c7JTZBagp>-c(yR5b!nvnik zI3B0WV;H} z?c~s?ny;V4>#L(<#+1&E8`)gjNe}a?U%mmwG?2v1=#I+ZeIC3uCt-z&8g1?55N*J} zI7*ET&#RnqwjmAgSA8>CIL_#=b)^sDxvs#NGs7*A%WoflCYf8FjG&W*XUCr3<6hVel&CXc7S z%qFi;++EAg*GGMPtF@>Q{j9;YAyd9RpcJMM`KsR+_I%&`XLi?Yc<|MGhFj{yhyNo7 zJLbG&Qw;$TjpDIysJH*ntNrZ}&}>V8rNuF zYWgBMRoHQXCk?MAM)F7i9l;bFhG0OyTQS>$vRY~JyGWC*SxSO0VN4ZPW3r3(UXPp@ zM`UIA)|zW$Ozu|OmO3x{Vz;E3c{}#e=UO(W)G0^J=s*Ht)THZD+g#q3B9Mo%40|&z zj-}T&B`AS_9+Y=5kJdPwMC}kYzH%T#6~vJcUkG*JWFn6@g)ot`$O%~PJm58Wupz-2 zskhhk!%8X*UtRS0)q@-!gx2zU?%B+gv2`je~qChILS4K?~obARdgP z;R2t9@55MifiFfk!JFf2w%*ra-g+x_Y+I-f6hjRfS8cOJPA`EfR^Ya<)XCm@B0~IH z@Y0fD`@{37k-+qEQ!|lG^%%E5iz9)ZyUKow0Zk%dd6w+IKQ#32JX5dD-ogGXw$4@9 zb9l9N5`o`EkJUcuPps0!F%S5d_0taIF{WlUQ=M56Wmhn^2}_6ZyJLcYk(Xy~;wooj zrp5x4n^o#}B(E!PZ!uor$4=`Ub<%%#ivO$0Qi~NUA6MKnd&h00q2qn3g3^B>v%@2WyuT^oXr7-xl7ii`;48;~R2p}my{KUvPx4Xo;6T;wcYz=LuUp zU`ik$FP|)MLKEkytRtZH13iup3rvYmdgP~lg1Ej6!R5wKLKx;BV=UkUxBRZYV7&iLM?%&9AJ@2jY(G)xN&btrJIA*(zV*o z3iHClc&DYeU2UvF?aS5nZs+$`srrjy`OK$I;W(wB%1lgQdAM>?X97dSCGOJK ze!TBOC-ukprzf}S@PIN{XSmG<#}}(I+B&2}yXoa5_2+nSekG@sz2#=6sd{20uEITZ z#h@f#oxe}OqAOUw;fl)iu3$8Et8JZ9uH}y}UGf_}dsTHGkV8sZ56o++M6}kgtf?W- z^b`HrZ>UjK!SVqTM#jNu5^yQ6DYFrHYh%(bH$Cd7k2OQkFQ~wGt7?U|g|mxS_utS` zz?RQiH~kUR2^Bt~3aikeus}hHmV@z#!d6}SU zsoj{!x!xnvdi(EB+PpYvf{qA|@0}{sah0bTU>WKxB5yQ282>hvYz5@d+ z>P2wyFLKx;cZ>TY(!71RKZp((wc%tFw}O96=AS7ynO2d4*$gjxGU7%@7HX2^JT2Ve zr_j@pG~@RQK!sg-mmQmJ|B&E&iO(p2)zK!-OH@T;oYZzHKY3D9)SFlAoq}aQ?bM}H zLZh37edE0WtE5IPavOx;htp0?=OL535zQw2cQEFbkG`TfiE4=5HpCWbu&3BsKqOgETE$o|fKcTK7Px|Z!h@=(CS#}YG8 z@RBtsjeMsfQJ_5$n(<(x)gw`odt=q6Z!R#kjSZsV0u1COzR|l%BaGvc!)T908eV4? zATENxeJe0LVw4yS=TJ%zA1ZV#bslDD6%Vr!Ywjb2eO)0D)U+$pFFR0n1 zz-hTq_u1C0=ulwT$^=t|mE!abzLG5=o#Hp8V)hs0P#pJ35GYEFc_6qwj@ynqLHz0$ zXUUT{$g1LWWG?DAbMw?9B01%vFRVhiFpsFBfiEVb7P`nMM7J~T;z44A8F)YW<#Rc% zm2Nr6J3M_p!$B5;yGunyR-PDos{_Urv%2~N>CEIUx&|@nv!vwbj6gfTzBYZ!y=>yZ zE8Y-G6qK(Si*)N36VtC%0F3u)PCQp)PrlKLGdtr~WL}rHs{OUY3LCcO+`1KCQED%O z#$jbXC!)^}*sZ zJo`-2WOQmEdPZ@=oa#SQ(-Yo`FFTd)Xk&{cHw=KB+2ziaTUW$?K=$>Pyl7*IXzR93 zr1{@_C&67;A)`dd6A)17*5Y z8L#Q%zJr*g_)$9f6K)u0?-%b^Cy``__L9%9SV{1#3KFciKZYsNf9=SJ=)2ojkrz zSgz8?WC>|qJf1(rl#A)1@SvGn3mM;WKO$^+90GiO`Dm7J+h!y-2DaKBF_Eyk+RT2Z zL(v2*(-KRxmIA!fiH(VT_3!UsI2jh58P}-Qisc~zh6N9DMsjY^ZQGvyW@uEF>Cn?C z6mAU~Jj=N?xhOr8FDjmH!nU;6>sqABx^KRFM=$ zXi!FX_9y0^cOkb*d0NEr+hDJgq4k}+>)V=Gkovg8kB?X7u{|HfteUf6=tr>~kib*& z2q;V}C#){kpd{m^R;vp32$k``C17-0uU0{{U`S{yCHO&aQWdsbfS{-lw*G-UH@ojA zSJcOSgtC(au9mb6V|gt^NQNHHttN^O3~jK_a(hE5W`_>NKN?wkR{54#g{O%$TZn8z zZCIM8WLlScEvh~5|7lsukuR@fPv&E{!e;rDj9z@LC%4&9(Q@-|R5hC;Th!^e?PN*b zZp>!Om|6!aID7PwwJj+i=;05@PWtZooqjLDjd%|k_d7u^^&;0!gd%10N7Z(6stF3+ z;_06hT;73sKXDjc3s1~UcZWAC1q1W%fb4H5Ly~rZ_kvr&sahhVkRHws} z`brD#l?xQNa8V}2CCvrCGEmWdUo8}1@4YqKVES681S28TVte)6KxGtLlc5&y44AsK z3X_|R5RaIgcv%h&c%eL`{a{liNvEeHwyaY0sMJXER0*vojV*OfTrZg$engM(k%B{9 z3MH+ZZjCsTvd$%M&4vUCFr*(S{#dFh1?Yt7z$qS1*DWuHgh+YH^jy`L3rtPAQwr+c z-&t7nf0aD!9d3b*3oxiZ61X!$OTOtNokq#o0kaTJhP00Pa9kOpm1?2BVY(^Iuz#t8 zWGC$|h#BRgZd#tHq@}obCc5<{NA%C`_1VjnJRfOkw6X7>l1_m(cK>Gbo1iWJ`M5k$ z(2&2i@Q?qzBTccqA`d1L{+!#XoI{VeW{Yg?&#B=lpt$KlQ#nAC!G6} zlCy-lK{I*$ok0)mJwq;Z7}6Tq%tTK6;tr5tD!-ZHWjy;snJVJhuDHwceY15-cam;f zXV)uo6vqzsuvbKTEk%i7>=j!x)VsYbQ^T)?Cmq~E{#>;GC(IzRlu(W+*Wxte`Z*Gq zd)_2zQ;UCCfEr+=vN0^dU(g+oqHd!!_lgzfx-h1ZIWsT&B+p}|x4m@{-vlfe-dam~ zu-WD-&b?#1CnX-=&E-5~C1L!d^P4YvAq*-0qv`L-#geC9Qi$O*f5q%ZTDeo&bUgth zppz*uTki$mNeYiqdt&bvake!OW+6kGtHSazay8n--}#{gOD-jGdY1)dLit7|Df?pc 
zheX43`x+=mq@Oym`Fh7|%A4mO#KfAn{{-V_yZ-nL-`M@obrL`;LY^rxaxI$l(>71p z3NpwQP&?tLJ$oo$@GvO0#tK^wwA~XGS4u;ET*T#7X`UFD$iM^C^9p%cP$~h2-c}dS z?C#97Y-6nZY3HszSgx|SRGBbC$FT~m7%iO1g#LZ_@j1YwTGnexMqd(=l()JnspEGl zSa+4(PZ8ltc9T7yL8E@p-O*U?iT6uB=kd|uhc!|PEj9Smc37wuTEg0ff-31-pdb1v zg8Z<#AD@P&;E8UI-`fc{ip9N?$W${mo(oM*?`v1dHBBv@e`;$%F~d!cD}D8D2>(EH!|2QcR0tVCA;O^15Eeq(rUgmD6FaFTa4eX}&fPD8YSG$$7_=g0 z;jV=WiWz2u6+i<6OJV;E za>XtOA~;4&!`ZvdF<=+F!QAHAAKrhg`JEYUE8-7(O;Wuf!5XIM0T$uD@bX@Fl&Q0g z1YEr5`};22V8I=?p}62ga*3JvbmY(xO$QIgU^}5a|MwTCC?Ry3;G{U5#ClhC_h*O05sSkolgM<&O52$nH7Yl9N#h!X8zuKhOes6`V$G zgB^mrK;Mx`_2TE~lM!3Rtvv7{1MvruZ*J9mYPb|!$QT!JM$|C0tB!xt1dOMO?<`76 z8C5|v8@kJWADW?H4v(IzYizW0kbB6)iG64!jv&s}A*nnOvv6t@QW_mlW{@Hg6bjpl zc+2g!5#&xHW3*+)yPTKtW%*MuPqT{o&Q3fQ>iL$aa`LWwv_>A@pa^8Uf042!0ynl> zy>MK%{7*&j5ylk#K$D*ruEGD(Cub{g*(+_o=cd)(a85A4jF-yjb-x>KeFluZ^T)H+ zj52KwhF2YbA#u!X;9k1!wzPDpmniP-9yBWJko-PX0hk{TFw5=pXp}Svk6+dl7^7qv z!BFN+q<9WMz}@EG+xVQ#4u9)&oHNq&rFA`lLpeuUV^vIFnN~1F-*IUN(~;jvGMv0t zzB+rCvzp^gzB(5a&V3>jjMt!(Vo_2lj{f-bi3#+V8K;s#QDH-YxW~Gle;M!U zI%c~}q1lvj>^GAlDa-vRx&LCcA2g6$SQYv`kl8l0E2u7j&utG}6twIT2PZzZ`#b$G zU)6`broU_I)|stLai$f~MF$hpK;A5n?ZdgwZ z_sYYm_G|1cUw@kE0|UlQ8Bwd5Epw9Y&S=9}tGycdmrRpXg|j(0jrd;9Vec$!g*q`$ zUGe9B+KmRsp)M=3!?rkw@VGDUeeYxG)fG0F%Db)?WE;`4EsH>3kU@SoSn zEyp`}Lm3hOGH0!=*-y7&;b&@|)H{CN1JP4R=D$p=tjo_a(hfmYgOP3?vSuK%a-sl_ zcfL^vuAFA;^M9SPq;-Gs#W*7F1-e@CUFHQK+G=ooTvuEM+IL6dm;Y(#**B`GoTUVd zZ3O!WxXZMzxAEL6>5uJ6a`i?8HgecQA05dI*Fqz+zI!|Oq8JwW9&FZaHx3)z^4OEH z-qlYF8V)4$R$}%LV+QAWzjD8gdQA!Xo$g~dF0s^5yJr7i$;Nv{y2(WGd`d6!&Kq-) zUfX8T1UB1SPXqByIky;61B_%_HQ@C>RtKQY8zuD3>Fl4k@0CT?AcL3MD}U!&TLIbU z_&GXtl?dXUkdFz6^+9qPuH9-=16|0lo#7TE zJN)}qkDx?jIJhtZt&Gd0g%NK*s*r$r|JyabPZkGR2b~waGA=9gv@*#IzNy#Ck5d{& z4)wcoEKfX?e;91zUL#E+H);A#0CCjxYZmvVGwFgZHLw5pHxnDm85cC^%aIw|n})3N zffLo+D(QExW^ zBj4ROxx~?s)-@t#v5i>>LrjfV5tMp7_4#HX!5`MWE#fEDNnfD;m%z=jtUQ$>fWLh47RI(%WKZopKJ_*DY2O#dRkFlCcV(7^O?^i~sKx zW}@JodG(fcD~UO~tyw*dFfsYB8^MWIpdu-qbaqR&>z^#O9x$UrFi z-KLtilA|~kuX9_RVkFO-GWlu-<`S!?*WZ#{FAMblrr=z6js0nSp4L@q-bVG9k()+; zCt*eFvCq7|XOsi!kaD%@P=yPw9xF~L<~5xv9-4km3>M^>Oxe=oe?N-(=*yv{QPcH= zy`(WL4a=++TTELiG!R z{7rvFnY~TW1hBu-8h;rGnVr5^JM7&W{2`%p^V+4jpjuk=_KrmeB=z$bgAAA7+P>lh2uy$0YTc%?@D?I|<_mblhDf=Rb=Nf0EZ0-cnP|oG6|&<;P?e z$#e-Q@L-DOl_swz%{V6`D1>OOn{q#v>r-XH&pftfrBwFrP0}VdY2gl~w~M!YdcQvk zUh5};uJb20h8wE%Ph3x9?ch}9Z>v*^m0#7;9mj#su+R&=Hw1=?dQI&HAMzJzIcjCn zAyMzEe`7IG2An5thDuj4#6$eF(IX`@%4t)xGisI&&W+|Wt@||TcbXt^;EKQY81zf_ z&nmHcWiG{5GefrPIIfl=`qLDWhaIr~r(b5v^x6(QC>WI-+Tg?%@9iwDyT5#9iQj59 zlq#TH8%cWl1U#tz!6jk5#yLNQm$b0PhuctN{4dOU<*Du6)nE$oyHNVxcFZncC zd)lzyU#UbgVBa@EOlQhdN^NK^qW0{BoL2Rh=hr5}Y-LV~dv%-@_(Z+)z3;oWw3L<_ z%$>mVu{Zz>LG!+VKkyI!%}b94mtaWBZL><#!#l3N;#78W0xfUt6;;$#y%+AePeS-2 zw57R~aW|G&=D?E`zlp~ri_Z5Bj%qtp|MN-RvZn}I!H#i+ryYG6U>Tz1R(@RQrv)mm zd&J+=E+I7QV-WO~Xz4aidu~e06uN?K!YVX&uWItPLpsG3b&c=Lncx^ z-zQ`5_hpP3P+Mtciu8a~G81oUL^Hl`U3PhYz$5}5^3SZ9Ez`}s%dJIWKPI6)qfHF zY-TiS%3g+6liKyBvJYpG-}E))0=`k>SY{L73kA8xhuP$)Dd(l*DvnfRe=-hIOJJ8< z8n-)C=~AmGZKg&78x855((Qed#}+KxeUo&qW>lUHBo>i(?iQJ&U$)q}J$)G5JW=Ad zjIHx%JY*U|{nX1p&vG$V7U*rS8~O5q?bi%Ysvh?5EmMz4LFl zM+i<#qK_}?!X7<6lz#-lNkq&8c}{`i9@+2#Yb&v8#xFFY&Lhj5=gcI!o76>KXU(3n zun_A~R%z;nR3a-*W2~m1;#^qCM<#%kCwb^>K$hS_sSvQ-TOR7W#}4S_dSNStEcauu zt%LYn@`KS8DQ;v#Q7OBiTI0?I$H0DJsgPlLTP*ltDrEWscYT-&InOs*&MbAk&thBF z6+(Ll+h~NtmHA4a`uOMPNcNBYc5rPJA;fgeab&5Ua4$nOzS$36j3{JF1mR{t+w|j)(^o>BCxkR(;wMVYv^cRiowgZ&3JxDt#K#^vkbmrrKf|J~rWLHV5{?nHmId4+ z5HWx}*zLFMr&3DAvx0H(SvN6wgl@}Pdu3swicw4@)+(2{f2!Gxu4#Ji+}=|y4lC`@ z9g&fZbw}9hEROBMj~~mN^F=(y>}Ec3L=Sx%MdE5(cc=>vCL>&68{No;BWV=pZcvlf 
zJ^D!kg^e<}x;|jJO)B$c6CqiytCy2l`Pq7KjcxHyvnvEA=*fr*^w}6uQ0cs_sXpaE zW~ORV#l;$DGg_XisU_G?bt9ET?CWJMP>CEYZAzZ8?y^80!O5DY9mwP0vt_|6>T&>* z#(b|;ZoB!J95$~-)dYVB@=%HD1EI~xf}t&Hp)-|eJ#QRXL@U^;>#;{C?wh&*#s68O z1FTMwnRBgs%z;3c6}WW|hB}XBBG?wg7Osyn=e#~xclJIPF%<%>Zy8)ZcJQ9y0;rF$ zrS1eTgBzDw%Im`}D9=;0RLGKHdl4_jmHE0(|~ zM?z)CZgKE6KNI)pOM$!<3}KI~g)V3_x~gE{5%x&0{s=&}u`luD{%uX*DE#p@3Ai(c+)iSwYRNVu?!mU^F6r$~{Qg>B>`6ZY7F-e|E2KP)^!*m`r66${m7$&2N# zu06!wXIJpEIx%`jOQqPBr9${9981QCJ{o9iH`iEh4hJ=<2EdgrP4m5qe!v9nJTWJ} zjV1*`mFO>vi<+G2*^IJ+-=j*T73}Q#V8PH>PQIIaMCQcFD7Hs-@atVl#kKwjm01;x zMP)Gx?UCRfP4bky0f@`V^G!<+K#p2fu&iLZE&p@hHJst=6z5bIWVssDwEoW!=bhL1 z0wkX}^56m@5pXdY?|HfE#tC*OFcrd~`#kR)x=(R#T_K10w3La&&0t%f67|RG6#u>F zDaw*xU*^?q1-o&u?rcLv*c#qoB2lRjwL|JnMPIycQ9j~BlT#qo!5j$o=#*?m1CW%= z(fI)RdH~4e@)u;c+9Q(;>`OGa#pM#IKm_s(PB#9G2fuX%*sXd;-5$wp$xQmtmGL@z zgjHCb(-oZR)$)q?jD`is0@ZJ!iDR`!H?&JAErXM?!mHlP56OW zAxtlREVgCaxI}Ok1jcg$5kFhNq*#Ok%BNId)ibN)a!E(Rix?6o{u)KO-GM{a* z(RNBfaosaytKDKUt$y)imM*D#ejku(GR2B7IuQW!;vAw$jbG!aAiWLbu}7@%u|0}g z@-;yI)NAH@RU#^ocL$&B*0n6?9#P4=JQZ7iGUTtz!lFvFwi1n~V-Ge9hP0@5@U6Pu zt7g;`s5=@qPn+>DUn^0**IB_Tyqj_@=TUt1d8i9z&ROttubh=4gG@n*z5_s!omNsj_DPXz!Gb7I7QvEu-llY-Ni z01%eLLnSf|VO3c7;&kw(63K2!mdl5klPAa%Ktz@;muSAA=eCUXyg5fKSQYGu7+))4 zrJMwfH@zlI7M94e2AR`lB!H*~+ef&Ny3oi54=y}UkxxsN11`pTa2MX;x+hN?>ZLe; ziSyo7YLjNv`)uMIFdg(1IlVeKPw|-boJa3S;OKx`u89y~Yxn~`?Ck^uz zW9EAioPI+hreYKVh#h=RuTeE47u6MYc~&sk?eE+kJ#@ndbdTt^Sa7<`;RBYZ+N`M? zz@rr&fLN>>SBxUE94Na{uAcK8vRo^#=DIIkHR{+_kDivoLfstUL!NqCDm3UR+8Zqz z1MAAG@BICR_3Fm)=2!}SSn#71lnNkJcw(s#W4VSt>I&gD=CV%F+EC3?Omi=@L7ao= ziRH3hj_BjL(t(-2OPT7`5r7~*9J;5Me=F^28&)$~aJqcgDo%e)Y8P{&+d?aN2ira$80aO|13DRN^eNu!8K?=4Wz`oXu#(kuH~Lc;u{LkKI1xyQ9;yyj5!4<$B#X^c3&?M`m!xyj)jaJx-hx^%s<7Y%XUnXn0`` z+aU>jVLtln2XOFf6`a2E&(A<`I>?6`m8bf%lIVI|)A#!K%2WLlmB@P(_J$p_tF=eg z*sfxfF_2d^qa$J`qow3&1v8fj>8(9dTS>QG1Go8^8&n+0SjUMD)|V&%={M2I*^Jiu z6TddA?NEw&xte1san5yT3d+3|lFx!4QO&uFJIA~&!t}FVQx^~`X2}cnt-SiEV_Ram z9$b(fxd$Z^IUoz_LQhMnRF4Ckv5os0=Zu&*ccpsx(<3a8uyfv2Owa#%=dPJEK`AaxkT)dp7tZiGlNSL(A34e>_2$o zd~Ds7Iaj@0u!)f8U15uLia;NFib{oOin53L3|nNmN4br`f}df&9ByKT-j=Z~t5Z~C zgv5Dpo+82){xn767nyE!fV)0yX1l(0(QAzqoawV(9X^|aXwuJKK6{;6(?pCpg_ZGX z8|JBKVVMGPDiM41VX$E6d^1M^F_*YqjqLz=9elMcI3h*`f_voHEr=L`lglMKBE|s` z%7JK)Yzh>WJnxZNSgPDk%Ca}aBxs&K&A%DeN@)d_>oT~kQ?&CQr$Q@Q@}8&o%zHL{ zxx6jQoO5JD>t?XrPeFOGjcOv~6lHpfFMr|#s#`Osvq~Q8o+`G*ZFFJF%BxkX$MY_A z!HD^HmOK_*SmvA}@g>B$>7=sLo9*Vd>M7V+K6minI#gzY)_a}#(aP$be^dWkMto-l z6XfG@QgM%{5=EA4HB(NG7^@ir%2O#g9Xd$>X|b_Q%*iV_8FP}72gtkEO3cC3B zCdh-`+7mEaNS0NMazmxoe6hXD-6Q3xa?P30k5>klO@vSvllFZCaqb3Wc_SM%Uk))_ z57$a022+28i*LwscX^>?wCxQGQEjZn4TB3VoAg}f;y=f`tBlx1)oTu_|LQ{?ptrb3>C-8GhL zQ(F9DEhtxjjBwQ=H?+pL3Iy#wN`Kj?+_g-3es5SKNEs^b{e^=dE^Q zV7cxJTQl4kSc(4R`BysFSbKn80wGRT=o@tug$0iz&k-f zkvPW(Wu>R%=dl?aD)A_j=s5TQ5S3bUQ8jb&VvP>|v8f17%=fm8@E&!V+wzJkacoHT z4i4&*0x=8AsYI`L)R>b~^2&i|!Vm#OX|J(rMuI#5#N|K^r+@D+Z1~9KsXPGTAoHbN z317-n^a@Wb|HiU*t5`5kkz(aEe!RpvM>cpYSK^%N!i6o>DT?Kqw`E}q>;6pT)oLQN zE+{Mb!RJ_Bjdj<{Y_81LW4U_4T9vMD?mTA|DT6!gPS7-_LU?`V`m*(Gie}1fUIGOI zmB=Le1eU}9BNnv}3d9vUh&gGGW_%)$_x1=NPv*pep)WdmD;mhrJVLCpL1x(W}G@DPj>rLFi(ZxWcSEM)LHK+`x1XT6OtGCf@8j~s0Wu= z>9H+)TIx*mMAgd?rkBS5WP^?0W#Ng1EqYtK5j)sK2-5r@Y#&DlEGT0N%EFe{ zjpK<#)+w4e=M-g@`C2MO8C*Axbg1VkEO2wRIN&gCiEbM;5UW^x?q)w0t$3|kb5zpPW#^|7uH zH`i~$KbWVuGyZ>3^CmNi-uuI@PVuV37E#}ur^t0@@3ShqT>-?X*TtO^%P8p6KwK#h z2B+B-JETDP?$)tuP>CvZkdjxP$^c|K07OKLZ-`mGmy2pD(ehN4z@vRtDnhylEN5p>iNB!TU-PG{z z{2I&Px=AyZg5vwb*86;ca8Xry(kuA!o;c!wEM;&NwhpAZNi!J@A)R2k*>0X9!&cb< zO%a5x%P9}NREXAHVT;?CW|L;#Qmjy)s+lba7nZ@LIImL%S8Pi)0Q+lvDj|2hI%YF+ 
zqPkNc*rTeg?r}}Mpl$iy%!AA&no1Dmk5VB29}xgV9&WmHZW{GvL4*efj;yUS=R196tZ{0pQ!41eG=6~$aP=N z^6Fz-l!DUrSvTUNIOjY?Jp~Jv3)HugvEM@^V%^V=;^rFLf>`-2Z>|elD$w`I!is%( z4*;@F!O7W-Josz|AFmQGuu?Af_ADq6<`U1k3I2T?d|@haQ>klNP)eop`cUh>x-Zx} z3bOp8+!jr*0TE04Sb3^ZC=m655I}hF<+k_?!JUwd%z z*6I}bfNC1q7D+STXwkYq8tV$Ncb~0ZF37f4^X7IQ{e*8>T3?aX%eAaofj_P81Xa5F z()DzF%KTswT?26$P40Eu^Zuq1r>Rm-0P?9>!63`JN8t!gW-}Vdv+(Zy3<{bWI${y` z0U)e6<)Uhgpx2k^4!-vU%qI59yVf7q^XB%5_K16M*9wNXM>(D&CY5-uy33Wdf8nr_ zF`Hc>o;Y`&qI;vo!j_r{?E%$G&F|A=L7W*2Tgsek-SzG>Jw?}p>k3c!*uiS+eGXn9 zJq0VaB~OtL>?OBV5H7}rt>;Aj@WPQLKSQisQpKgZ=g)`vqFH`;e)I;YER@!Cpkk7A-@ZiHmnx>KBcL-(yaK`fVL zzSfK)6TNtCNuS zIoPd09%Q*&D$zI8s-|gzJoX46|EZ}&F(<@26*?&2%i_pU59E39(===z_Xr1{%*g;m zb7+-BPZfZ;octKb-{yb_dkKSy%xk45|I=71MC)!8f0ScjdEUp>DYDF$>cTUlVxTUd z>d3Y@n2g?5=XN!-aay}8_%U&A*9Tz>EcZS4R!FJFk*XW#G3!H}r)a&;LVbB#3R~99 zX48CW4M%ar+RLaTI6)LEzFaD+Zs)IyIRQYV68kQD0Ajs5ra-`M4M6TXn~|fnG<-mN zL_7k!Wj~d2iLUj>8-Jz|b*eZ8$cLxV5zEWCvXsGzO1_v1grl{bN*tj*!W@VVGRGd# zW+cdue(Tk59Zc%I@)mRJ9HD%-K#GlUC-Euj9RpKsmxp5W!n;YWRL*ed;Xmig=1lw(tCRcf8&6o^!!Do!29S8eB_`ZyWo{atsV66vqI zic>mb$|dq11?_r9d~bS<0m!R3pt2i2aIZ~|BD-P~Rh$Ck!&Gr<{(4@%f|KTFxU|;o>Jw`(b$KeOM3wPYefHv}P&u7b@7=*ah&`eejC%wCc@_PD{Zv+rVpcGT z7!`>2DCuXY#CP2uf!zu~4CKjf5$pcS6*|Zs0f2b9L`|<@&zt4snNqRfl>P1x+#`X! z_9$4?Rb9=gY!K&s(#yex^_*kf?Z>;q6U!t9Um|3kBG;X%5Q)V3CrW;YZRGgfpg2d^ zq8Y`^IeVY&ZJcHn{IHpg;@o3foLUg{6nEa@TOnY%TdF@E^$PSyyctm)&)GeX>!aEW zf?1)@2I3azQ9vk=Jx_S>Gg&?20mxBz@EM%mvx9F1Ly!y^ADtl)bNOBeAhfX9JL)QY zLi;@{m_=QhFl1y|sO-%+WiiSabFzLaK1oKmMR}@yRbTKGW9*SeYXyr@_yo-2O6<`M z#%xwQ$(e{b=sZP^Y_Pe0Eb|R_g(th|<;qhO+cKE;GUxVOJ=gt>z0tDB3avDh0q#47np8uDrWdt0mxBD zmIHA^i{lJHf_W-L{17mwyC7m3t@UY#JwiEJrhG3CeytAvR5yG8Dv_Tv1@bD`m&nmt z7MyVKZ4NE=$N>lww9Ltah&g&)Mz&k=_=&LOwB;i*;8j z#7&yvyU!ZBPt>>30qP22*xC=aCAjF<3)XxJ_FnlmhOGi+62A>hyq2;U=tIIW@S8O= zqkM43t93{+05ox?j)`| zyKz|N3w7}&*w(M;DcZU-6+)Kl1~n;z`-Kf~Cd*}cHE%9aAJ@m`%SoJHumT9xUO2w) z3(&$UE#p+47e{lrsC+tpCIN(%a{fA1GuE?=SGh#aW;839`Ci>4FPDf#mD_UE_Q)%T zw^(Px2Rs0IUj;+lBfCezx(WAQq7_Fv3rl-sX|Ii(aZ0ujE%I``y4<^D zTVT06T2!Z~!V`PGo2d|7pE+S$#+O$EE;t#|!d7sgZG8Wf(09GL^S)`Ul%w>M`^1<7v8QcZo@%B62vnjgwO-{_GrE=q*&_^2Y-}f4{sqX(W^_GovKi^!>XikX z6->LHZi`vLA8Gi2sYC`R5iyqWmRLD_ZL51P(aTfOZFw@2W$ik2mBl}@Y(V2kRbKtn zL?U6@ST4I0Ks9aMWs0cqMB-czs4hu+iFLm)HY$Uu5Y{PDB+}cmumzSoQ%_6b`e5DT z$Z|P4K!b#74JY1_NMzv&#L5bxJ}X`Sc`Ag3;sd@IENWK3d*1&>>K*q0swt5EH!Mae z@oYxssa~}65lVOPKdZP&1;XJ2n$2jASm1L7CnL+zJ#aEt}yJ;4!;oKMa z(;Q3|ygrU?S$Q?XmZlakY(??<@R2N6GUnB7u?E10-{oGFWe$XIqY324t3*}Gu^8pG zDtx38W!KYYWHbrdjN?>dDXSTazO1#Dg|S>BgA)(Fn=th1AVHp~N!Cr&lieaXu{e?k zAD=^TT1NoE=iDH3roF(unM>pz?f+6EfjYMg=jO%%F4*hOu*Dbgdfgi>UMd9ZzA}gF zPL?ZiZUeIVHT>PKkC`H3Tb%HtZ5%^=rJ!c2Ud}|K>O--O_FTPn;{-=#u)Nwe5!$-r z`mpeXlOcgr6Ng>cIzOb^3!I>UZ%sR=1yFi-JmTxjFNbnfg~dEotpMaJ*^E*k>;s_! 
ziG6s_19=1|P4FjK=J>PkQ&hQa`CbGkwk&MrAajSxbpUzok#dRHBRR{S&G^ieyc*lB zV954JDHZmC;B(g4j##(s*?(E#JsYQTc&vMvE3aM|Z4amlE4A(r=U(Q#jhRH2SDyvd zWLF5pxhDL)aqF(J$LxIu%N5&t67Vv(db2`xikzaX3@!^#2-9M@yWC?3D)_NiekKmJ zG8JOtoRzNh6rHyvS6bUBomBm?&jSz_oB$xS8TD)m^HjlHB7zf}rmYWTs;NY}ElwqF zNreK@__LihqD~4Vs1nEa2%nQ(Z>UTn#>#D}#0mCViOBMg8f3m!4Y}B(rXB_-P2Hds zOd>Y$p*2m%TTvJHNb9c5`R!_ZK$TBRsi_Jv+fqJ_rq`GP;fpc?d5_&%E^)G|8SULsS1`nTv^#Vv ztBCivGXw;*p^Axe}Bzi7v zb@a3|Y%vu=xM0{aefrlo)0Ul`t;vv@Rb+fUWwEa{?yrf;9ec9Z%4UT6eQ{3F#Cy20 z;Ug?6E9JaK!n`j4Aj|&f^1Ty!XxCc~ME6L0^zk?=n1jrjr}`DivH*mQ?U3GVE4j`I zLuLi@!QC%hb$z+1L>~O0ps|Wm5HZ>H44uXOe5U7{HcdupPq|!ENv!PidT>?k#IR-K zNL6?ub#ZH9;A5=2z56VfZhR0nvpz2bBJoM^MLe4t&Vg`bTb@Y7Hfn0P%1z}d`Zy|s zEcZC@@&Qy3F-}IwWWJYqD&-QHrwY0)vao!?97rx_oO*V>D&<_oDHhcm zWR5*zzE@9@{RbnN<(sf*S0tkn(Emd5C=s_gMnUgHzbL<_3py6;|J^^DzUCQ@rkDL{JoG@)HSG~`I3+92kHn{24 zV!3&M3q37m>&~)v6X*04pHn1ScfwMJEe_rP_Z&>d7Irg5)V%q`#5vCUD+^m1wgR@r z=jvH_@)vrFHc0q^C(bpp0fb|@-c)3PKKr?FbG?W6=jZrXYOMsVEG#OJ0Ed6GM05+E zwh_o%a4LCo+MXZ!7JJ0t=(81=l7Imxb9F0sa@*KkNStAg7Di^7$sl7%(MT;kbD zz$3l}E(KyBuVR!7SEE>7tx6HFt^2H4_ zuoc!1Il_VKGg^3v*T+ZZgoFQ&C7~*GC@r(#lmn1YsGMmk(d9t6M>zOY@=YMiQXqV! z_90sqwzX6`yM7uiEcK4kuBVdcJu*j3D$zb?xkOsQ0(rj0XI8M6?^QJ;RU+?^1t+oF zOna>)XklrOBBwtqyjqn|w*~7ixS&Y9U3((*0>rsJSHA)4Zfpx8(aR)y52#X{Gi(hi z6(X_1b=UO~%O%?yd!vP8k6B*rEP07Ug{`)Mf=Oh#H&W~=SZ@q0P~WkwL|1sCSAICF zXkX?foM|KxJo%G z5I&0RHRNjefVGuu*t==M_OVn57HYVVo}zW*O!+GfCS$?Rwq5_$d5ZDl)?JqT3~b{} z7W}Ly%eAmYk;qXQ3|pW_Tz59JtwGpQSBTc#WpF#dwl-0mKO0;36qZ*yA4WGCIzJ9_t4nWu^7DfOu1(J87`v~n3JNOeE5nIlN++aDpwUt2DNhPv(RK+MR zPxZ^nZKy;ld47(=eYr>Dtl%+!Jsf*f> zLbgSUPj%teT|MVag&ZW?;#|Kw|9HXJmRom~S0ilArW`NzQj2nQ1yDBgDe-QkFfPR%Dk<0Xu^I znmuigYTX`*j;aqNHE*juUEx(To@P-O00aueU&c;!*^GAZZ-25i=^S$)*5Av5Appq1 zmJ#MaI0cILsGA4B^E%VV2u@~YBvHfR9mf1q5;ABM1-6N|wGbCNPsoxX_PB*s?)Qs#A26W^{eH?(D&3S$n<2IUhSPw&k8p|8~?XYe(1`;O896 z-Q~nNP~QzE15$ZmYdG6^ED$SSxV|D|xlDy@s@o#A^$=IBafvS@7tD$C46&$Pnw(_s zZk2p9Dv*z1G~u%;*?hlF_sBq=D$y3T@lpuLvZv&4Xph*JXkkWu-Z6&f9tC@dOwbKDLv#GqAla(A? 
z$Wt7ONn(9yGA(&mc=9;%6`QK!ZgRrUJMn!?g|P4>I67dCGPvAEhOOt=a=y{RDtTA% zgRmu*%XO!x=!LCuL-)aQz1LocPq*`?Tw|N2+FI#J6X9?n8mhe9U3< z>@mQi((rI?C5|j}kF2dkDHR1L)lJkmw}ovbQXn3H+?%k3Ep#Mehc8i_b98{hmKtG+ zm0-EboO2+Yx&D+6l~4%ot0Bq!9H# zwx?iUM$JjPQTz1ovF^dwW~7rehr94$g|6Cv-CfyUH8VGDW_{M$=JOrC<_l0DdYPcY zp5y?4&>j(h9L(}i+1^GodxQ$4xI|WPKY|nPQ7yX>MX4Dff z3{ET<(rYCvs=i>JO8vcT{V^+epk3^?GaQ1(W(9Av7iCz@=yHi>GrCH-p7_0VCRli) zu8^RY3%KY{us2%R6+*aRSBQr0d!qxm?kaFodG*Z3^UF2iXT9rvX4p~z#HILe(VF#`?Tn_4bxINnk4B4l?|cdLJD z`ucB&t9|l=h(XEcF;8W7y@=T9>Un>| zf*}Sc*VwMM5-*pi5p^ynZv!I6XI zOz+g(`vI}z5yZJSAnPn*Hh#Rl(K5zz7Y&zH$lKzeraP?LIvt=m$9Y#1p^0;^(2A)L zT%UCTU&?GNJjuLoDJaKsk9(eC&#>3~oTe0%MmAVkI|xU~5B#^!k|VcgUP|7v=tXhe z;liOckp;_o*X8G(NS%=zmL2JfwIup)Oo42l@Q81twS|Pek7$B}|JRd$l#;)YP`Scc z!3s_UkaVvhSHbB+f;<-$DltgJ)H_NAA}bhVd2;59_B2z80QpbUR)Wt>G6e!6M&=X% zbJFx0kY(39>VlJdrUwVaDG*x0%2P=t-i!1TE)YAu^twXqB|?qL@J2QerVG8W#WG({ zoV!#A!j{+;Jw<29a~oCg1JvixeQ$Jtds+%`!RP9$@Tb#OZTAB%-oCMBHhU3otNv|A zh<{Vj%iPh!wOMQ9d!mb{#BPZRG)6|O311bcnGkX*d2m(AH6NK%@|nzoi^CQfyG3w% zdPmiaovgxoOm>S!Z8Md4)|7mE$Fy6Bbu1Xd!Bmkmkug}%9 z=|A!TRTZ9?DbmkO;+|(-&v_c>DKhof9a5>XcCcKki>1d^UhM{C*?A8(vw6MG#OiDwa<<`2cD)SRk5 zw`9T>RWlQZwp1o?k4_$X?u#UV1T8EECr>3d@!`gIH=V^EU5~xO8x<7oce4;9hj53EnaYPEek<*o6j7G2ANyI zkaCGCyfa74tYF-uFm>Yqrqw|Swnal#-j?19F}QHz{F%W8*w#Kj-)P~R6+v+>YuMgs zdCbBSy#PrscPGoMcM0`bFXzIR>J;68tl{wbhpaF@7f3Cg8ap}p^q!{Tx)p`b*4{5z z);wo0@2P@Q=iI`~c{jtBYeZEraKEG1<0edQ7XAU{fhV2@TS2l9(S=IR{j9DvkYE>WLb+_`!=_s9wzc#oKqS6fL_O<11>lc~Hqu!=+X5w>2d_u1gW zHM6`zU6z)8 zfANKQpLw@k^qgMxyEmO6y%C&L=%8X0?9qy$|AswE0&}|fS5zR5EEDAU0P3dBURuEq z?{z<*N<5|39|k9{7zJdxTc6{i{!Z+c?@M^l zy}Mxkumz{NNng| zN?u8Hvl(4m342Fz@YS+#=#_gf(YAb?&B%LXvl(^pxkpt`58>cnRQ-{j>7kN$kGZ#b;_=zBu*EvXVZPB~o}xW=paKX^uXZmH z5-!vg!c1a?%&4Bx^#kt}tPOAdcQZ?~_GVA(MsV5^lM~}daGID_(-yKbIe%4&00eu) z-~<5C9{qql3Pf;^;#%vE76n0mjt>Ch6bOTpXJO4w5`d(bO0=H0q%8IFDT)3(??fE# z5rPwoy7&O9n3J`Y0OVa!7nJ`H?l$M!ewx+VnKygVe`()6yeiS zCeGRWJZpFF@z-Rz_Ere%a>=%+E|@t#P}6oab$9QQe|_`?f>XmEYRQ~<@aOg3JeTwt z5B?1@Cj_S{u{ZwI5micdI~ocANzMm=90Pz%IxCQW2#_yJ3y%cI?+HZs2q1it<7CMj z*(0G~P6YX!pMuFhDS4_yDfwf%NA~>C01Jj(E)l4#;R76hrh*}s@yceT0&#f6&vim2 za*(-~r&2Kr4!)=4r4kYA?x|PD60{s6NOnt*ziUZ!-lLDn8NxMngY8+kN96^THbt3} zmGrb!Z^?_XREP>sJk%##(0sMtXQ>NbpR0U-7-37+uu>spTg4@o3hAAam$310Ojuv# zH3p|`XA8QT=QKQptOF`nF5VcvD6lvQ2fv{Z!D#{aD0JRi0*Lm=0K|cOqO91Jx;PfV53IqpVEelR1;vQ*w zjb~vo);-T-IsDp|PM@q`wv~93Q9iYzt^u#V7tD#n=4CS)b7HL9X?=;45S+M2`>o=% z^2TcfS`At;3k&y%`Ci7l&t9?)1INh0TQ!`J+_x*3nA`>nkI8B{W&P6S4Uvn7;U%}~kJ%iIw7XYOBNWOy8 za_rFs1gA{{nF4utk3vsX-<+Ey0MQ;nC324fQJoP!z5IKz#xbY*g@d$Dvql)4n5SYc zvEfL(sYEO)Ei504QK;nE2Lj0;oAGy%FNbWR13+H+UOlDM@DZoiXpg)+Ror=jJYwBr zz1Z$5PVatd`QBfGytS2hbrZTrmPE%rQtJ;}7MSk^5u0_dZOMVphd4!99h972{fqKy z!G*#W>l6(xT$wN5eJ0zo-sgCwLim&#w(;S^tYb49&xPIDk5wNmEb(OByj~pq*<92a zT-0fsukA0NZ0z>(#8Iq~?|)x;8~}2-_1W}J?Gf1RjDJW2$nV7-;U4X7Y-sfpMk%qiH1gAhVm=l8&4?cp^_dsQ_+um~r24+|7OB}lG4*nFd+h_vF>WN_hc>+iY z6bSYx60z<`{vpM>_{hcY0zft;mGAc9q+VqXWaS4tL)y5g<3Rom&K`4#*dwxA>`~Ec zSy-k(OeJcMQtv!%cpxHXtv_!pI05A8wglTs81cT_9}D-(JC-(X{_L^=!i44SV7?9m2uSi8YWDi%v0s);M?oWdO_PMdFB$mJ<4=eFcrx4*gRvm?jE&GI}MQUclll~ zIE~9wRXmvsc3bN`cg1Jpv`1vOyhmPNBKL>|LyC3Sqg}3Gh;GZG?2QFncRhAsY>V?0 z5w<$202jfhrMT`IxW zklxce+AGQ@M|H#iKz6NK9?C^sIg!Ds@YzM}Q3mdjNpH7Dml&L;LM0Bx)`v3IVUMUl zDycv^JdlS=78NM zu&pGR?=2O`e`)W$9c&4s0+DMOdxsz|CC`IzgUrne)=L-$ARFx9>q)YI0wB!y(q_~Y zsO!wh3qYVi^lUjl+wrl9V!j_uZPFNG62h;31w>rfPdWsBN)22>P&gehC{6CJ? 
ztw;>pS<=EqeVRu%w)*U*cf&spTYQj?aLkBCjjXHyPb^4 z;%^7L#U5ct2MO|r3tx9d@^e&)Tci>NAQe&|Qi)E1^fY0QTBChsSk!BqH4s-hkk9Ph zt?~AVQ}T;Vdgt9?!H{f5F(-PrW;1Gj=JoO^yhjc|%pPHI8s?V!=L0~j@D8yqSpee7 zc$ufdJ#s2>Irqp7ABYI!J+cWy2u|FiV4;Kcjw(;3%I(;^LoX_7_HuNeL}+8>_u@|`!lacb(E1g^@SmTpGsc2r+<6h%hsWbr7hc< zuQZ&%!N(qzZhF^H`BGyB_Na*LwjKa7mjJ?puRS7w;2uSl{_HF)+9M^!kFP4$0YC=t zP=UBT8i^Pv2gon<9j`*!N(pkIF)z+LX|jH z4*wVS_wq#<>m5A?mAI!^nR8covXAw0DR=&U?fMuube{zfS^oCecjD*2&aJ!`awd7l z{EBNuR~HT)D@Aacd#++@>b~63nUz@=7@S^|1C?V+Bhq*EjfNmNRiEC|cCrWnVh2C& z=~Etj59G<5G74+CN6TaVIQR!{BG&E8wLR*|aly$SF$uBGl)ON`@HO2Q?9oFX$V)TN zo)tRiWr7uti%S-%(7_&Vv=_7yP56R#c~)LWCVg!+VzwKE$TxBr_N1S)RC;FZ*=g# zDq64*0AkWxi+Z~Ziizpb~@Z*52Z~!x43I#Qv6|Q)w?}1=Fr)F$&qO#X436lR4Rd2<#Da zAP7#X+?HL>7COc{zE+}PIF_ecm^^KHGt~uP+J^3jTi7Z;wV|+M+S4fo%ijar}rnO&FROzUD*g9i=0d zeTHLT)kIjWK~3_|M|YQAE5F{j@_175vBL{r*OfBj?|3o0>UQqX%SuS^M;4qmm(}9n z_bm_YM{rs>4FF=nX>IcrfPCtWvJn7?3r?3By9x4+-D;0!%}gBC!GF~8)qb$s9KPFuiEdUf0R%*BcT`+r%F(;Nl;io3-V<2i6D)L)O60*$q|G>2He(&1 zuFySlxkQgFv*J`_*@3*}d!5_jKF51B9^{z=(W1U@xkLsh4?wnev#6_bC-0HiEmh*^ z{9S8IfnbkV+ajAWSm)?dpUC1&i1Y4qKSPtRM||)ntZjbKeUHJ(7WH^o z3zHUPPDO_o4kI{q5kSaphh|j%dOU!%>)`Vq4K8wfM36tyaN^E5mH2~MShZ7`{Nur= zJ)+M+B~FKuFUB543sDv^0AU-AfN%Hkx)7PTFSyf5U~NXf(alu5bK}-M?kp05OGXXf$_5>@^m2z8S5a@L6cr*Ly~@KrnIe zDH-~PG9~M#m5@0-LvSLy1#<#`^hRQjJ`ZH^;5U3{!KtX0dnD!rcFW-OrlXyUiajE8 z0?0Ggee4tnKDSit76%`DlBs=uwhYD;6o zo{}7$gpd|0AIe|vwaPT8!%)qwg!9wOK+(S{{99B~Z``Zg4Bv0*n4NO#>I`}(PS2}(fNO0w4qAhW>E z9#)pq|GX{WBfi;{DLRxqg#zve3xodL7J0s#ep}1JXu1pYh*{UoC3>baVI!J1J>!z~ zXWb*l2Uen{N)NB~GF0iGvx{sNeZE)8$?(@PsIcT2MZ0T}WMrH~`6DD31-^`hp(GY= z5_0D6k?dQl^N$JmOhi9X=YP8Q)B(pl+_h7-2ZB&*Px{&NW<_Sp`P~WxDSb0Aq-%+v z_%wU*m*CAhAX(DJq+iDk4_irpgh({gU0Byr75_~(ceTtZ<-zECBV<1)EShBs^GWvx zz?`tuiZ+rE7&8+<6grc`mo8)(@tK&1p)Zvad?DPtJ2Q+x&tt6^QEwbT#|mx@18zdF zC<-pD#+PQ$6)sbk%`NzGy1A`u|738S>Xg%CQVNkG*4*~dq3`xn5nu7&`x!WgVARDY zm+~BO|9p2d{~Z!yHL*)Y3bOBMl{)^I%8ccYqM1TN|yo%9Soi@<8} z=b>(%M$AD#LVL-Q9Wn+$320uVzN$lq95 z1)Spv>Ou~mS;02RY?MPKxhWoKqvFpBxQs1?VIV+}2zSrt0o8Qsx+OTtamby-buI00 z*tsG5qspLBEM|VOKE@yI(zer%Fh7_<{Md$W|KHCC+nm{JdF}iFaoO#xByM0w)YC_@ z>ECrD$&y>7D#TBtXkhhek5OQ=TxEH*8N)M3my~~TDsZ0y_fa@iY1sB6X~T-Oa`uPu zOZev^tM20TuXMWxkv5H8hZv0*4m<;z1+5zAU*3V&YsuYiE?vF!MXU? 
zU)9yi?80cR4fPIVsB&=QG#My(RF85yI-Epuu>v_Yt|*0x{)KrKCTfk^@^+>(@ekE0)l zX)k&1Yi>t9)whEEsT{Xe3u zdb6SM-ZkYaypbY>O}SbTK&~GzB-a_pvpHnqpm&JP5b%6d7@xKPv#jmPKY+M7npRXu{8RoFWMX9%eq$3Ex_Yf?XBp$ zhOx<*R*rk8ms-C3^jn$-^i*kmK!opufaNDzU9-yh4cBGinioNKI}`mQALXnka>e-Fa-c9-1r6%k* zhWJ|_i@tK`?)kpyM89LY>O4m%S-Ea_@49y$FS{fD71^+|Am;~ax$Z1blAh@yEAdTKZ}=*d8igj$!&{HnrbrrpJ5o?ke9GW~K;YA)se4p|f5)0c4} zu?3^G&FP2lkp10kiW)c{uJx|`Sjy-x-@qwe?e^oMVI94NqcduJ zxIY|_t8mAeS9!03jS>t-b}xCq;`whEPeT!j+M*6K9&@z!xZ9yK3?U-(=TlNW^prow z;Oj!_TxAPXN6L3UZnjwLa>!PvtP^>WEcu2JKe(GowhNaz^{a1+2dV6L*#YNm<+in5 z;zFs8=TmpuDc?W8^g&xv)6ViT)mH5>HcC!v7s?vA8_6H&G5;fGwY8r0i0SWZrY{@- z3B4)pIVadcs5+(gkx%ir*;pr}WtL5gnc4CeePhw=xQDbz zrY42ieG}m~n(|IOW_n+1i;O{nglVag$p@?D&X=MRfxSKOjqxAn);BH)u^R3b8D6n0 z;8-I@hq-{Vmd8f;+Mf@TyjYp6}^L!Rr3C>)l>l9lf@aaaL)d zzl++d82Fp$EnZm6f2?K8Z#LH8s->c>H7i_kvo?2bA9{fD;E@xiMo6Fd0!igf77b2v zWrC>Z8+f7WkJWbei}u?uVFd@*KAg2oP%r;E+z0Sw);Jz6rpy|gNp5^C9xkscPR?ct zwt)dQ>|xQ-8A9x=bF_S^@ngSau2gU&}D$F9#p zcvrah%4O;`p1 z=$TPg4`i4|B};Iu6Eo~H$LQQHr&A!+A*oBbuX*2LH0+Gr@@e@VDWaTEZ(tWmx;=1l z?8!sOY_3KNPd~`gOvOv#%+$=EBG!$p3y6=lK-YVXIdA`#VnwB0Q^Tz-J`0Im(d}T& zs^<4vGg)0NVA7E57s&?3ls% zZ^L%}L#h8<<5IKVe|{W2Lj{n+E*y*xFPTqmoRlH^Z}Smx{!bxVp(5T;EWrY$s4Nh{Jy3&7s| zRY#;8)QSW7FS<%(_vYQ3u7JJ<#exlci}!t0vTm>tML@!T5p|upTA(pAx{w!X1Vg_a z-eD6QeZ;uqau^w84TM@RX#S8B-cQ6o)?n6#9WgcGlM9BkR|ly*j>5l|hCW4~XAZi^HBHNja8j{bk> z@YO%Bw6ud?gq++*?`GGuy8f!XQr#IvMPx3Pmp1oPASCosO%|W}bJ!@%B*D(h?cbLG z=8cFNifGVhi5OAqjS7x=BinhgeD<5=QAS9{(8({e53hK0XmJ`b1iHeULHO_V79 zG3+ODK_1MyRs8~6A*GC%vpfkPH(W+qUBOI)IrxGO#LiY%=)%NPZpjjsmQQ)s{tcuNK2JMn$1~(eL9M(5 zfMhBQvTKIWAnqv$PCC}{`ooz84QK*h0_FF#HamyMHlMo|&!1ZE)JNEtLD7wWy4)(D zqo}XxK;sK^UI=js@h85LxI7p+qH6!bWagS=I%?iPdz5UVa;anYq@AR7xuvm0HuVyL#OuDcs zp7{2mOoQ37o=9yWZ~a%p{9D}~XexK2IQfCMEnXpxXXy{wopkOsDP_Iv9jm&O_~gN8 zsiT)UI07mc_ZN{zCvRgjr*M4${0@u-drx-WFQsLVM4a41PBOTEcmGO^oPBLo_|Enn z+2+0xaVj)+_e<5}Ijxf;-qf8`M>nWyq5BmQL5`4M@`dx`>@2^}uxE8==5qLB<+M)5 zzx;+i9K$E0Tc&>0gN$P85$kVhr5yIJT^x1VC!TbYCTUdIax1 z+8dpXe!tq5Qlpp+3o29)0kZz5VkPnn0hEHG#aX;p=}YC&>8Kh(j%x7SS?xxp8Lv|D z)4P3#(K^&sXnLn(O~J-21jhOK4~uHA=`QawJh9o)A2l@x3H4SpAvt(6qs(bB6T8(^ zFFCvLWa%u=prRu+XYE$BLodm|g`#4Z^*dRoo|2p#t3qxtc*S^PwUsZ9qui9{V<)I$Ab}?uPGzJf&LkpN5LgbBfN~{*L2lgNyM(t4LCN!w<8y)@x7Yezah4%h>`+cg_)P{>LL1c&OMb@UMV2IwGdjn{lL-|%BOQ7aJitAn0h`H{pP>H3$?6|sKBEP2jNwYNl%gCC z-9N)Z!~5ineSddzZ}Jn+ZLR&}XFb5vOJ#AAIz4z|lzZ}J*R$n9bzE27+@`asWfJBp+{yJdgw3SD0y{UUT~vpXnlcp!gXxjAm8JpNaeE zV6xQn_$anxx{T;vVrH+HZ%ayos;*Q2&nEp?#~8Z3PB0UTS-WC&rB*$jXoK-Y2cZ}> z4%6}QVfP4J4||Tij+Z-U&Vd{Clsy; zI`#hiNIqPB;L&Y95^&fe$Kf&)pKDEOP5!4`$o;r^BDS)2}u~o4YlS6 zL_oM9^)Q0t>AW=t?o1k|Zce$AvJsYpFAJ$-WYvl_^c1^pFs0Od*#X;MZ=)2=>u2)d zIm*H3I+otjZ3k5I!ab@ z9l5C1b8U4LlBqZB*!Oq(Tj&~}|0SHxa&u?g%?6}17BpRsEH6<3PFbC7div!4m7VK6 znQjd!KxL8v#_#5rx0-^-zIfvs1I}K@0qAxG0Q@f6WeLvRO6=bZeiRZ^CnZAZS*yLk z<}L#?O0IAzJB!asAfXsFy7!Ip+RQP(=%<5P*-7OG&QT~Kx=Af=>w3zUUSMmW-RpXr z+AYLs9Ipzf?K!H~3YL!JUXfocj}?uf3{~{ZJQO!XszMt*E{!;^Vk~}0A2deg)kV5n zs=Y~A?ua2(3mqY$L8F!6FsevR!K=FCC+rQ-UUq;qOP7M~{|37;Nb!73)Gu8@HL>_f9^ z%H=0y46{Ir*dA${?7hhOM+se4mGb{yi;*@l5M2Cm=c6XT{j6MWxAaPpB<@Mw14Pf| z?F5t(`P@JFYqYkZNFMbAWxhp&hP4g+n3GSSj;P>*yMYO zviDyKT-Yyt+g(g@vPFK}VST@taIIA631ckJVf4t=a|@jro%n+UTKL=TjP|EK3yG^Z z)=%8V@(t{O1nwtc)~@$9A^7uiZn17yiH}b`s`mxl0)KmqQz>VEK`P+idqLc{c};Wb zSRi%69lsIJXoo85_6`ruIkKO1k=FG()3# zJz;9K<^=f5veKPIpLjW#Qbou+q)H;ovyH$q>0CSyWfb68>~hfs#`>z9qWQm(dIAg*&jF2Bmb<*@zJt&B6nV-K zNisWth`lL>YDi+RF`i*sl+;_donNUyJvh-uYsm@@J~xT%z7>oS6JUevi*1sPC|jdu zb}g5wp%2{O_G`Q4A&zsENGVH-1SXiZZjLK)UmbI0N}R7SG0X)^?!D&xbdCypQvuzi z%Xb{}=_+dgqz`>+Y6yJws(GFvS0`R7RpInYQ@;|;lJ-P=BIdg>_N15XE%nw!anqY 
[GIT binary patch payload omitted: base85-encoded binary file data, not human-readable]
zxair@?z~~0=qIkvPPO^Ta2pJal>EWmmk{g=N=~h??hFelZZfu*$7D*wd$%}DdzM(w z;mr{yU)F=|H2%E9N2qX3!r$MfAqmoo`fbXf6!+5S5gh5u*iy$X+% z60FQQGNw?kqeh8LC0@3x0?V_cp#D;3v=L=&LaZOE6kH+A>%eWE z{6uX~7W1}oO>*DUq7Ixb!W~ZziMhLEiZJ40EwC`wa&nYR%1k9qUTm85Ec}6mh<`08 zN>{6y#qBXUFK3_O$Q;)9Z-2lsl`lH~SU|!OAmXvlMK$w>0+YF66O;@)L|GCzRQkYa&wudO+W{&D)%k9e zZM6Jtrz@hXO@`)v9Fe2tKh2npO%zI5Yq=S8C!_(pyLA?pPH|!1p+Y>5yIi2i`7L|t zVw(xcTEC-0+JJPIwlbJZvi2=Et_9(at4JP}9lDxvIq=N)N1WXgeK#-_WUCBsPNE(Z zq30+RbTTCQ=85`uGuA(F8`Z~-xt~{Zof+amli6Y~O~%O7I(SpTBzpJi$HqI-Gb2lu z6Y@s6CDLU7uly1<)Ix7aWc(GJ;D3t$qFAwi(>#fD%fNqor36_BP&CSGbe2@$E^ZWv zPzoOVGG%w{UL;S7CKFlG!aJ0U0<`diWVFYT954Shm zW9tiw&Y?az8BI$C)3f79F+3D>u$;6EReQ&&`2s1oz0nfA_t`=XNU@lJIa|jqIn8*} zaDQ`dlS@QXR%y~Ki{#(5HbKeSpM{-wP!q})hZ8_L0#Y=91}P#asFxzrZwMVj7eOIF z5&{@n0tq#thzJUTA|X=LAX0@(^HPllX(D1lfq*nAf)oKmCwcMRJM)}3@6A0sJ7;!h z_M5Z&$NA0dY)P|i6R*hHXVoRwvs3!8NthBt>E5hM{;j|(p{n|cyJ2#E5D24!Xl^EX zXd&+gz8L9Hl#KT~)vF*l4c8r14pypN?iOR8&t~PcjRlmlK`?oBHc8V1a=43qZrAN( z@fnuz-fTX>+wNO@cDT;=R0-60fTufpBssZZu=s9uRnkm(m1Sncw6SRie{n+PGj)NC z^Af%m;(gnB#gm=48g%`QR^#{<(t>`wu29?8(D|r;-1U*npZ7{;TEVzj%Tc-9{M}nk zO@-F^%|5(6<-0uVo5B=BqxJo)+WA(y3Sa(Z&Ney7)PCNd{Gs?VOVXa5Cln2?)=bX5 z>KfPQGAJ~MRb_0nOOR$Y&XZD&30tJk-{IB% zTbCe_GCSu7hbiP|=3o(ObE>w-5VCPcXPhWc4mOfTJ(*dcdV9Yl*4I^?R(J(Z%{bPT zU%j$^?H&3SsP*GL$7(k5!KA2I7d1-)r$&3yh?82Y*i#$+=ir`6nF#o$9_#hHqeZs5Q_H*B>M;gr4ht;3IcMRZA$-ZRNRn8wfVOKCi1 zkSvGu*tM6~d<%5;z@=GG9Iic9Y5SAe!9Oedj80$v9n{VFi1%xiD78i(X@AU3K!QRV zN#}mK;p*?%K%)lsWVDg6E8NNp(pK5w!?S+DLgK_IXg@cPNtdWpRwVqDhH!e`_Rd+= zLlGm%4vaqaf~u?4f)|pQuE7Q53;v8&uHK?Qbah#yVqG^NE#0PO4wg=7?9M7E!1 zgoT)6pp**z3&WoFSUjkTkkBKss}w1J9Gc!?6<3-(DcsZN$n-BUd9!1X&K7dzjv~wYRTM5^0BZEPiCwWPoD1 ze^9@pLDP4R^(_02Wbb_&F!v%8_v7`8x&rLM-`CL%CS~IN_#;_q;c)?45|cBep#lY!qWM{0YEGjCZBd+c2qfQ3*9%9o4wNIwT!E0n`p4q zSoi7DW9y?h@NRvp@ds)4!wL4B8>@JWG)Z%t0JAq~K*p3nnYO&)EP!KHW-w?a#4Ewx zhMPx{)8+?sC2F|(yqr2H4+H=#MFD`LoZb*0EF2N;gTld4f&Mt0KOP0gg`f~{G$t5_ z)9~>RTD;?irJojqf{zL*A%Qi!+e+69r;ayKaCJB3$}{Bd_TP``a(P05ED}vW9%}hw zr^qBqMwtm(OsG2Mh>Uts?Mbj{nWeoslE_Ap%u?TyBW*V3_6W{V#R#{wAbL)zae}gn z4vwI+DFK_kDg>j(jYiTO#hvVYYN}yGV)kMUl|XS?>m+0sIKqZ#lSzaNkNJ)?&U($p z+db2*sl|25R#gl!8mANWg9Zm1CsGXiq^r+zURFKV%(w-qvYa?>U?y1mLe^F?fSvvp z%ef+pyWB|#qf|AXDtvpwQCH9|laBe$E0M(50IPRS7wB4yv407EhgCTJGmNi0@Lxz{ zq&hir^rYNCHJkRI<+Z)4;W`J?Ov>j00M6VTiu?*(S}^>ucNuHb0s zt(z15dZeX|s0(oj+dZv^pBkBS@y9~xyuU{f3^&ClBhu!^XZA4PuU{)XTJuEn-K4ma z`R$zHwugbt=u<8IJ|`*>`NP|1Yv0gtS0)P4?ZlTSo|pxN9lLS6zGjZO&{DxS#r=x! 
zwx~tS*jg2txa`?QcDH8o*YeFp&CV;f>w0GR$*Lu*G7-|-{^_2fmkc=j^B6lB=SQ;< zd(5_$QnfOdB&m=3yIH=@*0~i9?b~V!H#03pxTZV|Q=Hy;Vpir6Ij28ej6B%rb6vr> zJ!{h&mGjB!sqjid#R@~u&i@)5^vB|DVn-?X8dBS?W;}jY@{U!{39?Ze*(&Gl71CXm zDjp|GwYzxsTC$g9#$%@ER7bZC6%`4I-X=+x;tz76zDM@5KpfC#d!`dca=Bq;EU z$%na_me28vHbqLeO59&=GW;iPAN;ps>(el+x1Wp69Yq& zGfTefW{Vf83rq83Jwq548C~9Mo<*ee8pB7VQ_55>7SNE&TA6ANZH2QwMnYvn4oS%# z6NeRdzXC=BfpB;6bb4XscZA~Z6oAO!I-+xx&_qwX*K*EQ?(H$0y(!B9O+?NR_HS9J zZ{{1ae58x9D^;%%GdB>tAJOL7$grg8DOG0k8(}TYL}kGPTQ5c*-i2;1bzb1nsfwYc zaffSVuXtVqtq*3~8~;f4l;i~|&ul*wD}CR(WU**kFzRa{<7@y@hpkFocPg-0QDc5F zIl0(T@X_7>zS+e+sXpH9x7JAVkZfyXX0(RXB&| zqV_I|i^uLKx;ifxC^tKsPY!=nX54V1h}9{Mq|g*kC6xgwd-Wna&{57mD+Ce~x6Pxr0PD^QCH^I?g8N4Qz0}IjDg7gR-TjLS z0ZK{hvQx*QUto`Bx~H082OTu$ko_tB$5Z1zr1THQPRS3N_1n(V_1^mgs*jGBEu)Dc z=jqP6_tL#X>Ek1pzXHs!HT z){JJMCQ?oVLzW+$Ryo~Z!Y*P<*xLZPBmsY^$08@a`So_@EpPz=uG~PtK~8%6%jEy| zKA7;I>fw*|#=yf+5n}}2YJRES3Q#l#;Z Date: Thu, 14 Sep 2023 17:36:09 +0200 Subject: [PATCH 064/144] removed debug code --- tests/test_gltf.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/test_gltf.py b/tests/test_gltf.py index 5cc75fb8e..eff9e133d 100644 --- a/tests/test_gltf.py +++ b/tests/test_gltf.py @@ -1020,5 +1020,4 @@ def test_embed_buffer(self): if __name__ == '__main__': g.trimesh.util.attach_to_log() - # g.unittest.main() - GLTFTest().test_spec_gloss_factors_only() + g.unittest.main() From 97b7b2088a107f96c7186782f0327445de4f21c4 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 14 Sep 2023 13:52:23 -0400 Subject: [PATCH 065/144] fix and test #2033 --- tests/test_voxel.py | 23 +++++ trimesh/ray/ray_triangle.py | 41 +++++---- trimesh/voxel/__init__.py | 2 +- trimesh/voxel/base.py | 102 ++++++++------------- trimesh/voxel/creation.py | 91 ++++++++---------- trimesh/voxel/encoding.py | 178 ++++++++++++++++++------------------ trimesh/voxel/morphology.py | 33 ++++--- trimesh/voxel/ops.py | 80 ++++++---------- trimesh/voxel/runlength.py | 54 ++++++----- trimesh/voxel/transforms.py | 84 ++++++++--------- 10 files changed, 323 insertions(+), 365 deletions(-) diff --git a/tests/test_voxel.py b/tests/test_voxel.py index 9ea20dbf4..685625db1 100644 --- a/tests/test_voxel.py +++ b/tests/test_voxel.py @@ -384,6 +384,29 @@ def test_binvox_with_dimension(self): exact=True) assert octant.shape == (dim, dim, dim) + def test_transform_cache(self): + encoding = [ + [[0, 0, 0], [0, 1, 0], [0, 0, 0]], + [[0, 1, 1], [0, 1, 0], [1, 1, 0]], + [[0, 0, 0], [0, 1, 0], [0, 0, 0]]] + vg = g.trimesh.voxel.VoxelGrid(g.np.asarray(encoding)) + + scale = g.np.asarray([12, 23, 24]) + s_matrix = g.np.eye(4) + s_matrix[:3, :3] *= scale + + # original scale should be identity + assert g.np.allclose(vg.scale, 1.0) + + # save the hash + hash_ori = hash(vg._data) + # modify the voxelgrid + vg.apply_transform(s_matrix) + + # hash should have changed + assert hash_ori != hash(vg._data) + assert g.np.allclose(vg.scale, scale) + if __name__ == '__main__': g.trimesh.util.attach_to_log() diff --git a/trimesh/ray/ray_triangle.py b/trimesh/ray/ray_triangle.py index a254c7f70..1f0cc58de 100644 --- a/trimesh/ray/ray_triangle.py +++ b/trimesh/ray/ray_triangle.py @@ -324,30 +324,31 @@ def ray_triangle_candidates(ray_origins, Parameters ------------ - ray_origins: (m,3) float, ray origin points - ray_directions: (m,3) float, ray direction vectors - tree: rtree object, contains AABB of each triangle + ray_origins : (m, 3) float + Ray origin points. 
diff --git a/trimesh/ray/ray_triangle.py b/trimesh/ray/ray_triangle.py
index a254c7f70..1f0cc58de 100644
--- a/trimesh/ray/ray_triangle.py
+++ b/trimesh/ray/ray_triangle.py
@@ -324,30 +324,31 @@ def ray_triangle_candidates(ray_origins,
 
     Parameters
     ------------
-    ray_origins: (m,3) float, ray origin points
-    ray_directions: (m,3) float, ray direction vectors
-    tree: rtree object, contains AABB of each triangle
+    ray_origins : (m, 3) float
+      Ray origin points.
+    ray_directions : (m, 3) float
+      Ray direction vectors
+    tree : rtree object
+      Contains AABB of each triangle
 
     Returns
    ----------
-    ray_candidates: (n,) int, triangle indexes
-    ray_id: (n,) int, corresponding ray index for a triangle candidate
+    ray_candidates : (n,) int
+      Triangle indexes
+    ray_id : (n,) int
+      Corresponding ray index for a triangle candidate
    """
-    ray_bounding = ray_bounds(ray_origins=ray_origins,
-                              ray_directions=ray_directions,
-                              bounds=tree.bounds)
-    ray_candidates = [[]] * len(ray_origins)
-    ray_id = [[]] * len(ray_origins)
-
-    for i, bounds in enumerate(ray_bounding):
-        ray_candidates[i] = np.array(list(tree.intersection(bounds)),
-                                     dtype=np.int64)
-        ray_id[i] = np.ones(len(ray_candidates[i]), dtype=np.int64) * i
-
-    ray_id = np.hstack(ray_id)
-    ray_candidates = np.hstack(ray_candidates)
-
-    return ray_candidates, ray_id
+    bounding = ray_bounds(ray_origins=ray_origins,
+                          ray_directions=ray_directions,
+                          bounds=tree.bounds)
+
+    index = []
+    candidates = []
+    for i, bounds in enumerate(bounding):
+        cand = list(tree.intersection(bounds))
+        candidates.extend(cand)
+        index.extend([i] * len(cand))
+    return np.array(candidates, dtype=np.int64), np.array(index, dtype=np.int64)
 
 
 def ray_bounds(ray_origins,
diff --git a/trimesh/voxel/__init__.py b/trimesh/voxel/__init__.py
index d2e5f88e1..83599cd2b 100644
--- a/trimesh/voxel/__init__.py
+++ b/trimesh/voxel/__init__.py
@@ -1,3 +1,3 @@
 from .base import VoxelGrid
 
-__all__ = ['VoxelGrid']
+__all__ = ["VoxelGrid"]
diff --git a/trimesh/voxel/base.py b/trimesh/voxel/base.py
index 58ec95422..3d4fd476d 100644
--- a/trimesh/voxel/base.py
+++ b/trimesh/voxel/base.py
@@ -17,44 +17,33 @@
 
 
 class VoxelGrid(Geometry):
-    """
-    Store 3D voxels.
-    """
-
     def __init__(self, encoding, transform=None, metadata=None):
+        """
+        Store 3D voxels.
+
+        Parameters
+        --------------
+        encoding
+          A numpy array of voxels, or an encoding object
+        """
        if transform is None:
            transform = np.eye(4)
        if isinstance(encoding, np.ndarray):
            encoding = DenseEncoding(encoding.astype(bool))
        if encoding.dtype != bool:
-            raise ValueError('encoding must have dtype bool')
+            raise ValueError("encoding must have dtype bool")
 
        self._data = caching.DataStore()
-        self.encoding = encoding
-        self._data['transform'] = transforms.Transform(transform)
-        self._cache = caching.Cache(
-            id_function=self._data.__hash__)
+        self._cache = caching.Cache(id_function=self._data.__hash__)
+        self._transform = transforms.Transform(transform, datastore=self._data)
+        self.encoding = encoding
 
        self.metadata = {}
+        # update the mesh metadata with passed metadata
        if isinstance(metadata, dict):
            self.metadata.update(metadata)
        elif metadata is not None:
-            raise ValueError(
-                'metadata should be a dict or None, got %s' % str(metadata))
-
-    def crc(self):
-        util.log.warning(
-            '`geometry.crc()` is deprecated and will ' +
-            'be removed in October 2023: replace ' +
-            'with `geometry.__hash__()` or `hash(geometry)`')
-        return self.__hash__()
-
-    def hash(self):
-        util.log.warning(
-            '`geometry.hash()` is deprecated and will ' +
-            'be removed in October 2023: replace ' +
-            'with `geometry.__hash__()` or `hash(geometry)`')
-        return self.__hash__()
+            raise ValueError(f"metadata should be a dict or None, not {type(metadata)}")
 
     def __hash__(self):
         """
@@ -74,29 +63,24 @@ def encoding(self):
 
         See `trimesh.voxel.encoding` for implementations.
""" - return self._data['encoding'] + return self._data["encoding"] @encoding.setter def encoding(self, encoding): if isinstance(encoding, np.ndarray): encoding = DenseEncoding(encoding) elif not isinstance(encoding, Encoding): - raise ValueError( - 'encoding must be an Encoding, got %s' % str(encoding)) + raise ValueError("encoding must be an Encoding, got %s" % str(encoding)) if len(encoding.shape) != 3: raise ValueError( - 'encoding must be rank 3, got shape %s' % str(encoding.shape)) + "encoding must be rank 3, got shape %s" % str(encoding.shape) + ) if encoding.dtype != bool: - raise ValueError( - 'encoding must be binary, got %s' % encoding.dtype) - self._data['encoding'] = encoding - - @property - def _transform(self): - return self._data['transform'] + raise ValueError("encoding must be binary, got %s" % encoding.dtype) + self._data["encoding"] = encoding @property - def transform(self): + def transform(self) -> NDArray[float64]: """4x4 homogeneous transformation matrix.""" return self._transform.matrix @@ -171,8 +155,8 @@ def bounds(self): indices = self.sparse_indices # get all 8 corners of the AABB corners = bounds_module.corners( - [indices.min(axis=0) - 0.5, - indices.max(axis=0) + 0.5]) + [indices.min(axis=0) - 0.5, indices.max(axis=0) + 0.5] + ) # transform these corners to a new frame corners = self._transform.transform_points(corners) # get the AABB of corners in-frame @@ -220,13 +204,14 @@ def is_filled(self, point): indices = self.points_to_indices(point) in_range = np.logical_and( np.all(indices < np.array(self.shape), axis=-1), - np.all(indices >= 0, axis=-1)) + np.all(indices >= 0, axis=-1), + ) is_filled = np.zeros_like(in_range) is_filled[in_range] = self.encoding.gather_nd(indices[in_range]) return is_filled - def fill(self, method='holes', **kwargs): + def fill(self, method="holes", **kwargs): """ Mutates self by filling in the encoding according to `morphology.fill`. @@ -245,8 +230,7 @@ def fill(self, method='holes', **kwargs): self : VoxelGrid After replacing encoding with a filled version. """ - self.encoding = morphology.fill( - self.encoding, method=method, **kwargs) + self.encoding = morphology.fill(self.encoding, method=method, **kwargs) return self def hollow(self): @@ -319,8 +303,7 @@ def points(self): points : (self.filled, 3) float Points in space. """ - return self._transform.transform_points( - self.sparse_indices.astype(float)) + return self._transform.transform_points(self.sparse_indices.astype(float)) @property def sparse_indices(self): @@ -353,14 +336,13 @@ def as_boxes(self, colors=None, **kwargs): # encoding.as_mask? colors = colors[encoding.dense] else: - log.warning('colors incorrect shape!') + log.warning("colors incorrect shape!") colors = None elif colors.shape not in ((3,), (4,)): - log.warning('colors incorrect shape!') + log.warning("colors incorrect shape!") colors = None - mesh = ops.multibox( - centers=self.sparse_indices.astype(float), colors=colors) + mesh = ops.multibox(centers=self.sparse_indices.astype(float), colors=colors) mesh = mesh.apply_transform(self.transform) return mesh @@ -388,13 +370,10 @@ def show(self, *args, **kwargs): Convert the current set of voxels into a trimesh for visualization and show that via its built- in preview method. 
""" - return self.as_boxes(kwargs.pop( - 'colors', None)).show(*args, **kwargs) + return self.as_boxes(kwargs.pop("colors", None)).show(*args, **kwargs) def copy(self): - return VoxelGrid( - self.encoding.copy(), - self._transform.matrix.copy()) + return VoxelGrid(self.encoding.copy(), self._transform.matrix.copy()) def export(self, file_obj=None, file_type=None, **kwargs): """ @@ -415,14 +394,14 @@ def export(self, file_obj=None, file_type=None, **kwargs): if isinstance(file_obj, str) and file_type is None: file_type = util.split_extension(file_obj).lower() - if file_type != 'binvox': - raise ValueError('only binvox exports supported!') + if file_type != "binvox": + raise ValueError("only binvox exports supported!") exported = export_binvox(self, **kwargs) - if hasattr(file_obj, 'write'): + if hasattr(file_obj, "write"): file_obj.write(exported) elif isinstance(file_obj, str): - with open(file_obj, 'wb') as f: + with open(file_obj, "wb") as f: f.write(exported) return exported @@ -445,14 +424,11 @@ def revoxelized(self, shape): shape = tuple(shape) bounds = self.bounds.copy() extents = self.extents - points = util.grid_linspace( - bounds, shape).reshape(shape + (3,)) + points = util.grid_linspace(bounds, shape).reshape(shape + (3,)) dense = self.is_filled(points) scale = extents / np.asanyarray(shape) translate = bounds[0] - return VoxelGrid( - dense, - transform=tr.scale_and_translate(scale, translate)) + return VoxelGrid(dense, transform=tr.scale_and_translate(scale, translate)) def __add__(self, other): raise NotImplementedError("TODO : implement voxel concatenation") diff --git a/trimesh/voxel/creation.py b/trimesh/voxel/creation.py index 55a67e8ee..85ee37507 100644 --- a/trimesh/voxel/creation.py +++ b/trimesh/voxel/creation.py @@ -8,10 +8,7 @@ @log_time -def voxelize_subdivide(mesh, - pitch, - max_iter=10, - edge_factor=2.0): +def voxelize_subdivide(mesh, pitch, max_iter=10, edge_factor=2.0): """ Voxelize a surface by subdividing a mesh until every edge is shorter than: (pitch / edge_factor) @@ -35,19 +32,19 @@ def voxelize_subdivide(mesh, if max_iter is None: longest_edge = np.linalg.norm( - mesh.vertices[mesh.edges[:, 0]] - - mesh.vertices[mesh.edges[:, 1]], - axis=1).max() - max_iter = max(int(np.ceil(np.log2( - longest_edge / max_edge))), 0) + mesh.vertices[mesh.edges[:, 0]] - mesh.vertices[mesh.edges[:, 1]], axis=1 + ).max() + max_iter = max(int(np.ceil(np.log2(longest_edge / max_edge))), 0) # get the same mesh sudivided so every edge is shorter # than a factor of our pitch - v, f, idx = remesh.subdivide_to_size(mesh.vertices, - mesh.faces, - max_edge=max_edge, - max_iter=max_iter, - return_index=True) + v, f, idx = remesh.subdivide_to_size( + mesh.vertices, + mesh.faces, + max_edge=max_edge, + max_iter=max_iter, + return_index=True, + ) # convert the vertices to their voxel grid position hit = v / pitch @@ -67,16 +64,11 @@ def voxelize_subdivide(mesh, return base.VoxelGrid( enc.SparseBinaryEncoding(occupied_index - origin_index), - transform=tr.scale_and_translate( - scale=pitch, translate=origin_position)) + transform=tr.scale_and_translate(scale=pitch, translate=origin_position), + ) -def local_voxelize(mesh, - point, - pitch, - radius, - fill=True, - **kwargs): +def local_voxelize(mesh, point, pitch, radius, fill=True, **kwargs): """ Voxelize a mesh in the region of a cube around a point. 
When fill=True, uses proximity.contains to fill the resulting voxels so may be meaningless @@ -107,11 +99,12 @@ def local_voxelize(mesh, # this is a gotcha- radius sounds a lot like it should be in # float model space, not int voxel space so check if not isinstance(radius, int): - raise ValueError('radius needs to be an integer number of cubes!') + raise ValueError("radius needs to be an integer number of cubes!") # Bounds of region - bounds = np.concatenate((point - (radius + 0.5) * pitch, - point + (radius + 0.5) * pitch)) + bounds = np.concatenate( + (point - (radius + 0.5) * pitch, point + (radius + 0.5) * pitch) + ) # faces that intersect axis aligned bounding box faces = list(mesh.triangles_tree.intersection(bounds)) @@ -137,14 +130,15 @@ def local_voxelize(mesh, prepad = np.maximum(radius - center, 0) postpad = np.maximum(center + radius + 1 - matrix.shape, 0) - matrix = np.pad(matrix, np.stack((prepad, postpad), axis=-1), - mode='constant') + matrix = np.pad(matrix, np.stack((prepad, postpad), axis=-1), mode="constant") center += prepad # Extract voxels within the bounding box - voxels = matrix[center[0] - radius:center[0] + radius + 1, - center[1] - radius:center[1] + radius + 1, - center[2] - radius:center[2] + radius + 1] + voxels = matrix[ + center[0] - radius : center[0] + radius + 1, + center[1] - radius : center[1] + radius + 1, + center[2] - radius : center[2] + radius + 1, + ] local_origin = point - radius * pitch # origin of local voxels # Fill internal regions @@ -152,12 +146,10 @@ def local_voxelize(mesh, regions, n = ndimage.label(~voxels) distance = ndimage.distance_transform_cdt(~voxels) representatives = [ - np.unravel_index((distance * (regions == i)).argmax(), - distance.shape) for i in range(1, n + 1)] - contains = mesh.contains( - np.asarray(representatives) * - pitch + - local_origin) + np.unravel_index((distance * (regions == i)).argmax(), distance.shape) + for i in range(1, n + 1) + ] + contains = mesh.contains(np.asarray(representatives) * pitch + local_origin) where = np.where(contains)[0] + 1 # use in1d vs isin for older numpy versions @@ -169,9 +161,7 @@ def local_voxelize(mesh, @log_time -def voxelize_ray(mesh, - pitch, - per_cell=None): +def voxelize_ray(mesh, pitch, per_cell=None): """ Voxelize a mesh using ray queries. @@ -225,16 +215,12 @@ def voxelize_ray(mesh, encoding = enc.SparseBinaryEncoding(voxels) origin_position = origin_index * pitch return base.VoxelGrid( - encoding, - tr.scale_and_translate(scale=pitch, translate=origin_position)) + encoding, tr.scale_and_translate(scale=pitch, translate=origin_position) + ) @log_time -def voxelize_binvox(mesh, - pitch=None, - dimension=None, - bounds=None, - **binvoxer_kwargs): +def voxelize_binvox(mesh, pitch=None, dimension=None, bounds=None, **binvoxer_kwargs): """ Voxelize via binvox tool. 
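All three voxelizer backends end up in the `FunctionRegistry` defined just below and are normally reached through the `voxelize` dispatcher (or `Trimesh.voxelized`). A hedged usage sketch (the box mesh and pitch are arbitrary example values):

    import trimesh
    from trimesh.voxel.creation import voxelize

    mesh = trimesh.creation.box(extents=[1.0, 1.0, 1.0])
    # `method` picks one of the registered backends:
    # 'ray', 'subdivide' (the default) or 'binvox'
    vox = voxelize(mesh, pitch=0.1, method='subdivide')
    assert vox.transform.shape == (4, 4)

Note that the 'binvox' backend shells out to the external binvox executable, so it only works where that binary is installed.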
@@ -273,21 +259,20 @@ def voxelize_binvox(mesh, extents = maxs - mins dimension = int(np.ceil(np.max(extents) / pitch)) if bounds is not None: - if 'bounding_box' in binvoxer_kwargs: - raise ValueError('Cannot provide both bounds and bounding_box') - binvoxer_kwargs['bounding_box'] = np.asanyarray(bounds).flatten() + if "bounding_box" in binvoxer_kwargs: + raise ValueError("Cannot provide both bounds and bounding_box") + binvoxer_kwargs["bounding_box"] = np.asanyarray(bounds).flatten() binvoxer = binvox.Binvoxer(dimension=dimension, **binvoxer_kwargs) return binvox.voxelize_mesh(mesh, binvoxer) voxelizers = util.FunctionRegistry( - ray=voxelize_ray, - subdivide=voxelize_subdivide, - binvox=voxelize_binvox) + ray=voxelize_ray, subdivide=voxelize_subdivide, binvox=voxelize_binvox +) -def voxelize(mesh, pitch, method='subdivide', **kwargs): +def voxelize(mesh, pitch, method="subdivide", **kwargs): """ Voxelize the given mesh using the specified implementation. diff --git a/trimesh/voxel/encoding.py b/trimesh/voxel/encoding.py index 8ec3d7981..ade24e022 100644 --- a/trimesh/voxel/encoding.py +++ b/trimesh/voxel/encoding.py @@ -11,13 +11,13 @@ from scipy import sparse as sp except BaseException as E: from ..exceptions import ExceptionWrapper + sp = ExceptionWrapper(E) def _empty_stripped(shape): num_dims = len(shape) - encoding = DenseEncoding( - np.empty(shape=(0,) * num_dims, dtype=bool)) + encoding = DenseEncoding(np.empty(shape=(0,) * num_dims, dtype=bool)) padding = np.zeros(shape=(num_dims, 2), dtype=int) padding[:, 1] = shape return encoding, padding @@ -36,8 +36,7 @@ class Encoding(ABC): def __init__(self, data): self._data = data - self._cache = caching.Cache( - id_function=self._data.__hash__) + self._cache = caching.Cache(id_function=self._data.__hash__) @abc.abstractproperty def dtype(self): @@ -109,7 +108,7 @@ def stripped(self): for dim, size in enumerate(shape): axis = tuple(range(dim)) + tuple(range(dim + 1, ndims)) filled = np.any(dense, axis=axis) - indices, = np.nonzero(filled) + (indices,) = np.nonzero(filled) lower = indices.min() upper = indices.max() + 1 padding.append([lower, size - upper]) @@ -121,16 +120,18 @@ def _flip(self, axes): def crc(self): log.warning( - '`geometry.crc()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `geometry.__hash__()` or `hash(geometry)`') + "`geometry.crc()` is deprecated and will " + + "be removed in October 2023: replace " + + "with `geometry.__hash__()` or `hash(geometry)`" + ) return self.__hash__() def hash(self): log.warning( - '`geometry.hash()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `geometry.__hash__()` or `hash(geometry)`') + "`geometry.hash()` is deprecated and will " + + "be removed in October 2023: replace " + + "with `geometry.__hash__()` or `hash(geometry)`" + ) return self.__hash__() def __hash__(self): @@ -168,14 +169,12 @@ def data(self): def run_length_data(self, dtype=np.int64): if self.ndims != 1: - raise ValueError( - '`run_length_data` only valid for flat encodings') + raise ValueError("`run_length_data` only valid for flat encodings") return runlength.dense_to_rle(self.dense, dtype=dtype) def binary_run_length_data(self, dtype=np.int64): if self.ndims != 1: - raise ValueError( - '`run_length_data` only valid for flat encodings') + raise ValueError("`run_length_data` only valid for flat encodings") return runlength.dense_to_brle(self.dense, dtype=dtype) def transpose(self, perm): @@ -199,7 +198,7 @@ class DenseEncoding(Encoding): def 
__init__(self, data): if not isinstance(data, caching.TrackedArray): if not isinstance(data, np.ndarray): - raise ValueError('DenseEncoding data must be a numpy array') + raise ValueError("DenseEncoding data must be a numpy array") data = caching.tracked_array(data) super().__init__(data=data) @@ -293,45 +292,48 @@ def __init__(self, indices, values, shape=None): """ data = caching.DataStore() super().__init__(data) - data['indices'] = indices - data['values'] = values - indices = data['indices'] + data["indices"] = indices + data["values"] = values + indices = data["indices"] if len(indices.shape) != 2: + raise ValueError("indices must be 2D, got shaped %s" % str(indices.shape)) + if data["values"].shape != (indices.shape[0],): raise ValueError( - 'indices must be 2D, got shaped %s' % str(indices.shape)) - if data['values'].shape != (indices.shape[0],): - raise ValueError( - 'values and indices shapes inconsistent: {} and {}'.format( - data['values'], data['indices'])) + "values and indices shapes inconsistent: {} and {}".format( + data["values"], data["indices"] + ) + ) if shape is None: - self._shape = tuple(data['indices'].max(axis=0) + 1) + self._shape = tuple(data["indices"].max(axis=0) + 1) else: self._shape = tuple(shape) if not np.all(indices < self._shape): - raise ValueError('all indices must be less than shape') + raise ValueError("all indices must be less than shape") if not np.all(indices >= 0): - raise ValueError('all indices must be non-negative') + raise ValueError("all indices must be non-negative") @staticmethod def from_dense(dense_data): sparse_indices = np.where(dense_data) values = dense_data[sparse_indices] return SparseEncoding( - np.stack(sparse_indices, axis=-1), values, shape=dense_data.shape) + np.stack(sparse_indices, axis=-1), values, shape=dense_data.shape + ) def copy(self): return SparseEncoding( indices=self.sparse_indices.copy(), values=self.sparse_values.copy(), - shape=self.shape) + shape=self.shape, + ) @property def sparse_indices(self): - return self._data['indices'] + return self._data["indices"] @property def sparse_values(self): - return self._data['values'] + return self._data["values"] @property def dtype(self): @@ -429,7 +431,8 @@ def SparseBinaryEncoding(indices, shape=None): rank n bool `SparseEncoding` with True values at each index. """ return SparseEncoding( - indices, np.ones(shape=(indices.shape[0],), dtype=bool), shape) + indices, np.ones(shape=(indices.shape[0],), dtype=bool), shape + ) class RunLengthEncoding(Encoding): @@ -446,18 +449,16 @@ def __init__(self, data, dtype=None): dtype: dtype of encoded data. Each second value of data is cast will be cast to this dtype if provided. 
""" - super().__init__( - data=caching.tracked_array(data)) + super().__init__(data=caching.tracked_array(data)) if dtype is None: dtype = self._data.dtype if len(self._data.shape) != 1: - raise ValueError('data must be 1D numpy array') + raise ValueError("data must be 1D numpy array") self._dtype = dtype @caching.cache_decorator def is_empty(self): - return not np.any( - np.logical_and(self._data[::2], self._data[1::2])) + return not np.any(np.logical_and(self._data[::2], self._data[1::2])) @property def ndims(self): @@ -473,16 +474,18 @@ def dtype(self): def crc(self): log.warning( - '`geometry.crc()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `geometry.__hash__()` or `hash(geometry)`') + "`geometry.crc()` is deprecated and will " + + "be removed in October 2023: replace " + + "with `geometry.__hash__()` or `hash(geometry)`" + ) return self.__hash__() def hash(self): log.warning( - '`geometry.hash()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `geometry.__hash__()` or `hash(geometry)`') + "`geometry.hash()` is deprecated and will " + + "be removed in October 2023: replace " + + "with `geometry.__hash__()` or `hash(geometry)`" + ) return self.__hash__() def __hash__(self): @@ -499,7 +502,8 @@ def __hash__(self): @staticmethod def from_dense(dense_data, dtype=np.int64, encoding_dtype=np.int64): return RunLengthEncoding( - runlength.dense_to_rle(dense_data, dtype=encoding_dtype), dtype=dtype) + runlength.dense_to_rle(dense_data, dtype=encoding_dtype), dtype=dtype + ) @staticmethod def from_rle(rle_data, dtype=None): @@ -533,8 +537,7 @@ def size(self): def _flip(self, axes): if axes != (0,): - raise ValueError( - 'encoding is 1D - cannot flip on axis %s' % str(axes)) + raise ValueError("encoding is 1D - cannot flip on axis %s" % str(axes)) return RunLengthEncoding(runlength.rle_reverse(self._data)) @caching.cache_decorator @@ -563,11 +566,11 @@ def gather_nd(self, indices): def sorted_gather(self, ordered_indices): return np.array( tuple(runlength.sorted_rle_gather_1d(self._data, ordered_indices)), - dtype=self._dtype) + dtype=self._dtype, + ) def mask(self, mask): - return np.array( - tuple(runlength.rle_mask(self._data, mask)), dtype=self._dtype) + return np.array(tuple(runlength.rle_mask(self._data, mask)), dtype=self._dtype) def get_value(self, index): for value in self.sorted_gather((index,)): @@ -604,12 +607,12 @@ def is_empty(self): @staticmethod def from_dense(dense_data, encoding_dtype=np.int64): return BinaryRunLengthEncoding( - runlength.dense_to_brle(dense_data, dtype=encoding_dtype)) + runlength.dense_to_brle(dense_data, dtype=encoding_dtype) + ) @staticmethod def from_rle(rle_data, dtype=None): - return BinaryRunLengthEncoding( - runlength.rle_to_brle(rle_data, dtype=dtype)) + return BinaryRunLengthEncoding(runlength.rle_to_brle(rle_data, dtype=dtype)) @staticmethod def from_brle(brle_data, dtype=None): @@ -639,8 +642,7 @@ def size(self): def _flip(self, axes): if axes != (0,): - raise ValueError( - 'encoding is 1D - cannot flip on axis %s' % str(axes)) + raise ValueError("encoding is 1D - cannot flip on axis %s" % str(axes)) return BinaryRunLengthEncoding(runlength.brle_reverse(self._data)) @property @@ -749,11 +751,12 @@ def _to_base_indices(self, indices): def _from_base_indices(self, base_indices): return np.expand_dims( - np.ravel_multi_index(base_indices.T, self._data.shape), axis=-1) + np.ravel_multi_index(base_indices.T, self._data.shape), axis=-1 + ) @property def shape(self): - return self.size, + 
return (self.size,) @property def dense(self): @@ -782,7 +785,7 @@ def __init__(self, encoding, shape): if encoding.ndims != 1: encoding = encoding.flat else: - raise ValueError('encoding must be an Encoding') + raise ValueError("encoding must be an Encoding") super().__init__(data=encoding) self._shape = tuple(shape) nn = self._shape.count(-1) @@ -791,23 +794,24 @@ def __init__(self, encoding, shape): size = np.abs(size) if self._data.size % size != 0: raise ValueError( - 'cannot reshape encoding of size %d into shape %s' % - (self._data.size, str(self._shape))) + "cannot reshape encoding of size %d into shape %s" + % (self._data.size, str(self._shape)) + ) rem = self._data.size // size self._shape = tuple(rem if s == -1 else s for s in self._shape) elif nn > 2: - raise ValueError('shape cannot have more than one -1 value') + raise ValueError("shape cannot have more than one -1 value") elif np.prod(self._shape) != self._data.size: raise ValueError( - 'cannot reshape encoding of size %d into shape %s' % - (self._data.size, str(self._shape))) + "cannot reshape encoding of size %d into shape %s" + % (self._data.size, str(self._shape)) + ) def _from_base_indices(self, base_indices): return np.column_stack(np.unravel_index(base_indices, self.shape)) def _to_base_indices(self, indices): - return np.expand_dims( - np.ravel_multi_index(indices.T, self.shape), axis=-1) + return np.expand_dims(np.ravel_multi_index(indices.T, self.shape), axis=-1) @property def flat(self): @@ -838,16 +842,17 @@ class TransposedEncoding(LazyIndexMap): def __init__(self, base_encoding, perm): if not isinstance(base_encoding, Encoding): raise ValueError( - 'base_encoding must be an Encoding, got %s' - % str(base_encoding)) + "base_encoding must be an Encoding, got %s" % str(base_encoding) + ) if len(base_encoding.shape) != len(perm): raise ValueError( - 'base_encoding has %d ndims - cannot transpose with perm %s' - % (base_encoding.ndims, str(perm))) + "base_encoding has %d ndims - cannot transpose with perm %s" + % (base_encoding.ndims, str(perm)) + ) super().__init__(base_encoding) perm = np.array(perm, dtype=np.int64) if not all(i in perm for i in range(base_encoding.ndims)): - raise ValueError('perm %s is not a valid permutation' % str(perm)) + raise ValueError("perm %s is not a valid permutation" % str(perm)) inv_perm = np.empty_like(perm) inv_perm[perm] = np.arange(base_encoding.ndims) self._perm = perm @@ -857,7 +862,7 @@ def transpose(self, perm): return _transposed(self._data, [self._perm[p] for p in perm]) def _transpose(self, perm): - raise RuntimeError('Should not be here') + raise RuntimeError("Should not be here") @property def perm(self): @@ -876,9 +881,9 @@ def _from_base_indices(self, base_indices): return np.take(base_indices, self._inv_perm, axis=-1) except TypeError: # windows sometimes tries to use wrong dtypes - return np.take(base_indices.astype(np.int64), - self._inv_perm.astype(np.int64), - axis=-1) + return np.take( + base_indices.astype(np.int64), self._inv_perm.astype(np.int64), axis=-1 + ) @property def dense(self): @@ -888,8 +893,7 @@ def gather(self, indices): return self._data.gather(self._base_indices(indices)) def mask(self, mask): - return self._data.mask( - mask.transpose(self._inv_perm)).transpose(self._perm) + return self._data.mask(mask.transpose(self._inv_perm)).transpose(self._perm) def get_value(self, index): return self._data[tuple(self._base_indices(index))] @@ -899,8 +903,7 @@ def data(self): return self._data def copy(self): - return TransposedEncoding( - 
base_encoding=self._data.copy(), perm=self._perm) + return TransposedEncoding(base_encoding=self._data.copy(), perm=self._perm) class FlippedEncoding(LazyIndexMap): @@ -913,19 +916,18 @@ class FlippedEncoding(LazyIndexMap): def __init__(self, encoding, axes): ndims = encoding.ndims if isinstance(axes, np.ndarray) and axes.size == 1: - axes = axes.item(), + axes = (axes.item(),) elif isinstance(axes, int): - axes = axes, + axes = (axes,) axes = tuple(a + ndims if a < 0 else a for a in axes) self._axes = tuple(sorted(axes)) if len(set(self._axes)) != len(self._axes): - raise ValueError( - "Axes cannot contain duplicates, got %s" % str(self._axes)) + raise ValueError("Axes cannot contain duplicates, got %s" % str(self._axes)) super().__init__(encoding) if not all(0 <= a < self._data.ndims for a in axes): raise ValueError( - 'Invalid axes %s for %d-d encoding' - % (str(axes), self._data.ndims)) + "Invalid axes %s for %d-d encoding" % (str(axes), self._data.ndims) + ) def _to_base_indices(self, indices): indices = indices.copy() @@ -961,22 +963,22 @@ def copy(self): def flip(self, axis=0): if isinstance(axis, np.ndarray): if axis.size == 1: - axis = axis.item(), + axis = (axis.item(),) else: axis = tuple(axis) elif isinstance(axis, int): - axes = axis, + axes = (axis,) else: axes = tuple(axis) return _flipped(self, self._axes + axes) def _flip(self, axes): - raise RuntimeError('Should not be here') + raise RuntimeError("Should not be here") def _flipped(encoding, axes): - if not hasattr(axes, '__iter__'): - axes = axes, + if not hasattr(axes, "__iter__"): + axes = (axes,) unique_ax = set() ndims = encoding.ndims axes = tuple(a + ndims if a < 0 else a for a in axes) diff --git a/trimesh/voxel/morphology.py b/trimesh/voxel/morphology.py index fc3cf41c5..c4a72b335 100644 --- a/trimesh/voxel/morphology.py +++ b/trimesh/voxel/morphology.py @@ -11,6 +11,7 @@ except BaseException as E: # scipy is a soft dependency from ..exceptions import ExceptionWrapper + ndimage = ExceptionWrapper(E) @@ -21,7 +22,8 @@ def _dense(encoding, rank=None): dense = encoding.dense else: raise ValueError( - 'encoding must be np.ndarray or Encoding, got %s' % str(encoding)) + "encoding must be np.ndarray or Encoding, got %s" % str(encoding) + ) if rank: _assert_rank(dense, rank) return dense @@ -34,7 +36,8 @@ def _sparse_indices(encoding, rank=None): sparse_indices = encoding.sparse_indices else: raise ValueError( - 'encoding must be np.ndarray or Encoding, got %s' % str(encoding)) + "encoding must be np.ndarray or Encoding, got %s" % str(encoding) + ) _assert_sparse_rank(sparse_indices, 3) return sparse_indices @@ -42,19 +45,19 @@ def _sparse_indices(encoding, rank=None): def _assert_rank(value, rank): if len(value.shape) != rank: - raise ValueError( - 'Expected rank %d, got shape %s' % (rank, str(value.shape))) + raise ValueError("Expected rank %d, got shape %s" % (rank, str(value.shape))) def _assert_sparse_rank(value, rank=None): if len(value.shape) != 2: raise ValueError( - 'sparse_indices must be rank 2, got shape %s' % str(value.shape)) + "sparse_indices must be rank 2, got shape %s" % str(value.shape) + ) if rank is not None: if value.shape[-1] != rank: raise ValueError( - 'sparse_indices.shape[1] must be %d, got %d' - % (rank, value.shape[-1])) + "sparse_indices.shape[1] must be %d, got %d" % (rank, value.shape[-1]) + ) @log_time @@ -70,8 +73,7 @@ def fill_base(encoding): -------------- A new filled encoding object. 
""" - return enc.SparseBinaryEncoding( - ops.fill_base(_sparse_indices(encoding, rank=3))) + return enc.SparseBinaryEncoding(ops.fill_base(_sparse_indices(encoding, rank=3))) @log_time @@ -111,7 +113,8 @@ def fill_holes(encoding, **kwargs): A new filled in encoding object. """ return enc.DenseEncoding( - ndimage.binary_fill_holes(_dense(encoding, rank=3), **kwargs)) + ndimage.binary_fill_holes(_dense(encoding, rank=3), **kwargs) + ) fillers = util.FunctionRegistry( @@ -121,7 +124,7 @@ def fill_holes(encoding, **kwargs): ) -def fill(encoding, method='base', **kwargs): +def fill(encoding, method="base", **kwargs): """ Fill the given encoding using the specified implementation. @@ -151,7 +154,8 @@ def binary_dilation(encoding, **kwargs): https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.ndimage.morphology.binary_dilation.html#scipy.ndimage.morphology.binary_dilation """ return enc.DenseEncoding( - ndimage.binary_dilation(_dense(encoding, rank=3), **kwargs)) + ndimage.binary_dilation(_dense(encoding, rank=3), **kwargs) + ) def binary_closing(encoding, **kwargs): @@ -160,8 +164,7 @@ def binary_closing(encoding, **kwargs): https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.ndimage.morphology.binary_closing.html#scipy.ndimage.morphology.binary_closing """ - return enc.DenseEncoding( - ndimage.binary_closing(_dense(encoding, rank=3), **kwargs)) + return enc.DenseEncoding(ndimage.binary_closing(_dense(encoding, rank=3), **kwargs)) def surface(encoding, structure=None): @@ -182,7 +185,7 @@ def surface(encoding, structure=None): """ dense = _dense(encoding, rank=3) # padding/unpadding resolves issues with occupied voxels on the boundary - dense = np.pad(dense, np.ones((3, 2), dtype=int), mode='constant') + dense = np.pad(dense, np.ones((3, 2), dtype=int), mode="constant") empty = np.logical_not(dense) dilated = ndimage.binary_dilation(empty, structure=structure) surface = np.logical_and(dense, dilated)[1:-1, 1:-1, 1:-1] diff --git a/trimesh/voxel/ops.py b/trimesh/voxel/ops.py index af69ec453..ac0123b85 100644 --- a/trimesh/voxel/ops.py +++ b/trimesh/voxel/ops.py @@ -7,8 +7,8 @@ def fill_orthographic(dense): shape = dense.shape indices = np.stack( - np.meshgrid(*(np.arange(s) for s in shape), indexing='ij'), - axis=-1) + np.meshgrid(*(np.arange(s) for s in shape), indexing="ij"), axis=-1 + ) empty = np.logical_not(dense) def fill_axis(axis): @@ -46,15 +46,12 @@ def fill_base(sparse_indices): # validate inputs sparse_indices = np.asanyarray(sparse_indices, dtype=np.int64) if not util.is_shape(sparse_indices, (-1, 3)): - raise ValueError('incorrect shape') + raise ValueError("incorrect shape") # create grid and mark inner voxels max_value = sparse_indices.max() + 3 - grid = np.zeros((max_value, - max_value, - max_value), - bool) + grid = np.zeros((max_value, max_value, max_value), bool) voxels_sparse = np.add(sparse_indices, 1) grid[tuple(voxels_sparse.T)] = 1 @@ -72,7 +69,7 @@ def fill_base(sparse_indices): if c < 4: continue for s in range(0, c - c % 4, 4): - grid[i, j, idx[s]:idx[s + 3]] = 1 + grid[i, j, idx[s] : idx[s + 3]] = 1 if not check_dir2: continue @@ -86,7 +83,7 @@ def fill_base(sparse_indices): if c < 4: continue for s in range(0, c - c % 4, 4): - grid[i, idx[s]:idx[s + 3], k] = 1 + grid[i, idx[s] : idx[s + 3], k] = 1 # generate new voxels filled = np.column_stack(np.where(grid)) @@ -123,13 +120,12 @@ def matrix_to_marching_cubes(matrix, pitch=1.0): # Add in padding so marching cubes can function properly with # voxels on edge of AABB pad_width = 1 - 
rev_matrix = np.pad(rev_matrix, - pad_width=(pad_width), - mode='constant', - constant_values=(1)) + rev_matrix = np.pad( + rev_matrix, pad_width=(pad_width), mode="constant", constant_values=(1) + ) # pick between old and new API - if hasattr(measure, 'marching_cubes_lewiner'): + if hasattr(measure, "marching_cubes_lewiner"): func = measure.marching_cubes_lewiner else: func = measure.marching_cubes @@ -138,15 +134,15 @@ def matrix_to_marching_cubes(matrix, pitch=1.0): pitch = np.asanyarray(pitch) if pitch.size == 1: pitch = (pitch,) * 3 - meshed = func(volume=rev_matrix, - level=.5, # it is a boolean voxel grid - spacing=pitch) + meshed = func( + volume=rev_matrix, level=0.5, spacing=pitch # it is a boolean voxel grid + ) # allow results from either marching cubes function in skimage # binaries available for python 3.3 and 3.4 appear to use the classic # method if len(meshed) == 2: - log.warning('using old marching cubes, may not be watertight!') + log.warning("using old marching cubes, may not be watertight!") vertices, faces = meshed normals = None elif len(meshed) == 4: @@ -155,9 +151,7 @@ def matrix_to_marching_cubes(matrix, pitch=1.0): # Return to the origin, add in the pad_width vertices = np.subtract(vertices, pad_width * pitch) # create the mesh - mesh = Trimesh(vertices=vertices, - faces=faces, - vertex_normals=normals) + mesh = Trimesh(vertices=vertices, faces=faces, vertex_normals=normals) return mesh @@ -179,7 +173,7 @@ def sparse_to_matrix(sparse): sparse = np.asanyarray(sparse, dtype=np.int64) if not util.is_shape(sparse, (-1, 3)): - raise ValueError('sparse must be (n,3)!') + raise ValueError("sparse must be (n,3)!") shape = sparse.max(axis=0) + 1 matrix = np.zeros(np.prod(shape), dtype=bool) @@ -250,24 +244,21 @@ def multibox(centers, pitch=1.0, colors=None): from ..base import Trimesh # get centers as numpy array - centers = np.asanyarray( - centers, dtype=np.float64) + centers = np.asanyarray(centers, dtype=np.float64) # get a basic box b = primitives.Box() # apply the pitch b.apply_scale(float(pitch)) # tile into one box vertex per center - v = np.tile( - centers, - (1, len(b.vertices))).reshape((-1, 3)) + v = np.tile(centers, (1, len(b.vertices))).reshape((-1, 3)) # offset to centers v += np.tile(b.vertices, (len(centers), 1)) f = np.tile(b.faces, (len(centers), 1)) f += np.tile( - np.arange(len(centers)) * len(b.vertices), - (len(b.faces), 1)).T.reshape((-1, 1)) + np.arange(len(centers)) * len(b.vertices), (len(b.faces), 1) + ).T.reshape((-1, 1)) face_colors = None if colors is not None: @@ -277,9 +268,7 @@ def multibox(centers, pitch=1.0, colors=None): if colors.ndim == 2 and len(colors) == len(centers): face_colors = colors.repeat(12, axis=0) - mesh = Trimesh(vertices=v, - faces=f, - face_colors=face_colors) + mesh = Trimesh(vertices=v, faces=f, face_colors=face_colors) return mesh @@ -306,20 +295,13 @@ def boolean_sparse(a, b, operation=np.logical_and): import sparse # find the bounding box of both arrays - extrema = np.array([a.min(axis=0), - a.max(axis=0), - b.min(axis=0), - b.max(axis=0)]) + extrema = np.array([a.min(axis=0), a.max(axis=0), b.min(axis=0), b.max(axis=0)]) origin = extrema.min(axis=0) - 1 size = tuple(extrema.ptp(axis=0) + 2) # put nearby voxel arrays into same shape sparse array - sp_a = sparse.COO((a - origin).T, - data=np.ones(len(a), dtype=bool), - shape=size) - sp_b = sparse.COO((b - origin).T, - data=np.ones(len(b), dtype=bool), - shape=size) + sp_a = sparse.COO((a - origin).T, data=np.ones(len(a), dtype=bool), shape=size) + sp_b = 
sparse.COO((b - origin).T, data=np.ones(len(b), dtype=bool), shape=size) # apply the logical operation # get a sparse matrix out @@ -338,7 +320,7 @@ def strip_array(data): for dim in range(len(shape)): axis = tuple(range(dim)) + tuple(range(dim + 1, ndims)) filled = np.any(data, axis=axis) - indices, = np.nonzero(filled) + (indices,) = np.nonzero(filled) pad_left = indices[0] pad_right = indices[-1] padding.append([pad_left, pad_right]) @@ -362,7 +344,7 @@ def indices_to_points(indices, pitch=None, origin=None): """ indices = np.asanyarray(indices) if indices.shape[1:] != (3,): - raise ValueError('shape of indices must be (q, 3)') + raise ValueError("shape of indices must be (q, 3)") points = np.array(indices, dtype=np.float64) if pitch is not None: @@ -370,7 +352,7 @@ def indices_to_points(indices, pitch=None, origin=None): if origin is not None: origin = np.asanyarray(origin) if origin.shape != (3,): - raise ValueError('shape of origin must be (3,)') + raise ValueError("shape of origin must be (3,)") points += origin return points @@ -391,9 +373,7 @@ def matrix_to_points(matrix, pitch=None, origin=None): points: (q, 3) list of points """ indices = np.column_stack(np.nonzero(matrix)) - points = indices_to_points(indices=indices, - pitch=pitch, - origin=origin) + points = indices_to_points(indices=indices, pitch=pitch, origin=origin) return points @@ -417,12 +397,12 @@ def points_to_indices(points, pitch=None, origin=None): """ points = np.array(points, dtype=np.float64) if points.shape != (points.shape[0], 3): - raise ValueError('shape of points must be (q, 3)') + raise ValueError("shape of points must be (q, 3)") if origin is not None: origin = np.asanyarray(origin) if origin.shape != (3,): - raise ValueError('shape of origin must be (3,)') + raise ValueError("shape of origin must be (3,)") points -= origin if pitch is not None: points /= pitch diff --git a/trimesh/voxel/runlength.py b/trimesh/voxel/runlength.py index ae8a0abd6..a466cc7fa 100644 --- a/trimesh/voxel/runlength.py +++ b/trimesh/voxel/runlength.py @@ -85,8 +85,7 @@ def rle_to_brle(rle, dtype=None): for value, count in np.reshape(rle, (-1, 2)): acc += count if value not in (0, 1): - raise ValueError( - "Invalid run length encoding for conversion to BRLE") + raise ValueError("Invalid run length encoding for conversion to BRLE") if value == curr_val: out[-1] += count else: @@ -117,7 +116,7 @@ def brle_logical_not(brle): element-wise not of the input. 
""" if brle[0] or brle[-1]: - return np.pad(brle, [1, 1], mode='constant') + return np.pad(brle, [1, 1], mode="constant") else: return brle[1:-1] @@ -163,8 +162,11 @@ def split_long_brle_lengths(lengths, dtype=np.int64): remainders = (lengths % max_val).astype(dtype) lengths = np.concatenate( - [np.array([max_val, 0] * repeat + [remainder], dtype=dtype) - for repeat, remainder in zip(repeats, remainders)]) + [ + np.array([max_val, 0] * repeat + [remainder], dtype=dtype) + for repeat, remainder in zip(repeats, remainders) + ] + ) lengths = lengths.reshape((np.sum(repeats) * 2 + nl,)).astype(dtype) return lengths elif lengths.dtype != dtype: @@ -199,7 +201,7 @@ def dense_to_brle(dense_data, dtype=np.int64): lengths = np.diff(np.r_[starts, n]) lengths = split_long_brle_lengths(lengths, dtype=dtype) if dense_data[0]: - lengths = np.pad(lengths, [1, 0], mode='constant') + lengths = np.pad(lengths, [1, 0], mode="constant") return lengths @@ -229,9 +231,8 @@ def brle_to_dense(brle_data, vals=None): vals = np.asarray(vals) if vals.shape != (2,): raise ValueError("vals.shape must be (2,), got %s" % (vals.shape)) - ft = np.repeat( - _ft[np.newaxis, :], (len(brle_data) + 1) // 2, axis=0).flatten() - return np.repeat(ft[:len(brle_data)], brle_data).flatten() + ft = np.repeat(_ft[np.newaxis, :], (len(brle_data) + 1) // 2, axis=0).flatten() + return np.repeat(ft[: len(brle_data)], brle_data).flatten() def rle_to_dense(rle_data, dtype=np.int64): @@ -240,12 +241,13 @@ def rle_to_dense(rle_data, dtype=np.int64): if dtype is not None: values = np.asanyarray(values, dtype=dtype) try: - result = np.repeat(np.squeeze(values, axis=-1), - np.squeeze(counts, axis=-1)) + result = np.repeat(np.squeeze(values, axis=-1), np.squeeze(counts, axis=-1)) except TypeError: # on windows it sometimes fails to cast data type - result = np.repeat(np.squeeze(values.astype(np.int64), axis=-1), - np.squeeze(counts.astype(np.int64), axis=-1)) + result = np.repeat( + np.squeeze(values.astype(np.int64), axis=-1), + np.squeeze(counts.astype(np.int64), axis=-1), + ) return result @@ -318,8 +320,7 @@ def brle_to_rle(brle, dtype=np.int64): brle = np.concatenate([brle, [0]]) lengths = brle values = np.tile(_ft, len(brle) // 2) - return rle_to_rle( - np.stack((values, lengths), axis=1).flatten(), dtype=dtype) + return rle_to_rle(np.stack((values, lengths), axis=1).flatten(), dtype=dtype) def brle_to_brle(brle, dtype=np.int64): @@ -350,9 +351,7 @@ def _unsorted_gatherer(indices, sorted_gather_fn): ordered_indices = indices[order] def f(data, dtype=None): - result = np.empty( - len(order), dtype=dtype or getattr( - data, 'dtype', None)) + result = np.empty(len(order), dtype=dtype or getattr(data, "dtype", None)) result[order] = tuple(sorted_gather_fn(data, ordered_indices)) return result @@ -390,8 +389,8 @@ def sorted_rle_gather_1d(rle_data, ordered_indices): start += next(data_iter) except StopIteration: raise IndexError( - 'Index %d out of range of raw_values length %d' - % (index, start)) + "Index %d out of range of raw_values length %d" % (index, start) + ) try: while index < start: yield value @@ -533,8 +532,8 @@ def sorted_brle_gather_1d(brle_data, ordered_indices): start += next(data_iter) except StopIteration: raise IndexError( - 'Index %d out of range of raw_values length %d' - % (index, start)) + "Index %d out of range of raw_values length %d" % (index, start) + ) try: while index < start: yield value @@ -564,7 +563,8 @@ def brle_gatherer_1d(indices): or rle_data.dtype if no dtype is provided. 
""" return functools.partial( - _unsorted_gatherer(indices, sorted_brle_gather_1d), dtype=bool) + _unsorted_gatherer(indices, sorted_brle_gather_1d), dtype=bool + ) def brle_gather_1d(brle_data, indices): @@ -634,8 +634,7 @@ def rle_to_sparse(rle_data): def brle_to_sparse(brle_data, dtype=np.int64): ends = np.cumsum(brle_data) - indices = [np.arange(s, e, dtype=dtype) for s, e in - zip(ends[::2], ends[1::2])] + indices = [np.arange(s, e, dtype=dtype) for s, e in zip(ends[::2], ends[1::2])] return np.concatenate(indices) @@ -672,8 +671,7 @@ def rle_strip(rle_data): else: end += count - rle_data = rle_data[ - final_i:None if final_j == 0 else -final_j].reshape((-1,)) + rle_data = rle_data[final_i : None if final_j == 0 else -final_j].reshape((-1,)) return rle_data, (start, end) @@ -712,6 +710,6 @@ def brle_strip(brle_data): else: end += count - brle_data = brle_data[final_i:None if final_j == 0 else -final_j] + brle_data = brle_data[final_i : None if final_j == 0 else -final_j] brle_data = np.concatenate([[0], brle_data]) return brle_data, (start, end) diff --git a/trimesh/voxel/transforms.py b/trimesh/voxel/transforms.py index f2b50a7cb..ac4729465 100644 --- a/trimesh/voxel/transforms.py +++ b/trimesh/voxel/transforms.py @@ -1,5 +1,7 @@ import numpy as np +from typing import Optional + from .. import caching, util from .. import transformations as tr @@ -12,41 +14,33 @@ class Transform: for the voxels, including pitch and origin. """ - def __init__(self, matrix): + def __init__(self, matrix, datastore: Optional[caching.DataStore] = None): """ - Initialize with a transform + Initialize with a transform. Parameters ----------- matrix : (4, 4) float Homogeneous transformation matrix + datastore + If passed store the actual values in a reference to + another datastore. """ matrix = np.asanyarray(matrix, dtype=np.float64) - if matrix.shape != (4, 4): - raise ValueError('matrix must be 4x4!') - - if not np.all(matrix[3, :] == [0, 0, 0, 1]): - raise ValueError('matrix not a valid transformation matrix') + if matrix.shape != (4, 4) or not np.allclose(matrix[3, :], [0, 0, 0, 1]): + raise ValueError("matrix is invalid!") # store matrix as data - self._data = caching.tracked_array(matrix, dtype=np.float64) + if datastore is None: + self._data = caching.DataStore() + elif isinstance(datastore, caching.DataStore): + self._data = datastore + else: + raise ValueError(f"{type(datastore)} != caching.DataStore") + + self._data["transform_matrix"] = matrix # dump cache when matrix changes - self._cache = caching.Cache( - id_function=self._data.__hash__) - - def crc(self): - util.log.warning( - '`geometry.crc()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `geometry.__hash__()` or `hash(geometry)`') - return self.__hash__() - - def hash(self): - util.log.warning( - '`geometry.hash()` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `geometry.__hash__()` or `hash(geometry)`') - return self.__hash__() + self._cache = caching.Cache(id_function=self._data.__hash__) def __hash__(self): """ @@ -69,7 +63,7 @@ def translation(self): translation : (3,) float Cartesian translation """ - return self._data[:3, 3] + return self._data["transform_matrix"][:3, 3] @property def matrix(self): @@ -81,10 +75,10 @@ def matrix(self): matrix : (4, 4) float Transformation matrix """ - return self._data + return self._data["transform_matrix"] @matrix.setter - def matrix(self, data): + def matrix(self, values): """ Set the homogeneous transformation matrix. 
@@ -93,10 +87,10 @@ def matrix(self, data): matrix : (4, 4) float Transformation matrix """ - data = np.asanyarray(data, dtype=np.float64) - if data.shape != (4, 4): - raise ValueError('matrix must be (4, 4)!') - self._data = caching.tracked_array(data, dtype=np.float64) + values = np.asanyarray(values, dtype=np.float64) + if values.shape != (4, 4): + raise ValueError("matrix must be (4, 4)!") + self._data["transform_matrix"] = values @caching.cache_decorator def scale(self): @@ -112,26 +106,21 @@ def scale(self): matrix = self.matrix # get the (3,) diagonal of the rotation component scale = np.diag(matrix[:3, :3]) - if not np.allclose( - matrix[:3, :3], - scale * np.eye(3), - scale * 1e-6 + 1e-8): - raise RuntimeError('transform features a shear or rotation') + if not np.allclose(matrix[:3, :3], scale * np.eye(3), scale * 1e-6 + 1e-8): + raise RuntimeError("transform features a shear or rotation") return scale @caching.cache_decorator def pitch(self): scale = self.scale - if not util.allclose( - scale[0], scale[1:], - np.max(np.abs(scale)) * 1e-6 + 1e-8): - raise RuntimeError('transform features non-uniform scaling') + if not util.allclose(scale[0], scale[1:], np.max(np.abs(scale)) * 1e-6 + 1e-8): + raise RuntimeError("transform features non-uniform scaling") return scale @caching.cache_decorator def unit_volume(self): """Volume of a transformed unit cube.""" - return np.linalg.det(self._data[:3, :3]) + return np.linalg.det(self._data["transform_matrix"][:3, :3]) def apply_transform(self, matrix): """Mutate the transform in-place and return self.""" @@ -164,16 +153,17 @@ def transform_points(self, points): """ if self.is_identity: return points.copy() - return tr.transform_points( - points.reshape(-1, 3), self.matrix).reshape(points.shape) + return tr.transform_points(points.reshape(-1, 3), self.matrix).reshape( + points.shape + ) def inverse_transform_points(self, points): """Apply the inverse transformation to points (not in-place).""" if self.is_identity: return points - return tr.transform_points( - points.reshape(-1, 3), - self.inverse_matrix).reshape(points.shape) + return tr.transform_points(points.reshape(-1, 3), self.inverse_matrix).reshape( + points.shape + ) @caching.cache_decorator def inverse_matrix(self): @@ -182,7 +172,7 @@ def inverse_matrix(self): return inv def copy(self): - return Transform(self._data.copy()) + return Transform(matrix=self.matrix) @caching.cache_decorator def is_identity(self): From 5cffea114aa095bfb48f91d625269df2863a2218 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 14 Sep 2023 13:57:45 -0400 Subject: [PATCH 066/144] fix import --- trimesh/voxel/base.py | 1 + trimesh/voxel/transforms.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/trimesh/voxel/base.py b/trimesh/voxel/base.py index 3d4fd476d..c7c3d40e2 100644 --- a/trimesh/voxel/base.py +++ b/trimesh/voxel/base.py @@ -12,6 +12,7 @@ from ..constants import log from ..exchange.binvox import export_binvox from ..parent import Geometry +from ..typed import NDArray, float64 from . import morphology, ops, transforms from .encoding import DenseEncoding, Encoding diff --git a/trimesh/voxel/transforms.py b/trimesh/voxel/transforms.py index ac4729465..0cf7969f5 100644 --- a/trimesh/voxel/transforms.py +++ b/trimesh/voxel/transforms.py @@ -1,7 +1,7 @@ -import numpy as np - from typing import Optional +import numpy as np + from .. import caching, util from .. 
import transformations as tr From 7cac99e1167baddeeda404c0de2b9dca79ef1fb7 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 14 Sep 2023 15:00:39 -0400 Subject: [PATCH 067/144] skip broken extensions --- pyproject.toml | 10 ++++------ trimesh/exchange/gltf.py | 20 +++++++++++--------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 1b7d87e6e..59e791848 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -128,9 +128,7 @@ ignore = [ ] line-length = 90 -[tool.autopep8] -max_line_length = 90 -in-place = true -recursive = true -aggressive = 3 -verbose = true \ No newline at end of file + +[tool.black] +line-length = 90 +target-version = ['py37'] \ No newline at end of file diff --git a/trimesh/exchange/gltf.py b/trimesh/exchange/gltf.py index eb6d66e2a..09418dd44 100644 --- a/trimesh/exchange/gltf.py +++ b/trimesh/exchange/gltf.py @@ -94,7 +94,7 @@ def export_gltf(scene, embed_buffers : bool Embed the buffer into JSON file as a base64 string in the URI extension_webp : bool - Export textures to WebP (using glTF's EXT_texture_webp extension). + Export textures as webP (using glTF's EXT_texture_webp extension). Returns ---------- @@ -186,7 +186,7 @@ def export_glb( Custom function to (in-place) post-process the tree before exporting. extension_webp : bool - Export textures to WebP (using glTF's EXT_texture_webp extension). + Export textures as webP using EXT_texture_webp extension. Returns ---------- @@ -621,7 +621,7 @@ def _create_gltf_structure(scene, unitize_normals : bool Unitize all exported normals so as to pass GLTF validation extension_webp : bool - Export textures to WebP (using glTF's EXT_texture_webp extension). + Export textures as webP using EXT_texture_webp extension. Returns --------------- @@ -777,7 +777,7 @@ def _append_mesh(mesh, mat_hashes : dict Which materials have already been added extension_webp : bool - Export textures to WebP (using glTF's EXT_texture_webp extension). + Export textures as webP (using glTF's EXT_texture_webp extension). """ # return early from empty meshes to avoid crashing later if len(mesh.faces) == 0 or len(mesh.vertices) == 0: @@ -1258,8 +1258,9 @@ def parse_values_and_textures(input_dict): if "EXT_texture_webp" in texture["extensions"]: idx = texture["extensions"]["EXT_texture_webp"]["source"] else: - raise ValueError("unsupported texture extension" - "in {texture['extensions']}!") + broken = list(texture['extensions'].keys()) + log.debug( + f"unsupported texture extension `{broken}`") else: # fallback (or primary, if extensions are not present) idx = texture["source"] @@ -1784,7 +1785,7 @@ def _append_image(img, tree, buffer_items, extension_webp): buffer_items : (n,) bytes Binary blobs containing data extension_webp : bool - Export textures to WebP (using glTF's EXT_texture_webp extension). + Export textures as webP (using glTF's EXT_texture_webp extension). Returns ----------- @@ -1821,6 +1822,7 @@ def _append_image(img, tree, buffer_items, extension_webp): # index is length minus one return len(tree['images']) - 1 + def _append_material(mat, tree, buffer_items, mat_hashes, extension_webp): """ Add passed PBRMaterial as GLTF 2.0 specification JSON @@ -1841,7 +1843,7 @@ def _append_material(mat, tree, buffer_items, mat_hashes, extension_webp): Which materials have already been added Stored as { hashed : material index } extension_webp : bool - Export textures to WebP (using glTF's EXT_texture_webp extension). + Export textures as webP using EXT_texture_webp extension. 
Returns ------------- @@ -1920,7 +1922,7 @@ def _append_material(mat, tree, buffer_items, mat_hashes, extension_webp): # add a reference to the base color texture result[key] = {'index': len(tree['textures'])} - # add an object for the texture (possibly according to the WebP extension) + # add an object for the texture according to the WebP extension if extension_webp: tree['textures'].append({'extensions': {'EXT_texture_webp': {'source': index}}}) From f5cc44b35ff5be20935c0b946d03a8d30b6e2f19 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 14 Sep 2023 15:03:39 -0400 Subject: [PATCH 068/144] make extension optional --- trimesh/exchange/gltf.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/trimesh/exchange/gltf.py b/trimesh/exchange/gltf.py index 09418dd44..209175716 100644 --- a/trimesh/exchange/gltf.py +++ b/trimesh/exchange/gltf.py @@ -1253,14 +1253,13 @@ def parse_values_and_textures(input_dict): try: texture = header["textures"][v["index"]] - # extensions - if "extensions" in texture: - if "EXT_texture_webp" in texture["extensions"]: - idx = texture["extensions"]["EXT_texture_webp"]["source"] - else: - broken = list(texture['extensions'].keys()) - log.debug( - f"unsupported texture extension `{broken}`") + # check to see if this is using a webp extension texture + # should this be case sensitive? + webp = texture.get( + 'extensions', {}).get( + 'EXT_texture_webp', {}).get('source') + if webp is not None: + idx = webp else: # fallback (or primary, if extensions are not present) idx = texture["source"] From 18fbbf4e60079530215e29287a9fbb07974e6a2c Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 14 Sep 2023 15:33:35 -0400 Subject: [PATCH 069/144] add second moments to polygon identifier --- trimesh/path/polygons.py | 47 +++++++++++++++++++++++++++++----------- 1 file changed, 34 insertions(+), 13 deletions(-) diff --git a/trimesh/path/polygons.py b/trimesh/path/polygons.py index bf54c339b..506d4f7cc 100644 --- a/trimesh/path/polygons.py +++ b/trimesh/path/polygons.py @@ -412,7 +412,10 @@ def medial_axis(polygon, resolution=None, clip=None): resolution = np.reshape(polygon.bounds, (2, 2)).ptp(axis=0).max() / 100 # get evenly spaced points on the polygons boundaries - samples = resample_boundaries(polygon=polygon, resolution=resolution, clip=clip) + samples = resample_boundaries( + polygon=polygon, + resolution=resolution, + clip=clip) # stack the boundary into a (m,2) float array samples = stack_boundaries(samples) # create the voronoi diagram on 2D points @@ -455,8 +458,8 @@ def identifier(polygon: Polygon) -> NDArray[float64]: Returns --------- - hashed : (10), - Some values that should be unique for this polygon. + identifier : (8,) float + Values which should be unique for this polygon. """ result = [ len(polygon.interiors), @@ -466,7 +469,11 @@ def identifier(polygon: Polygon) -> NDArray[float64]: polygon.length, polygon.exterior.length, ] - result.extend(polygon.bounds) + # include the principal second moments of inertia of the polygon + # this is invariant to rotation and translation + _, principal, _, _ = second_moments(polygon, return_centered=True) + result.extend(principal) + return np.array(result, dtype=np.float64) @@ -486,9 +493,14 @@ def random_polygon(segments=8, radius=1.0): polygon : shapely.geometry.Polygon Geometry object with random exterior and no interiors. 
""" - angles = np.sort(np.cumsum(np.random.random(segments) * np.pi * 2) % (np.pi * 2)) + angles = np.sort( + np.cumsum( + np.random.random(segments) * np.pi * 2) % + (np.pi * 2)) radii = np.random.random(segments) * radius - points = np.column_stack((np.cos(angles), np.sin(angles))) * radii.reshape((-1, 1)) + + points = np.column_stack( + (np.cos(angles), np.sin(angles))) * radii.reshape((-1, 1)) points = np.vstack((points, points[0])) polygon = Polygon(points).buffer(0.0) if hasattr(polygon, "geoms"): @@ -654,7 +666,8 @@ def repair_invalid(polygon, scale=None, rtol=0.5): return basic if scale is None: - distance = 0.002 * np.reshape(polygon.bounds, (2, 2)).ptp(axis=0).mean() + distance = 0.002 * \ + np.reshape(polygon.bounds, (2, 2)).ptp(axis=0).mean() else: distance = 0.002 * scale @@ -668,7 +681,8 @@ def repair_invalid(polygon, scale=None, rtol=0.5): # reconstruct a single polygon from the interior ring recon = Polygon(shell=rings[0]).buffer(distance) # check perimeter of result against original perimeter - if recon.is_valid and np.isclose(recon.length, polygon.length, rtol=rtol): + if recon.is_valid and np.isclose( + recon.length, polygon.length, rtol=rtol): return recon # try de-deuplicating the outside ring @@ -682,7 +696,8 @@ def repair_invalid(polygon, scale=None, rtol=0.5): # make a new polygon with result dedupe = Polygon(shell=points[unique]) # check result - if dedupe.is_valid and np.isclose(dedupe.length, polygon.length, rtol=rtol): + if dedupe.is_valid and np.isclose( + dedupe.length, polygon.length, rtol=rtol): return dedupe # buffer and unbuffer the whole polygon @@ -693,7 +708,8 @@ def repair_invalid(polygon, scale=None, rtol=0.5): return buffered.geoms[areas.argmax()] # check perimeter of result against original perimeter - if buffered.is_valid and np.isclose(buffered.length, polygon.length, rtol=rtol): + if buffered.is_valid and np.isclose( + buffered.length, polygon.length, rtol=rtol): log.debug("Recovered invalid polygon through double buffering") return buffered @@ -798,7 +814,8 @@ def projected( adjacency = mesh.face_adjacency[adjacency_check] # a sequence of face indexes that are connected - face_groups = graph.connected_components(adjacency, nodes=np.nonzero(side)[0]) + face_groups = graph.connected_components( + adjacency, nodes=np.nonzero(side)[0]) # if something is goofy we may end up with thousands of # regions that do nothing except hang for an hour then segfault @@ -819,7 +836,10 @@ def projected( # edges that occur only once are on the boundary group = grouping.group_rows(edge, require_count=1) # turn each region into polygons - polygons.extend(edges_to_polygons(edges=edge[group], vertices=vertices_2D)) + polygons.extend( + edges_to_polygons( + edges=edge[group], + vertices=vertices_2D)) padding = 0.0 if apad is not None: @@ -914,7 +934,8 @@ def second_moments(polygon, return_centered=False): v = x1 * y2 - x2 * y1 Ixx -= np.sum(v * (y1 * y1 + y1 * y2 + y2 * y2)) / 12.0 Iyy -= np.sum(v * (x1 * x1 + x1 * x2 + x2 * x2)) / 12.0 - Ixy -= np.sum(v * (x1 * y2 + 2 * x1 * y1 + 2 * x2 * y2 + x2 * y1)) / 24.0 + Ixy -= np.sum(v * (x1 * y2 + 2 * x1 * y1 + + 2 * x2 * y2 + x2 * y1)) / 24.0 moments = [Ixx, Iyy, Ixy] From 15b9b77eaef78d901f530b71627b70f04b2d108b Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 14 Sep 2023 16:19:22 -0400 Subject: [PATCH 070/144] convert constants to dataclasses --- trimesh/constants.py | 82 ++++++++++++++++++++-------------------- trimesh/path/polygons.py | 62 ++++++++++-------------------- trimesh/typed.py | 4 +- 3 files 
changed, 63 insertions(+), 85 deletions(-) diff --git a/trimesh/constants.py b/trimesh/constants.py index 30bc2a0da..07acc2c36 100644 --- a/trimesh/constants.py +++ b/trimesh/constants.py @@ -1,8 +1,11 @@ +from dataclasses import dataclass + import numpy as np from .util import log, now +@dataclass class ToleranceMesh: """ ToleranceMesh objects hold tolerance information about meshes. @@ -24,23 +27,24 @@ class ToleranceMesh: If True, run additional in- process checks (slower) """ - def __init__(self, **kwargs): - # set our zero for floating point comparison to 100x - # the resolution of float64 which works out to 1e-13 - self.zero = np.finfo(np.float64).resolution * 100 - # vertices closer than this should be merged - self.merge = 1e-8 - # peak to valley flatness to be considered planar - self.planar = 1e-5 - # coplanar threshold: ratio of (radius / span) ** 2 - self.facet_threshold = 5000 - # run additional checks and asserts - self.strict = False + # set our zero for floating point comparison to 100x + # the resolution of float64 which works out to 1e-13 + zero: float = np.finfo(np.float64).resolution * 100 + + # vertices closer than this should be merged + merge: float = 1e-8 + + # peak to valley flatness to be considered planar + planar: float = 1e-5 + + # coplanar threshold: ratio of (radius / span) ** 2 + facet_threshold: int = 5000 - # add any passed kwargs - self.__dict__.update(kwargs) + # should additional slow checks be run inside functions + strict: bool = False +@dataclass class TolerancePath: """ TolerancePath objects contain tolerance information used in @@ -82,26 +86,23 @@ class TolerancePath: acceptable. """ - def __init__(self, **kwargs): - # default values - self.zero = 1e-12 - self.merge = 1e-5 - self.planar = 1e-5 - self.buffer = .05 - self.seg_frac = .125 - self.seg_angle = np.radians(50) - self.seg_angle_min = np.radians(1) - self.seg_angle_frac = .5 - self.aspect_frac = .1 - self.radius_frac = .02 - self.radius_min = 1e-4 - self.radius_max = 50 - self.tangent = np.radians(20) - # run additional checks and asserts - self.strict = False - self.__dict__.update(kwargs) + zero: float = 1e-12 + merge: float = 1e-5 + planar: float = 1e-5 + seg_frac: float = 0.125 + seg_angle: float = np.radians(50) + seg_angle_min: float = np.radians(1) + seg_angle_frac: float = 0.5 + aspect_frac: float = 0.1 + radius_frac: float = 0.02 + radius_min: float = 1e-4 + radius_max: float = 50.0 + tangent: float = np.radians(20) + strict: bool = False + +@dataclass class ResolutionPath: """ res.seg_frac : float @@ -119,12 +120,11 @@ class ResolutionPath: Format string to use when exporting floating point vertices """ - def __init__(self, **kwargs): - self.seg_frac = .05 - self.seg_angle = .08 - self.max_sections = 500 - self.min_sections = 20 - self.export = '0.10f' + seg_frac: float = 0.05 + seg_angle: float = 0.08 + max_sections: float = 500 + min_sections: float = 20 + export: str = "0.10f" # instantiate mesh tolerances with defaults @@ -141,14 +141,14 @@ def log_time(method): and then emit a log.debug message with the method name and how long it took to execute. 
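
    Examples
    ----------
    Illustrative only, not taken from the patch:

    >>> @log_time
    ... def example():
    ...     return sum(range(1000))
    >>> _ = example()  # logs "example executed in ... seconds." at debug level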
""" + def timed(*args, **kwargs): tic = now() result = method(*args, **kwargs) - log.debug('%s executed in %.4f seconds.', - method.__name__, - now() - tic) + log.debug("%s executed in %.4f seconds.", method.__name__, now() - tic) return result + timed.__name__ = method.__name__ timed.__doc__ = method.__doc__ return timed diff --git a/trimesh/path/polygons.py b/trimesh/path/polygons.py index 506d4f7cc..0ef055455 100644 --- a/trimesh/path/polygons.py +++ b/trimesh/path/polygons.py @@ -6,7 +6,7 @@ from ..constants import log from ..constants import tol_path as tol from ..transformations import transform_points -from ..typed import NDArray, float64 +from ..typed import NDArray, Optional, float64 from .simplify import fit_circle_check from .traversal import resample_path @@ -27,7 +27,7 @@ Rtree = ExceptionWrapper(E) -def enclosure_tree(polygons): +def enclosure_tree(polygons: list[Polygon]): """ Given a list of shapely polygons with only exteriors, find which curves represent the exterior shell or root curve @@ -157,7 +157,7 @@ def edges_to_polygons(edges, vertices): return complete -def polygons_obb(polygons): +def polygons_obb(polygons: list[Polygon]): """ Find the OBBs for a list of shapely.geometry.Polygons """ @@ -168,7 +168,7 @@ def polygons_obb(polygons): return np.array(transforms), np.array(rectangles) -def polygon_obb(polygon): +def polygon_obb(polygon: Polygon): """ Find the oriented bounding box of a Shapely polygon. @@ -256,9 +256,7 @@ def polygon_bounds(polygon, matrix=None): """ if matrix is not None: assert matrix.shape == (3, 3) - points = transform_points( - points=np.array(polygon.exterior.coords), matrix=matrix - ) + points = transform_points(points=np.array(polygon.exterior.coords), matrix=matrix) else: points = np.array(polygon.exterior.coords) @@ -305,7 +303,7 @@ def plot_single(single): return axes -def resample_boundaries(polygon, resolution, clip=None): +def resample_boundaries(polygon: Polygon, resolution: float, clip=None): """ Return a version of a polygon with boundaries re-sampled to a specified resolution. @@ -364,7 +362,7 @@ def stack_boundaries(boundaries): return result -def medial_axis(polygon, resolution=None, clip=None): +def medial_axis(polygon: Polygon, resolution: Optional[float] = None, clip=None): """ Given a shapely polygon, find the approximate medial axis using a voronoi diagram of evenly spaced points on the @@ -412,10 +410,7 @@ def medial_axis(polygon, resolution=None, clip=None): resolution = np.reshape(polygon.bounds, (2, 2)).ptp(axis=0).max() / 100 # get evenly spaced points on the polygons boundaries - samples = resample_boundaries( - polygon=polygon, - resolution=resolution, - clip=clip) + samples = resample_boundaries(polygon=polygon, resolution=resolution, clip=clip) # stack the boundary into a (m,2) float array samples = stack_boundaries(samples) # create the voronoi diagram on 2D points @@ -493,14 +488,10 @@ def random_polygon(segments=8, radius=1.0): polygon : shapely.geometry.Polygon Geometry object with random exterior and no interiors. 
""" - angles = np.sort( - np.cumsum( - np.random.random(segments) * np.pi * 2) % - (np.pi * 2)) + angles = np.sort(np.cumsum(np.random.random(segments) * np.pi * 2) % (np.pi * 2)) radii = np.random.random(segments) * radius - points = np.column_stack( - (np.cos(angles), np.sin(angles))) * radii.reshape((-1, 1)) + points = np.column_stack((np.cos(angles), np.sin(angles))) * radii.reshape((-1, 1)) points = np.vstack((points, points[0])) polygon = Polygon(points).buffer(0.0) if hasattr(polygon, "geoms"): @@ -666,8 +657,7 @@ def repair_invalid(polygon, scale=None, rtol=0.5): return basic if scale is None: - distance = 0.002 * \ - np.reshape(polygon.bounds, (2, 2)).ptp(axis=0).mean() + distance = 0.002 * np.reshape(polygon.bounds, (2, 2)).ptp(axis=0).mean() else: distance = 0.002 * scale @@ -681,8 +671,7 @@ def repair_invalid(polygon, scale=None, rtol=0.5): # reconstruct a single polygon from the interior ring recon = Polygon(shell=rings[0]).buffer(distance) # check perimeter of result against original perimeter - if recon.is_valid and np.isclose( - recon.length, polygon.length, rtol=rtol): + if recon.is_valid and np.isclose(recon.length, polygon.length, rtol=rtol): return recon # try de-deuplicating the outside ring @@ -690,14 +679,11 @@ def repair_invalid(polygon, scale=None, rtol=0.5): # remove any segments shorter than tol.merge # this is a little risky as if it was discretized more # finely than 1-e8 it may remove detail - unique = np.append( - True, (np.diff(points, axis=0) ** 2).sum(axis=1) ** 0.5 > 1e-8 - ) + unique = np.append(True, (np.diff(points, axis=0) ** 2).sum(axis=1) ** 0.5 > 1e-8) # make a new polygon with result dedupe = Polygon(shell=points[unique]) # check result - if dedupe.is_valid and np.isclose( - dedupe.length, polygon.length, rtol=rtol): + if dedupe.is_valid and np.isclose(dedupe.length, polygon.length, rtol=rtol): return dedupe # buffer and unbuffer the whole polygon @@ -708,8 +694,7 @@ def repair_invalid(polygon, scale=None, rtol=0.5): return buffered.geoms[areas.argmax()] # check perimeter of result against original perimeter - if buffered.is_valid and np.isclose( - buffered.length, polygon.length, rtol=rtol): + if buffered.is_valid and np.isclose(buffered.length, polygon.length, rtol=rtol): log.debug("Recovered invalid polygon through double buffering") return buffered @@ -814,8 +799,7 @@ def projected( adjacency = mesh.face_adjacency[adjacency_check] # a sequence of face indexes that are connected - face_groups = graph.connected_components( - adjacency, nodes=np.nonzero(side)[0]) + face_groups = graph.connected_components(adjacency, nodes=np.nonzero(side)[0]) # if something is goofy we may end up with thousands of # regions that do nothing except hang for an hour then segfault @@ -836,10 +820,7 @@ def projected( # edges that occur only once are on the boundary group = grouping.group_rows(edge, require_count=1) # turn each region into polygons - polygons.extend( - edges_to_polygons( - edges=edge[group], - vertices=vertices_2D)) + polygons.extend(edges_to_polygons(edges=edge[group], vertices=vertices_2D)) padding = 0.0 if apad is not None: @@ -873,13 +854,11 @@ def projected( # join_style=2, # mitre_limit=1.5) # for p in polygons]).buffer(-padding) - polygon = ops.unary_union([p.buffer(padding) for p in polygons]).buffer( - -padding - ) + polygon = ops.unary_union([p.buffer(padding) for p in polygons]).buffer(-padding) return polygon -def second_moments(polygon, return_centered=False): +def second_moments(polygon: Polygon, return_centered=False): """ Calculate the 
second moments of area of a polygon from the boundary. @@ -934,8 +913,7 @@ def second_moments(polygon, return_centered=False): v = x1 * y2 - x2 * y1 Ixx -= np.sum(v * (y1 * y1 + y1 * y2 + y2 * y2)) / 12.0 Iyy -= np.sum(v * (x1 * x1 + x1 * x2 + x2 * x2)) / 12.0 - Ixy -= np.sum(v * (x1 * y2 + 2 * x1 * y1 + - 2 * x2 * y2 + x2 * y1)) / 24.0 + Ixy -= np.sum(v * (x1 * y2 + 2 * x1 * y1 + 2 * x2 * y2 + x2 * y1)) / 24.0 moments = [Ixx, Iyy, Ixy] diff --git a/trimesh/typed.py b/trimesh/typed.py index 2f2aa0e97..da6fae33b 100644 --- a/trimesh/typed.py +++ b/trimesh/typed.py @@ -1,4 +1,4 @@ -from typing import Sequence, Union +from typing import Optional, Sequence, Union import numpy as np @@ -32,4 +32,4 @@ ArrayLike = Sequence -__all__ = ["NDArray", "ArrayLike"] +__all__ = ["NDArray", "ArrayLike", "Optional", "FloatLike", "IntLike", "BoolLike"] From a1697ca971b84c6535cbecd3c83dee0036736c06 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 14 Sep 2023 16:37:16 -0400 Subject: [PATCH 071/144] change arc_center to dataclass --- trimesh/path/arc.py | 107 +++++++++++++++++--------------- trimesh/path/entities.py | 8 +-- trimesh/path/exchange/dxf.py | 10 +-- trimesh/path/exchange/svg_io.py | 2 +- trimesh/typed.py | 4 +- 5 files changed, 70 insertions(+), 61 deletions(-) diff --git a/trimesh/path/arc.py b/trimesh/path/arc.py index 936e1c167..204271a51 100644 --- a/trimesh/path/arc.py +++ b/trimesh/path/arc.py @@ -1,15 +1,39 @@ +from dataclasses import dataclass + import numpy as np from .. import util from ..constants import log from ..constants import res_path as res from ..constants import tol_path as tol +from ..typed import ArrayLike, FloatLike, NDArray, Optional, float64 # floating point zero _TOL_ZERO = 1e-12 -def arc_center(points, return_normal=True, return_angle=True): +@dataclass +class ArcInfo: + # What is the radius of the circular arc? + radius: float + + # what is the center of the circular arc + # it is either 2D or 3D depending on input. + center: NDArray[float64] + + # what is the 3D normal vector of the plane the arc lies on + normal: Optional[NDArray[float64]] = None + + # what is the starting and ending angle of the arc. + angles: Optional[NDArray[float64]] = None + + # what is the angular span of this circular arc. + span: Optional[float] = None + + +def arc_center( + points: ArrayLike[FloatLike], return_normal: bool = True, return_angle: bool = True +) -> ArcInfo: """ Given three points on a 2D or 3D arc find the center, radius, normal, and angular span. @@ -25,20 +49,15 @@ def arc_center(points, return_normal=True, return_angle=True): Returns --------- - result : dict - Contains arc center and other keys: - 'center' : (d,) float, cartesian center of the arc - 'radius' : float, radius of the arc - 'normal' : (3,) float, the plane normal. - 'angles' : (2,) float, angle of start and end in radians - 'span' : float, angle swept by the arc in radians + info + Arc center, radius, and other information. 
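
    Examples
    ----------
    An illustrative check, not from the patch, using three points on
    the unit circle to exercise the new dataclass return value:

    >>> info = arc_center([[-1, 0], [0, 1], [1, 0]])
    >>> print(round(info.radius, 6))
    1.0
    >>> print(np.allclose(info.center, [0.0, 0.0]))
    True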
""" points = np.asanyarray(points, dtype=np.float64) # get the non-unit vectors of the three points vectors = points[[2, 0, 1]] - points[[1, 2, 0]] # we need both the squared row sum and the non-squared - abc2 = np.dot(vectors ** 2, [1] * points.shape[1]) + abc2 = np.dot(vectors**2, [1] * points.shape[1]) # same as np.linalg.norm(vectors, axis=1) abc = np.sqrt(abc2) @@ -52,37 +71,32 @@ def arc_center(points, return_normal=True, return_angle=True): # check the denominator for the radius calculation denom = half * np.prod(half - edges) if denom < tol.merge: - raise ValueError('arc is colinear!') + raise ValueError("arc is colinear!") # find the radius and scale back after the operation radius = scale * ((np.prod(edges) / 4.0) / np.sqrt(denom)) # use a barycentric approach to get the center - ba2 = (abc2[[1, 2, 0, 0, 2, 1, 0, 1, 2]] * - [1, 1, -1, 1, 1, -1, 1, 1, -1]).reshape( - (3, 3)).sum(axis=1) * abc2 + ba2 = (abc2[[1, 2, 0, 0, 2, 1, 0, 1, 2]] * [1, 1, -1, 1, 1, -1, 1, 1, -1]).reshape( + (3, 3) + ).sum(axis=1) * abc2 center = points.T.dot(ba2) / ba2.sum() if tol.strict: # all points should be at the calculated radius from center - assert util.allclose( - np.linalg.norm(points - center, axis=1), - radius) + assert util.allclose(np.linalg.norm(points - center, axis=1), radius) # start with initial results - result = {'center': center, - 'radius': radius} - + result = {"center": center, "radius": radius} if return_normal: if points.shape == (3, 2): # for 2D arcs still use the cross product so that # the sign of the normal vector is consistent - result['normal'] = util.unitize( - np.cross(np.append(-vectors[1], 0), - np.append(vectors[2], 0))) + result["normal"] = util.unitize( + np.cross(np.append(-vectors[1], 0), np.append(vectors[2], 0)) + ) else: # otherwise just take the cross product - result['normal'] = util.unitize( - np.cross(-vectors[1], vectors[2])) + result["normal"] = util.unitize(np.cross(-vectors[1], vectors[2])) if return_angle: # vectors from points on arc to center point @@ -104,16 +118,14 @@ def arc_center(points, return_normal=True, return_angle=True): angles = np.arctan2(*vector[:, :2].T[::-1]) + np.pi * 2 angles_sorted = np.sort(angles[[0, 2]]) reverse = angles_sorted[0] < angles[1] < angles_sorted[1] - angles_sorted = angles_sorted[::(1 - int(not reverse) * 2)] - result['angles'] = angles_sorted - result['span'] = angle + angles_sorted = angles_sorted[:: (1 - int(not reverse) * 2)] + result["angles"] = angles_sorted + result["span"] = angle - return result + return ArcInfo(**result) -def discretize_arc(points, - close=False, - scale=1.0): +def discretize_arc(points, close=False, scale=1.0): """ Returns a version of a three point arc consisting of line segments. 
@@ -146,10 +158,12 @@ def discretize_arc(points, return points[:, :2] return points - center, R, N, angle = (center_info['center'], - center_info['radius'], - center_info['normal'], - center_info['span']) + center, R, N, angle = ( + center_info.center, + center_info.radius, + center_info.normal, + center_info.span, + ) # if requested, close arc into a circle if close: @@ -181,11 +195,11 @@ def discretize_arc(points, arc_ok = (arc_dist < tol.merge).all() if not arc_ok: log.warning( - 'failed to discretize arc (endpoint_distance=%s R=%s)', - str(arc_dist), R) - log.warning('Failed arc points: %s', str(points)) - raise ValueError('Arc endpoints diverging!') - discrete = discrete[:, :(3 - is_2D)] + "failed to discretize arc (endpoint_distance=%s R=%s)", str(arc_dist), R + ) + log.warning("Failed arc points: %s", str(points)) + raise ValueError("Arc endpoints diverging!") + discrete = discrete[:, : (3 - is_2D)] return discrete @@ -216,24 +230,19 @@ def to_threepoint(center, radius, angles=None): # force angles to float64 angles = np.asanyarray(angles, dtype=np.float64) if angles.shape != (2,): - raise ValueError('angles must be (2,)!') + raise ValueError("angles must be (2,)!") # provide the wrap around if angles[1] < angles[0]: angles[1] += np.pi * 2 center = np.asanyarray(center, dtype=np.float64) if center.shape != (2,): - raise ValueError('only valid on 2D arcs!') + raise ValueError("only valid on 2D arcs!") # turn the angles of [start, end] # into [start, middle, end] - angles = np.array([angles[0], - angles.mean(), - angles[1]], - dtype=np.float64) + angles = np.array([angles[0], angles.mean(), angles[1]], dtype=np.float64) # turn angles into (3, 2) points - three = (np.column_stack( - (np.cos(angles), - np.sin(angles))) * radius) + center + three = (np.column_stack((np.cos(angles), np.sin(angles))) * radius) + center return three diff --git a/trimesh/path/entities.py b/trimesh/path/entities.py index e8a92dd76..6106c29c5 100644 --- a/trimesh/path/entities.py +++ b/trimesh/path/entities.py @@ -633,11 +633,11 @@ def length(self, vertices): # it's indicated as a closed circle fit = self.center( vertices, return_normal=False, return_angle=False) - return np.pi * fit['radius'] * 4 + return np.pi * fit.radius * 4 # get the angular span of the circular arc fit = self.center( vertices, return_normal=False, return_angle=True) - return fit['span'] * fit['radius'] * 2 + return fit.span * fit.radius * 2 def discrete(self, vertices, scale=1.0): """ @@ -699,8 +699,8 @@ def bounds(self, vertices): vertices, return_normal=False, return_angle=False) - bounds = np.array([info['center'] - info['radius'], - info['center'] + info['radius']], + bounds = np.array([info.center - info.radius, + info.center + info.radius], dtype=np.float64) else: # since the AABB of a partial arc is hard, approximate diff --git a/trimesh/path/exchange/dxf.py b/trimesh/path/exchange/dxf.py index 8768f55d2..0a49fee20 100644 --- a/trimesh/path/exchange/dxf.py +++ b/trimesh/path/exchange/dxf.py @@ -696,11 +696,11 @@ def convert_arc(arc, vertices): info = arc.center( vertices, return_angle=True, return_normal=False) subs = entity_info(arc) - center = info['center'] + center = info.center if len(center) == 2: center = np.append(center, 0.0) data = '10\n{:.12g}\n20\n{:.12g}\n30\n{:.12g}'.format(*center) - data += '\n40\n{:.12g}'.format(info['radius']) + data += f'\n40\n{info.radius:.12g}' if arc.closed: subs['TYPE'] = 'CIRCLE' @@ -710,7 +710,7 @@ def convert_arc(arc, vertices): # and end angle field data += '\n100\nAcDbArc' data += 
'\n50\n{:.12g}\n51\n{:.12g}'.format( - *np.degrees(info['angles'])) + *np.degrees(info.angles)) subs['DATA'] = data result = template['arc'].format(**subs) @@ -945,12 +945,12 @@ def bulge_to_arcs(lines, # have the same magnitude as the input data if tol.strict: from ..arc import arc_center - check_angle = [arc_center(i)['span'] + check_angle = [arc_center(i).span for i in three] assert np.allclose(np.abs(angle), np.abs(check_angle)) - check_radii = [arc_center(i)['radius'] + check_radii = [arc_center(i).radius for i in three] assert np.allclose(check_radii, np.abs(radius)) diff --git a/trimesh/path/exchange/svg_io.py b/trimesh/path/exchange/svg_io.py index f2a05027e..4f04c2877 100644 --- a/trimesh/path/exchange/svg_io.py +++ b/trimesh/path/exchange/svg_io.py @@ -448,7 +448,7 @@ def svg_arc(arc): vertices = points[arc.points] info = arc_center( vertices, return_normal=False, return_angle=True) - C, R, angle = info['center'], info['radius'], info['span'] + C, R, angle = info.center, info.radius, info.span if arc.closed: return temp_circle.format(x=C[0] - R, y=C[1], diff --git a/trimesh/typed.py b/trimesh/typed.py index da6fae33b..435b2049f 100644 --- a/trimesh/typed.py +++ b/trimesh/typed.py @@ -1,4 +1,4 @@ -from typing import Optional, Sequence, Union +from typing import List, Optional, Sequence, Tuple, Union import numpy as np @@ -32,4 +32,4 @@ ArrayLike = Sequence -__all__ = ["NDArray", "ArrayLike", "Optional", "FloatLike", "IntLike", "BoolLike"] +__all__ = ["NDArray", "ArrayLike", "Optional", "FloatLike", "IntLike", "BoolLike", "List", "Tuple"] From 9d766974b072030482ad1f7b5bce5bda521fa255 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 14 Sep 2023 16:39:24 -0400 Subject: [PATCH 072/144] wrap getitem --- trimesh/path/arc.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/trimesh/path/arc.py b/trimesh/path/arc.py index 204271a51..271d0e807 100644 --- a/trimesh/path/arc.py +++ b/trimesh/path/arc.py @@ -30,6 +30,10 @@ class ArcInfo: # what is the angular span of this circular arc. 
    span: Optional[float] = None
 
+    def __getitem__(self, item):
+        # add for backwards compatibility
+        return getattr(self, item)
+
 
 def arc_center(
     points: ArrayLike[FloatLike], return_normal: bool = True, return_angle: bool = True

From d2d236b11c6164fcd6e272ee9126a5089048fed4 Mon Sep 17 00:00:00 2001
From: Michael Dawson-Haggerty
Date: Thu, 14 Sep 2023 20:54:13 -0400
Subject: [PATCH 073/144] use typed List

---
 tests/test_arc.py        | 2 +-
 trimesh/path/polygons.py | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/tests/test_arc.py b/tests/test_arc.py
index 5aba967b7..7e5420364 100644
--- a/tests/test_arc.py
+++ b/tests/test_arc.py
@@ -26,7 +26,7 @@ def test_center(self):
             [[30156.18, 1673.64, -2914.56],
              [30152.91, 1780.09, -2885.51],
              [30148.3, 1875.81, -2857.79]])
-        assert 'center' in c
+        assert len(c.center) == 3
 
     def test_center_random(self):
         from trimesh.path.arc import arc_center
diff --git a/trimesh/path/polygons.py b/trimesh/path/polygons.py
index 0ef055455..45c192d39 100644
--- a/trimesh/path/polygons.py
+++ b/trimesh/path/polygons.py
@@ -6,7 +6,7 @@
 from ..constants import log
 from ..constants import tol_path as tol
 from ..transformations import transform_points
-from ..typed import NDArray, Optional, float64
+from ..typed import List, NDArray, Optional, float64
 from .simplify import fit_circle_check
 from .traversal import resample_path
 
@@ -27,7 +27,7 @@
     Rtree = ExceptionWrapper(E)
 
 
-def enclosure_tree(polygons: list[Polygon]):
+def enclosure_tree(polygons: List[Polygon]):
     """
     Given a list of shapely polygons with only exteriors,
     find which curves represent the exterior shell or root curve
@@ -157,7 +157,7 @@ def edges_to_polygons(edges, vertices):
     return complete
 
 
-def polygons_obb(polygons: list[Polygon]):
+def polygons_obb(polygons: List[Polygon]):
     """
     Find the OBBs for a list of shapely.geometry.Polygons
     """

From d2aec5b3d5281fcae0f57f2b4287f5f37a06825b Mon Sep 17 00:00:00 2001
From: Mathias Parger
Date: Fri, 15 Sep 2023 11:11:21 +0200
Subject: [PATCH 074/144] added test case for pbr material fusion with
 handcrafted example model

fixed texture interpolation

---
 models/pbr_cubes_emissive_spec_gloss.zip | Bin 0 -> 201626 bytes
 tests/test_texture.py                    | 18 ++++++++++
 trimesh/visual/color.py                  | 10 +++---
 trimesh/visual/material.py               | 42 ++++++++++++++++-------
 4 files changed, 53 insertions(+), 17 deletions(-)
 create mode 100644 models/pbr_cubes_emissive_spec_gloss.zip

diff --git a/models/pbr_cubes_emissive_spec_gloss.zip b/models/pbr_cubes_emissive_spec_gloss.zip
new file mode 100644
index 0000000000000000000000000000000000000000..d5d4e8333ee9d5d4dccf1875bcdb09fd0a102c9d
GIT binary patch
literal 201626
[201626 bytes of base85-encoded zip payload omitted]
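
An illustrative sketch, not this patch's actual hunks (the real test lives
in tests/test_texture.py): it assumes trimesh.load accepts the fixture
archive added above and returns a scene:

    import trimesh

    # load the hand-crafted spec/gloss cube scene added by this patch
    scene = trimesh.load("models/pbr_cubes_emissive_spec_gloss.zip")

    # the archive should load as a scene containing geometry
    assert len(scene.geometry) > 0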
z9{jd5BWTDm*U2|-a(G@6#1s2dy-s)QddpO{EP_M+t3wze5oIp`A*nk+o{tdw*p#wd|Mr@ZI%z@0DL-SStO!yE zAkJTOJ1T)ET3iKSfZ&&{$7!Vg5x*DLLYqAfsWgNUa%g$_C5tw1YP@kSj|JGi0Z%Hq z>m*h!&Q$H{OK9eQX{r zsO(kY6#k6=+h;Lp!9H>#R#{f|hUaA7FI!8Ev8Q4$R}g6ys{YGQk_J+Y8J{L?`X#UY zkbUm#S%1#rqlDr2pnPAn*2GQhb|0r5D*ix7`P{Gn{Y8(({X zNRa^$&varDV#TQmLtPlSb;pFEt7M;-7U9O10rKEMB~Ds2Nv_Psm-@)h3qz{_Ksd$L zK6|47&WS4y9ya2oaR)CS&iKmrIseRa{J2&?_<*y`ojkF}`_g`@$V^)3BDJ7zCbN*K z1#=|Qj9_YEGSotm>#Td_B?%`u4jGXT9Z#Ewn#ypmur^xIM8tU7f-kanva@$DXU$?Z zGxd(<1*Ix$+fiejdpq(#J+-dZQRiUy^w zUcI(M**f7FPUleZJCPji+b?^QWc{+jiG))gU!vhCv(hYI*0o{N^)pW7nv0%&!rD`EaX<7tTpA8LHxk#`{k;=2zi3{l zU3rarfgY9NghMx%ZEn4`Ee$33)LakHvXIjjz9lMd_kIs4_G}L57pGbF995acEX3vs zi&G@Ho~PHlETlP-$2i`g+%Ey#s#@0wT8QYKa50KSvJG*)X4Xj8t(OtcYs&x-w%;8f z)_&*Z5~uaJMsBoVm29EdrxC@e1H_5%|H(qPIq!8=Yx~l>exYW7*6j_C7wZZT8!rZI zk4t_QO>qBfpJ=IRmM%WACgW*=litt%q=Q8snQB z{%7VSK>llTc*h?Cf_wcYz`NdlsAqD}IwBvyPyy#FLpJNfCyC^swEby_&U+z5AKL)^ z1|&*6@Wnwd4qSD1a-0*lI-Yu~!1;aao6kZqg#XW}a#HeEdE*IyMV9GZE##)z_|mI5 z^%{mAEgWS{_Fq~cAewR1_}o)1;^_&Y1J+&SRQBR(?NP5%rS*u#u2rj64LI8;ZizOx{=Kp0OaGnQ`@#IKE8iywa%qRzmsw+<4o;!^gd13XL1ZVs8<2B@`1ELNTxv{40 z8z*Mw`5;$L_Oi+9b(X8;^DXW2S^Ll z$ewf2mPu@*&9jpF8#tjBb{55GK{C<>pO5u=1fORmWrFkL$;eC<9EZ4C&VoecDi?1I z@EO;+rys@b#Z`npefpHyIk|VReyDSiLIUJls4Nc_mJ)eHS4+ z&1@~T-~-}W5kLzOmrf$RZ;lqMS$52{Zh8N z_=!2|mG`*HMOt^;2&Z^A=wT@D>R?ZMy?b&zd>A7h+$;L^sgF;h@TKexM+n58`)WEs zE_*!-e8$hwf~TjMg}esGJJ0fTHGD~({x4wwqelcQU2Vkq-V?brzf?wyt)dzHw>Vc6$g{O3m ziP|6f@ba0{LHs;RI=7fypZNdXx_yufY}_OK)W_PXdHF#Ff3SMqUz8_F9A(dSOzxlqEoFSbNGdlXAEHe{~1|L6y}jm{?`6 zaY&>Nx#?)ZRz6HEcsn>AE!aTBbr;>xiL?O5@oF-e-?+xuV8sZh$dwgA+x|so_zCY!CM;eoN{VrUAr_Tp3J>Ys;dNu*tgQ!8PvDO z(;J>!i$**VK9AK!BCL{~G`v)lZ2+}eXn;YTW%n|50vo^2`-~s(*|7X?6nOKkZ>HYp z`0v%CJOOkEKb?zm6J3^1r}f;>Ni9Iv4KY`3oM2ABJ>^fRY>edpvM=jP1{Z|U)89F= z0!Wj3oZGE+v3J>HCzWK+0pb=Cyw(YH$e;%ifXEVI!my!inF$hFxoPHJubE|jkMPjx z{=HvpLBwNTR|0s9-$QD0%b>X{)?!un>g*Ml-F;b7m z`D_uuEx=kw-5<1CbJ0z{fnN6Kds$vMJzpbxgp)3!BxA~wG0U2Z9+i23^a>_*y)4h8 zf(k%@%+hHM*X|W9cugb;GOu@{NyOd#BO~OSZDy{hz0G-|{qacw#1r7V6_vn0t>J## zcHmW%xkj1wzLc~6v|~X23*w9@u!whiMo%5`)=9n(TmNx#?OW~IPc77?a7X+U?$D)B z`)};^sF}z4sc9N&)eVX)TY$BhQ?I9`I{bI$6z=wVfH*gzc_KO&d2R^k=Qr)Cg+&V@ zq@F5(h;l)IAVL6$Iyfx$D!3Xs;7mi-?UiUD?*;Pr;48S+Qm+#BBKYzxVr*fP2==F( z5J7^o+SC#pZ4TS|dHtCg1$60TUuR&lKs*ayADwW#{=TI~4X{^(9s?*l&-}S6-DY zFLAgGr+K8xlN@}tMRrrx{{!u*BL8MA04dF%g~82b4w%uYr)Kd6ta zwxt?h*0vPk=!@Y9=eZgg`ON_FM!0ss%+UhAmQcv9VMtm~oAv+glB*ZVk62}vC+vOr zBwKW8$U2t*Tisz%j_VjQpoAX>f%x@l&z@*6iuai;L?-;`4rbX!IYXrRx9eZucOpN} z^`30ow(Xm5wmmuRk2bCI&#>pv=kSb)-D=0@u)UVId1gr{HrT+YX^(mFL1^;%7Jz6m<*x0Q~iD zY;%zJfNU{p5bw{7s-6n0bMx?#t@gyX5aG>U?}YOCH`=tBKHbh(&)9ci{kP3?^XcvH z8hQ_Y$e86Fwdg;o%;dN|o}71H9i!>ULN+|sV{NLI>@TiI4nUCK9O0Pj#M$4S4G362 z>)zbeZ+2FW^&0@;r2${zL{tE3TY_*Rrda|cG~UTKQK?;@*$tj{U2>K}VfUMsWnDYO zm06}E{VqA+v)=}*Ke(0DClR9@DiN<^7;(1OyO`A+w5~U$fB6E17H`TaX?c&}6A`;= z%_HOPM=zAuZLoR+bWDew{RqTe&ZKkZG)ML7D@lvkXpA8E_y zVBSdeX7^6y%i;kV-OJz}F0k(G2*(@1YZmFMZ+9HUO~TH-4VWD_(bseSP21t`(}11-U}y zVeK{ye2WVI)=m!Lzeuc>45x^20Ei+LfGh+cuX=}+z5bNX)2e%|9b(H%k!6BoQyed1EKyCnuNcEZcSltrzA4V$Fgj#3DQiWQTiG*=6gx^$;WAkyn4? 
zsqy!7y*4ZNPtEgud%Q(n{IsAUOYc%_*f^hfT+tmYeB*WGCq;lY7WMJuUiPY*8YiX| zq{r+6hBKXbPAd_PRK4Fh2`h{L^l$q!znv>{%^K=%<%1IrBOFu!ZfTc)Yc2!X7PU>1 z!-Go$q&Y3^;jx8pB{80sJH&V3#PGDPxm3L0HuY`4%^JXFRMu*_-m&y)3P~vu=0m+f zNk<|lvt+1dexhDygyY*BBN_%y;RHhel<@XA zTsuTO{oE;Dugn6gUwCTfMQ1$0Z`%|%TmzUiY zH7oz<8)n3)(D7hTciZ~Qib;l688LFq*qR3q@fv^KjEnu(b@=wwhWB&Lp8Z<0S@>zz z?6s!3=FDv}Z~k|UHvP2#@$vJLrF!65w-j00`1>JCw5$Hybp}@niPQZCptB5p<89UW zpW1JKZFYtTO>n(4e*{~E<7w~kl>BEF)Bq%+pr#Rch6-xu_>$xl;RJ3+#WyR7Tkm^r znoW0z$}Df{bg5G}eCNYsb#;e5^=(Icr0i`D7{!w$k`~HY`gNDHkm6V;dxJn78!sAl zFH{2|B0tG511Uy6{Y;3gSgx$~bLLmFAV@X=lKD9{HkrHO{W)`T<(gwJvgH6A4PUjl z50-2_a@bBJ^R9bk=)3{;TJy8D`d%_)$+s9`evd3o-FN=cu6x?}(B4@jE4aRwBb>x8 z`rfwbBD?RN_I4w6jziqVEF+xf>|xI-oWR{%d{QFZ4q=xy;vBbZfvq~quwmcelc4^duA%~b&xT<$D5=g9|ytL zGA=JEqQ=r!-o&(6nv)Y!h_^D}Ptv@5f;st#7Co77`Pu&buj&jU-BD<9f zs7^6=FHz-$#k%IWvLu=mp_FtX!e=Xa=I2=d4({W5T%4nXm@?D~3J0?_=@Leg&7D zxG{i?#bQ%qZ&C=-iuo4x5g(IQctl?iq0>$|$idW+_hbG=0Z z5=^A|lqWgESO@%eh{k}7l z-8$qht(5N7^HHA2kJz+=8xepZ}sDrevqe~9TSk?+Lmz-oFbo%7D zrzg8pl^&>#y}U>+NoZ zh`rO+LB~dq@u}#YeI}I!v3s{=#*VM`<959iEl7Uu3QQK(&ywYee)M$uqRfbnC0p%H zXnQx(y}-5#jE+yNWN>@vKop85eB<5eS2j2B3F9kookd$E@%xZf_uH)#Z)I1j%TCpE zJT*qD-fJUUo5g-R%nISvh3AEO*qH5kxNMjexOV$MIh#6}8R0);3nUf2Vc;d9hekOh z5px4OCE~TSG0+&jNs?b$f=2A<_^j5rr1DNSn4i|2m~})cs4J+FQ7j z`tL%iMMO|*=`mcsrG7a5vJCZ>0F$EtWzin}ELwCd^HIK=S* z(XabXv+bQD%Q7t8S}$Y9CH%<Mm4mwXi?RJsYYfZ!&ax@2>eb0|v#dD5 zCVmC*{JgN}P zFTe0LvEIV!EKaH9CK0Xc_nG4uBD>R*MO0|z0EEq3AZbl+OEDr>;$8^$Ql-lXkqQ-) zR4!g6d5X^vB;!*bOtbyvw~5K&^y%MOwybX5x*6)$T?R0=9Zt7m1Lf( zf6}vnE3)(vyJ?750jRHUzm4dl4B^RB8zg(W&?IL7g;P7SdbppbPc~%YAie_)LhshAWRKu$|9f)LUZ5~a=i%Pb8hWEBvl|0;mDGC-k47C( zNEXxSTwI7$^}V_$h;sYZ?%G+nuOGIka*%9&uZjlAf6;*P%Ug z@*$&^=D2F#1vC=XNyS*xhk(MV8w;u>+g@ zZ9cx>s$ORTA|N1~01tx6)J_kz>%h}_Vq4<2h=WQNIi9X|wzH;`;Y&DY%0`$zQTW=Z z+}DO`;c7Cwzl&arj89LZ4#15W%OMA}$h$x9p|!m+dyGn2ZBBkf8tHT`>Q<;0Vubq! 
zlll!vgz}fCU;|ZC@Yv~n46gaEs+YgbCZ~Do{LKY6A~V$sFwJlFd+H<+KFJO7<{G}) z=DPzM-fhwYp_J;Lm^%bwU3HdkR96dE;u`=`xjYP|vz~f?`FmW5tT4wF)ghA>{XG|E ztebxFqjp3jCi;>gf?BKZ*@U~wSKzVap~Hq(MdBL$Iq#3woRGR!t%Eyu{J3Msj)MnN zr>;3Z)wr?M$Gr4ewGqjAk5or{`}1gTsgkk5VRx@bU+eU>5*Dw{AW5jiQh>10yYiqq zQ#$z|o%bw=I_R2izpISZSN^vjuBb6=H8M#7EDb$B!SMD%H3eV^dZiGkVm>6o4bykinHI4ou9m*5LEr zv9ZC*QN`@^`cDievaNwlH1=m{CK>};a^mzQCUjg`j2=)Me-5TJe{LX5+IGqLF8GKpEIx~VB zQlaprw1_G8P3&XGIm%h1k+YmpT`pXSZ@tMa!xbvun%>yy%NIH1(1VfYxaw+-t9dl; z%1TX}C`v69zqdk%gHM47sC`mD1j(0QRID^H=}^02J@T`cMvwmd^Xk>Br%ai$dUc*p z9`#u@pb;2We8}J=gAg9O687u6`i0V^fXCoFe6^kri14|J8L#6D3>M|h-f)0$wCF#v z!^ufq6an5Xetl-D$=p}o?N7^-Wxc7Pm;UDws%$@a9&VYkd}HtAUZ*FMpKRJ2Q32q$ zGX%i9_d=tsCrc7BR`}6x;eJm8726?d;j-|(TP(Q~>k&qEMU)Sq(Yg$C43Ye&djLTf zBE^%N<4Wq#OInnN&5x7Vp%3+7OAMI3@wrE_p!OH~oH<(-Od?3^%6sw=pQZpRqwMFi zkt20re0Y^)Lz51!H0Z?&zpP@q>hr?nUL{Kgi^q6)fIuigkbm4`syh7c2MBTsKVZ&d zDHNz$+{1a+X0guzfhi!u49X)`G{cTRQ|fOLa1}p@t&tPm!eg`|O6$yuiv=;?P2bg%;#} zgyV|-o`&B+9@0uZlg3>=F$HFiJ`no0Td-touXx?~gie2hL9DhI4xv1T{(!YUL7!fZdPwP-7^e%19h4w_;;5r z+YMT5xX9)^o16jp)MuXSDKmmt!I6mCSu~S)YvRm7R`|YE5;==n51%jZP|Gi8{VCGA z3|CU?GA}Shp7w~1#}#SHbxxhE^gS%S9yacpm741Yg*yCum3rhxAlM@|ZmG#&5FRVaUFdog z3*JHnKVCb{xlQ?+ix{0}wfMeH>KY0VQ|X{|o^VhZWQ|3avCI$2jAedEuQ5!aXN_S; zA#NGb5}X<*o?(b^_i}fw2_H)EmTr^81}y>3k#2RS-O6JHZw}~MxT0xmhwwEO{QAAn zh{zVMID!2sZ!)VFbXoaM=VbOd7J*!sDbYEuw$-h-I5XuTXk>1#Wtf(1pNy!8T3h|BohY@u3R}OekxZUJecpeRDAJ^FB6wfR8DXp zJbIzrEx9ptpU)k2yg@ub=yIFN-S`7Eq0A{O%$x!S;rqnu)CX7HfZ zwt*}@VfG5uyak1%D0WXmj{`vU-lfyGXjbpGr)*qEhicE$Jn`$y6aFmAD(b)~kjR11 zSW_aJ1G=e&n~A7(JO>8i_oaOi6K{}vy+drF~+dPS0i2T7qixARrWxNi| znY17ex0rEv10UwLhgHe!9loB@IV$RY=)xDl$Fa;?FEC2_^(Q8Y0m>H@0n4CI_%mn_ zu=rvi(bzShY(J1knQoIy#f^O&Ogb%eAnIDDA`l?NW8I7k>-CHw6BfSiWONhhs9~$v z^TZ4gT_wK;*@6%F4XvE;J8V)~8Oz!%%vfqMRbcH$UA}BLi*ng@wTe!3&N=j-n%gq7Ode-kR z(o_2P@z*^T=@j)46;o{7(#UdF`pR*|^T&0LKpr03@*Yiz8#m0J_0zYA5iNi~k93?; zlzVIUmXu%tTuP9fL73PJU_j-|3hw92#EF05TVM(K7Fc|(Nym*X5$y5q?ND?ym#c`! 
zNy9@AeP+)lH{D_~IzC>WlMO@!$fHR`FR2l!4;ZXFi{E&qAY?4bbbpjZ(emU;8LJU7qh`F+9hhV23Ivt!|{FXiGW?eF!pKbV;#sKL9_`tvl)A=H<*TUrny4Qkd z4sv4s!ihId$|H&tl#1GrQj(Kdqjk4jqlTeYG$TW#SeI_yV|&w-NM84omV3N@QGKWp zJ(m8b9!t;Pxa7|`l}bZ0S-PY8F(Yamsj;@9Y|5{(pz}<4s}u5z_gb{+3gjSb^?DRyy?5uzjvG82ioU~=%r#DI zpl+7BfVFV%{u0^3WnJn?i!BSR9l~uV<}A~CukrfJSN>#Hb!=LO)KPCT%bst(^JEcw zRPDWB9GVx-A_y&Mjj6#=)$VP}>OqZw*o6pU#I5AC{b&t03HknTbSQMZc#!Pg3t<9G zR>$u^k8V?yZ=F_CP2zkfU|_ z)LHqGb)fE%I*n5cW9VzZK;3qCE0FwrxsWxnbYKmM6Beep91|2Pvr{^~w_admRMQoZyF?toXL zLU$P+{}wJZmG7y)fqd0ccr*bXA`tL1B*>wIsZz4dOaE`Vvu9plfGl*Qtba~UcS~{` zvy-FQ7!KE1PNsvTALR`fz50EA<*l>m>|UY}_1-7Pef;br&w+3D$p}tQ7eF0E647Rx z-wyG{P8np{rT4__JxoE?!rh9Gw_6{!?3v?I*krcOJ6j_|M1|H#w$vFSG9?1@{_OhF zCW^4Cm4>Fo4Ah5Nvi|kk#l1}wBQ0j`J8`}ZiKKlY1WBiBQBe;?vEo%imlhVPQ;k7Q7_ano~luZYI5qY#_E^F=T- z0h>+UI9(-Kz|16vg>+L1=&IpeiNLOp8O>fQZs^{>K z9UGF6=g3EW8W;*jckZn?!B^eGFKlD&b1K~}Wl`4SkFXKqJ$VS34o(O+hGqA{SKgla zbSi|`ORYvmg45*7w(8s(kf+nr?+%!m#0p<;Z=YqU{8mN^YT?QpV&`}yMLx5+Z+}TW zeBI5wKy|MWu|lh5KKB$PcxV5V)sK|4cy{a)vx$O6wsRwA>AT!oTkxuQgiBJ#*IxjU zOrPe~ZMgv*6LnyJ=z4)y0=8g_x{j2uGJw z1=k>5wK@OczL_oZ{N&-Gx9yjJK-e>(1YyGB@STmn*x;c1_l-fkQ5JJN=Xs_o_yOBD zp3^4O06n`yRw!es^=_YVLvJ|c8pEIvs(Q#movG9GU6ZfyMzqXG0i)@dP$N%Tcw;AR zJ9#Z!eZDO&*b=kcBu(8>w_Tdd3c=??#N_%3K6M9mNK;}HRIjem`l6C*U+sUqT^ouW zF|zPVR`LiH;s?#YC6TOqqrl0vAGZ@EQ#t_>hDotg8;%Uy`GhAmf{(hNJAL{zfZ)%w z1Yx;%-#c>y7E)SJxe7Dj&5 z%67o#Zm0%lhcB2sB#oqasXFwbhsZHzEv~KQDCy2R~bvP z!s$FCX=@Cd2iQ#u*%+?yKosH}bu_+9)!W5~Kmn5$_d?a2ShsDbu)W6(a^)s*%0je$ z#Z6|_Foe4QJe9{H>=yz;HhtL&0fzWLaTQ`@N*`8QWoQ#c?K=c&QsiqStM=b~jBGW%^|}+SUI(3j@LI0t@K{C#@+0lPsFx}uzcXQh56<=eCWPWl zuj@qw2y`UB@ab!FEu{Kq^3yqve$w-#GL}-1b$YGM0@31%-mqhs6tdf^k<}P>tzOT6 z7cgmYjjK4!S+2q8*I9Jo*4ILy^`}JZ{J!t+(CwVMA*Q_*BI@9(3-3Ziyz@qZwQH2L zqEO5AELp0Xvx!2D*n?haUQP={#O(DqH`tWzI}}^FUV}yix93CN`a_Tua>>s{ah0ph{+?VB!J|+AbD%eH|p^MUwK%URxXjrFIfESh_hS$?{qx8gjO_KpY(MF7jy zPVVRGlqp4u{M)f(hb}ixP5p*&v}JVUpRw=t^~X_Ny_Re4#=Y6IZQ?qQhsVo5)*A1< z<5q$1xUqnJ8fQ8_-A)~8Z}p`X$44HxA|jQTgFMdg6&V80xBH}7cShfZ3Q?7MPA?1vt1X{Xw&)b8;eG{q}u>_v-IKy}ML+q^ZRMYeaziBOs7TeRlE*VClr=S_`azp-eIVcP??3}=nuEDR{NWz-m! zDI541g}i+z;99ty)xwRHHnbKld+;Vjx{}tJf$)8Q{RBSUcGl23|Fp+{%?tjym@SZb z;&mo1h!Is<^tLyJ*anT9fj;D7XL0&=$BNl+LbKO<_h%GaNLvjrHb#&zZ~YB+pLt>5 ziTvl^FiiddDBl!p-+n3&ofIlmh}TZ0wr^kXn|5nHe(RsLZ=NjhN1N8`KAgeT^>6>q zJqzs7BuDl}4PR~W-j+pOS?~IIeD~8_N&SLG$KL%JgxfZVMJ$(|O z1j6Ub%>mN7Hl)LoS*v`d?!C$EB*!Ama{Lo!UbsSDr{4h}DrwoN#D}4b!8j#*lh_B? zjua7Ltp$b1wKYh@JzK71WsrbLkhcIyp5u92Uf4H-m`wkp06)`U{x&M5v=>~3tX5+fs!bP@TWq8ye9?2?Gfw=dJgFCxW zg3ymm_ZBtbY~~+|bhQ=Tp0$m#`cG2l_A{e&stRz9Edl8pZ?h>ih65(kRrJ1i|A9&X z7f`M-Y>j=W`T$6Q8=6J%jelQSk7Fn85BgHX?ab;1oXifae3|4YKu(bAo<|$-@k3aFv)i! 
z|GL(1OrQS8v}q@KJldkVn(^$sP)i`k;@+d{M9V z+kSi`OPYkwPM)QGEB48`pF&aOTSyS@Ad=C2GlWJb3=kwBlx1I#b?}9^VY-R{bEohF zrbpgtEH9$u-=a*94c4=w7b?g64KhzS4>0fxXzZjz3b*bg;C&=r1<*pE##E&BpZ7?u zOX}HV79r9JMbmTU1^x-z9oU>QUZ-AnNegi(Q({c?{)d%G>?pPP8afaGv!nWjX5W4} z?e4=zGH}~=PyH)y0eWN3_j7?uernDznR%gAz8N3p2P&=KXfyo}{1o`3Kmq>q^fql; zpI?X2n6aoeE4`1bdokgNy?imTt;HfW}Ix7M|Fw_2%uJSwaj(2okFWXa&MP&B9S zNLYH{TV2I@__)FW!W{$)5SGc^ddCJU-Fg5S>F@PYRfwbI7?!=kHHN8kxZ|*o)7$?v zhOKYO#(wHR*oFHrwA;IEVf`tibu(O96CNQ#jz*HR$?UqvPf*1|i5f88P>g2XLA^GpEfeU@Reo}lcT zG2_ICALd`TZryqP;9soM#gl!lF3fD17icuk{Xw(YxeShMoBqoB9;)4=zk<%wr{j86 z>i^(DLXQo8UhF|MB%viNWAr&>bUQ61k*-4ozFUa1-bSv^q6}mrM4fln7^Y*GjbZvt zl(EX+yH7WS_JFMo@LXefLcp}1vGl&~J^hw1X}!8-AUBycPa?&keAi-;2`)tNCum-< z(pRQLwX_-{Y00~A;RODWS>U3gLiu95$Htw!bM+(&L?Qx(_!1WhLG4Og3X&~ZHa9Rx z=7HXGz1Hl{+R=<>^l^h&OW2U3q7_l^x#jNVVX>5cYGSp4?V&dX1iDw zKV@Z8Rc8{_?vB^cGTW{;++Xh-%iw?Co zcbal!HIP({46VY|&atVKwow1_A=RTKcL&M*91N4DuQ5&l3Xhz$$h&@h%a$`|@_+FI zC|Vr0!sDGfH@WUslkZ6Tc$A&An)^JDO0^Y%beP(X zJrlZx{+vyFe++Hu+CR_sI2dp(KjzU~x*w0wj7?vA? zeG~p);L=H|fNa$Ke7Q{as-y@v2OO>cKa*K%Z!49H#ykwwL5OG_2Xd07ukm_q)9Z4R zKpt`-XP+!^?YaMfQY$B?f0G@vyZsMH#Bmc2YTI@jn1tK5uK<#)fALH}_XmH@dTn-7ppkRl-p1K4{Y2K3w)s zy!$lxq$XSSoQ?Yd zve5dxtOAUp8B>(qPJ~QgW7yWLtTDXUH5R>LAu^mNTEI~Vr~p~IjdSfj3|%HC=;Jk# z_Qy_sh_GmyK|)ZLi>3?JHBgM#NAV1t#(?k2O+r&*#J0hjsrX|w_cWH18ZmMfn*EDG z6SYXlw3T|tp_Py%he7Tc7iT(p3|#Wwf(Czm-;i-KKgYa{O>%D709bN0Z904QYxtRs zpQcTl=HkhdoJ}_B%toVzoBUG(KYPp4sCExdNR<+Fp1jJ?!IikLV0e^#5^T(U9uhhw zI_;*Yu#kN$VUf{Q@-jeVhH$gqNu6APxW>CE8Zl)Vh;0)tTD!GHI)-}`pkr8_OEt<| zn?*$f`(&v;59nZ=j>S-l4D(-HnqbO>*YU zId_BiGdCxom^W|l{Knb8Lu>$z1`AMG?K!61^$bV0r9*fe+5t6K{iTtgCLcCr@Fy>R z$$bUk(KGm4=l}{V?vJpi2m+x$9u?N62I}&X(WMEU8Dc;`b|Om#-;+cB#y3tBTFt93 zyw-F))uKgf^|6NSziTy?YYcPL;_o=9%@WA1{uHeU-19G1#U{C&dskNFTXP2uhTddW zYvqnbDD)5_kdxkV67sr|mMSfvC>r64l(r3Q|D)Zh4r~@5K-MC&cl+v0g8sW$Yz?-w zrES|ZD}6}b&2+RY!o*>+so`(ggv$K+d-v|m!Jm0Ka_rqZe|}?P(J0$*IzvHy1g%g0sxJqJq68!$e=>`rNgdog__Djp3!DR1`(IOH>NJvvye104ev#V-e`?#H zH4&NGg@}OJ&vlQD?cI$J7nLKmbCIwrw{2^1+jixOv^VPjlBM;QEY8$5Gha);!fP6T z@y5w-fThtz{(Og@ix(R;%9ic7hQQ)4gvMr~vFBK3R95wt5{}&sE%qkIp`xv z#eu&hN_e&}_W)Z!q`wtndjwa69smz0^mg1Ift)Y`VZ!17>Od90@kTbWoV^jB?kAoZ zq6G4sR!GbY(UPjW371YFBQw_JB&60j+gb41CZ{KN*{?p9Q;`uhnPsf;)kg&+zGY}* z*>pQ9A;h6gW;t7Dh>)XcQ}+w4pN^wuLQruI2n_!y@mP9Ndfmi`x$q29ChCz9H@12ndA!cnCZtEPPn9Hzwn!3(YNpPIBpfRM7}CM0nQg z$w+V3Ywm_vKDVm-TTT^1MUrx?F>J&7kpU;Zm&BCgQVvXc!tz&pYa zs-m7ZD%W6El~&CiP?YqxccleR#IF4h4-vO*u&*>qf>Lgg@Rte*k|D_gU=rjl9&R8c zH+LUqm=F`>uFQ+}yt1%FisgboRZzwZ^cDWU5Ga9qQPxY%E%vB|uJ)vEA*=o|?>^ zS@R@%A;PutrM$c#BegRxwDB4!x1xWMUibVxG9|j&*DP=(c9vR1#GKTK5g-yBIyDi9 z^a8b?VcJUkg$oiuQhjVHgXHa-wd?Sj2I6GNVxBn3d~DI4Jqs6Z*;1cBS+ZooU$<-_ z6nhpO%Z$+Y^1$W|$;2DF{a^}~LX;+KJti0$5eBlz6K$D7>enc1Q_ z2))VPi;(C@K6FEXo-JXK(OL3p4)S^Eo^OUYae;fi;aM-LGAAQd1UO2sc>e*Ur)vze z^Nx&#Kj0k0b{is~vWXeU)VfJMH4M2B7|>yWaELg6PxBU>1L`;lr%u1?Qlj(sB$Yf& zWv0ZXP$LV0quc-Ba}$aY^2k+c_UJMAv!HgREen#QmbXTO-I2J`Y=0SK!pjZ3rcn=| z@Wu&X$$aeCBKLFb*s;u+0maX{c~Wny&^XLn0=(={YXaavwjz6QJGVmQ`eAJI(8CBk zRABKM^IpCI&V?RHLS%PjA9i^wCf=pCd+g(9e~_U6Ylcu;N~0!KWr`^ozF3qkP)Kn) zY>i>p$Ffm~+sX>i*xrK2k7ah~920u;*%SG$BSJ)l7DFWL@3GRCyiQZ%Wh5=H(qb(@ zZ+ornE7(G@GvVIhc6r;~wt@CP_U~UB1hsRKuy2C?;P!!oP;Q|D{qf^SMPs7whjPVQ z+WJb|jvyJ7r1FqtRgyDteV(#LstL8+HI3!9)73e=E&Y)r%j#wzF7@iw%g7)6^VY3f zmu4Uq?+A>W-kpsJGzbBg=9n&ANQ8_BDS>~ihqJyFgn-Xo?z&jJJ z9jV{*zHZg;n)Zc&2+QA-NrF8~#_e7D1uzL$JCU~tNg(-%YM;F{hG8^%`#Eyt$UA`I@ZogncHcy3Twb|74exBEs*&>ZFdWEMza(`W1aBeK2PI|-;>ahdnnP_V zNuw;zdY`Uy=eIbTV&7{H8^g{*s!dOIHU!hY$0wb%4a~o513217k-mp<$ODssav>2eMW9 zatY5WJbHmXN;V1Ij*coiJAAWq$-`Kd(rXQdPp+L;u$R3utQ<$(&FVWt3&oxar 
zTU?)qO%Q3kJ=BpJWtk{cBT_Olg^B<`Xq%qCJH-r z4ch0~3`Iynm%XtD{qe`cj|du$g^*SDP1DOY5=B1 zsK`JgUJz3^ga^NA+vObrS7=GDBShFQ1VghsMRlmCVoM82%X3n@~St8IXI~0fGedo^)uk053Ou z_7X9vK~Pc?l^@d_I<$TJ_Lc5u`}RYJczyR(E%`4X>?4uWjKpFw~Ox_zJX`u1;yFA=y#FGPrE$xc=|V0CQ7LLeBS)qg_7 zH!s*6(0IMMTQ>0gJxh?Zbh{EXBEuDl{oo-0sa0Dc(#W^U6`@0c@T09dY=}uh zOISQkcZav0=sx725QV^A`au`k9;qK7+UY@n^eP!_XfZ|mmL?&ydAlG|gjkUrFi#?T z>d%J=FSY*Hyr4aQ$Lpro&EJDcEBgMeDl#Rm=8BYvokyv){{cBdfoL(J>K9;M;Ib{K zyTPu5G)Q>X%K+(r?N&t1OO-B4r+tFGWJ3@| za)4tew-9ItzZoL?x?7=@a1}4Gqh>;gICbdrx``3`dw?S4;nc}o$l;0_ z=n}OZDYtF#!QZx#^T)}LKK|xZ7Yma8p}duX-BAJe_M)U*qVkD(xj~pDtAaoo@o6=l zIC%-U02u%0^UpsYgRppM)Mq?;0x*!N?9RrE11s>dLs`J_)eEIb=I4TyT86GiMc?UE zq%i1Q>!2t0wR{c_?(-}K9xDzVtXYHm3dIMb;$;sD>b^v99&dHXEmWYj2Di=d=|bZ9 zW(emuwQFeH387!njk2b-`4B4I*|&56$o0LH;wlDQ!?3v>Gx5v|yoqli@&)^a+~3ip zD#N=+p(-N%J*>3oCIKR^nYaB+KeugwTvuHiIUXXYwHoq~oH?)k`8OYu`tZD^EmT_! zk^`aJn|E@y+bbTTWc8T@b|v6Tun91!oOEdMVZ#E$hbJE~;?op}mXV*~XVfVEbtHbO zr2rI2Q^R@mWJuD%K%=5vcIaO=L0`b3+UiNL+m+jpbTOTwJ_@y8^A8_)o6&0h@!xYd zYdSX)k0Xa~a(@JhQ1(U=h{hS$sTR;(`EczS6vtG|| zYLu>*O9JYtM~t0Lv1kgl?jiGnn!XTvp0%A$U+{WX-S2UxM9M?N6-|j-gC$E+jnHtV zH9{fs<4Oob9}(?;%$d!Hq-KIhkhWNDF-TAV-w$;-5*vIH%a_u3mRZd!0Sc2Z2O@V> zN;0vsPMi#@5(o_E=@}lM;Sc^r^5ncf11yRIjLH*}e8SjBjNlm1zh8ng;0{Jd+@8T6 zHw%UCLkxB*!fk2N2T0(;`s2-^B=JXrAZoWJ%*WT!**dZtWi5Xh>S>r7azU)ig- zhv6EVCiL38+9QPqac#wt!955NTUk&WOMgTvU9((B1}yEpVmB!h%^8<4lw9C{Lbti;vrg+B57Cg>+jeo0(m;EOzQGLZ*P088=W$&wB8 zev&1_zaKgTPz*+70E{mxAU23b*#wi$tS+;&bnjkq0mq}yTZf*;%XW+ zo+cc{t|kDE?8LkMk1AEow`<;ZZX48pufg|Pw&fxrcchrM$n6Lc<*kRI&>sh%A#vF; zjWQ0C0lN&8FN8{^BzAfRKS}l1!8|{c1W@qm$-plW8i~pwHqLsN9eTw@Y1MsntB_?Y zvIoKg>5A)zdB6SX&)IWw=71gy=PNts@0}}$0_i6Li4&|Zdt(PbVd29i9nk-)4U(`K zBEl_njJlpdWo8K0yqxu#eJd>Mp>zUnGOOJh7EN|}fu3kCF@q0m&tF^R;&n?}dM()G zp%!x3WrP};QR1Ao4bJ-eALiQyBBn?9Y!NqdHqOJ{@x*gybK?Y{l?+>rGU<`DfVr;q3Qw=Ex3xzM%f|c#IyYbId$fKB>=6`r}iE z9%$pu_hTdbu*-{YyQc|jh0PGAK_2zJ>MlcsMWOmEt_X;n^#&G2-o#gkOeG;)>U75W zO0D!IuT!t@#>c9`td5RxXYB$f<<8q49oq1yhe|E>P*D7Oh;ZA0`yW7L;T3M=pxAo9 zDbv>a*YhDrxZ3%RT?qh{0Fw5Z%epH8@qkI0KH!rCXZ!U}IH26FU8~BM|AnV#?Dot+ z9-qNa;$Hy8DqTG(+wbh8zVU^|Ft z*}jK9e?|KWVLZHZ3z}63)=HHUZ+WmVnn7yl*@FJpjz_1?NB3n+n^*g z4EH~rZ+GPA>h!ilkLcMVAhN)y9jg6NHvv#7+dm<20WgVpe1`wS>odE`@%~JIV3B}e^eMxuCrni(p9X`wG8Z&Bit3ht zme&~`NLO4x1f9?S&h~k_X3}#oZ&RQPkK){52cbg|_KH3CY#l1zPP(Q^cZ!CKx-De( zMk{&w>C*e-n<2{HVTn~ZJj!y<^{UTejdwflfubz%vpdV@tq|Jfh0am;uMf7;SC$}l zeWll*Pg)!3U6{#Bi8LkhfqgiyQ*h7N@Qm|mVx3s)|Vjrq2OIP-~8~1sB&A9X(#zz>BN-uI> zAwJK|_6Tlp{&>1mo=;f(aK-3W@@i1X(FqONuUdf66PbUXjcz3hpR0Lo4Y6Oq^Y=u! 
zMAzfk26Udh?4QCI<-^lS0p>3BhvS=fRtDOxm zDs6c|@?mSPq`#$=bVNz#Yf%vIq3EAhObQ}*l>{ceddHg#RCss>xWqpbzCF|T4A0L9 z3U~E{V8ktf*myRq0fOUpR7}jBqEr4Y+=b+^7TKybid}}s2hDPA(0&zPcRO};F`wtT zY559*5CQ~W^i>1Ss=#vRvE4al+8a4Maei!NA2Pa?yqu(SP{;sz@$P>vL(JZwu89Q* zYTnc}=$|kM12&np^U~gsV=8^6Ja{c$XG_a7C2BCsqhmU#q=g)fM$S(^34zChyHWdN zCaU;#zFk8^3ei0zg<@+FcckFJ5onpAi-%djh{;k5y?m7Ah7B-HkzTpwRsqaPZl6 zkO$z%=dFXj--HW2+^^z35Bs~&=N_Naz$H9vf28<5?>x4%H#&EUB`lL58oskNY?lc5 zGTf)3yK)Obq-Wf!wo#t&?)bt zl>MLarpdV{J>w7;{PY4i;eU!P(ep9yo}BCT$?`@8knr}S%0+PK&<3d7;Ape%LuM;z zbA<=0tA>ANy<+R2%eJLkep$~Y36G?exX)7-c(C9te}`1?uM`Lfgx56jU6(s$%OZ0N zX?DVTNrPLp>U*3!;g3%ISuYpU6(D>C;Gk_~v76j~(pLdJ8=(zN8yzx4qz=LBw~~<8 z%kc1mdz)615sq`0{CsSoyzO=ZIec(eh+HW793tNS$9Qh!u-F2TFm0jQ0=M%4=w{4Y zgoIH7djE&_O?XVhVG?zZ$21c1u@I4!Zm|olA<>b2oq_2ZNuXe#G<&3H^QyA^MBLm#bi$> z)v6x08Dx!2&cguFP-v>GpAKCU@%$cIFc|>_A;Q-8IFiqi^g&vMa2TI< z5sB8E;VzorerSF>Cuf9+|A4NpiY@v`WTS`TGiO0W^NdPLzT;(l5(gjh{4INiC zKr(d-Fj1WJJ3&x($^LV zFFI|UKvzX;2cS`7YEPDOiPyW#WbvSQect@xA4H_nKr*QvBGP|Ik;Bto z)JRc~^N;@95F|$zZl@%D9`Y6;`2vvat9K<|JY3%eu}>s3&W`ctz6EcqqeNBPBWt9H|SO*meIy zd&au4d08G2%x z<~@s+cajcYp)U)`5OtEfj@MzT%>FVa9l7mvuL;c9R)?X2iOTk+4>C|A!{@2mzNN-dDS+<)U zmJ*hE3t>h_MSm`+h>G5KGT#=b6GnSFu>jFidKe&{03304yC2$gXm#CITE_3$$nwmj z#qsbpEU&Ja_nO?`U7be1&?R4m?cH zgI{z_=m~br+9{%2ewZVn5*FD)811QC3|3f0=S*0IgQUHKSb#9gS&OAB0D9{kk|W|T zt6G!$$Wv z{->9AK1B7050d_i*#WbZFiP6bor8``E&)lbCc*^ugkBmer`8}UfW=BwjDExc|2b4;}|KIVjJGNzcP`V(NDpABlLd z!D2z5-&O)E77)=JZ}NO>z1x!Bn02VH-m;7>q(L%#Itg1WJ7~?V$)Snw<-8oReIchq zwZ2x@Ij{R+q~jH&L=EiXwr?%QrIxhMc5TA+A8|y`{bC+T8`9t&zF0NnK%yIwNL%A* zq(Nz?L~FO{Cb#LIDM^2-11)D{ra|L|Rfnq(lmiZxUcGA6=sm?d&)6sb86q*YSFfJc z=|Dp;`4hLPH&ti(X{b@L$;8fPC_z-Z?Rl}42tlawrhRmXJU0- z!-z{-D;y~?!El$P#N`QU1L;4?UyVpKwr^9A%=33Zgw2+2L^5rm+j2qT4?y!K_tAx= zkhn^(o!6e3%P{Fo-v$H9o4z&-4|8Aum6^EP*n33?5;3 zb*a`q*~mkx#hfI4+lQ2-f2oekTmo24fC(K=c2t_N50N5+ld24s?$ykBriXpBpQ+Z} z8_+yc#evZUnF?s2axggPC9uHYSafV1MnQ?!H#~Y;)g0Xw)0N3Vg@^PbSvh7iKKf>+ z+Cb>^BXg~wy9q@6c-jBTe2WQ7^hP^n$;Wq9bpN)3oV{H}k9 zmWh{8Lw=7p9XrB<+UAt#Nr|*THqja;cJ5H5|Bxalj!3^~G?o(Ku{E^6EbY1xiL^EH zb=GzZ5*Bxs5_1Wl)b`0=+I}~`9m7NhCwpe?MWE2(gs31^h|4q6X+2Xl{Pypk4j_oc zVdr{sB+0p+5RK4i<4M#GMn{RofWwK`S8OcuJ(ne83gmC%<)AtzRzD3zFv;~-Q>b_A zh5_VhBp&J&w)4IM>2#>gVF4kpVY78uCP4&}x(sI(-OYraEyTHn9<_SPG6_kGh;661 z3z4c^mr%o`dpYQhOjp!bv7AfKldl%$ zY~j(XdNuK}4UhSEKDm$$mI}HG2qqAAykw{7@g!~F0GXkAwosC`ytZj^OA`+Ejtmj9 zg^I@_7%OZSg1WACq38G5GoZ;o94XP45mDd(B=sLHD-ydGc%L$CqCftMa7Cw zKRs(!g*^xix}Fe>_RLhYJ(1DIqSlX_mw*nh(@WrEWUIn&$xMK1X{M|D?v)&rb^>Fg z+F$iyJ+D@^oAe68qwSLmonC={{u{qRzf3xvu_4G#NxmGEO-NW)(dQ<#yc-_TCeq$P zq|!xzWHdNcCxYywmPzxv=l8@(i3iQ%svLS>$8C=r?lv!?YZH7N3H2Y;5dslY8+b)d zr3qF@n6{|?P;FO`NRlr7vq{n)`*uC%l6AKqTZEnqm>^ERo4>Zqyfw&PayaQ&zWnc< ze($s=w4T|!SO3HL&k%}^vuD!}NMNijy`ZhE9e{>P$8D?~2FLOh7zHI>XR|B4Tca)4 zOAbm3@TK5%ot?x(egfV1R}nv+c!c^92@o+6(sP6U@r^a2oBi)b(i^Eh5L?KWSYivY zqPwMu=@vsWgq%0~s5CYFu?kzBqzf;G7bpXN}u#n|P@WwD{#J9@8BVF_x&S?0aWA z{YMQ17{#`APMWmSRBO^d`)k7m-z-G^GmRTUmwqaa>} zvzgFar?^GSG5kvrp7bKWhi0N7zsEfTRwgMC4J>=#Qv-m=ds=F7KD1WrA&iyMe|&oz zVubn+*Zp?x;?LhvY)fh9oLp$tn6_My_-n)Hx9AdJ#to-$*QptLE4 z^woC3g3_hu%XwxVzBOysw8tO#6DR;AczX)0;%z2n9sL!;ASx-uwX)fV<8i~gCWMn6s+ThdiK;%Y; z2*wd9a+I}5fFmNE_6iZnpV@5DN27NWvZ$q6(;-z!((h&|k^Y&I^pAggc==WflLSoY za6%`IFXcP)%lmf%7&D%kZ=Wn^@K1<@8PEU=?|OnVzW6q#Di<7E(OEG%Sj$m7Mvide zbyN@O-6-hE3X1>A=MTzrg@@CRuztAs=%1(`ae4(4Sdkyk5&ciGQz#%&!r}haqS<%Cm*T@iQ!9Rcf3v)}vUY*GAF{ea79+uS;M*=BP6gZ>z=(YixU)}!*HB$Yz z-EN3Tku&QH3DOVr;2l+Iq%Opsei6-@mv%Bpn71TJH%R0Xz%U^u|7lr#>8(rTaPr^9 z+e)mH!HF}TIa;#hLK)Ark@*b1g$seh(W4&-3SzPDL*yxh#y>t18VHVCXtYrjW4XKp 
zoLc%dW~(d0BRd#%ij-`YbFb&&K2iNHe#^BmA>R~uIK4vt8?{oD{qNwk>`O=Z`cC&o zJ=-#2IfWGxmOZ|sqFc*L4WhG7nH5Vx8xV1?PI5}r%LqEHlK81|5l`u}h2O#u#Af>Il^arOV8nV+RR&waBEM=zkGp{Wb;w6+vusD(H46T6VaKcz2E(B%~U1y$f9}Q?00f^Se0EIcwATm(IA7kA> zY;1V+meby7!+Qxl(B#Hpl;<|$X=zmtRr8A9Ar}4 zGvF^YRFWVkEF&p(W2siyKjRKXTP-3&E729FcUz1&M5zDp$JVCN+2qt+66BnW&3-{a z^4_NB6D8?@WW-2xT$oD=TbKY78aHfcLDw1D&LDmLh@wK{nG()<=5~BQK%e=~5D6L3 z{7Bc6C9S?*T6`HY6$*#YKyV;e6?${*Xcrw+OJfwIw}Z(+F9!GTi~Z&FRVW|+yqn=c zy~0>SD13O8HC?i^GMPps0HZc9iyT-Q?alyw(|846;+x>3s8dBTYY#qKr z+PN!MyQqRhc}xA4m!ty{%q4OOKsEW~2gC_-7hPvs{0EUj-sJKVjFfdQvr>QpSa+7qaX#kE;_7uNW6~fksMsQ^_Tz|uAmP;b4ITxEFe{k${X*ne8R$vZqFdxw5aa168|@~cO-$7sBK{a zDbaJ=Neg0OM`2!rPAOCym?Ebjnbf3Pnc9GxMk;c+*rFn*Qsr)TBhsXu(Vejhu7Kf2J+fabBsmMozg zQ0RI>N1zpq4WNOkgGL*#(@WrP?Mjea!5 zL;QI0v4IEjj?<59z895%ndh@wDjIw+HEN`&HRwen4GWUL zm+;!Klceh;cb-PxCBT_!$l&Cr@4qR+P~qnfkZT`2*{47OJoKX~4))WRp^ zClHShZg@F>yt_q$Q0f&1p`g17g!aE$wir8w-YK3fWW)v*9;@g)#>%%v4EB-xY@^N= zHo++|Zn#Sd9J%c*>}<7uzgij!2Qi$%+*>-@N=Za53kZ-H-0J&m3szKl6wg(5xu> zG18R4Kxi;i2@Nyac&G9G?>5Twse|L1216uXA3r z$MHaXkdOTU`KHE)FulTm`J9D*xdz?0#E)mkYdb6@EbdG~w$OOKWwD}PJnRVa)w@R$ znA9RA%Jqj%iE(bb-bOHUOO3vB*Nsk*^Wt^aliNNl;=5mWk%dHRQrmTGjcLoIol>n$ zNwt&Q=VQBW-xQ>CPHb=uATgH!DbmeMLzsAnlfrU1`J_mZAH4C5InUU48h`RXJ`5mM z$bd#zG$$Bu%MobI=%%zivEX2?y2?u+J7`d17xjlcN2z12=&no-%JXhc&vR@Y@4*0% z(=I+d|Mz<773yQ6{3dqJ(YNU5?+OUn5VZd#y^*J{JX?qfOEY?M@HKuHSa;9|M7U9R zPKlVi<&@Y`hP!gm;3i&b13o#qReZ{;<0*ZDh1I}?Hg#rLQv#8gJAiFz&Pg_{P^)F{|Xp3(tqX?2gSI; zjErV)j2$!j^#Y?F7!et6Xxy({B`*Pq*HJtM1%tNM)bZTfwaiSQ+O+*Tx98=clIQY1 z&qu9Vaz6i9_Z2KY$Zz~f0^PKrz)owhrmXB?_Pn0Wfg`&4Cm$1?eZEW%Q?pn zr{|G=ME5R$2Nxzz&r|;yHN0xDs&ZYg6&7C?MQ=p$5xtSSmJwH&p_yQz504>FUsWkm z-;ol--q)28M}cEum)bzrCVJZak7z6@jXDLX4ct7Ef7F;a5*0b?>1@KZ)wXmQx|UHQ zg-RmPTFOmK!VJbh>6TJ{^cD`hNGy^--Nh0;~9VxelnnG+O+AY zQ6on_;INPdO_>s_d^=<^o)MVdn-;s!JNWH}B zj51PyWkm0?q`*nEM!v7LT9Vr8{s)rH5&XX(M1C?vxcfz)W{Mn&ZB+@+uw<>OrTf zn0q;B`Jdg;z!qFS~ z*n3E$eL=E?IHQ}QCsNO`NhHV-DbZZuNP)wAsEOTOo6tLX?^HvibB-)&ZQ1<-5$Qih zlOw8QYa38VbhQ)xuTI-a+G*E@4?iO-jT#3}-8H$EAgL3Jwi})iWaP-{%ru_QeP{yf z$$gtYB`8DRMLXOW2~5T_uMHb!&NEGbj(yI720?+S02GJ|U}3H&4X-z&4cVTsuNs^a zxhlAC+d(C(6H-f~cvSx!#aySp5gtEFE#20g+hh4)Imh@xJjU3*G%kO;2Q1S8%Z5S!pb#;BM4`7d&`PEm%~Vjj=LaVK1YGOLTzCB z4-SZW9ieKCF~SLvBvD>u(TPZ2Eh_5H-${|~g2d^cEpN#sz#s`-0%kmXO~5u-#tk%` zAtv_{lqYy%ZxrB+01_4$1q62MI@Tu|BePaZfJizWS6F;S4+}_0Z)CzMgi+G7h2-8C8Rkfd8uH}Z-i+LKQQ*GqPg_2! 
z4cPo@9I+cYPHn(m%OPU=4>VgQ?fjTCBKu#{KdS+zv~za(P8FX1f|7LVpPfs<l>Z)VqapP%GzlZtt~sUKwip66qC) zhtrP`4>r7Nu&7sX6JY{B-n%z)hn2PIE*5|(2NeKG6z4w2YX=9I`X zLffAvo&v|^t*H%4>Q&Ywq8O2~Ga9R$J4jCBHhzNL$YHbPHBwI6N!tc93E)gprhQqS zQN7Y7VCs~bsdr^!R{S$1rS1h_l2rWNK|zpCCm;vPHFRi7FrK0F3|(igvrqmraRVAa zAv>S}vY-JP^q>judF%a}vDi)-ZOBWY=9C(W4vN?t(8PEbLq}Ix|tK4TMNpQ{>oMYYOTJ9Rg~-8l)wYwmZS(&#$7itB_zK38kIu zT_tH_d20^(Eme04I|<1jJ$v;&nELGGOofRsxlpzBmBsSPAYRauC;BV@RnZ%f zu+W*6YafZ8J)1A&wsSKj!`-*F_+_i5tro`-Dsmi?Iz%KIOSzl0UEau%BIlL7ayD_& z&Z0UVWlI`!iE9pF3A2)8GO8^z=&LjX`7p)jF%P+mVK0wJBL?;dTn*f#u0EWoW zcIJwYFwlR71~iNdV3Bq?8~^^rYj1jDI+Ee9hz+JH8Er6E{eeaowS$+}S<6{GkgXQ{ zQtHbO6dqmU7L{pgCn~`#Kj2pAF;oi1@)43v{Q;4m3GNLB<ZGw>o37=Flc|FzJRKwgMF9E_~#lx~y$vdoi_~W@7 zzq#olG2~g!C6}hXgY-NDy8b_Spl2@K7851)3c|w*Iwug%k4MLwgI2zEJ-tz8Xrwnr z`r?P$AsEC;Af>I=jB2%{HjvcT$r$O)I_XObQ_ zo?*}=eD0qm*LyBoRw(H?w~XuGVJadcc4iml?EMrQGTLBpP`xg>G?JCzcxp`<_Ey{N zcy1UT7|y9!cwN-xAB*|C5hCK%21KMbPaAxgWr`e@c1-Qm zNGWZ_rJV>8m$!T=NpuP5B1QWC17@ZHOqjbS^=0y+?F_9nPDXpoKK97yhPf`&?uXyN z^h?6&M$2u1) zGf1U9)JWxJ+QQf_)tV0yiq?>~;*#_=cDP>hXJ#&%X?RR%rjbcd%;2Q%IT@VnJU?k# zr?%T#d@?#;!xwTP8ZKYVzL=dos9wFa!JA$jI=tC$troSLx$*=n5U^liC{M|QCIJl^ zZD8!6m%ywLj`-zuW-EjTW^?J?BnMUbK)SN?Zu|{@F1YD0=|@O#WPI?2(Cc|LfJm>v zH$qP6qHj4m9Sg{%&c1+n_eLhHwxuE_k&@xA3cFVua8sx~W3iUEe$@X;L^|)VEehX9 zbRsfo6Rj^<+U45dq@6l#>w?5i+oIo6DG9Hn&*^}aAGDJ zb$;>xGHp$J73|wq6O1ok9&e>5Y+t^5u)|bB4QBn4Jv(00TQoL_t(i8)%1OQrq87 zb3k;QM6X)gD@3UOsEL1@L;JI@0TC-CoJh#oz1vGWo1@)&_z@L%e!-9T5h>U{_4QI4LYX=Js zsn_KtKtZS%uivI@h1nwv58hsvVgc!j<%8{2TGqg}u?zIiO28u_Ux_b7J&!gfPOpIf z1CHL97CdPDc(Nh9d!s4dBJ}0sL{SE%*)4v@-;mc|o6zQ$1_2%s#0cgQ?zQwjGL?!P zB67ZYkezIW#M0JP(oW?q^;&(ijj1-4Trrb?x z_v_csna${D@BiW)Xy`vf0~$a<2bxp^qgPLwJ1|objTw}$BRGCf@DfmT9DCe}*WIaa zF9$_Snlwkend~UM6ZDKiuoGtD;HBz8|3BVh>atr|42l{=|4ov15$s<70&UdN) zKqVPH_RR+i--H2}0cZZf9eGQoBrZv(u5&@@wewg^5GJZ7NnMiMAQX;Ingk#+9Tc(yVq_psnFkGe+efUend|o+L&N|F}=dz z(era1;E&&V^ znrR?T=$BQQfwF(v0gs9dP5{d(0P`%N=`(rq8t8J7mVG- z_KN2ZYxL|P{m4ub>X5I5hl>w4fO!3g-lRmQ3)-`QxRr?sdR)S4z3%@2)r!4&X(|~B z00000NkvXXu0mjfP)h>@6aWAK2mtehE>{mxfBB370RYzl0RSBU003ulWo2$(a%E$5 zVQy!2Y+-U=bY*ySb#i4caBgQDSan=e|M$Kd9nv9^64D?gIm*EZ0cntKkkJBC(j5bp zE)fp%7Pd3KH&=#WCGI!nkG4Z5pIY*Gz>HVpans74AO->$0pI}$AXG;Z1UL`@dSF0^@Yf*V zJ_vXS253Nl8ximb4A29BHVBXf0lXl<0}QBwf!iQJ2MoxAfqNjp1PrhfiUR=7gmgfF z5(t2T0Ui*b0tTLd0XYz$PN)e8a3lhF0l`2AqfhQzF2NfC~W;FyKuDSP=m%03Zzp z?hxt#0_=zYW)NUQ1l$1tP5`(M2AqiiRsirJ0>r@pBM8s{0qg{P0pK1O&;bDg03bp@ zjR+6~03k5oLr9qjFe6lwkQAXYhyYOnSO6eK2#jz`=m8?Ygb26`0Kx!Z0s`&fIbo64+ad00B>U8F$f4E0>lXW zfB`cQ;0p%C0U&?~&>exX`}&KWx{oKd zrn9q`Gpcs8v(>*#zRyN=-Nf}Z;a~Jgy6*kBjy#A;y1cWyc5tqFFCoc*x}1rgjhRzM zF=T9}!|UKXBdve)F&W>|%At=AFjJELRC@sw~u8DUYR(B<+_&Vgd5jLFdo z#-a6@wZ>-8R-$NHn*NGI;SQBB1%=JWa093po2sQfJv~JHg^$G+Wn|OSkfEjE*`udV z@!+GmYNJL6+}AThcT-JVaxS_@s5~79D|}klAD{G)NTkf`^2Lwu zAp%koPvzCx1ta_{T%Yp|l*rcQ+|LZo~D zZ&t%dN}Wt^6leXR9>X_}$iD$rfj6&-{R^p{{lAX-o7J`X;g^*Nzr7pQVU#SFQMy;X z5U!(gCvuhZ#-k7;Avl>SfhZ|bl6oDF6`YuY9P5G8>p^wr(Wj_v! 
zFtd5Fsvc~a`N`9B?V_?)IFT(gUz&_TTa|X0%N*f|*e&r~-R9lC6@bt?X+U#Txn}ht zAcy&KBQ0Z3)GwkvyZO5>2$ zW+^;a*}FaI5v|eq-sEO^mIF?zarqO4;LkIEYb{g#i}9MHhuzQO#wM$(U@weUKGuJg z|7hVM`D?HZ!hRx=HBKYC`10cK+G^KIQ{0H#6eaCkly$3!T9}89&4keDPxvFl@#AgN zw3eCb=DRiT7Ja#A+@u3Aa5!e_hW$mmHyj1&d zTMr4dGK-X;JSC5K9d)-p@QeTYpSmggrIN*TNZog?S;_Iw5T%re1V~x~SL*!BaLel|>xE;s5s(0v7T}7gie#h2#wpu!}G_<>joE_ef>MM7EBG+YxPS5@i)I$ zUvl?)+FC~ox`X$wtyS7?BjA7C>HNrU-!oi!Z0EuZ{@Ro5FOt_MZo z_|Qtb)76R{u$17z&dWMfpThQ^>9xw*k9+d6_LN>vDQupdo16@M4r@@JygaB_NFwhXt-99K2QI-i!X%#i`>l5;qKs1)7>6Izu#b^vh2!_-9(jP#3pnF;MHQ&YOJA8kJGtk`_dD#z}iJwdVaX#QfN71Mb>U8tfl zub@GX({}L01!!N@#Bg36Q5rr>y{vPWpXf}5)FC*pL!zdQqn$Vme}7(Y)0)S$ltu9GZW@}IdltK7Bd z?sV07{1P8&wfcM}PC4wBLB_N3k0E88I& z+x16HmpWamaofG7_`XP98oL~tW#Z2yFI2GC5c~403ENZC`A@&C7^W#YJXKZAo|EBU zbYDzA4jFy@HPh0?moyI~k@sh8!jn7`9ro$i2Y28toQ|;Dvy)}YeQlPl`r$xt!KdN! zNkarI$F6*0yH#niUPA8lmZXaD%itZ-f6szPsaBRcW5fQPO6NCD`J_yuw++{bkM}QD z|D*YJF|K1T-IgvNsxkhmO)jT7a#}yUonAd=Z0>@D$3V`he&Cim3T@bV4@m8qSp7TH z-?<&WJ?n_*8|-iPaYGxebJ1^x?U(=N`sj%O4l{x)eC}ex_OFdq&Lp4jS9_T6vQG+d zt*uTm#)ttiLJD$jBv}y=uZuz}V|jjr=>cGsbuPE86FWn{({10ZqLR)iQ~Kyg7e)zX z+U4N*bW+Ey{DfawNAwn8RBYTh-HLgVx z1LW=rDdaM!BVmkWBs|WBR%UTmA30*L@{GkqX#RD!FYpmp?_E<36X`_WIU!FEhepUI zteP#+X0yiprQgITQLC?Hr%c>l=Q4LhNIXT*m5}q`4vGIvThnfG6KnK!$Zp+H&|4#y zP{y%1hpgXaMV)Q5>>7c+mG-12(OWX`YX*rT4%;OpGS{dhrIb~oBc&iX$v+ON7jp)B{-zThdc5}&iuAAi%_V{$IW*3o{Pmo(MZ4WFTyb+;TYP#*aYmm@br+*NI z$@F(zezo{4>ObPhp3Z#C;f8cTT!^na?(pN`Dv|nsch{-ZO&HkkgnBq5SiFn}1O4dK zvtm-TeA%BuGFB7fX?fjw9Ma;Acv037#Ac*HcN9Va`Wsdt4#G<>1B6|)tx>WbuglQ2 ztfL@C&-ZVsKcz2e%KzlKhZ~%l*!&pT4CX;Vqyb%Pg_IjR(}9tL{ZRSblM@Effce3D zsZZ~P(6T)DU!&n{q8-3KN)rDpi;begXBBYT7w~ngc2P;V^S|-|y{8>6p?Ws^beWyB zlR!bSh|XPV8#iX69##DT_Zd|I+Xjxij~uA|GFR3Hur=MPZ;KJP8_tEjw%zJeaxe9mJd2iT;N060OYj)d`JcV z7_;t9Z>;~@AU#w%`)I6aHFIrx&`CunJHWR~&!m&vVzV>t8+SFl#78U`bH%wjeY;yA zM*STzGY-A345E z9d!(+me5nfEQPSm3BEhjkMwnNTrl?Go3nGO_fX(Sh^G(YU#8FE6wm!ro4;RuX}?Bn z*L9C&bG`BzTJ33` zpF87$JB*cPe@u3pNb8YKysHz^(wVHOb6PeB&F1|I5iMJ|dFS7&l-r=>U{^7gq(?>xwM znD36`AaT;a4@v!|=ag<$i`=p+r^wn`+4rj|;v&Y(KG5d2qDE;2Ys?sxA81T@hFmkf z*mZYg$62rruuOK(42|%Xg?=W1@(y5q-q6lng9j`OIp?B+y=B+dU+FuX-kSK)giOQ} zbv)0tW1---OxS1YKfJVAhOb@F_4}G|s zuVQ@DQ*iUQ*QZ~HcucD53(GFf1{|I=KXE19d^wLpR>{4CU=0|Kasfab-AN^R{jH!v|lNm z1kdfiP|U0wk)ZeQy~3c7@w{aA@b);@-R`Ih*$us`XY^8BS6HdPVSg2bM9~c_8 zI*pCfHDk4TcifLEi{IiUh$kL6=zYAul&wL|bGhH@3tFE^<{*QN+cD|fDgk9NNRj~i z^x_C+yzdGNnzEU8t`{+v90>{2)XF?KzBr{r1t7NISY5JrD$#`rsQp815dUUL-BcLJdC#uVNLPBBLZVMR{r~M@-QE?Gy ztwypD$qwa@Saq(rq|+KDxZ%k3*4WKEww~#G3S^7FyUC(Na&QmcAafr~A&`ke@Z+ zzcwDG(iWdpM|_KIFxV88+!lu8bAi;?m!e@ZjF&2cH%p zVM=F5RhtxtT`+dm(6kh`k@ja2gIpj461JCM=nuqAr7jz$|0t$YeD!V7lqYzKMX5Ez zDxX=KcMJ(Yf7pW_x-P`@u$kAOet3KL|CRwZ{Ga zdH#4M565CHv0CY>X3V|L@jK2*AD+$|z&r5&u&sAjys#fxAaSz|{oi04%tI>LrTO;mTW3q3Prmi}Rt` zM61v@|CAjb+HD?V7Au9pDCQuZo!6=8qz*qiRAiSb-8Q++mp~8j`_e82&ix(E1n~ha z2i&DbL^2dn6V1sRa3SF^Pq39JQ|z@;AcoFty_Z7YS1*=&!omOaask~@9+FQBjZusx zVEF?y;ztzI4`bSTe|E>14*d{f0Q`^L(Yi#@EZ2fDZ5eu!! 
zsii|Uwy|^l-ne*OzOWjWc5jk|_3gxU0#%E6e0oy$rOMt9l9SaQwJ%}O+O&55QZidK z|76i=M}hr;!BNkHAUQY-W0_w9O;d(+5a>8{YS8i$c_fWf`h$m>U4Bv1`DQ%oZpvpK z6o*?1XJ33v^?G#wXUYnVcmKX_a$)p-1 z{k8SQc{ylOn7I<^CT6+9vKBW7gNtU_H%(dXCU>d2{bkZsaNV=5swV$?Fp!7n7jA^R zGd~WaDPT0`1<d>tiUO^?^p{C0trEN1&i z!!gt^JEYihNbp$)yZ@!GbiMM?}(ziJT)#^JRY6W zZG0#2V*%80#x$1<_ts?1zAFoS$bt*b;eUZ zEvs#PKSf`}`C&wcma+`7m0k}defbIa?Bub}}ymk=;q-QBIVW5v|qA8| z5*pLjqVF5gs35eE{!T9BVGo1+G0Ty51%Du0m=W#jmNiS|%pI#;$ zDXRdne|p#p!d?Q+16P}z07r%T=)GHWZepT%Xqd>8e!5bsBX-HeO|RvE`Pke_*ww*Y z{SP5{ueWebwR+T)DHI3BTwD9!H9816K6eW5Op)3Cj8c6iEdBFQj%@M;)3$DmR|AS#;!22n1IIj)FZ8zol?*LM=-l0OxC<)weu!i$37Wi*XhEfNkqJt2cU(wN}wuE(Bg{B;_;{wBNl=iCqqp;r=51SBflM!m%V~Gk5L?}5X-@>4Vi0T20 z5_IPVUPXi4W);uw@o&?cMo57zW=XD?p6rL1*q6~e{@XD&dSrae6xC| zkXz98f_!}$N-@?NHTeuS+eC53?H}`9RPV`XP)mtj?Z8k(dKv-9(~H}8$}M`2akN& zY>xs9`voxOq^EZtm&2*#^FQY+19=$y2TV_+;F-W8Cd#kResMr+YaSowd@|B*YX*#L z9==P-fwaNiFnADLr9lS_L&B4lQkdwbW#{4==03}cdt225!!*|D^L$d+mtrC}=gsW_ z&^U1>a)6izvC6N(7<3I|rq+lZccdW(c#DmB_J)*pjHMMRCA$^IEK-jM<2+3PgXhb` zn?(O*s+GW&<+jh3I+azr`E&nIs2bNgEqkTet`g+=kY>rYI{(Y9zP;6TBIFXqVc_=r zQfF-{n@J!3#XE)CInOzr>UW0v)xMd&7QNMR-!EOzGg<$=czba@US>5N9E0l@j9I!5 zI3-SfKoqjilRw{LKMI^+l@yYGgG6&n4HN4g1AL({}omH@G-~V`1fByJ!s(y1aJI0(+ zX7?&UZOz*exNt>UTZROim&nwf^xZ6k)% zPj(?}HB(0tdEV|!I12`XDpg$7tU=&|SpJg#UQe}>z%Ey5##}VyN2C@<=&{*CAEg|b zHC&SW?!F&h$A`pC(ssXW;pFR+gPAgPv;WZWr$mkOEE-&~dSbR<@~lo|~>!2?&$b!POD$3M_ETAXHK|7vGbja>q8WH(dw(N zpQ!BNPeE1Na&vMUrc@SG3_)6EZDE6zjB@;2F7bU8nn{g|OxujMYZSfW*q#og`R(WY zGkzy3TK$Q*yco$>a=O83B=!l+UBU)HcpGET~8_s)4M%qPAMuLdERl z=D3{`w`Ir^D`kp9k!MH_P-6WkMH$*RCFVldSLD{FG#b8 zE9)G|(g<-=Z`cwjgRvGg9f-Xi(-?a;%g6FLy=CLUQED)A@j9aPm zHD`lz)09j7PS#2+QlO(C8nMlZ|7Wo^YfPs7Q?K5aP@ooDwE>nUTGP8-os>3x4@EY_ z4Nd`KvKt)r1q!m^fm1C8Tl=~8BU7dmq zPMRB5(Mp3>sA88t<<_b-UFNq`Kd=Hp>5jjj%AHW#JqVyd4f)s_Sl$sFVg1n_$*l9i z&nao_3byjK`y+>rJrQQj{HHp}03`4MpZR*`b{WLEUw1Ym+AZYTSWG0fz@;4pqrL0- zjv+H8R*X+UT#!CUn7JKhLHG?AsJ&y~!E=4%to!W0+WpQCW~nV<+Ijj$qqjYaQ4cwW zq_-h|ULb9iG@E*!6bAK^;I|4c_-t2)quEc?9+&9zD+L%D9k9QD6Zjd`ky#uNh1`uv zPlR$??z{L;o9iQn>P^k8BmTS>u|90DD(I({n2UlqF7(*Gd7-~``mE|ZzePtTKgZW< zJa`kI5~rfkM}ywCbTo8rZ)ICFeNsr&&sCT>q!mRAfUUPrM$FHi^B5HejOF_xv>8t) zE}U0C8`MID!=6}eP$n^z*lwXte!gQ4KGqkgyX_{HN<*@K;#r1?RUpv>oNRQ1dH(bN z*u9h2zie+<;7)qV*lKGW`ChY<)c*df=Wa8(mVEF+s}{2nfmYl1Z;+X9C-VAB^=Gy< zTfS2IYe=&C-)(F*BOk~MOQS02XSQt{Gc^kf{!!2HWbiSo7@N&>M;3|NOf?(Bw)bH`2tJjR~h}kcuq#(+F~La|HA)Mhu$VAO1Z|J zFM@4s0sDn}1SvK?TE}*`cY;#8VG$FNpmojr>wW$nbI>7Nd&gMNd|mJ{&+l35FL4#? 
z|FlH6nw5(KK8Q7BLE2B;gLkPGGO8&H^i_J2j=$EK_N;V!{NzgzuQ!z~m>jNPCu3#U z?oxdN8!KUae@lWvW>fg0wIy@zjf5``T+xVreW|%Mq*c7JTZBagp>-c(yR5b!nvnik zI3B0WV;H} z?c~s?ny;V4>#L(<#+1&E8`)gjNe}a?U%mmwG?2v1=#I+ZeIC3uCt-z&8g1?55N*J} zI7*ET&#RnqwjmAgSA8>CIL_#=b)^sDxvs#NGs7*A%WoflCYf8FjG&W*XUCr3<6hVel&CXc7S z%qFi;++EAg*GGMPtF@>Q{j9;YAyd9RpcJMM`KsR+_I%&`XLi?Yc<|MGhFj{yhyNo7 zJLbG&Qw;$TjpDIysJH*ntNrZ}&}>V8rNuF zYWgBMRoHQXCk?MAM)F7i9l;bFhG0OyTQS>$vRY~JyGWC*SxSO0VN4ZPW3r3(UXPp@ zM`UIA)|zW$Ozu|OmO3x{Vz;E3c{}#e=UO(W)G0^J=s*Ht)THZD+g#q3B9Mo%40|&z zj-}T&B`AS_9+Y=5kJdPwMC}kYzH%T#6~vJcUkG*JWFn6@g)ot`$O%~PJm58Wupz-2 zskhhk!%8X*UtRS0)q@-!gx2zU?%B+gv2`je~qChILS4K?~obARdgP z;R2t9@55MifiFfk!JFf2w%*ra-g+x_Y+I-f6hjRfS8cOJPA`EfR^Ya<)XCm@B0~IH z@Y0fD`@{37k-+qEQ!|lG^%%E5iz9)ZyUKow0Zk%dd6w+IKQ#32JX5dD-ogGXw$4@9 zb9l9N5`o`EkJUcuPps0!F%S5d_0taIF{WlUQ=M56Wmhn^2}_6ZyJLcYk(Xy~;wooj zrp5x4n^o#}B(E!PZ!uor$4=`Ub<%%#ivO$0Qi~NUA6MKnd&h00q2qn3g3^B>v%@2WyuT^oXr7-xl7ii`;48;~R2p}my{KUvPx4Xo;6T;wcYz=LuUp zU`ik$FP|)MLKEkytRtZH13iup3rvYmdgP~lg1Ej6!R5wKLKx;BV=UkUxBRZYV7&iLM?%&9AJ@2jY(G)xN&btrJIA*(zV*o z3iHClc&DYeU2UvF?aS5nZs+$`srrjy`OK$I;W(wB%1lgQdAM>?X97dSCGOJK ze!TBOC-ukprzf}S@PIN{XSmG<#}}(I+B&2}yXoa5_2+nSekG@sz2#=6sd{20uEITZ z#h@f#oxe}OqAOUw;fl)iu3$8Et8JZ9uH}y}UGf_}dsTHGkV8sZ56o++M6}kgtf?W- z^b`HrZ>UjK!SVqTM#jNu5^yQ6DYFrHYh%(bH$Cd7k2OQkFQ~wGt7?U|g|mxS_utS` zz?RQiH~kUR2^Bt~3aikeus}hHmV@z#!d6}SU zsoj{!x!xnvdi(EB+PpYvf{qA|@0}{sah0bTU>WKxB5yQ282>hvYz5@d+ z>P2wyFLKx;cZ>TY(!71RKZp((wc%tFw}O96=AS7ynO2d4*$gjxGU7%@7HX2^JT2Ve zr_j@pG~@RQK!sg-mmQmJ|B&E&iO(p2)zK!-OH@T;oYZzHKY3D9)SFlAoq}aQ?bM}H zLZh37edE0WtE5IPavOx;htp0?=OL535zQw2cQEFbkG`TfiE4=5HpCWbu&3BsKqOgETE$o|fKcTK7Px|Z!h@=(CS#}YG8 z@RBtsjeMsfQJ_5$n(<(x)gw`odt=q6Z!R#kjSZsV0u1COzR|l%BaGvc!)T908eV4? zATENxeJe0LVw4yS=TJ%zA1ZV#bslDD6%Vr!Ywjb2eO)0D)U+$pFFR0n1 zz-hTq_u1C0=ulwT$^=t|mE!abzLG5=o#Hp8V)hs0P#pJ35GYEFc_6qwj@ynqLHz0$ zXUUT{$g1LWWG?DAbMw?9B01%vFRVhiFpsFBfiEVb7P`nMM7J~T;z44A8F)YW<#Rc% zm2Nr6J3M_p!$B5;yGunyR-PDos{_Urv%2~N>CEIUx&|@nv!vwbj6gfTzBYZ!y=>yZ zE8Y-G6qK(Si*)N36VtC%0F3u)PCQp)PrlKLGdtr~WL}rHs{OUY3LCcO+`1KCQED%O z#$jbXC!)^}*sZ zJo`-2WOQmEdPZ@=oa#SQ(-Yo`FFTd)Xk&{cHw=KB+2ziaTUW$?K=$>Pyl7*IXzR93 zr1{@_C&67;A)`dd6A)17*5Y z8L#Q%zJr*g_)$9f6K)u0?-%b^Cy``__L9%9SV{1#3KFciKZYsNf9=SJ=)2ojkrz zSgz8?WC>|qJf1(rl#A)1@SvGn3mM;WKO$^+90GiO`Dm7J+h!y-2DaKBF_Eyk+RT2Z zL(v2*(-KRxmIA!fiH(VT_3!UsI2jh58P}-Qisc~zh6N9DMsjY^ZQGvyW@uEF>Cn?C z6mAU~Jj=N?xhOr8FDjmH!nU;6>sqABx^KRFM=$ zXi!FX_9y0^cOkb*d0NEr+hDJgq4k}+>)V=Gkovg8kB?X7u{|HfteUf6=tr>~kib*& z2q;V}C#){kpd{m^R;vp32$k``C17-0uU0{{U`S{yCHO&aQWdsbfS{-lw*G-UH@ojA zSJcOSgtC(au9mb6V|gt^NQNHHttN^O3~jK_a(hE5W`_>NKN?wkR{54#g{O%$TZn8z zZCIM8WLlScEvh~5|7lsukuR@fPv&E{!e;rDj9z@LC%4&9(Q@-|R5hC;Th!^e?PN*b zZp>!Om|6!aID7PwwJj+i=;05@PWtZooqjLDjd%|k_d7u^^&;0!gd%10N7Z(6stF3+ z;_06hT;73sKXDjc3s1~UcZWAC1q1W%fb4H5Ly~rZ_kvr&sahhVkRHws} z`brD#l?xQNa8V}2CCvrCGEmWdUo8}1@4YqKVES681S28TVte)6KxGtLlc5&y44AsK z3X_|R5RaIgcv%h&c%eL`{a{liNvEeHwyaY0sMJXER0*vojV*OfTrZg$engM(k%B{9 z3MH+ZZjCsTvd$%M&4vUCFr*(S{#dFh1?Yt7z$qS1*DWuHgh+YH^jy`L3rtPAQwr+c z-&t7nf0aD!9d3b*3oxiZ61X!$OTOtNokq#o0kaTJhP00Pa9kOpm1?2BVY(^Iuz#t8 zWGC$|h#BRgZd#tHq@}obCc5<{NA%C`_1VjnJRfOkw6X7>l1_m(cK>Gbo1iWJ`M5k$ z(2&2i@Q?qzBTccqA`d1L{+!#XoI{VeW{Yg?&#B=lpt$KlQ#nAC!G6} zlCy-lK{I*$ok0)mJwq;Z7}6Tq%tTK6;tr5tD!-ZHWjy;snJVJhuDHwceY15-cam;f zXV)uo6vqzsuvbKTEk%i7>=j!x)VsYbQ^T)?Cmq~E{#>;GC(IzRlu(W+*Wxte`Z*Gq zd)_2zQ;UCCfEr+=vN0^dU(g+oqHd!!_lgzfx-h1ZIWsT&B+p}|x4m@{-vlfe-dam~ zu-WD-&b?#1CnX-=&E-5~C1L!d^P4YvAq*-0qv`L-#geC9Qi$O*f5q%ZTDeo&bUgth zppz*uTki$mNeYiqdt&bvake!OW+6kGtHSazay8n--}#{gOD-jGdY1)dLit7|Df?pc 
zheX43`x+=mq@Oym`Fh7|%A4mO#KfAn{{-V_yZ-nL-`M@obrL`;LY^rxaxI$l(>71p z3NpwQP&?tLJ$oo$@GvO0#tK^wwA~XGS4u;ET*T#7X`UFD$iM^C^9p%cP$~h2-c}dS z?C#97Y-6nZY3HszSgx|SRGBbC$FT~m7%iO1g#LZ_@j1YwTGnexMqd(=l()JnspEGl zSa+4(PZ8ltc9T7yL8E@p-O*U?iT6uB=kd|uhc!|PEj9Smc37wuTEg0ff-31-pdb1v zg8Z<#AD@P&;E8UI-`fc{ip9N?$W${mo(oM*?`v1dHBBv@e`;$%F~d!cD}D8D2>(EH!|2QcR0tVCA;O^15Eeq(rUgmD6FaFTa4eX}&fPD8YSG$$7_=g0 z;jV=WiWz2u6+i<6OJV;E za>XtOA~;4&!`ZvdF<=+F!QAHAAKrhg`JEYUE8-7(O;Wuf!5XIM0T$uD@bX@Fl&Q0g z1YEr5`};22V8I=?p}62ga*3JvbmY(xO$QIgU^}5a|MwTCC?Ry3;G{U5#ClhC_h*O05sSkolgM<&O52$nH7Yl9N#h!X8zuKhOes6`V$G zgB^mrK;Mx`_2TE~lM!3Rtvv7{1MvruZ*J9mYPb|!$QT!JM$|C0tB!xt1dOMO?<`76 z8C5|v8@kJWADW?H4v(IzYizW0kbB6)iG64!jv&s}A*nnOvv6t@QW_mlW{@Hg6bjpl zc+2g!5#&xHW3*+)yPTKtW%*MuPqT{o&Q3fQ>iL$aa`LWwv_>A@pa^8Uf042!0ynl> zy>MK%{7*&j5ylk#K$D*ruEGD(Cub{g*(+_o=cd)(a85A4jF-yjb-x>KeFluZ^T)H+ zj52KwhF2YbA#u!X;9k1!wzPDpmniP-9yBWJko-PX0hk{TFw5=pXp}Svk6+dl7^7qv z!BFN+q<9WMz}@EG+xVQ#4u9)&oHNq&rFA`lLpeuUV^vIFnN~1F-*IUN(~;jvGMv0t zzB+rCvzp^gzB(5a&V3>jjMt!(Vo_2lj{f-bi3#+V8K;s#QDH-YxW~Gle;M!U zI%c~}q1lvj>^GAlDa-vRx&LCcA2g6$SQYv`kl8l0E2u7j&utG}6twIT2PZzZ`#b$G zU)6`broU_I)|stLai$f~MF$hpK;A5n?ZdgwZ z_sYYm_G|1cUw@kE0|UlQ8Bwd5Epw9Y&S=9}tGycdmrRpXg|j(0jrd;9Vec$!g*q`$ zUGe9B+KmRsp)M=3!?rkw@VGDUeeYxG)fG0F%Db)?WE;`4EsH>3kU@SoSn zEyp`}Lm3hOGH0!=*-y7&;b&@|)H{CN1JP4R=D$p=tjo_a(hfmYgOP3?vSuK%a-sl_ zcfL^vuAFA;^M9SPq;-Gs#W*7F1-e@CUFHQK+G=ooTvuEM+IL6dm;Y(#**B`GoTUVd zZ3O!WxXZMzxAEL6>5uJ6a`i?8HgecQA05dI*Fqz+zI!|Oq8JwW9&FZaHx3)z^4OEH z-qlYF8V)4$R$}%LV+QAWzjD8gdQA!Xo$g~dF0s^5yJr7i$;Nv{y2(WGd`d6!&Kq-) zUfX8T1UB1SPXqByIky;61B_%_HQ@C>RtKQY8zuD3>Fl4k@0CT?AcL3MD}U!&TLIbU z_&GXtl?dXUkdFz6^+9qPuH9-=16|0lo#7TE zJN)}qkDx?jIJhtZt&Gd0g%NK*s*r$r|JyabPZkGR2b~waGA=9gv@*#IzNy#Ck5d{& z4)wcoEKfX?e;91zUL#E+H);A#0CCjxYZmvVGwFgZHLw5pHxnDm85cC^%aIw|n})3N zffLo+D(QExW^ zBj4ROxx~?s)-@t#v5i>>LrjfV5tMp7_4#HX!5`MWE#fEDNnfD;m%z=jtUQ$>fWLh47RI(%WKZopKJ_*DY2O#dRkFlCcV(7^O?^i~sKx zW}@JodG(fcD~UO~tyw*dFfsYB8^MWIpdu-qbaqR&>z^#O9x$UrFi z-KLtilA|~kuX9_RVkFO-GWlu-<`S!?*WZ#{FAMblrr=z6js0nSp4L@q-bVG9k()+; zCt*eFvCq7|XOsi!kaD%@P=yPw9xF~L<~5xv9-4km3>M^>Oxe=oe?N-(=*yv{QPcH= zy`(WL4a=++TTELiG!R z{7rvFnY~TW1hBu-8h;rGnVr5^JM7&W{2`%p^V+4jpjuk=_KrmeB=z$bgAAA7+P>lh2uy$0YTc%?@D?I|<_mblhDf=Rb=Nf0EZ0-cnP|oG6|&<;P?e z$#e-Q@L-DOl_swz%{V6`D1>OOn{q#v>r-XH&pftfrBwFrP0}VdY2gl~w~M!YdcQvk zUh5};uJb20h8wE%Ph3x9?ch}9Z>v*^m0#7;9mj#su+R&=Hw1=?dQI&HAMzJzIcjCn zAyMzEe`7IG2An5thDuj4#6$eF(IX`@%4t)xGisI&&W+|Wt@||TcbXt^;EKQY81zf_ z&nmHcWiG{5GefrPIIfl=`qLDWhaIr~r(b5v^x6(QC>WI-+Tg?%@9iwDyT5#9iQj59 zlq#TH8%cWl1U#tz!6jk5#yLNQm$b0PhuctN{4dOU<*Du6)nE$oyHNVxcFZncC zd)lzyU#UbgVBa@EOlQhdN^NK^qW0{BoL2Rh=hr5}Y-LV~dv%-@_(Z+)z3;oWw3L<_ z%$>mVu{Zz>LG!+VKkyI!%}b94mtaWBZL><#!#l3N;#78W0xfUt6;;$#y%+AePeS-2 zw57R~aW|G&=D?E`zlp~ri_Z5Bj%qtp|MN-RvZn}I!H#i+ryYG6U>Tz1R(@RQrv)mm zd&J+=E+I7QV-WO~Xz4aidu~e06uN?K!YVX&uWItPLpsG3b&c=Lncx^ z-zQ`5_hpP3P+Mtciu8a~G81oUL^Hl`U3PhYz$5}5^3SZ9Ez`}s%dJIWKPI6)qfHF zY-TiS%3g+6liKyBvJYpG-}E))0=`k>SY{L73kA8xhuP$)Dd(l*DvnfRe=-hIOJJ8< z8n-)C=~AmGZKg&78x855((Qed#}+KxeUo&qW>lUHBo>i(?iQJ&U$)q}J$)G5JW=Ad zjIHx%JY*U|{nX1p&vG$V7U*rS8~O5q?bi%Ysvh?5EmMz4LFl zM+i<#qK_}?!X7<6lz#-lNkq&8c}{`i9@+2#Yb&v8#xFFY&Lhj5=gcI!o76>KXU(3n zun_A~R%z;nR3a-*W2~m1;#^qCM<#%kCwb^>K$hS_sSvQ-TOR7W#}4S_dSNStEcauu zt%LYn@`KS8DQ;v#Q7OBiTI0?I$H0DJsgPlLTP*ltDrEWscYT-&InOs*&MbAk&thBF z6+(Ll+h~NtmHA4a`uOMPNcNBYc5rPJA;fgeab&5Ua4$nOzS$36j3{JF1mR{t+w|j)(^o>BCxkR(;wMVYv^cRiowgZ&3JxDt#K#^vkbmrrKf|J~rWLHV5{?nHmId4+ z5HWx}*zLFMr&3DAvx0H(SvN6wgl@}Pdu3swicw4@)+(2{f2!Gxu4#Ji+}=|y4lC`@ z9g&fZbw}9hEROBMj~~mN^F=(y>}Ec3L=Sx%MdE5(cc=>vCL>&68{No;BWV=pZcvlf 
zJ^D!kg^e<}x;|jJO)B$c6CqiytCy2l`Pq7KjcxHyvnvEA=*fr*^w}6uQ0cs_sXpaE zW~ORV#l;$DGg_XisU_G?bt9ET?CWJMP>CEYZAzZ8?y^80!O5DY9mwP0vt_|6>T&>* z#(b|;ZoB!J95$~-)dYVB@=%HD1EI~xf}t&Hp)-|eJ#QRXL@U^;>#;{C?wh&*#s68O z1FTMwnRBgs%z;3c6}WW|hB}XBBG?wg7Osyn=e#~xclJIPF%<%>Zy8)ZcJQ9y0;rF$ zrS1eTgBzDw%Im`}D9=;0RLGKHdl4_jmHE0(|~ zM?z)CZgKE6KNI)pOM$!<3}KI~g)V3_x~gE{5%x&0{s=&}u`luD{%uX*DE#p@3Ai(c+)iSwYRNVu?!mU^F6r$~{Qg>B>`6ZY7F-e|E2KP)^!*m`r66${m7$&2N# zu06!wXIJpEIx%`jOQqPBr9${9981QCJ{o9iH`iEh4hJ=<2EdgrP4m5qe!v9nJTWJ} zjV1*`mFO>vi<+G2*^IJ+-=j*T73}Q#V8PH>PQIIaMCQcFD7Hs-@atVl#kKwjm01;x zMP)Gx?UCRfP4bky0f@`V^G!<+K#p2fu&iLZE&p@hHJst=6z5bIWVssDwEoW!=bhL1 z0wkX}^56m@5pXdY?|HfE#tC*OFcrd~`#kR)x=(R#T_K10w3La&&0t%f67|RG6#u>F zDaw*xU*^?q1-o&u?rcLv*c#qoB2lRjwL|JnMPIycQ9j~BlT#qo!5j$o=#*?m1CW%= z(fI)RdH~4e@)u;c+9Q(;>`OGa#pM#IKm_s(PB#9G2fuX%*sXd;-5$wp$xQmtmGL@z zgjHCb(-oZR)$)q?jD`is0@ZJ!iDR`!H?&JAErXM?!mHlP56OW zAxtlREVgCaxI}Ok1jcg$5kFhNq*#Ok%BNId)ibN)a!E(Rix?6o{u)KO-GM{a* z(RNBfaosaytKDKUt$y)imM*D#ejku(GR2B7IuQW!;vAw$jbG!aAiWLbu}7@%u|0}g z@-;yI)NAH@RU#^ocL$&B*0n6?9#P4=JQZ7iGUTtz!lFvFwi1n~V-Ge9hP0@5@U6Pu zt7g;`s5=@qPn+>DUn^0**IB_Tyqj_@=TUt1d8i9z&ROttubh=4gG@n*z5_s!omNsj_DPXz!Gb7I7QvEu-llY-Ni z01%eLLnSf|VO3c7;&kw(63K2!mdl5klPAa%Ktz@;muSAA=eCUXyg5fKSQYGu7+))4 zrJMwfH@zlI7M94e2AR`lB!H*~+ef&Ny3oi54=y}UkxxsN11`pTa2MX;x+hN?>ZLe; ziSyo7YLjNv`)uMIFdg(1IlVeKPw|-boJa3S;OKx`u89y~Yxn~`?Ck^uz zW9EAioPI+hreYKVh#h=RuTeE47u6MYc~&sk?eE+kJ#@ndbdTt^Sa7<`;RBYZ+N`M? zz@rr&fLN>>SBxUE94Na{uAcK8vRo^#=DIIkHR{+_kDivoLfstUL!NqCDm3UR+8Zqz z1MAAG@BICR_3Fm)=2!}SSn#71lnNkJcw(s#W4VSt>I&gD=CV%F+EC3?Omi=@L7ao= ziRH3hj_BjL(t(-2OPT7`5r7~*9J;5Me=F^28&)$~aJqcgDo%e)Y8P{&+d?aN2ira$80aO|13DRN^eNu!8K?=4Wz`oXu#(kuH~Lc;u{LkKI1xyQ9;yyj5!4<$B#X^c3&?M`m!xyj)jaJx-hx^%s<7Y%XUnXn0`` z+aU>jVLtln2XOFf6`a2E&(A<`I>?6`m8bf%lIVI|)A#!K%2WLlmB@P(_J$p_tF=eg z*sfxfF_2d^qa$J`qow3&1v8fj>8(9dTS>QG1Go8^8&n+0SjUMD)|V&%={M2I*^Jiu z6TddA?NEw&xte1san5yT3d+3|lFx!4QO&uFJIA~&!t}FVQx^~`X2}cnt-SiEV_Ram z9$b(fxd$Z^IUoz_LQhMnRF4Ckv5os0=Zu&*ccpsx(<3a8uyfv2Owa#%=dPJEK`AaxkT)dp7tZiGlNSL(A34e>_2$o zd~Ds7Iaj@0u!)f8U15uLia;NFib{oOin53L3|nNmN4br`f}df&9ByKT-j=Z~t5Z~C zgv5Dpo+82){xn767nyE!fV)0yX1l(0(QAzqoawV(9X^|aXwuJKK6{;6(?pCpg_ZGX z8|JBKVVMGPDiM41VX$E6d^1M^F_*YqjqLz=9elMcI3h*`f_voHEr=L`lglMKBE|s` z%7JK)Yzh>WJnxZNSgPDk%Ca}aBxs&K&A%DeN@)d_>oT~kQ?&CQr$Q@Q@}8&o%zHL{ zxx6jQoO5JD>t?XrPeFOGjcOv~6lHpfFMr|#s#`Osvq~Q8o+`G*ZFFJF%BxkX$MY_A z!HD^HmOK_*SmvA}@g>B$>7=sLo9*Vd>M7V+K6minI#gzY)_a}#(aP$be^dWkMto-l z6XfG@QgM%{5=EA4HB(NG7^@ir%2O#g9Xd$>X|b_Q%*iV_8FP}72gtkEO3cC3B zCdh-`+7mEaNS0NMazmxoe6hXD-6Q3xa?P30k5>klO@vSvllFZCaqb3Wc_SM%Uk))_ z57$a022+28i*LwscX^>?wCxQGQEjZn4TB3VoAg}f;y=f`tBlx1)oTu_|LQ{?ptrb3>C-8GhL zQ(F9DEhtxjjBwQ=H?+pL3Iy#wN`Kj?+_g-3es5SKNEs^b{e^=dE^Q zV7cxJTQl4kSc(4R`BysFSbKn80wGRT=o@tug$0iz&k-f zkvPW(Wu>R%=dl?aD)A_j=s5TQ5S3bUQ8jb&VvP>|v8f17%=fm8@E&!V+wzJkacoHT z4i4&*0x=8AsYI`L)R>b~^2&i|!Vm#OX|J(rMuI#5#N|K^r+@D+Z1~9KsXPGTAoHbN z317-n^a@Wb|HiU*t5`5kkz(aEe!RpvM>cpYSK^%N!i6o>DT?Kqw`E}q>;6pT)oLQN zE+{Mb!RJ_Bjdj<{Y_81LW4U_4T9vMD?mTA|DT6!gPS7-_LU?`V`m*(Gie}1fUIGOI zmB=Le1eU}9BNnv}3d9vUh&gGGW_%)$_x1=NPv*pep)WdmD;mhrJVLCpL1x(W}G@DPj>rLFi(ZxWcSEM)LHK+`x1XT6OtGCf@8j~s0Wu= z>9H+)TIx*mMAgd?rkBS5WP^?0W#Ng1EqYtK5j)sK2-5r@Y#&DlEGT0N%EFe{ zjpK<#)+w4e=M-g@`C2MO8C*Axbg1VkEO2wRIN&gCiEbM;5UW^x?q)w0t$3|kb5zpPW#^|7uH zH`i~$KbWVuGyZ>3^CmNi-uuI@PVuV37E#}ur^t0@@3ShqT>-?X*TtO^%P8p6KwK#h z2B+B-JETDP?$)tuP>CvZkdjxP$^c|K07OKLZ-`mGmy2pD(ehN4z@vRtDnhylEN5p>iNB!TU-PG{z z{2I&Px=AyZg5vwb*86;ca8Xry(kuA!o;c!wEM;&NwhpAZNi!J@A)R2k*>0X9!&cb< zO%a5x%P9}NREXAHVT;?CW|L;#Qmjy)s+lba7nZ@LIImL%S8Pi)0Q+lvDj|2hI%YF+ 
zqPkNc*rTeg?r}}Mpl$iy%!AA&no1Dmk5VB29}xgV9&WmHZW{GvL4*efj;yUS=R196tZ{0pQ!41eG=6~$aP=N z^6Fz-l!DUrSvTUNIOjY?Jp~Jv3)HugvEM@^V%^V=;^rFLf>`-2Z>|elD$w`I!is%( z4*;@F!O7W-Josz|AFmQGuu?Af_ADq6<`U1k3I2T?d|@haQ>klNP)eop`cUh>x-Zx} z3bOp8+!jr*0TE04Sb3^ZC=m655I}hF<+k_?!JUwd%z z*6I}bfNC1q7D+STXwkYq8tV$Ncb~0ZF37f4^X7IQ{e*8>T3?aX%eAaofj_P81Xa5F z()DzF%KTswT?26$P40Eu^Zuq1r>Rm-0P?9>!63`JN8t!gW-}Vdv+(Zy3<{bWI${y` z0U)e6<)Uhgpx2k^4!-vU%qI59yVf7q^XB%5_K16M*9wNXM>(D&CY5-uy33Wdf8nr_ zF`Hc>o;Y`&qI;vo!j_r{?E%$G&F|A=L7W*2Tgsek-SzG>Jw?}p>k3c!*uiS+eGXn9 zJq0VaB~OtL>?OBV5H7}rt>;Aj@WPQLKSQisQpKgZ=g)`vqFH`;e)I;YER@!Cpkk7A-@ZiHmnx>KBcL-(yaK`fVL zzSfK)6TNtCNuS zIoPd09%Q*&D$zI8s-|gzJoX46|EZ}&F(<@26*?&2%i_pU59E39(===z_Xr1{%*g;m zb7+-BPZfZ;octKb-{yb_dkKSy%xk45|I=71MC)!8f0ScjdEUp>DYDF$>cTUlVxTUd z>d3Y@n2g?5=XN!-aay}8_%U&A*9Tz>EcZS4R!FJFk*XW#G3!H}r)a&;LVbB#3R~99 zX48CW4M%ar+RLaTI6)LEzFaD+Zs)IyIRQYV68kQD0Ajs5ra-`M4M6TXn~|fnG<-mN zL_7k!Wj~d2iLUj>8-Jz|b*eZ8$cLxV5zEWCvXsGzO1_v1grl{bN*tj*!W@VVGRGd# zW+cdue(Tk59Zc%I@)mRJ9HD%-K#GlUC-Euj9RpKsmxp5W!n;YWRL*ed;Xmig=1lw(tCRcf8&6o^!!Do!29S8eB_`ZyWo{atsV66vqI zic>mb$|dq11?_r9d~bS<0m!R3pt2i2aIZ~|BD-P~Rh$Ck!&Gr<{(4@%f|KTFxU|;o>Jw`(b$KeOM3wPYefHv}P&u7b@7=*ah&`eejC%wCc@_PD{Zv+rVpcGT z7!`>2DCuXY#CP2uf!zu~4CKjf5$pcS6*|Zs0f2b9L`|<@&zt4snNqRfl>P1x+#`X! z_9$4?Rb9=gY!K&s(#yex^_*kf?Z>;q6U!t9Um|3kBG;X%5Q)V3CrW;YZRGgfpg2d^ zq8Y`^IeVY&ZJcHn{IHpg;@o3foLUg{6nEa@TOnY%TdF@E^$PSyyctm)&)GeX>!aEW zf?1)@2I3azQ9vk=Jx_S>Gg&?20mxBz@EM%mvx9F1Ly!y^ADtl)bNOBeAhfX9JL)QY zLi;@{m_=QhFl1y|sO-%+WiiSabFzLaK1oKmMR}@yRbTKGW9*SeYXyr@_yo-2O6<`M z#%xwQ$(e{b=sZP^Y_Pe0Eb|R_g(th|<;qhO+cKE;GUxVOJ=gt>z0tDB3avDh0q#47np8uDrWdt0mxBD zmIHA^i{lJHf_W-L{17mwyC7m3t@UY#JwiEJrhG3CeytAvR5yG8Dv_Tv1@bD`m&nmt z7MyVKZ4NE=$N>lww9Ltah&g&)Mz&k=_=&LOwB;i*;8j z#7&yvyU!ZBPt>>30qP22*xC=aCAjF<3)XxJ_FnlmhOGi+62A>hyq2;U=tIIW@S8O= zqkM43t93{+05ox?j)`| zyKz|N3w7}&*w(M;DcZU-6+)Kl1~n;z`-Kf~Cd*}cHE%9aAJ@m`%SoJHumT9xUO2w) z3(&$UE#p+47e{lrsC+tpCIN(%a{fA1GuE?=SGh#aW;839`Ci>4FPDf#mD_UE_Q)%T zw^(Px2Rs0IUj;+lBfCezx(WAQq7_Fv3rl-sX|Ii(aZ0ujE%I``y4<^D zTVT06T2!Z~!V`PGo2d|7pE+S$#+O$EE;t#|!d7sgZG8Wf(09GL^S)`Ul%w>M`^1<7v8QcZo@%B62vnjgwO-{_GrE=q*&_^2Y-}f4{sqX(W^_GovKi^!>XikX z6->LHZi`vLA8Gi2sYC`R5iyqWmRLD_ZL51P(aTfOZFw@2W$ik2mBl}@Y(V2kRbKtn zL?U6@ST4I0Ks9aMWs0cqMB-czs4hu+iFLm)HY$Uu5Y{PDB+}cmumzSoQ%_6b`e5DT z$Z|P4K!b#74JY1_NMzv&#L5bxJ}X`Sc`Ag3;sd@IENWK3d*1&>>K*q0swt5EH!Mae z@oYxssa~}65lVOPKdZP&1;XJ2n$2jASm1L7CnL+zJ#aEt}yJ;4!;oKMa z(;Q3|ygrU?S$Q?XmZlakY(??<@R2N6GUnB7u?E10-{oGFWe$XIqY324t3*}Gu^8pG zDtx38W!KYYWHbrdjN?>dDXSTazO1#Dg|S>BgA)(Fn=th1AVHp~N!Cr&lieaXu{e?k zAD=^TT1NoE=iDH3roF(unM>pz?f+6EfjYMg=jO%%F4*hOu*Dbgdfgi>UMd9ZzA}gF zPL?ZiZUeIVHT>PKkC`H3Tb%HtZ5%^=rJ!c2Ud}|K>O--O_FTPn;{-=#u)Nwe5!$-r z`mpeXlOcgr6Ng>cIzOb^3!I>UZ%sR=1yFi-JmTxjFNbnfg~dEotpMaJ*^E*k>;s_! 
ziG6s_19=1|P4FjK=J>PkQ&hQa`CbGkwk&MrAajSxbpUzok#dRHBRR{S&G^ieyc*lB zV954JDHZmC;B(g4j##(s*?(E#JsYQTc&vMvE3aM|Z4amlE4A(r=U(Q#jhRH2SDyvd zWLF5pxhDL)aqF(J$LxIu%N5&t67Vv(db2`xikzaX3@!^#2-9M@yWC?3D)_NiekKmJ zG8JOtoRzNh6rHyvS6bUBomBm?&jSz_oB$xS8TD)m^HjlHB7zf}rmYWTs;NY}ElwqF zNreK@__LihqD~4Vs1nEa2%nQ(Z>UTn#>#D}#0mCViOBMg8f3m!4Y}B(rXB_-P2Hds zOd>Y$p*2m%TTvJHNb9c5`R!_ZK$TBRsi_Jv+fqJ_rq`GP;fpc?d5_&%E^)G|8SULsS1`nTv^#Vv ztBCivGXw;*p^Axe}Bzi7v zb@a3|Y%vu=xM0{aefrlo)0Ul`t;vv@Rb+fUWwEa{?yrf;9ec9Z%4UT6eQ{3F#Cy20 z;Ug?6E9JaK!n`j4Aj|&f^1Ty!XxCc~ME6L0^zk?=n1jrjr}`DivH*mQ?U3GVE4j`I zLuLi@!QC%hb$z+1L>~O0ps|Wm5HZ>H44uXOe5U7{HcdupPq|!ENv!PidT>?k#IR-K zNL6?ub#ZH9;A5=2z56VfZhR0nvpz2bBJoM^MLe4t&Vg`bTb@Y7Hfn0P%1z}d`Zy|s zEcZC@@&Qy3F-}IwWWJYqD&-QHrwY0)vao!?97rx_oO*V>D&<_oDHhcm zWR5*zzE@9@{RbnN<(sf*S0tkn(Emd5C=s_gMnUgHzbL<_3py6;|J^^DzUCQ@rkDL{JoG@)HSG~`I3+92kHn{24 zV!3&M3q37m>&~)v6X*04pHn1ScfwMJEe_rP_Z&>d7Irg5)V%q`#5vCUD+^m1wgR@r z=jvH_@)vrFHc0q^C(bpp0fb|@-c)3PKKr?FbG?W6=jZrXYOMsVEG#OJ0Ed6GM05+E zwh_o%a4LCo+MXZ!7JJ0t=(81=l7Imxb9F0sa@*KkNStAg7Di^7$sl7%(MT;kbD zz$3l}E(KyBuVR!7SEE>7tx6HFt^2H4_ zuoc!1Il_VKGg^3v*T+ZZgoFQ&C7~*GC@r(#lmn1YsGMmk(d9t6M>zOY@=YMiQXqV! z_90sqwzX6`yM7uiEcK4kuBVdcJu*j3D$zb?xkOsQ0(rj0XI8M6?^QJ;RU+?^1t+oF zOna>)XklrOBBwtqyjqn|w*~7ixS&Y9U3((*0>rsJSHA)4Zfpx8(aR)y52#X{Gi(hi z6(X_1b=UO~%O%?yd!vP8k6B*rEP07Ug{`)Mf=Oh#H&W~=SZ@q0P~WkwL|1sCSAICF zXkX?foM|KxJo%G z5I&0RHRNjefVGuu*t==M_OVn57HYVVo}zW*O!+GfCS$?Rwq5_$d5ZDl)?JqT3~b{} z7W}Ly%eAmYk;qXQ3|pW_Tz59JtwGpQSBTc#WpF#dwl-0mKO0;36qZ*yA4WGCIzJ9_t4nWu^7DfOu1(J87`v~n3JNOeE5nIlN++aDpwUt2DNhPv(RK+MR zPxZ^nZKy;ld47(=eYr>Dtl%+!Jsf*f> zLbgSUPj%teT|MVag&ZW?;#|Kw|9HXJmRom~S0ilArW`NzQj2nQ1yDBgDe-QkFfPR%Dk<0Xu^I znmuigYTX`*j;aqNHE*juUEx(To@P-O00aueU&c;!*^GAZZ-25i=^S$)*5Av5Appq1 zmJ#MaI0cILsGA4B^E%VV2u@~YBvHfR9mf1q5;ABM1-6N|wGbCNPsoxX_PB*s?)Qs#A26W^{eH?(D&3S$n<2IUhSPw&k8p|8~?XYe(1`;O896 z-Q~nNP~QzE15$ZmYdG6^ED$SSxV|D|xlDy@s@o#A^$=IBafvS@7tD$C46&$Pnw(_s zZk2p9Dv*z1G~u%;*?hlF_sBq=D$y3T@lpuLvZv&4Xph*JXkkWu-Z6&f9tC@dOwbKDLv#GqAla(A? 
z$Wt7ONn(9yGA(&mc=9;%6`QK!ZgRrUJMn!?g|P4>I67dCGPvAEhOOt=a=y{RDtTA% zgRmu*%XO!x=!LCuL-)aQz1LocPq*`?Tw|N2+FI#J6X9?n8mhe9U3< z>@mQi((rI?C5|j}kF2dkDHR1L)lJkmw}ovbQXn3H+?%k3Ep#Mehc8i_b98{hmKtG+ zm0-EboO2+Yx&D+6l~4%ot0Bq!9H# zwx?iUM$JjPQTz1ovF^dwW~7rehr94$g|6Cv-CfyUH8VGDW_{M$=JOrC<_l0DdYPcY zp5y?4&>j(h9L(}i+1^GodxQ$4xI|WPKY|nPQ7yX>MX4Dff z3{ET<(rYCvs=i>JO8vcT{V^+epk3^?GaQ1(W(9Av7iCz@=yHi>GrCH-p7_0VCRli) zu8^RY3%KY{us2%R6+*aRSBQr0d!qxm?kaFodG*Z3^UF2iXT9rvX4p~z#HILe(VF#`?Tn_4bxINnk4B4l?|cdLJD z`ucB&t9|l=h(XEcF;8W7y@=T9>Un>| zf*}Sc*VwMM5-*pi5p^ynZv!I6XI zOz+g(`vI}z5yZJSAnPn*Hh#Rl(K5zz7Y&zH$lKzeraP?LIvt=m$9Y#1p^0;^(2A)L zT%UCTU&?GNJjuLoDJaKsk9(eC&#>3~oTe0%MmAVkI|xU~5B#^!k|VcgUP|7v=tXhe z;liOckp;_o*X8G(NS%=zmL2JfwIup)Oo42l@Q81twS|Pek7$B}|JRd$l#;)YP`Scc z!3s_UkaVvhSHbB+f;<-$DltgJ)H_NAA}bhVd2;59_B2z80QpbUR)Wt>G6e!6M&=X% zbJFx0kY(39>VlJdrUwVaDG*x0%2P=t-i!1TE)YAu^twXqB|?qL@J2QerVG8W#WG({ zoV!#A!j{+;Jw<29a~oCg1JvixeQ$Jtds+%`!RP9$@Tb#OZTAB%-oCMBHhU3otNv|A zh<{Vj%iPh!wOMQ9d!mb{#BPZRG)6|O311bcnGkX*d2m(AH6NK%@|nzoi^CQfyG3w% zdPmiaovgxoOm>S!Z8Md4)|7mE$Fy6Bbu1Xd!Bmkmkug}%9 z=|A!TRTZ9?DbmkO;+|(-&v_c>DKhof9a5>XcCcKki>1d^UhM{C*?A8(vw6MG#OiDwa<<`2cD)SRk5 zw`9T>RWlQZwp1o?k4_$X?u#UV1T8EECr>3d@!`gIH=V^EU5~xO8x<7oce4;9hj53EnaYPEek<*o6j7G2ANyI zkaCGCyfa74tYF-uFm>Yqrqw|Swnal#-j?19F}QHz{F%W8*w#Kj-)P~R6+v+>YuMgs zdCbBSy#PrscPGoMcM0`bFXzIR>J;68tl{wbhpaF@7f3Cg8ap}p^q!{Tx)p`b*4{5z z);wo0@2P@Q=iI`~c{jtBYeZEraKEG1<0edQ7XAU{fhV2@TS2l9(S=IR{j9DvkYE>WLb+_`!=_s9wzc#oKqS6fL_O<11>lc~Hqu!=+X5w>2d_u1gW zHM6`zU6z)8 zfANKQpLw@k^qgMxyEmO6y%C&L=%8X0?9qy$|AswE0&}|fS5zR5EEDAU0P3dBURuEq z?{z<*N<5|39|k9{7zJdxTc6{i{!Z+c?@M^l zy}Mxkumz{NNng| zN?u8Hvl(4m342Fz@YS+#=#_gf(YAb?&B%LXvl(^pxkpt`58>cnRQ-{j>7kN$kGZ#b;_=zBu*EvXVZPB~o}xW=paKX^uXZmH z5-!vg!c1a?%&4Bx^#kt}tPOAdcQZ?~_GVA(MsV5^lM~}daGID_(-yKbIe%4&00eu) z-~<5C9{qql3Pf;^;#%vE76n0mjt>Ch6bOTpXJO4w5`d(bO0=H0q%8IFDT)3(??fE# z5rPwoy7&O9n3J`Y0OVa!7nJ`H?l$M!ewx+VnKygVe`()6yeiS zCeGRWJZpFF@z-Rz_Ere%a>=%+E|@t#P}6oab$9QQe|_`?f>XmEYRQ~<@aOg3JeTwt z5B?1@Cj_S{u{ZwI5micdI~ocANzMm=90Pz%IxCQW2#_yJ3y%cI?+HZs2q1it<7CMj z*(0G~P6YX!pMuFhDS4_yDfwf%NA~>C01Jj(E)l4#;R76hrh*}s@yceT0&#f6&vim2 za*(-~r&2Kr4!)=4r4kYA?x|PD60{s6NOnt*ziUZ!-lLDn8NxMngY8+kN96^THbt3} zmGrb!Z^?_XREP>sJk%##(0sMtXQ>NbpR0U-7-37+uu>spTg4@o3hAAam$310Ojuv# zH3p|`XA8QT=QKQptOF`nF5VcvD6lvQ2fv{Z!D#{aD0JRi0*Lm=0K|cOqO91Jx;PfV53IqpVEelR1;vQ*w zjb~vo);-T-IsDp|PM@q`wv~93Q9iYzt^u#V7tD#n=4CS)b7HL9X?=;45S+M2`>o=% z^2TcfS`At;3k&y%`Ci7l&t9?)1INh0TQ!`J+_x*3nA`>nkI8B{W&P6S4Uvn7;U%}~kJ%iIw7XYOBNWOy8 za_rFs1gA{{nF4utk3vsX-<+Ey0MQ;nC324fQJoP!z5IKz#xbY*g@d$Dvql)4n5SYc zvEfL(sYEO)Ei504QK;nE2Lj0;oAGy%FNbWR13+H+UOlDM@DZoiXpg)+Ror=jJYwBr zz1Z$5PVatd`QBfGytS2hbrZTrmPE%rQtJ;}7MSk^5u0_dZOMVphd4!99h972{fqKy z!G*#W>l6(xT$wN5eJ0zo-sgCwLim&#w(;S^tYb49&xPIDk5wNmEb(OByj~pq*<92a zT-0fsukA0NZ0z>(#8Iq~?|)x;8~}2-_1W}J?Gf1RjDJW2$nV7-;U4X7Y-sfpMk%qiH1gAhVm=l8&4?cp^_dsQ_+um~r24+|7OB}lG4*nFd+h_vF>WN_hc>+iY z6bSYx60z<`{vpM>_{hcY0zft;mGAc9q+VqXWaS4tL)y5g<3Rom&K`4#*dwxA>`~Ec zSy-k(OeJcMQtv!%cpxHXtv_!pI05A8wglTs81cT_9}D-(JC-(X{_L^=!i44SV7?9m2uSi8YWDi%v0s);M?oWdO_PMdFB$mJ<4=eFcrx4*gRvm?jE&GI}MQUclll~ zIE~9wRXmvsc3bN`cg1Jpv`1vOyhmPNBKL>|LyC3Sqg}3Gh;GZG?2QFncRhAsY>V?0 z5w<$202jfhrMT`IxW zklxce+AGQ@M|H#iKz6NK9?C^sIg!Ds@YzM}Q3mdjNpH7Dml&L;LM0Bx)`v3IVUMUl zDycv^JdlS=78NM zu&pGR?=2O`e`)W$9c&4s0+DMOdxsz|CC`IzgUrne)=L-$ARFx9>q)YI0wB!y(q_~Y zsO!wh3qYVi^lUjl+wrl9V!j_uZPFNG62h;31w>rfPdWsBN)22>P&gehC{6CJ? 
[base85-encoded GIT binary patch payload elided — not human-readable]
zw~oh-J^tG4sJRCWZ?}Pt*;X+;8v0Dl37VTsCMoQ`r+7VNjNDbsy>m9opFMgmaN+xiF3~udMV74QRS8u*)w(}V8J$>`Nav?8ALR6z3%90n5~rX(u1UxroA+bd zj*xceYb%j0eESaAii_W)wRQuGt5vm0<^*AZyG$rm1d|4nqJzRO6 z6@M1>yr}ly<8Qr${CFml_bHEoU@dT#d~SvzGplx3&PIOMXGaZcYTT=SEfEvRtdAQT zK4z{WSE{Y)yrHRYR|(sdOp9XDNp_?YneGmz>|9eK6Ln%5)2DB9ma_v{=)lVUp-dyB znVI0YeM7>%c@&lw69qli3%7#Jk_;L!&l;DVuO%*rGI5oViTZNBe^qXBgkBxa<-%0! z@l|H7PQxU>6e22O?UWU0)r6TPpwdC2s=xi1a7J>K!0zXdu1q2tS_`i-|7jpmI<%6j z*yQ<72}QkKpg`OY=HCAVjXt>Q9A4wpf55#~`4o|x;hUWzdnp}B^L!QIW-iG5K`yaf zcQTsE^WJ52QaaP_MUz?5XpXEUO^_L4gxgRv%NyJr-q+dp9$$Q)0<)vpQMu-eZ(2Pb zhcLxejYg!nxaPFP0?(|R^V*vpP{M4Wp2~vZE9>i2ZKNI{?mHdP?Awj_KG4JD$B9A5 zDa#V(jS8?>9ro3j9o$yp+JMMxr{nOOdQksU}R zZ9Dp{W8-iM-P%o`{@Pc_Py&_DF02r=WSjR$1#jnAcI0%EVX_sHU?FME`G^07c!-y8 zsc3E{oSq4ZtAeXGN&hz;+sMjQ9!kWLb(N5urwBu~`;F3fn1?wbMqIB7dE$A5#pU2{ zQO|QnoLo|gFbbpc5`hqOF4y45f5o5urvWXoyvHZ9S2v2&$X_JN!-G${iSP5@-`=d2 zc>eOfg%NGCP^1;mA<=zTB8n!#%h@q0$z~c57}4!PG=_P$KjI-CbGLXV!+Kj&fr8ya z?M>*F${<05gkqu%9JF9UGQ2IaK6^TFBqChs(?OpQVBx1FftPSNmf1+Y@0^@TCV{UDxQqPLU-);xB)5~eD5C0lj_c3b*UA35l^1)6iu0@FKF`gUWvzP<&CNL3 zl8u&7e|JeFJR+6*-`0TcxM->*8nrB7gODeuL;+_gBT4QcA@}4KP90&%(oBEn^m-BT zVcl<@PZ7oDCubkw&$yuNyb7Oxg%v$_>N>3A;Nm-kkbM{k#j->h*5H^WnY>Yk3#Jn( z7PK!BeS%~aCi@f;Xeg&3I;Qp|1Cez{tfy++z-ey>2Txw+anXbZ_YMyvxQ&zAcXC7< zqFBBX8r%lYV$aBTEYMy$9m!7MXgi$o_1G6HOthy{wTK>hReBGj>BIXUEn?3538lCS zIz&!}K6D2Vt4tgEv;nOZQx74krp$l-+grGDQk+kP?5H;QMPT2Tq`_H2^D*ouU(Cl0 zVbE32K11fYU`ep2lch=e;Rnu9dPx0ATe?k1?f&MY4W?t@imT}EX4Qid@(=b*VoB&; zy3Neh5rbFi5YKR7n%q*j9`m=&24r0HdYaOIled4yLf0pk7vd+%?Foo;6^@^4>A1;4 z8t&~0nTexy{&R>`>{_mhR*BBAqdXq`Vm^=4IqkN{!qgEVbL7L)sc zKlUEX=^Oo~LGTQvenU`pd?A^u2szreYQC;=ICwb6RA_{O#8~))_>OT+_2D*z*`1W0 zJ(-0%(Lnibb}CCfS|CewQZ5j{h4=i-(1IS1^Hg2=v>)K6$D>&Idrj(<4FAsFV&WRRobec>Vg zZ|3_(JsoZ{3Ap(LatUZmOYI?{4Z76tNkX{fONhCy!0&NJBc0}8>^kD(<8C)q2HsAJ zrW5jvQs-cM7IsxgUlb7CnGmz=m6yz)C4nD?I0J6iuuG6naL^|2%uA-DXCl~IgqjQBBzCF2ML=SM45%+3jdo`{y z@$f>ea7Zv4xm^a_(<66S^JkP)@D8{Pfy(fs@2#=K&3qf&Oogn^>x(Fh>xHw7H%}l% zAf4jm{|tpR>%-$Tn6xC%zE`Tsvx{6Vq(>kDOjjCi=0qL~*|pswa-Y6hA}Uz589ECC zrQJ{mf8G&El)-d~2S?_0Pi0(|Z&&#gP{cjpdp*F}$UM5~D6hS=>YI9T5!aGhIFC7Gv4MZ-_XZWZ3Y;ldyB+`;7Qc&5|j>|5CW8<|)mcCq^@H z8;Y-z<21zXqeL_5@i&StjP5rlp*)!fXHI?>IA@n(l5tyE7^T3SC&Xkkq8CO)DZ)S9-q6_H^uB6_8WzaO=g;;k;LZVcmpto62Yl-a zmPBFSzGoNm>7j}5lCz>{C=*ffzHC5bqE{=lFHe8}prPn~fn#^!S`X>*;X)5+Kh@?( z*6zrm=Wh9#8xLi#VlSMlMRiKb67*r?;F^qLo_i2=ZyNKAEN+oi6z88E3;98D1JUn} zPSeEicqE(o0V3Hpu0WLGI?1y~ZyCP%=(-Kc&Whfl`AQ5R!jCMaq|8^i1^=xfmdzX^ zxR@r~7Q1FYM>g{=fjciGd;Asc2K=mweLthv;BC5?mwrs3;?t+0z>mv-|JV)YHcVJ6 zHkXjk<`vsp3Evht-y$whF5+3z0BuJUVHjNF9zibOfvIV*JYr~g;3pl&CV{P7W$hn# zt`6TTA;LQy#RiTW5*i?)m!;wK>>|3}JrPk%c7&MA=#!`H?i?^^qs<_a9Kvw@5v|Lj zha1Cv%n(l+=W@_1<1CL$x9vID3r}EN(h!$~mtU6 zxKLq|9?%7a^kb>asqV#|CIBiVMu!)+UMSL;va7 z3r}M%9&w|YG9XwjnGhNq-TeK>2r(W$MgDSX?vE`uxi7$LEUqjAkhX3izbEY66X`R% z=QDQxRORf|hkTBF^~~<=hERvtC^Ig{j2ssdpc695wIIFFt&l0V5^&WbopPLz){&v% z=#b`&(hL7di_Yb^!iZAb%$_0nx1fhGm*Zru%00_EIAukWQnA{E#?Rn2n0;NFwaMY9 zu$kFQ#M?zVfB3r~`)4e)IJSUr=5QOzg1c6jMvYUXXA|=3JCtLfyaYsj_U-)k<2i>k z-vpE$5sOrAsUw@^6tA(3W?H^4Wyj8G)#ToWkQ`(B6ja1@G!T8H32K6c*c{g*;0W)? 
z8wz**a1G}$b{7)j5<=YJ3Wy*~BDg*{7KAorL?^(PM3~z)eek@UL5)Ey(=2~H3~z2kT#qC` zWZm0qWL`cG#pb>`QCFC=O>7`nBT?5;KJJDlYV4-M5+U5piR3nmn{#?G(6=(k?iDm~ z=JC3IpQ79(G((WiUp*3XdP3@oXD@GapEIw>x>iE4$R&w-Rt&C9(ZPz4RX|9dpLyey z0usX|G7w)vPysRa&@6=Ob2|~jpP*^`N1}m-M~h;<)#P&dqUoRO1t#Q-uOoBGyUK5T zimY|CbAU6tKI0iC>0C>BxW<(74JSWZ#%#g=gfeaPxf4wtbg!L!!%}uZQx0hv97Ll# zq6&Mln9~(5#H9!!(pc6K1@3M0<@*z$%8W%|YzrNk&`sIv{J3O_INSM(G^R>-8QtwA zU-12e+eNIy6|rZ|(^$Z+Z=2gPBzBvtviGtI^)ZT00Its_hG3vzg1YURLh!a>XHz`A)lP!$rxh4A!H+@(Y7*isgh!u90pZk6COT;jY0Nw0n@E~AB0)np z(_0UkQCt2CvP{u4UmP1H_&oR~onx8QFpW-_}M0*iU(&)q8 zR7je)o-~^kg&fKoLGJRT)%)2+*!F-5=C6lV(wH9xt8QBG@g`bPYtJe_VB}HElR*Ex z1fjJ;T}}Jz5b+*U)OtdM-8?*Hf8aV2ry zjSl*p&PCaVyw_!;DESE^gwEaCewF5g>%3fhBl?oSX`orbW92qW2Iad}AHI0_jJfb0 zLAcQw=pUP3YCnP$yd6 zZ?bas+WoadAxJE)Xf8osBQg2>M_a~SwW{`YCC8q~OpQ1uTG}dU+ z_(4RGP2ARN=hW$_3H)1QB39^DTuMZP>^N>II(OBcn{o1Ch2D(lnb}%|TwE*p6`(Q+TR%bwlhV?X;3EvU z5ZWM7pAy+L3*N zU7b@nG$PWkA}s$#%h9*DDE$hJTg%K}%yAepspcXyNi@bGIM-|DoR>9gDN;4V3z06t zMDbtqLzL+gOir|7`*F4TS#g3taRyxPCYY}%W zx@Jg@hTJwBj4$X(S-IyVSJ~{OqO+yZ)zRVE*!Z#1|GH2R$DJ*3Z6GK)HsnJ4eohPE z6wH~?{MmomH>(WUUCzit{(8t*#>0*Apej>(-K{iANVZPAS5X5U;j=&3D^oDP*3o@Gx3b`-`0ST>J~2u4Vp_1A*2Sr zT$r{{DsNO+enlWf_f_XWI>~EwDp^uDnqOt}J*Es#*DQn^nT5SOFx*7|goQtu3=q!N z)iPO#vfC9~@C@guH`;SJ9H>v`KNo^=pLYKaV!-QLguTX2re}R_@`5e0s^qq`1kU`! zGw5x9Hrugj zb5Qo5smH3wzwDi&H0}6UhOF0)rQtCBYS3eeU zmEa2}1hk6(7i$fY(%5EV_3;)V=AFl7$>{uBY+PgaC7=Y(6*Afh`L}=-VCrU?Au;E4 zCOQhZI(EoyqB^Uj=xpLFoEO4o%Dx^z^ad$7u$bk=a!9xh;?CpJ0|jN=fG4|~(;0ma zi%K(jYumgMAe>Ric_AGFbwDb}zPEOx^ImF~Mw7tn*q1)VH>kb3op{n%(zI}`BF7$4 zLFDY~{u1r0CrQc}hZ`9RR=US|~RAzrl#$MmA_0KN6=+n7j-@^jpx2sE}W zWrc?uYewwo%72@ZlXt+vhz0)+(Og$og;>sB{>Y`PQ^$$q&;4h@1T0^Yge_|&HR-nQ z=uVhsG-hss+E9>R2Yt8%9pOGH20scuUBqly{z(q(B5q5mkiIr6IX*_Y0x@ohTiPP* z8`DneCINTbXiXeI8n-cShPJs)tywxUZlYW{toE$dN7Veqn9+Bj3acZK^RLG-Au2h*K z&Dm0;tLA&G5jWqNAz??+1#5TuRB}W&`OVZ&!j@8|Kw}*dt&=d8zgjqIlbkf!C=El% z3D-ksFGj0TB?%MpQ2(+M?5Knb*Q)H%-81H&VI{*`!gs8@_bAJ;u|RcmBTde5@VEv` zdeEaUVJ7X5;Rdn`!j}!`FyszvbO_|nt?>Z7iY+vL8W0X?edUlAcp>gwMI%hHGM-nd ze+YGWzL(f@wCpseb2yv^iyFU2B5+U4mhRZg-?(DVt z*6bJHzLeNFxB^PFBqxBFsZoEXl$C_)!mvvMdSJNEh_)y)`b_it=LOsRCh8?no zdbEIrUdx@#s&^+1$IV=YwB<7T$WFm3T;P7gSweJrrk{7u`H;_VD2k6RmxN60C{1ai zyY);qEt_hU=r){woHIotPbs?9$QJ=o$!M$+GtUvM&l8Dbw`FG)0~beYb4-<(2jhb| zp2I;lN=RIxK@Z6guu4!`IKqXO5WQo-%~px^ZnI+(&l`hlY**~gMnGhbaO^v52)FzJ z{+#7?F34&lS=>dg^&_HO%qkmr+#Z3@DNfK91u@BcT}1uvP7xxW$d6};O^2J@AtQ?@ zM3H3|o8fj+1kup#O|{-Y^d@NJ66(tA)_|NLelN#+_Zj;r=Ur*%`OX*{+J~!Y+V)wd zNTVV*6Em5s_d-?-F+Z;m`gY+VN7G);4xqKEkb4>(XEOOWzZ@YN;Breu@3&GWpLY8S z#`-rRT{%cvB1|?8ln`=(QSPH*?w1Z7&a8x7&W>%;CQ+?&L82nK>@o$#|Mx$WWMANd z1zc`ioE$(%U>iuRvt>^LTn)89-Y|eNdhG#QOU@@C88ch6Dc4_}k>;SDAckOZHBZxgiyg@apVACh#F@z)hkEL>V%? 
zxair@?z~~0=qIkvPPO^Ta2pJal>EWmmk{g=N=~h??hFelZZfu*$7D*wd$%}DdzM(w z;mr{yU)F=|H2%E9N2qX3!r$MfAqmoo`fbXf6!+5S5gh5u*iy$X+% z60FQQGNw?kqeh8LC0@3x0?V_cp#D;3v=L=&LaZOE6kH+A>%eWE z{6uX~7W1}oO>*DUq7Ixb!W~ZziMhLEiZJ40EwC`wa&nYR%1k9qUTm85Ec}6mh<`08 zN>{6y#qBXUFK3_O$Q;)9Z-2lsl`lH~SU|!OAmXvlMK$w>0+YF66O;@)L|GCzRQkYa&wudO+W{&D)%k9e zZM6Jtrz@hXO@`)v9Fe2tKh2npO%zI5Yq=S8C!_(pyLA?pPH|!1p+Y>5yIi2i`7L|t zVw(xcTEC-0+JJPIwlbJZvi2=Et_9(at4JP}9lDxvIq=N)N1WXgeK#-_WUCBsPNE(Z zq30+RbTTCQ=85`uGuA(F8`Z~-xt~{Zof+amli6Y~O~%O7I(SpTBzpJi$HqI-Gb2lu z6Y@s6CDLU7uly1<)Ix7aWc(GJ;D3t$qFAwi(>#fD%fNqor36_BP&CSGbe2@$E^ZWv zPzoOVGG%w{UL;S7CKFlG!aJ0U0<`diWVFYT954Shm zW9tiw&Y?az8BI$C)3f79F+3D>u$;6EReQ&&`2s1oz0nfA_t`=XNU@lJIa|jqIn8*} zaDQ`dlS@QXR%y~Ki{#(5HbKeSpM{-wP!q})hZ8_L0#Y=91}P#asFxzrZwMVj7eOIF z5&{@n0tq#thzJUTA|X=LAX0@(^HPllX(D1lfq*nAf)oKmCwcMRJM)}3@6A0sJ7;!h z_M5Z&$NA0dY)P|i6R*hHXVoRwvs3!8NthBt>E5hM{;j|(p{n|cyJ2#E5D24!Xl^EX zXd&+gz8L9Hl#KT~)vF*l4c8r14pypN?iOR8&t~PcjRlmlK`?oBHc8V1a=43qZrAN( z@fnuz-fTX>+wNO@cDT;=R0-60fTufpBssZZu=s9uRnkm(m1Sncw6SRie{n+PGj)NC z^Af%m;(gnB#gm=48g%`QR^#{<(t>`wu29?8(D|r;-1U*npZ7{;TEVzj%Tc-9{M}nk zO@-F^%|5(6<-0uVo5B=BqxJo)+WA(y3Sa(Z&Ney7)PCNd{Gs?VOVXa5Cln2?)=bX5 z>KfPQGAJ~MRb_0nOOR$Y&XZD&30tJk-{IB% zTbCe_GCSu7hbiP|=3o(ObE>w-5VCPcXPhWc4mOfTJ(*dcdV9Yl*4I^?R(J(Z%{bPT zU%j$^?H&3SsP*GL$7(k5!KA2I7d1-)r$&3yh?82Y*i#$+=ir`6nF#o$9_#hHqeZs5Q_H*B>M;gr4ht;3IcMRZA$-ZRNRn8wfVOKCi1 zkSvGu*tM6~d<%5;z@=GG9Iic9Y5SAe!9Oedj80$v9n{VFi1%xiD78i(X@AU3K!QRV zN#}mK;p*?%K%)lsWVDg6E8NNp(pK5w!?S+DLgK_IXg@cPNtdWpRwVqDhH!e`_Rd+= zLlGm%4vaqaf~u?4f)|pQuE7Q53;v8&uHK?Qbah#yVqG^NE#0PO4wg=7?9M7E!1 zgoT)6pp**z3&WoFSUjkTkkBKss}w1J9Gc!?6<3-(DcsZN$n-BUd9!1X&K7dzjv~wYRTM5^0BZEPiCwWPoD1 ze^9@pLDP4R^(_02Wbb_&F!v%8_v7`8x&rLM-`CL%CS~IN_#;_q;c)?45|cBep#lY!qWM{0YEGjCZBd+c2qfQ3*9%9o4wNIwT!E0n`p4q zSoi7DW9y?h@NRvp@ds)4!wL4B8>@JWG)Z%t0JAq~K*p3nnYO&)EP!KHW-w?a#4Ewx zhMPx{)8+?sC2F|(yqr2H4+H=#MFD`LoZb*0EF2N;gTld4f&Mt0KOP0gg`f~{G$t5_ z)9~>RTD;?irJojqf{zL*A%Qi!+e+69r;ayKaCJB3$}{Bd_TP``a(P05ED}vW9%}hw zr^qBqMwtm(OsG2Mh>Uts?Mbj{nWeoslE_Ap%u?TyBW*V3_6W{V#R#{wAbL)zae}gn z4vwI+DFK_kDg>j(jYiTO#hvVYYN}yGV)kMUl|XS?>m+0sIKqZ#lSzaNkNJ)?&U($p z+db2*sl|25R#gl!8mANWg9Zm1CsGXiq^r+zURFKV%(w-qvYa?>U?y1mLe^F?fSvvp z%ef+pyWB|#qf|AXDtvpwQCH9|laBe$E0M(50IPRS7wB4yv407EhgCTJGmNi0@Lxz{ zq&hir^rYNCHJkRI<+Z)4;W`J?Ov>j00M6VTiu?*(S}^>ucNuHb0s zt(z15dZeX|s0(oj+dZv^pBkBS@y9~xyuU{f3^&ClBhu!^XZA4PuU{)XTJuEn-K4ma z`R$zHwugbt=u<8IJ|`*>`NP|1Yv0gtS0)P4?ZlTSo|pxN9lLS6zGjZO&{DxS#r=x! 
zwx~tS*jg2txa`?QcDH8o*YeFp&CV;f>w0GR$*Lu*G7-|-{^_2fmkc=j^B6lB=SQ;< zd(5_$QnfOdB&m=3yIH=@*0~i9?b~V!H#03pxTZV|Q=Hy;Vpir6Ij28ej6B%rb6vr> zJ!{h&mGjB!sqjid#R@~u&i@)5^vB|DVn-?X8dBS?W;}jY@{U!{39?Ze*(&Gl71CXm zDjp|GwYzxsTC$g9#$%@ER7bZC6%`4I-X=+x;tz76zDM@5KpfC#d!`dca=Bq;EU z$%na_me28vHbqLeO59&=GW;iPAN;ps>(el+x1Wp69Yq& zGfTefW{Vf83rq83Jwq548C~9Mo<*ee8pB7VQ_55>7SNE&TA6ANZH2QwMnYvn4oS%# z6NeRdzXC=BfpB;6bb4XscZA~Z6oAO!I-+xx&_qwX*K*EQ?(H$0y(!B9O+?NR_HS9J zZ{{1ae58x9D^;%%GdB>tAJOL7$grg8DOG0k8(}TYL}kGPTQ5c*-i2;1bzb1nsfwYc zaffSVuXtVqtq*3~8~;f4l;i~|&ul*wD}CR(WU**kFzRa{<7@y@hpkFocPg-0QDc5F zIl0(T@X_7>zS+e+sXpH9x7JAVkZfyXX0(RXB&| zqV_I|i^uLKx;ifxC^tKsPY!=nX54V1h}9{Mq|g*kC6xgwd-Wna&{57mD+Ce~x6Pxr0PD^QCH^I?g8N4Qz0}IjDg7gR-TjLS z0ZK{hvQx*QUto`Bx~H082OTu$ko_tB$5Z1zr1THQPRS3N_1n(V_1^mgs*jGBEu)Dc z=jqP6_tL#X>Ek1pzXHs!HT z){JJMCQ?oVLzW+$Ryo~Z!Y*P<*xLZPBmsY^$08@a`So_@EpPz=uG~PtK~8%6%jEy| zKA7;I>fw*|#=yf+5n}}2YJRES3Q#l#;Z Date: Fri, 15 Sep 2023 11:39:00 +0200 Subject: [PATCH 075/144] added documentation for sRGB conversion --- trimesh/visual/gloss.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/trimesh/visual/gloss.py b/trimesh/visual/gloss.py index 5bf119c92..89b7b121a 100644 --- a/trimesh/visual/gloss.py +++ b/trimesh/visual/gloss.py @@ -220,6 +220,11 @@ def get_specular_glossiness( specularGlossinessTexture = specularGlossinessTexture.resize(max_shape) def srgb2lin(s): + """ + Converts sRGB color values to linear color values. + See: https://entropymine.com/imageworsener/srgbformula/ + """ + mask = s <= 0.0404482362771082 lin = np.empty_like(s) lin[mask] = s[mask] / 12.92 @@ -227,6 +232,10 @@ def srgb2lin(s): return lin def convert_texture_srgb2lin(texture): + """ + Wrapper for srgb2lin that converts color values from sRGB to linear. + If texture has 2 or 4 channels, the last channel (alpha) is left unchanged. + """ result = texture.copy() color_channels = result.shape[-1] # only scale the color channels, not the alpha channel @@ -237,6 +246,10 @@ def convert_texture_srgb2lin(texture): def lin2srgb(lin): + """ + Converts linear color values to sRGB color values. + See: https://entropymine.com/imageworsener/srgbformula/ + """ s = np.empty_like(lin) mask = lin > 0.0031308 s[mask] = 1.055 * np.power(lin[mask], (1.0 / 2.4)) - 0.055 @@ -244,6 +257,11 @@ def lin2srgb(lin): return s def convert_texture_lin2srgb(texture): + """ + Wrapper for lin2srgb that converts color values from linear to sRGB. + If texture has 2 or 4 channels, the last channel (alpha) is left unchanged. 
+ """ + result = texture.copy() color_channels = result.shape[-1] # only scale the color channels, not the alpha channel From b9d84a0b24e847908e5beb40fbb8c5906715c854 Mon Sep 17 00:00:00 2001 From: Mathias Parger Date: Fri, 15 Sep 2023 11:42:30 +0200 Subject: [PATCH 076/144] increased accuracy of lin 2 srgb threshold --- trimesh/visual/gloss.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trimesh/visual/gloss.py b/trimesh/visual/gloss.py index 89b7b121a..2424c93c1 100644 --- a/trimesh/visual/gloss.py +++ b/trimesh/visual/gloss.py @@ -251,7 +251,7 @@ def lin2srgb(lin): See: https://entropymine.com/imageworsener/srgbformula/ """ s = np.empty_like(lin) - mask = lin > 0.0031308 + mask = lin > 0.00313066844250063 s[mask] = 1.055 * np.power(lin[mask], (1.0 / 2.4)) - 0.055 s[~mask] = 12.92 * lin[~mask] return s From 5824483f377df780e3f9e993b9aeff43f8dc91bd Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Fri, 15 Sep 2023 12:54:04 -0400 Subject: [PATCH 077/144] add meta-extra --- pyproject.toml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 59e791848..d7096938d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -69,6 +69,7 @@ test = [ "ruff", "black", ] + easy = [ "colorlog", "mapbox-earcut", @@ -99,6 +100,9 @@ recommend = [ "python-fcl" ] +# requires pip >= 21.2 +# https://hynek.me/articles/python-recursive-optional-dependencies/ +all = ["trimesh[easy,recommend,test]"] [tool.ruff] target-version = "py37" @@ -115,6 +119,7 @@ select = [ "W", # style warnings "YTT", # sys.version ] + ignore = [ "C901", # Comprehension is too complex (11 > 10) "N802", # Function name should be lowercase From aca66174ed8a9d415c104585780e01f35146bc2a Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Fri, 15 Sep 2023 12:58:13 -0400 Subject: [PATCH 078/144] add test for #2035 --- tests/test_gltf.py | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/tests/test_gltf.py b/tests/test_gltf.py index 6ffd2a58d..825c95b1d 100644 --- a/tests/test_gltf.py +++ b/tests/test_gltf.py @@ -431,7 +431,8 @@ def test_material_primary_colors(self): scene = g.trimesh.Scene([sphere]) def to_integer(args): - args['materials'][0]['pbrMetallicRoughness']['baseColorFactor'] = [1, 0, 0, 1] + args['materials'][0]['pbrMetallicRoughness']['baseColorFactor'] = [ + 1, 0, 0, 1] export = scene.export(file_type='glb', tree_postprocessor=to_integer) validate_glb(export) @@ -441,7 +442,9 @@ def to_integer(args): assert len(reloaded.geometry) == 1 # get meshes back sphere_b = list(reloaded.geometry.values())[0] - assert (sphere_b.visual.material.baseColorFactor == (255, 0, 0, 255)).all() + assert ( + sphere_b.visual.material.baseColorFactor == ( + 255, 0, 0, 255)).all() def test_material_hash(self): @@ -975,7 +978,11 @@ def test_gltf_by_name(self): assert isinstance(r, g.trimesh.Scene) assert len(r.geometry) == 1 - assert g.np.isclose(next(iter(r.geometry.values())).volume, m.volume) + assert g.np.isclose( + next( + iter( + r.geometry.values())).volume, + m.volume) def test_embed_buffer(self): @@ -1019,6 +1026,24 @@ def test_webp(self): g.scene_equal(g.trimesh.Scene(mesh), reloaded) + def test_relative_paths(self): + # try with a relative path + with g.TemporaryDirectory() as d: + g.os.makedirs(g.os.path.join(d, 'fused')) + g.os.chdir(d) + g.trimesh.creation.box().export('fused/hi.gltf') + r = g.trimesh.load('fused/hi.gltf') + assert g.np.isclose(r.volume, 1.0) + + with g.TemporaryDirectory() as d: + # now try it without 
chaging to that directory + full = g.os.path.join(d, 'hi', 'there', 'different', 'levels') + path = g.os.path.join(full, 'hey.gltf') + g.os.makedirs(full) + g.trimesh.creation.box().export(path) + r = g.trimesh.load(path) + assert g.np.isclose(r.volume, 1.0) + if __name__ == '__main__': g.trimesh.util.attach_to_log() From f5c9dc8d7113eabb34f17a78a2ceade3e940770d Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Fri, 15 Sep 2023 13:15:10 -0400 Subject: [PATCH 079/144] fix #1970 --- tests/test_obj.py | 7 +++++++ trimesh/exchange/obj.py | 5 +++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/tests/test_obj.py b/tests/test_obj.py index 90db07741..a5e8a5238 100644 --- a/tests/test_obj.py +++ b/tests/test_obj.py @@ -464,6 +464,13 @@ def test_export_normals(self): e = m.export(file_type='obj', include_normals=False) assert 'vn ' not in e + def test_export_mtl_args(): + mesh = g.trimesh.creation.box() + # check for a crash with no materials defined + a, b = g.trimesh.exchange.obj.export_obj(mesh, return_texture=True, mtl_name='hi.mtl') + + + def simple_load(text): # we're going to load faces in a basic text way diff --git a/trimesh/exchange/obj.py b/trimesh/exchange/obj.py index 7fb50f87d..22dae35ca 100644 --- a/trimesh/exchange/obj.py +++ b/trimesh/exchange/obj.py @@ -929,12 +929,13 @@ def export_obj(mesh, # add this object objects.append('\n'.join(export)) + + # collect files like images to write + mtl_data = {} # combine materials if len(materials) > 0: # collect text for a single mtllib file mtl_lib = [] - # collect files like images to write - mtl_data = {} # now loop through: keys are garbage hash # values are (data, name) for data, _ in materials.values(): From ad28bd0d612d038afedef6769025ac11e69bc025 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Fri, 15 Sep 2023 14:42:46 -0400 Subject: [PATCH 080/144] voxel caching still weird --- trimesh/voxel/encoding.py | 153 ++++++++++++++++-------------------- trimesh/voxel/morphology.py | 8 +- trimesh/voxel/ops.py | 6 +- 3 files changed, 73 insertions(+), 94 deletions(-) diff --git a/trimesh/voxel/encoding.py b/trimesh/voxel/encoding.py index ade24e022..6254475c1 100644 --- a/trimesh/voxel/encoding.py +++ b/trimesh/voxel/encoding.py @@ -4,7 +4,7 @@ import numpy as np from .. import caching -from ..util import ABC, log +from ..util import ABC from . import runlength try: @@ -35,9 +35,17 @@ class Encoding(ABC): """ def __init__(self, data): - self._data = data + # a key-value store of numpy arrays + self._data = caching.DataStore() + + # dumped when cache changes self._cache = caching.Cache(id_function=self._data.__hash__) + if isinstance(data, np.ndarray): + self._data["encoding"] = data + else: + raise TypeError(type(data)) + @abc.abstractproperty def dtype(self): pass @@ -118,22 +126,6 @@ def stripped(self): def _flip(self, axes): return FlippedEncoding(self, axes) - def crc(self): - log.warning( - "`geometry.crc()` is deprecated and will " - + "be removed in October 2023: replace " - + "with `geometry.__hash__()` or `hash(geometry)`" - ) - return self.__hash__() - - def hash(self): - log.warning( - "`geometry.hash()` is deprecated and will " - + "be removed in October 2023: replace " - + "with `geometry.__hash__()` or `hash(geometry)`" - ) - return self.__hash__() - def __hash__(self): """ Get the hash of the current transformation matrix. 
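[A minimal sketch of the caching pattern this commit adopts, inferred from the
hunks above; it assumes trimesh's public `caching.DataStore` and `caching.Cache`
behave as they are used in the diff:]

    import numpy as np
    from trimesh import caching

    data = caching.DataStore()
    data["encoding"] = np.zeros(8, dtype=bool)  # stored as a tracked array
    cache = caching.Cache(id_function=data.__hash__)

    before = hash(data)
    data["encoding"][0] = True   # in-place mutation changes the tracked hash
    assert hash(data) != before  # so values memoized through `cache` are dumped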
@@ -196,31 +188,29 @@ class DenseEncoding(Encoding): """Simple `Encoding` implementation based on a numpy ndarray.""" def __init__(self, data): - if not isinstance(data, caching.TrackedArray): - if not isinstance(data, np.ndarray): - raise ValueError("DenseEncoding data must be a numpy array") - data = caching.tracked_array(data) + if not isinstance(data, np.ndarray): + raise ValueError("DenseEncoding data must be a numpy array") super().__init__(data=data) @property def dtype(self): - return self._data.dtype + return self._data["encoding"].dtype @property def shape(self): - return self._data.shape + return self._data["encoding"].shape @caching.cache_decorator def sum(self): - return self._data.sum() + return self._data["encoding"].sum() @caching.cache_decorator def is_empty(self): - return not np.any(self._data) + return not np.any(self._data["encoding"]) @property def size(self): - return self._data.size + return self._data["encoding"].size @property def sparse_components(self): @@ -230,7 +220,7 @@ def sparse_components(self): @caching.cache_decorator def sparse_indices(self): - return np.column_stack(np.where(self._data)) + return np.column_stack(np.where(self._data["encoding"])) @caching.cache_decorator def sparse_values(self): @@ -244,19 +234,21 @@ def _flip(self, axes): @property def dense(self): - return self._data + return self._data["encoding"] def gather(self, indices): - return self._data[indices] + return self._data["encoding"][indices] def gather_nd(self, indices): - return self._data[tuple(indices.T)] + return self._data["encoding"][tuple(indices.T)] def mask(self, mask): - return self._data[mask if isinstance(mask, np.ndarray) else mask.dense] + return self._data["encoding"][ + mask if isinstance(mask, np.ndarray) else mask.dense + ] def get_value(self, index): - return self._data[tuple(index)] + return self._data["encoding"][tuple(index)] def reshape(self, shape): return DenseEncoding(self._data.reshape(shape)) @@ -329,11 +321,11 @@ def copy(self): @property def sparse_indices(self): - return self._data["indices"] + return self._data["encoding"]["indices"] @property def sparse_values(self): - return self._data["values"] + return self._data["encoding"]["values"] @property def dtype(self): @@ -430,9 +422,7 @@ def SparseBinaryEncoding(indices, shape=None): ------------ rank n bool `SparseEncoding` with True values at each index. 
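 
 Example (a sketch, not in the original source; indices assumed in-bounds):
     enc = SparseBinaryEncoding(np.array([[0, 0, 0], [1, 1, 1]]), shape=(2, 2, 2))
     enc.dense  # (2, 2, 2) bool array, True only at the two given indices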
""" - return SparseEncoding( - indices, np.ones(shape=(indices.shape[0],), dtype=bool), shape - ) + return SparseEncoding(indices, np.ones(shape=(indices.shape[0],), dtype=bool), shape) class RunLengthEncoding(Encoding): @@ -452,13 +442,15 @@ def __init__(self, data, dtype=None): super().__init__(data=caching.tracked_array(data)) if dtype is None: dtype = self._data.dtype - if len(self._data.shape) != 1: + if len(self._data["encoding"].shape) != 1: raise ValueError("data must be 1D numpy array") self._dtype = dtype @caching.cache_decorator def is_empty(self): - return not np.any(np.logical_and(self._data[::2], self._data[1::2])) + return not np.any( + np.logical_and(self._data["encoding"][::2], self._data["encoding"][1::2]) + ) @property def ndims(self): @@ -472,22 +464,6 @@ def shape(self): def dtype(self): return self._dtype - def crc(self): - log.warning( - "`geometry.crc()` is deprecated and will " - + "be removed in October 2023: replace " - + "with `geometry.__hash__()` or `hash(geometry)`" - ) - return self.__hash__() - - def hash(self): - log.warning( - "`geometry.hash()` is deprecated and will " - + "be removed in October 2023: replace " - + "with `geometry.__hash__()` or `hash(geometry)`" - ) - return self.__hash__() - def __hash__(self): """ Get the hash of the current transformation matrix. @@ -529,7 +505,7 @@ def stripped(self): @caching.cache_decorator def sum(self): - return (self._data[::2] * self._data[1::2]).sum() + return (self._data["encoding"][::2] * self._data["encoding"][1::2]).sum() @caching.cache_decorator def size(self): @@ -602,7 +578,7 @@ def __init__(self, data): @caching.cache_decorator def is_empty(self): - return not np.any(self._data[1::2]) + return not np.any(self._data["encoding"][1::2]) @staticmethod def from_dense(dense_data, encoding_dtype=np.int64): @@ -634,7 +610,7 @@ def stripped(self): @caching.cache_decorator def sum(self): - return self._data[1::2].sum() + return self._data["encoding"][1::2].sum() @caching.cache_decorator def size(self): @@ -736,7 +712,7 @@ def gather_nd(self, indices): return self._data.gather_nd(self._to_base_indices(indices)) def get_value(self, index): - return self._data[tuple(self._to_base_indices(index))] + return self._data["encoding"][tuple(self._to_base_indices(index))] class FlattenedEncoding(LazyIndexMap): @@ -747,11 +723,11 @@ class FlattenedEncoding(LazyIndexMap): """ def _to_base_indices(self, indices): - return np.column_stack(np.unravel_index(indices, self._data.shape)) + return np.column_stack(np.unravel_index(indices, self._data["encoding"].shape)) def _from_base_indices(self, base_indices): return np.expand_dims( - np.ravel_multi_index(base_indices.T, self._data.shape), axis=-1 + np.ravel_multi_index(base_indices.T, self._data["encoding"].shape), axis=-1 ) @property @@ -760,17 +736,17 @@ def shape(self): @property def dense(self): - return self._data.dense.reshape((-1,)) + return self._data["encoding"].dense.reshape((-1,)) def mask(self, mask): - return self._data.mask(mask.reshape(self._data.shape)) + return self._data["encoding"].mask(mask.reshape(self._data["encoding"].shape)) @property def flat(self): return self def copy(self): - return FlattenedEncoding(self._data.copy()) + return FlattenedEncoding(self._data["encoding"].copy()) class ShapedEncoding(LazyIndexMap): @@ -792,19 +768,19 @@ def __init__(self, encoding, shape): size = np.prod(self._shape) if nn == 1: size = np.abs(size) - if self._data.size % size != 0: + if self._data["encoding"].size % size != 0: raise ValueError( "cannot reshape encoding of 
size %d into shape %s" - % (self._data.size, str(self._shape)) + % (self._data["encoding"].size, str(self._shape)) ) - rem = self._data.size // size + rem = self._data["encoding"].size // size self._shape = tuple(rem if s == -1 else s for s in self._shape) elif nn > 2: raise ValueError("shape cannot have more than one -1 value") - elif np.prod(self._shape) != self._data.size: + elif np.prod(self._shape) != self._data["encoding"].size: raise ValueError( "cannot reshape encoding of size %d into shape %s" - % (self._data.size, str(self._shape)) + % (self._data["encoding"].size, str(self._shape)) ) def _from_base_indices(self, base_indices): @@ -823,13 +799,13 @@ def shape(self): @property def dense(self): - return self._data.dense.reshape(self.shape) + return self._data["encoding"].dense.reshape(self.shape) def mask(self, mask): - return self._data.mask(mask.flat) + return self._data["encoding"].mask(mask.flat) def copy(self): - return ShapedEncoding(encoding=self._data.copy(), shape=self.shape) + return ShapedEncoding(encoding=self._data["encoding"].copy(), shape=self.shape) class TransposedEncoding(LazyIndexMap): @@ -870,7 +846,7 @@ def perm(self): @property def shape(self): - shape = self._data.shape + shape = self._data["encoding"].shape return tuple(shape[p] for p in self._perm) def _to_base_indices(self, indices): @@ -887,23 +863,29 @@ def _from_base_indices(self, base_indices): @property def dense(self): - return self._data.dense.transpose(self._perm) + return self._data["encoding"].dense.transpose(self._perm) def gather(self, indices): - return self._data.gather(self._base_indices(indices)) + return self._data["encoding"].gather(self._base_indices(indices)) def mask(self, mask): - return self._data.mask(mask.transpose(self._inv_perm)).transpose(self._perm) + return ( + self._data["encoding"] + .mask(mask.transpose(self._inv_perm)) + .transpose(self._perm) + ) def get_value(self, index): - return self._data[tuple(self._base_indices(index))] + return self._data["encoding"][tuple(self._base_indices(index))] @property def data(self): return self._data def copy(self): - return TransposedEncoding(base_encoding=self._data.copy(), perm=self._perm) + return TransposedEncoding( + base_encoding=self._data["encoding"].copy(), perm=self._perm + ) class FlippedEncoding(LazyIndexMap): @@ -924,9 +906,10 @@ def __init__(self, encoding, axes): if len(set(self._axes)) != len(self._axes): raise ValueError("Axes cannot contain duplicates, got %s" % str(self._axes)) super().__init__(encoding) - if not all(0 <= a < self._data.ndims for a in axes): + if not all(0 <= a < self._data["encoding"].ndims for a in axes): raise ValueError( - "Invalid axes %s for %d-d encoding" % (str(axes), self._data.ndims) + "Invalid axes %s for %d-d encoding" + % (str(axes), self._data["encoding"].ndims) ) def _to_base_indices(self, indices): @@ -942,11 +925,11 @@ def _from_base_indices(self, base_indices): @property def shape(self): - return self._data.shape + return self._data["encoding"].shape @property def dense(self): - dense = self._data.dense + dense = self._data["encoding"].dense for a in self._axes: dense = np.flip(dense, a) return dense @@ -955,10 +938,10 @@ def mask(self, mask): if not isinstance(mask, Encoding): mask = DenseEncoding(mask) mask = mask.flip(self._axes) - return self._data.mask(mask).flip(self._axes) + return self._data["encoding"].mask(mask).flip(self._axes) def copy(self): - return FlippedEncoding(self._data.copy(), self._axes) + return FlippedEncoding(self._data["encoding"].copy(), self._axes) def 
flip(self, axis=0): if isinstance(axis, np.ndarray): diff --git a/trimesh/voxel/morphology.py b/trimesh/voxel/morphology.py index c4a72b335..afe0e1497 100644 --- a/trimesh/voxel/morphology.py +++ b/trimesh/voxel/morphology.py @@ -50,9 +50,7 @@ def _assert_rank(value, rank): def _assert_sparse_rank(value, rank=None): if len(value.shape) != 2: - raise ValueError( - "sparse_indices must be rank 2, got shape %s" % str(value.shape) - ) + raise ValueError("sparse_indices must be rank 2, got shape %s" % str(value.shape)) if rank is not None: if value.shape[-1] != rank: raise ValueError( @@ -153,9 +151,7 @@ def binary_dilation(encoding, **kwargs): https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.ndimage.morphology.binary_dilation.html#scipy.ndimage.morphology.binary_dilation """ - return enc.DenseEncoding( - ndimage.binary_dilation(_dense(encoding, rank=3), **kwargs) - ) + return enc.DenseEncoding(ndimage.binary_dilation(_dense(encoding, rank=3), **kwargs)) def binary_closing(encoding, **kwargs): diff --git a/trimesh/voxel/ops.py b/trimesh/voxel/ops.py index ac0123b85..afdab885b 100644 --- a/trimesh/voxel/ops.py +++ b/trimesh/voxel/ops.py @@ -256,9 +256,9 @@ def multibox(centers, pitch=1.0, colors=None): v += np.tile(b.vertices, (len(centers), 1)) f = np.tile(b.faces, (len(centers), 1)) - f += np.tile( - np.arange(len(centers)) * len(b.vertices), (len(b.faces), 1) - ).T.reshape((-1, 1)) + f += np.tile(np.arange(len(centers)) * len(b.vertices), (len(b.faces), 1)).T.reshape( + (-1, 1) + ) face_colors = None if colors is not None: From 49518e1701d34efd427b6142890384463d90d367 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Sun, 17 Sep 2023 15:47:38 -0400 Subject: [PATCH 081/144] fix test_obj --- tests/test_cache.py | 239 ++++++++++++++++++++++------------- tests/test_obj.py | 296 ++++++++++++++++++++------------------------ 2 files changed, 287 insertions(+), 248 deletions(-) diff --git a/tests/test_cache.py b/tests/test_cache.py index f4b97cdd4..d74e376f5 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -7,7 +7,6 @@ class CacheTest(g.unittest.TestCase): - def test_track(self): """ Check to make sure our fancy caching system only changes @@ -15,17 +14,18 @@ def test_track(self): """ original = g.trimesh.caching.hash_fast - options = [g.trimesh.caching.hash_fast, - g.trimesh.caching.hash_fallback, - g.trimesh.caching.sha256] + options = [ + g.trimesh.caching.hash_fast, + g.trimesh.caching.hash_fallback, + g.trimesh.caching.sha256, + ] for option in options: - g.log.info(f'testing hash function: {option.__name__}') + g.log.info(f"testing hash function: {option.__name__}") g.trimesh.caching.hash_fast = option # generate test data and perform numpy operations - a = g.trimesh.caching.tracked_array( - g.random(TEST_DIM)) + a = g.trimesh.caching.tracked_array(g.random(TEST_DIM)) modified = [hash(a)] a[0][0] = 10 modified.append(hash(a)) @@ -65,9 +65,7 @@ def test_track(self): a += 10 modified.append(hash(a)) # assign some new data - a = g.trimesh.caching.tracked_array( - [.125, 115.32444, 4], - dtype=g.np.float64) + a = g.trimesh.caching.tracked_array([0.125, 115.32444, 4], dtype=g.np.float64) modified.append(hash(a)) a += [10, 0, 0] @@ -102,12 +100,14 @@ def test_contiguous(self): t = g.trimesh.caching.tracked_array(a) original = g.trimesh.caching.hash_fast - options = [g.trimesh.caching.hash_fast, - g.trimesh.caching.hash_fallback, - g.trimesh.caching.sha256] + options = [ + g.trimesh.caching.hash_fast, + g.trimesh.caching.hash_fallback, + 
g.trimesh.caching.sha256, + ] for option in options: - g.log.info(f'testing hash function: {option.__name__}') + g.log.info(f"testing hash function: {option.__name__}") g.trimesh.caching.hash_fast = option # hashing will fail on non- contiguous arrays # make sure our utility function has handled this @@ -122,10 +122,10 @@ def test_mutable(self): """ d = g.trimesh.caching.DataStore() - d['hi'] = g.random(100) + d["hi"] = g.random(100) hash_initial = hash(d) # mutate internal data - d['hi'][0] += 1 + d["hi"][0] += 1 assert hash(d) != hash_initial # should be mutable by default @@ -134,18 +134,18 @@ def test_mutable(self): d.mutable = False try: - d['hi'][1] += 1 + d["hi"][1] += 1 except ValueError: # should be raised when array is marked as read only return # we shouldn't make it past the try-except - raise ValueError('mutating data worked when it shouldn\'t!') + raise ValueError("mutating data worked when it shouldn't!") def test_transform(self): """ apply_transform tries to not dump the full cache """ - m = g.get_mesh('featuretype.STL') + m = g.get_mesh("featuretype.STL") # should populate edges_face e_len = len(m.edges) # should maintain required properties @@ -154,18 +154,88 @@ def test_transform(self): assert len(m.edges_face) == e_len def test_simple_collision(self): - faces1 = g.np.array([0, 1, 2, 0, 3, 1, 0, - 2, 4, 0, 4, 5, 5, 6, - 3, 5, 3, 0, 7, 1, 3, - 7, 3, 6, 4, 2, 1, 4, - 1, 7, 5, 4, 7, 6, 5, 7], - dtype=g.np.int64).reshape(-1, 3) - faces2 = g.np.array([0, 1, 2, 0, 3, 1, 2, - 4, 0, 5, 4, 2, 6, 3, - 0, 6, 0, 4, 6, 1, 3, - 6, 7, 1, 2, 7, 5, 2, - 1, 7, 4, 5, 7, 6, 4, 7], - dtype=g.np.int64).reshape(-1, 3) + faces1 = g.np.array( + [ + 0, + 1, + 2, + 0, + 3, + 1, + 0, + 2, + 4, + 0, + 4, + 5, + 5, + 6, + 3, + 5, + 3, + 0, + 7, + 1, + 3, + 7, + 3, + 6, + 4, + 2, + 1, + 4, + 1, + 7, + 5, + 4, + 7, + 6, + 5, + 7, + ], + dtype=g.np.int64, + ).reshape(-1, 3) + faces2 = g.np.array( + [ + 0, + 1, + 2, + 0, + 3, + 1, + 2, + 4, + 0, + 5, + 4, + 2, + 6, + 3, + 0, + 6, + 0, + 4, + 6, + 1, + 3, + 6, + 7, + 1, + 2, + 7, + 5, + 2, + 1, + 7, + 4, + 5, + 7, + 6, + 4, + 7, + ], + dtype=g.np.int64, + ).reshape(-1, 3) hash_fast = g.trimesh.caching.hash_fast assert hash_fast(faces1) != hash_fast(faces2) @@ -193,90 +263,89 @@ def test_method_combinations(self): if not g.PY3: return - import itertools - + import itertools, warnings import numpy as np - from trimesh.caching import tracked_array dim = (100, 3) # generate a bunch of arguments for every function of an `ndarray` so # we can see if the functions mutate - flat = [2.3, - 1, - 10, - 4.2, - [3, -1], - {'shape': 10}, - np.int64, - np.float64, - True, True, - False, False, - g.random(dim), - g.random(dim[::1]), - 'shape'] + flat = [ + 2.3, + 1, + 10, + 4.2, + [3, -1], + {"shape": 10}, + np.int64, + np.float64, + True, + True, + False, + False, + g.random(dim), + g.random(dim[::1]), + "shape", + ] # start with no arguments attempts = [()] # add a single argument from our guesses attempts.extend([(A,) for A in flat]) # add 2 and 3 length permutations of our guesses - attempts.extend([tuple(G) for G in itertools.permutations(flat, 2)]) + attempts.extend([tuple(G) for G in itertools.product(flat, repeat=2)]) # adding 3-length permuations makes this test 10x slower but if you # are suspicious of a method caching you could uncomment this out: # attempts.extend([tuple(G) for G in itertools.permutations(flat, 3)]) - - skip = {'__array_ufunc__', # segfaulting when called with `(2.3, 1)` - 'astype', - } + skip = set() # collect functions which mutate arrays 
but don't change our hash broken = [] - for method in list(dir(tracked_array(g.random(dim)))): - - if method in skip: - continue - - failures = [] - g.log.debug(f'hash check: `{method}`') - for A in attempts: - m = g.random((100, 3)) - true_pre = m.tobytes() - m = tracked_array(m) - hash_pre = hash(m) - try: - eval(f'm.{method}(*A)') - except BaseException as J: - failures.append(str(J)) - - hash_post = hash(m) - true_post = m.tobytes() - - # if tobytes disagrees with our hashing logic - # it indicates we have cached incorrectly - if (hash_pre == hash_post) != (true_pre == true_post): - broken.append((method, A)) + with warnings.catch_warnings(): + # ignore all warnings inside this context manager + warnings.filterwarnings("ignore") + + for method in list(dir(tracked_array(g.random(dim)))): + if method in skip: + continue + + failures = [] + g.log.debug(f"hash check: `{method}`") + for A in attempts: + m = g.random((100, 3)) + true_pre = m.tobytes() + m = tracked_array(m) + hash_pre = hash(m) + try: + eval(f"m.{method}(*A)") + except BaseException as J: + failures.append(str(J)) + + hash_post = hash(m) + true_post = m.tobytes() + + # if tobytes disagrees with our hashing logic + # it indicates we have cached incorrectly + if (hash_pre == hash_post) != (true_pre == true_post): + broken.append((method, A)) if len(broken) > 0: method_busted = {method for method, _ in broken} raise ValueError( - f'`TrackedArray` incorrectly hashing methods: {method_busted}') + f"`TrackedArray` incorrectly hashing methods: {method_busted}" + ) def test_validate(self): # create a mesh with two duplicate triangles # and one degenerate triangle m = g.trimesh.Trimesh( - vertices=[[0, 0, 0], - [1, 0, 0], - [0, 1, 0], - [1, 0, 0], - [0, 1, 0], - [1, 1, 0]], + vertices=[[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]], faces=[[3, 4, 4], [0, 1, 2], [0, 1, 2]], - validate=False) + validate=False, + ) # should not have removed any triangles assert m.triangles.shape == (3, 3, 3) @@ -285,6 +354,6 @@ def test_validate(self): assert m.triangles.shape == (1, 3, 3) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_obj.py b/tests/test_obj.py index a5e8a5238..cb5e940b4 100644 --- a/tests/test_obj.py +++ b/tests/test_obj.py @@ -5,43 +5,39 @@ class OBJTest(g.unittest.TestCase): - def test_rabbit(self): # A BSD-licensed test model from pyglet # it has mixed triangles, quads, and 16 element faces -_- # this should test the non-vectorized load path - m = g.get_mesh('rabbit.obj') + m = g.get_mesh("rabbit.obj") assert len(m.faces) == 1252 - rec = g.wrapload( - m.export(file_type='obj'), file_type='obj') + rec = g.wrapload(m.export(file_type="obj"), file_type="obj") assert g.np.isclose(m.area, rec.area) def test_no_img(self): # sometimes people use the `vt` parameter for arbitrary # vertex attributes and thus want UV coordinates even # if there is no texture image - m = g.get_mesh('noimg.obj') + m = g.get_mesh("noimg.obj") assert m.visual.uv.shape == (len(m.vertices), 2) # make sure UV coordinates are in range 0.0 - 1.0 assert m.visual.uv.max() < (1 + 1e-5) assert m.visual.uv.min() > -1e-5 # check to make sure it's not all zeros assert m.visual.uv.ptp() > 0.5 - rec = g.wrapload( - m.export(file_type='obj'), file_type='obj') + rec = g.wrapload(m.export(file_type="obj"), file_type="obj") assert g.np.isclose(m.area, rec.area) def test_trailing(self): # test files with texture and trailing slashes - m = g.get_mesh('jacked.obj') + m = 
g.get_mesh("jacked.obj") assert len(m.visual.uv) == len(m.vertices) - rec = g.wrapload( - m.export(file_type='obj'), file_type='obj') + rec = g.wrapload(m.export(file_type="obj"), file_type="obj") assert g.np.isclose(m.area, rec.area) def test_obj_groups(self): # a wavefront file with groups defined - mesh = g.get_mesh('groups.obj') + mesh = g.get_mesh("groups.obj") # make sure some data got loaded assert g.trimesh.util.is_shape(mesh.faces, (-1, 3)) @@ -56,29 +52,26 @@ def test_obj_groups(self): def test_obj_negative_indices(self): # a wavefront file with negative indices - mesh = g.get_mesh('negative_indices.obj') + mesh = g.get_mesh("negative_indices.obj") # make sure some data got loaded assert g.trimesh.util.is_shape(mesh.faces, (12, 3)) assert g.trimesh.util.is_shape(mesh.vertices, (8, 3)) def test_obj_quad(self): - mesh = g.get_mesh('quadknot.obj') + mesh = g.get_mesh("quadknot.obj") # make sure some data got loaded assert g.trimesh.util.is_shape(mesh.faces, (-1, 3)) assert g.trimesh.util.is_shape(mesh.vertices, (-1, 3)) assert mesh.is_watertight assert mesh.is_winding_consistent - rec = g.wrapload( - mesh.export(file_type='obj'), file_type='obj') + rec = g.wrapload(mesh.export(file_type="obj"), file_type="obj") assert g.np.isclose(mesh.area, rec.area) def test_obj_multiobj(self): # test a wavefront file with multiple objects in the same file - scene = g.get_mesh('two_objects.obj', - split_object=True, - group_material=False) + scene = g.get_mesh("two_objects.obj", split_object=True, group_material=False) assert len(scene.geometry) == 2 for mesh in scene.geometry.values(): @@ -93,10 +86,12 @@ def test_obj_split_attributes(self): # test a wavefront file where pos/uv/norm have different indices # and where multiple objects share vertices # Note 'process=False' to avoid merging vertices - scene = g.get_mesh('joined_tetrahedra.obj', - process=False, - split_object=True, - group_material=False) + scene = g.get_mesh( + "joined_tetrahedra.obj", + process=False, + split_object=True, + group_material=False, + ) assert len(scene.geometry) == 2 @@ -108,10 +103,9 @@ def test_obj_split_attributes(self): assert g.trimesh.util.is_shape(geom[1].vertices, (9, 3)) def test_obj_simple_order(self): - # test a simple wavefront model without split indexes # and make sure we don't reorder vertices unnecessarily - file_name = g.os.path.join(g.dir_models, 'cube.OBJ') + file_name = g.os.path.join(g.dir_models, "cube.OBJ") # load a simple OBJ file without merging vertices m = g.trimesh.load(file_name, process=False) # use trivial loading to compare with fancy performant one @@ -122,15 +116,11 @@ def test_obj_simple_order(self): assert g.np.allclose(v, m.vertices) def test_order_tex(self): - # test a simple wavefront model without split indexes # and make sure we don't reorder vertices unnecessarily - file_name = g.os.path.join(g.dir_models, 'fuze.obj') + file_name = g.os.path.join(g.dir_models, "fuze.obj") # load a simple OBJ file without merging vertices - m = g.trimesh.load( - file_name, - process=False, - maintain_order=True) + m = g.trimesh.load(file_name, process=False, maintain_order=True) # use trivial loading to compare with fancy performant one with open(file_name) as f: f, v, vt = simple_load(f.read()) @@ -139,157 +129,148 @@ def test_order_tex(self): assert g.np.allclose(v, m.vertices) def test_obj_compressed(self): - mesh = g.get_mesh('cube_compressed.obj', process=False) - assert mesh._cache.cache['vertex_normals'].shape == mesh.vertices.shape - assert g.np.allclose( - 
g.np.abs(mesh.vertex_normals).sum(axis=1), 1.0) + mesh = g.get_mesh("cube_compressed.obj", process=False) + assert mesh._cache.cache["vertex_normals"].shape == mesh.vertices.shape + assert g.np.allclose(g.np.abs(mesh.vertex_normals).sum(axis=1), 1.0) def test_vertex_color(self): # get a box mesh mesh = g.trimesh.creation.box() # set each vertex to a unique random color mesh.visual.vertex_colors = [ - g.trimesh.visual.random_color() - for _ in range(len(mesh.vertices))] + g.trimesh.visual.random_color() for _ in range(len(mesh.vertices)) + ] # export and then reload the file as OBJ rec = g.trimesh.load( - g.trimesh.util.wrap_as_stream( - mesh.export(file_type='obj')), - file_type='obj') + g.trimesh.util.wrap_as_stream(mesh.export(file_type="obj")), file_type="obj" + ) # assert colors have survived the export cycle - assert (mesh.visual.vertex_colors == - rec.visual.vertex_colors).all() + assert (mesh.visual.vertex_colors == rec.visual.vertex_colors).all() def test_single_vn(self): """ Make sure files with a single VN load. """ - m = g.get_mesh('singlevn.obj') + m = g.get_mesh("singlevn.obj") assert len(m.vertices) > 0 assert len(m.faces) > 0 def test_polygon_faces(self): - m = g.get_mesh('polygonfaces.obj') + m = g.get_mesh("polygonfaces.obj") assert len(m.vertices) > 0 assert len(m.faces) > 0 def test_faces_not_enough_indices(self): - m = g.get_mesh('notenoughindices.obj') + m = g.get_mesh("notenoughindices.obj") assert len(m.vertices) > 0 assert len(m.faces) == 1 def test_export_path(self): - m = g.get_mesh('fuze.obj') + m = g.get_mesh("fuze.obj") g.check_fuze(m) - assert m._cache.cache['vertex_normals'].shape == m.vertices.shape + assert m._cache.cache["vertex_normals"].shape == m.vertices.shape with g.TemporaryDirectory() as d: - file_path = g.os.path.join(d, 'fz.obj') + file_path = g.os.path.join(d, "fz.obj") m.export(file_path) r = g.trimesh.load(file_path) g.check_fuze(r) def test_mtl(self): # get a mesh with texture - m = g.get_mesh('fuze.obj') + m = g.get_mesh("fuze.obj") # export the mesh including data - obj, data = g.trimesh.exchange.export.export_obj( - m, return_texture=True) + obj, data = g.trimesh.exchange.export.export_obj(m, return_texture=True) with g.trimesh.util.TemporaryDirectory() as path: # where is the OBJ file going to be saved - obj_path = g.os.path.join(path, 'test.obj') - with open(obj_path, 'w') as f: + obj_path = g.os.path.join(path, "test.obj") + with open(obj_path, "w") as f: f.write(obj) # save the MTL and images for k, v in data.items(): - with open(g.os.path.join(path, k), 'wb') as f: + with open(g.os.path.join(path, k), "wb") as f: f.write(v) # reload the mesh from the export rec = g.trimesh.load(obj_path) # make sure loaded image is the same size as the original - assert (rec.visual.material.image.size == - m.visual.material.image.size) + assert rec.visual.material.image.size == m.visual.material.image.size # make sure the faces are the same size assert rec.faces.shape == m.faces.shape def test_scene(self): - s = g.get_mesh('cycloidal.3DXML') + s = g.get_mesh("cycloidal.3DXML") e = g.trimesh.load( - g.io_wrap(s.export(file_type='obj')), - file_type='obj', + g.io_wrap(s.export(file_type="obj")), + file_type="obj", split_object=True, - group_materials=False) + group_materials=False, + ) - assert g.np.isclose(e.area, s.area, rtol=.01) + assert g.np.isclose(e.area, s.area, rtol=0.01) def test_edge_cases(self): # a mesh with some NaN colors - n = g.get_mesh('nancolor.obj') + n = g.get_mesh("nancolor.obj") assert n.faces.shape == (12, 3) - v = 
g.get_mesh('cubevt.obj') + v = g.get_mesh("cubevt.obj") assert v.faces.shape == (12, 3) def test_empty_or_pointcloud(self): # demo files to check - empty_files = ['obj_empty.obj', - 'obj_points.obj', - 'obj_wireframe.obj'] + empty_files = ["obj_empty.obj", "obj_points.obj", "obj_wireframe.obj"] for empty_file in empty_files: - e = g.get_mesh('emptyIO/' + empty_file) + e = g.get_mesh("emptyIO/" + empty_file) # create export - if 'empty' in empty_file: + if "empty" in empty_file: try: - export = e.export(file_type='ply') + export = e.export(file_type="ply") except BaseException: continue - raise ValueError('cannot export empty') - elif 'points' in empty_file: - export = e.export(file_type='ply') - reconstructed = g.wrapload(export, file_type='ply') + raise ValueError("cannot export empty") + elif "points" in empty_file: + export = e.export(file_type="ply") + reconstructed = g.wrapload(export, file_type="ply") # result should be a point cloud instance assert isinstance(e, g.trimesh.PointCloud) - assert hasattr(e, 'vertices') + assert hasattr(e, "vertices") # point cloud export should contain vertices assert isinstance(reconstructed, g.trimesh.PointCloud) - assert hasattr(reconstructed, 'vertices') + assert hasattr(reconstructed, "vertices") def test_backslash_continuation_character(self): # an obj file with \ (backslash) line continuation characters - m = g.get_mesh('wallhole.obj') + m = g.get_mesh("wallhole.obj") assert m.faces.shape == (66, 3) def test_no_uv(self): - mesh = g.get_mesh('box.obj') - rec = g.wrapload( - mesh.export(file_type='obj'), file_type='obj') + mesh = g.get_mesh("box.obj") + rec = g.wrapload(mesh.export(file_type="obj"), file_type="obj") assert g.np.isclose(mesh.area, rec.area) def test_no_uv_but_mtl(self): sphere = g.trimesh.creation.uv_sphere() sphere.visual = g.trimesh.visual.TextureVisuals( - uv=None, - material=g.trimesh.visual.material.empty_material()) - output = sphere.export('sphere.obj') - assert 'usemtl' in output + uv=None, material=g.trimesh.visual.material.empty_material() + ) + output = sphere.export("sphere.obj") + assert "usemtl" in output def test_chair(self): - mesh = next(iter(g.get_mesh('chair.zip').geometry.values())) + mesh = next(iter(g.get_mesh("chair.zip").geometry.values())) # this model comes with vertex normals - assert 'vertex_normals' in mesh._cache - assert g.np.allclose( - 1.0, g.np.linalg.norm(mesh.vertex_normals, axis=1)) + assert "vertex_normals" in mesh._cache + assert g.np.allclose(1.0, g.np.linalg.norm(mesh.vertex_normals, axis=1)) mesh.apply_scale(0.46377314288075433) - assert 'vertex_normals' in mesh._cache - assert g.np.allclose( - 1.0, g.np.linalg.norm(mesh.vertex_normals, axis=1)) - assert 'vertex_normals' in mesh._cache + assert "vertex_normals" in mesh._cache + assert g.np.allclose(1.0, g.np.linalg.norm(mesh.vertex_normals, axis=1)) + assert "vertex_normals" in mesh._cache mesh._cache.clear() - assert 'vertex_normals' not in mesh._cache + assert "vertex_normals" not in mesh._cache # if we recomputed now, the degenerate faces # would lead some of these vertex normals to be zero # assert g.np.allclose( @@ -298,32 +279,30 @@ def test_chair(self): def test_multi_nodupe(self): s = g.get_mesh("forearm.zae") obj, mtl = g.trimesh.exchange.obj.export_obj( - s, include_color=True, - include_texture=True, - return_texture=True) + s, include_color=True, include_texture=True, return_texture=True + ) # should be using one material file - assert obj.count('mtllib') == 1 - assert 'mtllib material.mtl' in obj + assert obj.count("mtllib") == 
1 + assert "mtllib material.mtl" in obj # should be specifying 5 materials - assert obj.count('usemtl') == 5 + assert obj.count("usemtl") == 5 # this file has only the properties (no images) assert len(mtl) == 1 mtl_names = [ - L.strip().split()[-1].strip() for L in - mtl['material.mtl'].decode('utf-8').split('\n') - if 'newmtl' in L] + L.strip().split()[-1].strip() + for L in mtl["material.mtl"].decode("utf-8").split("\n") + if "newmtl" in L + ] # there should be 5 unique material names assert len(set(mtl_names)) == 5 def test_mtl_color_roundtrip(self): - # create a mesh with a simple material m = g.trimesh.creation.box() m.visual = m.visual.to_texture() # set each color component to a unique value - colors = [g.trimesh.visual.color.random_color() - for _ in range(3)] + colors = [g.trimesh.visual.color.random_color() for _ in range(3)] m.visual.material.ambient = colors[0] m.visual.material.specular = colors[1] m.visual.material.diffuse = colors[2] @@ -333,69 +312,59 @@ def test_mtl_color_roundtrip(self): # exporting by filename will automatically # create a FilePathResolver which writes the # `mtl` file to the same directory - file_name = g.os.path.join(d, 'hi.obj') + file_name = g.os.path.join(d, "hi.obj") m.export(file_name) # reload the export by file name r = g.trimesh.load(file_name) # these values should have survived the roundtrip - assert g.np.allclose(m.visual.material.ambient, - r.visual.material.ambient) - assert g.np.allclose(m.visual.material.specular, - r.visual.material.specular) - assert g.np.allclose(m.visual.material.diffuse, - r.visual.material.diffuse) - assert g.np.isclose(m.visual.material.glossiness, - r.visual.material.glossiness) + assert g.np.allclose(m.visual.material.ambient, r.visual.material.ambient) + assert g.np.allclose(m.visual.material.specular, r.visual.material.specular) + assert g.np.allclose(m.visual.material.diffuse, r.visual.material.diffuse) + assert g.np.isclose(m.visual.material.glossiness, r.visual.material.glossiness) def test_scene_export_material_name(self): - s = g.get_mesh('fuze.obj', force='scene') - dummy = 'fuxx' - s.geometry['fuze.obj'].visual.material.name = dummy + s = g.get_mesh("fuze.obj", force="scene") + dummy = "fuxx" + s.geometry["fuze.obj"].visual.material.name = dummy r = g.trimesh.resolvers.ZipResolver() - r['model.obj'] = s.export( - file_type='obj', - mtl_name='mystuff.mtl', - resolver=r) + r["model.obj"] = s.export(file_type="obj", mtl_name="mystuff.mtl", resolver=r) - mtl = r['mystuff.mtl'].decode('utf-8') - assert mtl.count('newmtl') == 1 - assert f'newmtl {dummy}' in mtl - assert f'{dummy}.jpeg' in r + mtl = r["mystuff.mtl"].decode("utf-8") + assert mtl.count("newmtl") == 1 + assert f"newmtl {dummy}" in mtl + assert f"{dummy}.jpeg" in r def test_compound_scene_export(self): - # generate a mesh with multiple textures - a = g.get_mesh('BoxTextured.glb') + a = g.get_mesh("BoxTextured.glb") a = a.scaled(1.0 / a.extents.max()) a.apply_translation(-a.bounds[0]) - b = g.get_mesh('fuze.obj').scene() + b = g.get_mesh("fuze.obj").scene() b = b.scaled(1.0 / b.extents.max()) b.apply_translation(-b.bounds[0] + [2, 0, 0]) d = next(iter(b.copy().geometry.values())) d.apply_translation([-1, 0, 0]) - assert hash(d.visual.material) == hash( - b.geometry['fuze.obj'].visual.material) + assert hash(d.visual.material) == hash(b.geometry["fuze.obj"].visual.material) # should change the material hash d.visual.material.glossiness = 0.1 - assert hash(d.visual.material) != hash( - b.geometry['fuze.obj'].visual.material) + assert 
hash(d.visual.material) != hash(b.geometry["fuze.obj"].visual.material)

         # generate a compound scene
         c = a + b + d
         for i in c.geometry.values():
             # name all the materials the same thing
-            i.visual.material.name = 'material_0'
+            i.visual.material.name = "material_0"

         # export the compound scene
-        obj, mtl = c.export(file_type='obj', return_texture=True)
+        obj, mtl = c.export(file_type="obj", return_texture=True)

         # there should be exactly one mtllib referenced
-        assert obj.count('mtllib') == 1
-        assert obj.count('usemtl') == 3
+        assert obj.count("mtllib") == 1
+        assert obj.count("usemtl") == 3

         # should be one texture image for each of 3
         # plus the `.mtl` file itself
@@ -406,19 +375,21 @@ def test_compound_scene_export(self):

         # get the material names specified
         mtl_names = [
-            L.strip().split()[-1].strip() for L in
-            mtl['material.mtl'].decode('utf-8').split('\n')
-            if 'newmtl' in L]
+            L.strip().split()[-1].strip()
+            for L in mtl["material.mtl"].decode("utf-8").split("\n")
+            if "newmtl" in L
+        ]
         # there should be 3 unique material names
         assert len(set(mtl_names)) == 3

         # now reload the compound scene
         t = g.trimesh.load(
             file_obj=g.trimesh.util.wrap_as_stream(obj),
-            file_type='obj',
+            file_type="obj",
             resolver=g.trimesh.resolvers.ZipResolver(mtl),
             group_material=False,
-            split_object=True)
+            split_object=True,
+        )
         # these names should match eventually
         assert len(t.geometry.keys()) == len(c.geometry.keys())
         assert g.np.isclose(t.area, c.area)
@@ -427,49 +398,49 @@ def test_face_parsing_in_group_names(self):
         # Checks that an obj with a g tag containing a face-like name (an 'f '
         # followed by three space-separated text chunks, ex: f 1 2 3) does load
         # properly
-        m = g.get_mesh('face_in_group_name.obj')
+        m = g.get_mesh("face_in_group_name.obj")
         assert len(m.vertices) == 1

     def test_face_parsing_in_group_names_with_object_tag(self):
         # Checks that an obj with a g tag in the middle of a file,
         # containing a face-like name (an 'f ' followed by three
         # space-separated text chunks, ex: f 1 2 3), does load properly
-        m = g.get_mesh('face_in_group_name_mid_file.obj')
+        m = g.get_mesh("face_in_group_name_mid_file.obj")
         assert len(m.vertices) == 5
         assert len(m.faces) == 2

     def test_chunk_parsing_with_no_faces_but_with_f_in_chunk(self):
         # Checks that a chunk with no faces but with 'f ' in it loads properly
-        m = g.get_mesh('obj_with_no_face_in_chunk.obj')
+        m = g.get_mesh("obj_with_no_face_in_chunk.obj")
         assert len(m.vertices) == 3
         assert len(m.faces) == 1

     def test_export_normals(self):
         m = g.trimesh.creation.box()
-        assert 'vertex_normals' not in m._cache.cache
+        assert "vertex_normals" not in m._cache.cache

-        e = m.export(file_type='obj', include_normals=None)
-        assert 'vn ' not in e
-        e = m.export(file_type='obj', include_normals=True)
+        e = m.export(file_type="obj", include_normals=None)
+        assert "vn " not in e
+        e = m.export(file_type="obj", include_normals=True)
         # should have included normals
-        assert 'vn ' in e
+        assert "vn " in e
         # should have forced generation of normals
-        assert 'vertex_normals' in m._cache.cache
+        assert "vertex_normals" in m._cache.cache

         # now that they're in cache include_normals=None should get them
-        e = m.export(file_type='obj', include_normals=None)
-        assert 'vn ' in e
+        e = m.export(file_type="obj", include_normals=None)
+        assert "vn " in e

         # or skip them if explicitly asked
-        e = m.export(file_type='obj', include_normals=False)
-        assert 'vn ' not in e
+        e = m.export(file_type="obj", include_normals=False)
+        assert "vn " not in e

-    def test_export_mtl_args():
+    def 
test_export_mtl_args(self): mesh = g.trimesh.creation.box() # check for a crash with no materials defined - a, b = g.trimesh.exchange.obj.export_obj(mesh, return_texture=True, mtl_name='hi.mtl') - - + a, b = g.trimesh.exchange.obj.export_obj( + mesh, return_texture=True, mtl_name="hi.mtl" + ) def simple_load(text): @@ -485,15 +456,14 @@ def simple_load(text): line = line.strip() if len(line) < 2: continue - elif line.startswith('f '): - if '/' in line: - f.append([int(i.split('/', 1)[0]) - for i in line[1:].strip().split()]) + elif line.startswith("f "): + if "/" in line: + f.append([int(i.split("/", 1)[0]) for i in line[1:].strip().split()]) else: f.append(line[1:].strip().split()) - elif line.startswith('v '): + elif line.startswith("v "): v.append(line[1:].strip().split()) - elif line.startswith('vt '): + elif line.startswith("vt "): vt.append(line[2:].strip().split()) # get faces as basic numpy array @@ -504,6 +474,6 @@ def simple_load(text): return f, v, vt -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() From 49ec9aab81f4f0840235041dedfac83d827e074c Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Sun, 17 Sep 2023 16:48:41 -0400 Subject: [PATCH 082/144] fix formatting --- tests/test_cache.py | 5 ++++- trimesh/transformations.py | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/test_cache.py b/tests/test_cache.py index d74e376f5..09dda4ed2 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -263,8 +263,11 @@ def test_method_combinations(self): if not g.PY3: return - import itertools, warnings + import itertools + import warnings + import numpy as np + from trimesh.caching import tracked_array dim = (100, 3) diff --git a/trimesh/transformations.py b/trimesh/transformations.py index fc8efed44..bc2861b93 100644 --- a/trimesh/transformations.py +++ b/trimesh/transformations.py @@ -2144,7 +2144,7 @@ def transform_points(points, matrix, translate=True): Transformed points. 
""" points = np.asanyarray(points, dtype=np.float64) - if len(points) == 0: + if len(points) == 0 or matrix is None: return points.copy() # check the matrix against the points From af19e8be6c8f14642afe5ff42e6e258cc40bf9ac Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 18 Sep 2023 14:55:09 -0400 Subject: [PATCH 083/144] update domain trimsh.org->trimesh.org --- docs/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/Makefile b/docs/Makefile index 4f9ef3903..f993b0d68 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -21,7 +21,7 @@ example_rsts = $(foreach name, $(example_names), examples.$(name).rst) html: conf.py index.rst *.md README.rst trimesh.rst examples.md $(example_rsts) $(example_htmls) .deps @$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) touch "$(BUILDDIR)/html/.nojekyll" - echo "trimsh.org" > "$(BUILDDIR)/html/CNAME" + echo "trimesh.org" > "$(BUILDDIR)/html/CNAME" mv "$(BUILDDIR)/html/_static/examples" "$(BUILDDIR)/html/examples" || true mv "$(BUILDDIR)/html/_static/images" "$(BUILDDIR)/html/images" || true cp "$(STATICDIR)/favicon.ico" "$(BUILDDIR)/html/favicon.ico" || true From ba4857ad50cbdb424e65f381fab37c1fa1e685ca Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 14:07:16 -0400 Subject: [PATCH 084/144] attempt --- trimesh/voxel/base.py | 25 ++++++------ trimesh/voxel/encoding.py | 82 ++++++++++++++++++++++----------------- 2 files changed, 59 insertions(+), 48 deletions(-) diff --git a/trimesh/voxel/base.py b/trimesh/voxel/base.py index c7c3d40e2..1f4ccbe80 100644 --- a/trimesh/voxel/base.py +++ b/trimesh/voxel/base.py @@ -29,18 +29,17 @@ def __init__(self, encoding, transform=None, metadata=None): """ if transform is None: transform = np.eye(4) - if isinstance(encoding, np.ndarray): - encoding = DenseEncoding(encoding.astype(bool)) - if encoding.dtype != bool: - raise ValueError("encoding must have dtype bool") + self._data = caching.DataStore() self._cache = caching.Cache(id_function=self._data.__hash__) + self._transform = transforms.Transform(transform, datastore=self._data) + # use our setter self.encoding = encoding - self.metadata = {} # update the mesh metadata with passed metadata + self.metadata = {} if isinstance(metadata, dict): self.metadata.update(metadata) elif metadata is not None: @@ -64,21 +63,23 @@ def encoding(self): See `trimesh.voxel.encoding` for implementations. 
""" - return self._data["encoding"] + return self._encoding @encoding.setter def encoding(self, encoding): if isinstance(encoding, np.ndarray): encoding = DenseEncoding(encoding) elif not isinstance(encoding, Encoding): - raise ValueError("encoding must be an Encoding, got %s" % str(encoding)) + raise TypeError(type(encoding)) + if len(encoding.shape) != 3: - raise ValueError( - "encoding must be rank 3, got shape %s" % str(encoding.shape) - ) + raise ValueError(f"encoding.shape: (3,) != {encoding.shape}") if encoding.dtype != bool: - raise ValueError("encoding must be binary, got %s" % encoding.dtype) - self._data["encoding"] = encoding + raise ValueError(f"encoding.dtype: bool != {encoding.dtype}") + + self._data.data.update(encoding._data.data) + encoding._data = self._data + self._encoding = encoding @property def transform(self) -> NDArray[float64]: diff --git a/trimesh/voxel/encoding.py b/trimesh/voxel/encoding.py index 6254475c1..644b6c4a3 100644 --- a/trimesh/voxel/encoding.py +++ b/trimesh/voxel/encoding.py @@ -34,7 +34,7 @@ class Encoding(ABC): and dense encodings (wrappers around np.ndarrays). """ - def __init__(self, data): + def __init__(self, data=None): # a key-value store of numpy arrays self._data = caching.DataStore() @@ -43,7 +43,9 @@ def __init__(self, data): if isinstance(data, np.ndarray): self._data["encoding"] = data - else: + elif isinstance(data, Encoding): + self._data.data.update(data._data.data) + elif data is not None: raise TypeError(type(data)) @abc.abstractproperty @@ -188,8 +190,6 @@ class DenseEncoding(Encoding): """Simple `Encoding` implementation based on a numpy ndarray.""" def __init__(self, data): - if not isinstance(data, np.ndarray): - raise ValueError("DenseEncoding data must be a numpy array") super().__init__(data=data) @property @@ -277,32 +277,35 @@ def __init__(self, indices, values, shape=None): """ Parameters ------------ - indices: (m, n)-sized int array of indices - values: (m, n)-sized dtype array of values at the specified indices - shape: (n,) iterable of integers. If None, the maximum value of indices + indices : (m, n)-sized int array of indices + values : (m, n)-sized dtype array of values at the specified indices + shape : (n,) iterable of integers. If None, the maximum value of indices + 1 is used. 
""" - data = caching.DataStore() - super().__init__(data) - data["indices"] = indices - data["values"] = values - indices = data["indices"] + + # create the datastore and cache + super().__init__() + + indices = np.asanyarray(indices, dtype=np.int64) + values = np.asanyarray(values) + + if not np.all(indices >= 0): + raise ValueError("all indices must be non-negative") + if len(indices.shape) != 2: raise ValueError("indices must be 2D, got shaped %s" % str(indices.shape)) - if data["values"].shape != (indices.shape[0],): - raise ValueError( - "values and indices shapes inconsistent: {} and {}".format( - data["values"], data["indices"] - ) - ) + if len(values) != len(indices): + raise ValueError("values and indices shapes inconsistent") if shape is None: - self._shape = tuple(data["indices"].max(axis=0) + 1) + shape = tuple(indices.max(axis=0) + 1) else: - self._shape = tuple(shape) - if not np.all(indices < self._shape): + shape = tuple(shape) + if (indices > shape).any(): raise ValueError("all indices must be less than shape") - if not np.all(indices >= 0): - raise ValueError("all indices must be non-negative") + + self._data["indices"] = indices + self._data["values"] = values + self._data["shape"] = shape @staticmethod def from_dense(dense_data): @@ -321,11 +324,11 @@ def copy(self): @property def sparse_indices(self): - return self._data["encoding"]["indices"] + return self._data["indices"] @property def sparse_values(self): - return self._data["encoding"]["values"] + return self._data["values"] @property def dtype(self): @@ -341,7 +344,7 @@ def ndims(self): @property def shape(self): - return self._shape + return self._data["shape"] @property def size(self): @@ -439,12 +442,12 @@ def __init__(self, data, dtype=None): dtype: dtype of encoded data. Each second value of data is cast will be cast to this dtype if provided. 
""" - super().__init__(data=caching.tracked_array(data)) - if dtype is None: - dtype = self._data.dtype - if len(self._data["encoding"].shape) != 1: + super().__init__() + data = np.asanyarray(data, dtype=dtype) + if len(data.shape) != 1: raise ValueError("data must be 1D numpy array") - self._dtype = dtype + + self._data["encoding"] = data @caching.cache_decorator def is_empty(self): @@ -462,7 +465,7 @@ def shape(self): @property def dtype(self): - return self._dtype + return self._data["encoding"].dtype def __hash__(self): """ @@ -495,7 +498,7 @@ def from_brle(brle_data, dtype=None): def stripped(self): if self.is_empty: return _empty_stripped(self.shape) - data, padding = runlength.rle_strip(self._data) + data, padding = runlength.rle_strip(self._data["encoding"]) if padding == (0, 0): encoding = self else: @@ -509,12 +512,12 @@ def sum(self): @caching.cache_decorator def size(self): - return runlength.rle_length(self._data) + return runlength.rle_length(self._data["encoding"]) def _flip(self, axes): if axes != (0,): raise ValueError("encoding is 1D - cannot flip on axis %s" % str(axes)) - return RunLengthEncoding(runlength.rle_reverse(self._data)) + return RunLengthEncoding(runlength.rle_reverse(self._data["encoding"])) @caching.cache_decorator def sparse_components(self): @@ -698,7 +701,10 @@ def sum(self): @property def size(self): - return self._data.size + from IPython import embed + + embed() + return self._data["encoding"].size @property def sparse_indices(self): @@ -734,6 +740,10 @@ def _from_base_indices(self, base_indices): def shape(self): return (self.size,) + @property + def size(self): + return np.prod(self._data["shape"]) + @property def dense(self): return self._data["encoding"].dense.reshape((-1,)) From b8ca1bbbc9a1907d67622680617f3072a963d101 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 14:15:47 -0400 Subject: [PATCH 085/144] revert broken voxel changes --- trimesh/voxel/base.py | 25 +++-- trimesh/voxel/encoding.py | 215 +++++++++++++++++++------------------- 2 files changed, 122 insertions(+), 118 deletions(-) diff --git a/trimesh/voxel/base.py b/trimesh/voxel/base.py index 1f4ccbe80..5d2f361c2 100644 --- a/trimesh/voxel/base.py +++ b/trimesh/voxel/base.py @@ -29,17 +29,18 @@ def __init__(self, encoding, transform=None, metadata=None): """ if transform is None: transform = np.eye(4) + if isinstance(encoding, np.ndarray): + encoding = DenseEncoding(encoding.astype(bool)) + if encoding.dtype != bool: + raise ValueError("encoding must have dtype bool") self._data = caching.DataStore() self._cache = caching.Cache(id_function=self._data.__hash__) - self._transform = transforms.Transform(transform, datastore=self._data) - - # use our setter self.encoding = encoding + self.metadata = {} # update the mesh metadata with passed metadata - self.metadata = {} if isinstance(metadata, dict): self.metadata.update(metadata) elif metadata is not None: @@ -63,23 +64,21 @@ def encoding(self): See `trimesh.voxel.encoding` for implementations. 
""" - return self._encoding + return self._data["encoding"] @encoding.setter def encoding(self, encoding): if isinstance(encoding, np.ndarray): encoding = DenseEncoding(encoding) elif not isinstance(encoding, Encoding): - raise TypeError(type(encoding)) - + raise ValueError("encoding must be an Encoding, got %s" % str(encoding)) if len(encoding.shape) != 3: - raise ValueError(f"encoding.shape: (3,) != {encoding.shape}") + raise ValueError( + "encoding must be rank 3, got shape %s" % str(encoding.shape) + ) if encoding.dtype != bool: - raise ValueError(f"encoding.dtype: bool != {encoding.dtype}") - - self._data.data.update(encoding._data.data) - encoding._data = self._data - self._encoding = encoding + raise ValueError("encoding must be binary, got %s" % encoding.dtype) + self._data["encoding"] = encoding @property def transform(self) -> NDArray[float64]: diff --git a/trimesh/voxel/encoding.py b/trimesh/voxel/encoding.py index 644b6c4a3..3942a27e0 100644 --- a/trimesh/voxel/encoding.py +++ b/trimesh/voxel/encoding.py @@ -4,7 +4,7 @@ import numpy as np from .. import caching -from ..util import ABC +from ..util import ABC, log from . import runlength try: @@ -34,20 +34,10 @@ class Encoding(ABC): and dense encodings (wrappers around np.ndarrays). """ - def __init__(self, data=None): - # a key-value store of numpy arrays - self._data = caching.DataStore() - - # dumped when cache changes + def __init__(self, data): + self._data = data self._cache = caching.Cache(id_function=self._data.__hash__) - if isinstance(data, np.ndarray): - self._data["encoding"] = data - elif isinstance(data, Encoding): - self._data.data.update(data._data.data) - elif data is not None: - raise TypeError(type(data)) - @abc.abstractproperty def dtype(self): pass @@ -128,6 +118,22 @@ def stripped(self): def _flip(self, axes): return FlippedEncoding(self, axes) + def crc(self): + log.warning( + "`geometry.crc()` is deprecated and will " + + "be removed in October 2023: replace " + + "with `geometry.__hash__()` or `hash(geometry)`" + ) + return self.__hash__() + + def hash(self): + log.warning( + "`geometry.hash()` is deprecated and will " + + "be removed in October 2023: replace " + + "with `geometry.__hash__()` or `hash(geometry)`" + ) + return self.__hash__() + def __hash__(self): """ Get the hash of the current transformation matrix. 
@@ -190,27 +196,31 @@ class DenseEncoding(Encoding): """Simple `Encoding` implementation based on a numpy ndarray.""" def __init__(self, data): + if not isinstance(data, caching.TrackedArray): + if not isinstance(data, np.ndarray): + raise ValueError("DenseEncoding data must be a numpy array") + data = caching.tracked_array(data) super().__init__(data=data) @property def dtype(self): - return self._data["encoding"].dtype + return self._data.dtype @property def shape(self): - return self._data["encoding"].shape + return self._data.shape @caching.cache_decorator def sum(self): - return self._data["encoding"].sum() + return self._data.sum() @caching.cache_decorator def is_empty(self): - return not np.any(self._data["encoding"]) + return not np.any(self._data) @property def size(self): - return self._data["encoding"].size + return self._data.size @property def sparse_components(self): @@ -220,7 +230,7 @@ def sparse_components(self): @caching.cache_decorator def sparse_indices(self): - return np.column_stack(np.where(self._data["encoding"])) + return np.column_stack(np.where(self._data)) @caching.cache_decorator def sparse_values(self): @@ -234,21 +244,19 @@ def _flip(self, axes): @property def dense(self): - return self._data["encoding"] + return self._data def gather(self, indices): - return self._data["encoding"][indices] + return self._data[indices] def gather_nd(self, indices): - return self._data["encoding"][tuple(indices.T)] + return self._data[tuple(indices.T)] def mask(self, mask): - return self._data["encoding"][ - mask if isinstance(mask, np.ndarray) else mask.dense - ] + return self._data[mask if isinstance(mask, np.ndarray) else mask.dense] def get_value(self, index): - return self._data["encoding"][tuple(index)] + return self._data[tuple(index)] def reshape(self, shape): return DenseEncoding(self._data.reshape(shape)) @@ -277,35 +285,32 @@ def __init__(self, indices, values, shape=None): """ Parameters ------------ - indices : (m, n)-sized int array of indices - values : (m, n)-sized dtype array of values at the specified indices - shape : (n,) iterable of integers. If None, the maximum value of indices + indices: (m, n)-sized int array of indices + values: (m, n)-sized dtype array of values at the specified indices + shape: (n,) iterable of integers. If None, the maximum value of indices + 1 is used. 
""" - - # create the datastore and cache - super().__init__() - - indices = np.asanyarray(indices, dtype=np.int64) - values = np.asanyarray(values) - - if not np.all(indices >= 0): - raise ValueError("all indices must be non-negative") - + data = caching.DataStore() + super().__init__(data) + data["indices"] = indices + data["values"] = values + indices = data["indices"] if len(indices.shape) != 2: raise ValueError("indices must be 2D, got shaped %s" % str(indices.shape)) - if len(values) != len(indices): - raise ValueError("values and indices shapes inconsistent") + if data["values"].shape != (indices.shape[0],): + raise ValueError( + "values and indices shapes inconsistent: {} and {}".format( + data["values"], data["indices"] + ) + ) if shape is None: - shape = tuple(indices.max(axis=0) + 1) + self._shape = tuple(data["indices"].max(axis=0) + 1) else: - shape = tuple(shape) - if (indices > shape).any(): + self._shape = tuple(shape) + if not np.all(indices < self._shape): raise ValueError("all indices must be less than shape") - - self._data["indices"] = indices - self._data["values"] = values - self._data["shape"] = shape + if not np.all(indices >= 0): + raise ValueError("all indices must be non-negative") @staticmethod def from_dense(dense_data): @@ -344,7 +349,7 @@ def ndims(self): @property def shape(self): - return self._data["shape"] + return self._shape @property def size(self): @@ -442,18 +447,16 @@ def __init__(self, data, dtype=None): dtype: dtype of encoded data. Each second value of data is cast will be cast to this dtype if provided. """ - super().__init__() - data = np.asanyarray(data, dtype=dtype) - if len(data.shape) != 1: + super().__init__(data=caching.tracked_array(data)) + if dtype is None: + dtype = self._data.dtype + if len(self._data.shape) != 1: raise ValueError("data must be 1D numpy array") - - self._data["encoding"] = data + self._dtype = dtype @caching.cache_decorator def is_empty(self): - return not np.any( - np.logical_and(self._data["encoding"][::2], self._data["encoding"][1::2]) - ) + return not np.any(np.logical_and(self._data[::2], self._data[1::2])) @property def ndims(self): @@ -465,7 +468,23 @@ def shape(self): @property def dtype(self): - return self._data["encoding"].dtype + return self._dtype + + def crc(self): + log.warning( + "`geometry.crc()` is deprecated and will " + + "be removed in October 2023: replace " + + "with `geometry.__hash__()` or `hash(geometry)`" + ) + return self.__hash__() + + def hash(self): + log.warning( + "`geometry.hash()` is deprecated and will " + + "be removed in October 2023: replace " + + "with `geometry.__hash__()` or `hash(geometry)`" + ) + return self.__hash__() def __hash__(self): """ @@ -498,7 +517,7 @@ def from_brle(brle_data, dtype=None): def stripped(self): if self.is_empty: return _empty_stripped(self.shape) - data, padding = runlength.rle_strip(self._data["encoding"]) + data, padding = runlength.rle_strip(self._data) if padding == (0, 0): encoding = self else: @@ -508,16 +527,16 @@ def stripped(self): @caching.cache_decorator def sum(self): - return (self._data["encoding"][::2] * self._data["encoding"][1::2]).sum() + return (self._data[::2] * self._data[1::2]).sum() @caching.cache_decorator def size(self): - return runlength.rle_length(self._data["encoding"]) + return runlength.rle_length(self._data) def _flip(self, axes): if axes != (0,): raise ValueError("encoding is 1D - cannot flip on axis %s" % str(axes)) - return RunLengthEncoding(runlength.rle_reverse(self._data["encoding"])) + return 
RunLengthEncoding(runlength.rle_reverse(self._data)) @caching.cache_decorator def sparse_components(self): @@ -581,7 +600,7 @@ def __init__(self, data): @caching.cache_decorator def is_empty(self): - return not np.any(self._data["encoding"][1::2]) + return not np.any(self._data[1::2]) @staticmethod def from_dense(dense_data, encoding_dtype=np.int64): @@ -613,7 +632,7 @@ def stripped(self): @caching.cache_decorator def sum(self): - return self._data["encoding"][1::2].sum() + return self._data[1::2].sum() @caching.cache_decorator def size(self): @@ -701,10 +720,7 @@ def sum(self): @property def size(self): - from IPython import embed - - embed() - return self._data["encoding"].size + return self._data.size @property def sparse_indices(self): @@ -718,7 +734,7 @@ def gather_nd(self, indices): return self._data.gather_nd(self._to_base_indices(indices)) def get_value(self, index): - return self._data["encoding"][tuple(self._to_base_indices(index))] + return self._data[tuple(self._to_base_indices(index))] class FlattenedEncoding(LazyIndexMap): @@ -729,34 +745,30 @@ class FlattenedEncoding(LazyIndexMap): """ def _to_base_indices(self, indices): - return np.column_stack(np.unravel_index(indices, self._data["encoding"].shape)) + return np.column_stack(np.unravel_index(indices, self._data.shape)) def _from_base_indices(self, base_indices): return np.expand_dims( - np.ravel_multi_index(base_indices.T, self._data["encoding"].shape), axis=-1 + np.ravel_multi_index(base_indices.T, self._data.shape), axis=-1 ) @property def shape(self): return (self.size,) - @property - def size(self): - return np.prod(self._data["shape"]) - @property def dense(self): - return self._data["encoding"].dense.reshape((-1,)) + return self._data.dense.reshape((-1,)) def mask(self, mask): - return self._data["encoding"].mask(mask.reshape(self._data["encoding"].shape)) + return self._data.mask(mask.reshape(self._data.shape)) @property def flat(self): return self def copy(self): - return FlattenedEncoding(self._data["encoding"].copy()) + return FlattenedEncoding(self._data.copy()) class ShapedEncoding(LazyIndexMap): @@ -778,19 +790,19 @@ def __init__(self, encoding, shape): size = np.prod(self._shape) if nn == 1: size = np.abs(size) - if self._data["encoding"].size % size != 0: + if self._data.size % size != 0: raise ValueError( "cannot reshape encoding of size %d into shape %s" - % (self._data["encoding"].size, str(self._shape)) + % (self._data.size, str(self._shape)) ) - rem = self._data["encoding"].size // size + rem = self._data.size // size self._shape = tuple(rem if s == -1 else s for s in self._shape) elif nn > 2: raise ValueError("shape cannot have more than one -1 value") - elif np.prod(self._shape) != self._data["encoding"].size: + elif np.prod(self._shape) != self._data.size: raise ValueError( "cannot reshape encoding of size %d into shape %s" - % (self._data["encoding"].size, str(self._shape)) + % (self._data.size, str(self._shape)) ) def _from_base_indices(self, base_indices): @@ -809,13 +821,13 @@ def shape(self): @property def dense(self): - return self._data["encoding"].dense.reshape(self.shape) + return self._data.dense.reshape(self.shape) def mask(self, mask): - return self._data["encoding"].mask(mask.flat) + return self._data.mask(mask.flat) def copy(self): - return ShapedEncoding(encoding=self._data["encoding"].copy(), shape=self.shape) + return ShapedEncoding(encoding=self._data.copy(), shape=self.shape) class TransposedEncoding(LazyIndexMap): @@ -856,7 +868,7 @@ def perm(self): @property def shape(self): - 
shape = self._data["encoding"].shape + shape = self._data.shape return tuple(shape[p] for p in self._perm) def _to_base_indices(self, indices): @@ -873,29 +885,23 @@ def _from_base_indices(self, base_indices): @property def dense(self): - return self._data["encoding"].dense.transpose(self._perm) + return self._data.dense.transpose(self._perm) def gather(self, indices): - return self._data["encoding"].gather(self._base_indices(indices)) + return self._data.gather(self._base_indices(indices)) def mask(self, mask): - return ( - self._data["encoding"] - .mask(mask.transpose(self._inv_perm)) - .transpose(self._perm) - ) + return self._data.mask(mask.transpose(self._inv_perm)).transpose(self._perm) def get_value(self, index): - return self._data["encoding"][tuple(self._base_indices(index))] + return self._data[tuple(self._base_indices(index))] @property def data(self): return self._data def copy(self): - return TransposedEncoding( - base_encoding=self._data["encoding"].copy(), perm=self._perm - ) + return TransposedEncoding(base_encoding=self._data.copy(), perm=self._perm) class FlippedEncoding(LazyIndexMap): @@ -916,10 +922,9 @@ def __init__(self, encoding, axes): if len(set(self._axes)) != len(self._axes): raise ValueError("Axes cannot contain duplicates, got %s" % str(self._axes)) super().__init__(encoding) - if not all(0 <= a < self._data["encoding"].ndims for a in axes): + if not all(0 <= a < self._data.ndims for a in axes): raise ValueError( - "Invalid axes %s for %d-d encoding" - % (str(axes), self._data["encoding"].ndims) + "Invalid axes %s for %d-d encoding" % (str(axes), self._data.ndims) ) def _to_base_indices(self, indices): @@ -935,11 +940,11 @@ def _from_base_indices(self, base_indices): @property def shape(self): - return self._data["encoding"].shape + return self._data.shape @property def dense(self): - dense = self._data["encoding"].dense + dense = self._data.dense for a in self._axes: dense = np.flip(dense, a) return dense @@ -948,10 +953,10 @@ def mask(self, mask): if not isinstance(mask, Encoding): mask = DenseEncoding(mask) mask = mask.flip(self._axes) - return self._data["encoding"].mask(mask).flip(self._axes) + return self._data.mask(mask).flip(self._axes) def copy(self): - return FlippedEncoding(self._data["encoding"].copy(), self._axes) + return FlippedEncoding(self._data.copy(), self._axes) def flip(self, axis=0): if isinstance(axis, np.ndarray): From 0dc74111cbd3e67eccdace3c3ec78839d230c0aa Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 14:38:56 -0400 Subject: [PATCH 086/144] add a test for #2016 --- README.md | 4 +- tests/generic.py | 171 ++++++------- tests/test_util.py | 255 ++++++++++--------- trimesh/base.py | 1 + trimesh/util.py | 592 +++++++++++++++++++++------------------------ 5 files changed, 503 insertions(+), 520 deletions(-) diff --git a/README.md b/README.md index 6aaa40602..104ae2b04 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[![trimesh](https://trimsh.org/images/logotype-a.svg)](http://trimsh.org) +[![trimesh](https://trimesh.org/images/logotype-a.svg)](http://trimesh.org) ----------- [![Github Actions](https://github.com/mikedh/trimesh/workflows/Release%20Trimesh/badge.svg)](https://github.com/mikedh/trimesh/actions) [![codecov](https://codecov.io/gh/mikedh/trimesh/branch/main/graph/badge.svg?token=4PVRQXyl2h)](https://codecov.io/gh/mikedh/trimesh) [![Docker Image Version (latest by date)](https://img.shields.io/docker/v/trimesh/trimesh?label=docker&sort=semver)](https://hub.docker.com/r/trimesh/trimesh/tags) 
[![PyPI version](https://badge.fury.io/py/trimesh.svg)](https://badge.fury.io/py/trimesh) @@ -9,7 +9,7 @@ |---------------------------| | `trimesh >= 4.0.0` on `main` makes minimum Python 3.7 and is in pre-release | | Testing with `pip install --pre trimesh` would be much appreciated! | -| Projects that support Python < 3.7 should update requirement to `trimesh<4` | +| Projects that support Python<3.7 should update requirement to `trimesh<4` | Trimesh is a pure Python 3.7+ library for loading and using [triangular meshes](https://en.wikipedia.org/wiki/Triangle_mesh) with an emphasis on watertight surfaces. The goal of the library is to provide a full featured and well tested Trimesh object which allows for easy manipulation and analysis, in the style of the Polygon object in the [Shapely library](https://github.com/Toblerity/Shapely). diff --git a/tests/generic.py b/tests/generic.py index 871a5e305..d766c66ab 100644 --- a/tests/generic.py +++ b/tests/generic.py @@ -32,22 +32,14 @@ from trimesh.constants import tol, tol_path from collections import deque from copy import deepcopy +from http.server import SimpleHTTPRequestHandler +import socketserver -tf = trimesh.transformations -if sys.version_info >= (3, 1): - # Python 3 - from http.server import SimpleHTTPRequestHandler - import socketserver +tf = trimesh.transformations -else: - # Python 2 - from SimpleHTTPServer import SimpleHTTPRequestHandler - import SocketServer as socketserver # make a dummy profiler which does nothing - - class DummyProfiler(object): def __enter__(self, *args, **kwargs): return self @@ -56,7 +48,7 @@ def __exit__(*args, **kwargs): pass def output_text(*args, **kwargs): - return 'no `pyinstrument`' + return "no `pyinstrument`" # make sure dummy profiler works @@ -73,14 +65,14 @@ def output_text(*args, **kwargs): # should we require all soft dependencies # this is set in the docker images to catch missing packages -argv = ''.join(sys.argv) +argv = "".join(sys.argv) # if we're supposed to have everything -all_dependencies = 'ALL_DEPENDENCIES' in argv +all_dependencies = "ALL_DEPENDENCIES" in argv # if we're testing rendering scenes -include_rendering = 'INCLUDE_RENDERING' in argv +include_rendering = "INCLUDE_RENDERING" in argv if all_dependencies and not trimesh.ray.has_embree: - raise ValueError('missing embree!') + raise ValueError("missing embree!") try: import sympy as sp @@ -101,6 +93,7 @@ def output_text(*args, **kwargs): try: from mapbox_earcut import triangulate_float64 + has_earcut = True except BaseException as E: if all_dependencies: @@ -110,6 +103,7 @@ def output_text(*args, **kwargs): try: from shapely.geometry import Point, Polygon, LineString + has_path = True except ImportError as E: if all_dependencies: @@ -130,46 +124,45 @@ def output_text(*args, **kwargs): # find_executable for binvox has_binvox = trimesh.exchange.binvox.binvox_encoder is not None if all_dependencies and not has_binvox: - raise ValueError('missing binvox') + raise ValueError("missing binvox") # Python version as a tuple, i.e. 
[3, 6] -PY_VER = (sys.version_info.major, - sys.version_info.minor) +PY_VER = (sys.version_info.major, sys.version_info.minor) # some repeatable homogeneous transforms to use in tests -transforms = [trimesh.transformations.euler_matrix(np.pi / 4, i, 0) - for i in np.linspace(0.0, np.pi * 2.0, 100)] +transforms = [ + trimesh.transformations.euler_matrix(np.pi / 4, i, 0) + for i in np.linspace(0.0, np.pi * 2.0, 100) +] # should be a (100, 4, 4) float transforms = np.array(transforms) try: # do the imports for Python 2 from cStringIO import StringIO + PY3 = False except ImportError: # if that didn't work we're probably on Python 3 from io import StringIO from io import BytesIO + PY3 = True # are we on linux -is_linux = 'linux' in platform.system().lower() +is_linux = "linux" in platform.system().lower() # find the current absolute path using inspect -dir_current = os.path.dirname( - os.path.abspath(os.path.expanduser(__file__))) +dir_current = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) # the absolute path for our reference models -dir_models = os.path.abspath( - os.path.join(dir_current, '..', 'models')) +dir_models = os.path.abspath(os.path.join(dir_current, "..", "models")) # the absolute path for our 2D reference models -dir_2D = os.path.abspath( - os.path.join(dir_current, '..', 'models', '2D')) +dir_2D = os.path.abspath(os.path.join(dir_current, "..", "models", "2D")) # the absolute path for our test data and truth -dir_data = os.path.abspath( - os.path.join(dir_current, 'data')) +dir_data = os.path.abspath(os.path.join(dir_current, "data")) # a logger for tests to call -log = logging.getLogger('trimesh') +log = logging.getLogger("trimesh") log.addHandler(logging.NullHandler()) # turn strings / bytes into file- like objects @@ -216,8 +209,7 @@ def random_transforms(count, translate=1000): # random should be deterministic assert np.allclose(random(10), random(10)) -assert np.allclose(list(random_transforms(10)), - list(random_transforms(10))) +assert np.allclose(list(random_transforms(10)), list(random_transforms(10))) def _load_data(): @@ -227,15 +219,14 @@ def _load_data(): data = {} for file_name in os.listdir(dir_data): name, extension = os.path.splitext(file_name) - if extension != '.json': + if extension != ".json": continue file_path = os.path.join(dir_data, file_name) - with open(file_path, 'r') as file_obj: + with open(file_path, "r") as file_obj: data[name] = json.load(file_obj) - data['model_paths'] = [os.path.join(dir_models, f) - for f in os.listdir(dir_models)] - data['2D_files'] = [os.path.join(dir_2D, f) for f in os.listdir(dir_2D)] + data["model_paths"] = [os.path.join(dir_models, f) for f in os.listdir(dir_models)] + data["2D_files"] = [os.path.join(dir_2D, f) for f in os.listdir(dir_2D)] return data @@ -258,7 +249,7 @@ def get_mesh(file_name, *args, **kwargs): meshes = collections.deque() for name in np.append(file_name, args): location = get_path(name) - log.info('loading mesh from: %s', location) + log.info("loading mesh from: %s", location) meshes.append(trimesh.load(location, **kwargs)) if len(meshes) == 1: return meshes[0] @@ -279,8 +270,7 @@ def get_path(file_name): full : str Full absolute path to model. """ - return os.path.abspath( - os.path.join(dir_models, file_name)) + return os.path.abspath(os.path.join(dir_models, file_name)) @contextlib.contextmanager @@ -289,11 +279,12 @@ def serve_meshes(): This context manager serves meshes over HTTP at some available port. 
""" + class _ServerThread(threading.Thread): def run(self): os.chdir(dir_models) Handler = SimpleHTTPRequestHandler - self.httpd = socketserver.TCPServer(('', 0), Handler) + self.httpd = socketserver.TCPServer(("", 0), Handler) _, self.port = self.httpd.server_address self.httpd.serve_forever() @@ -301,16 +292,14 @@ def run(self): t.daemon = False t.start() time.sleep(0.2) - yield 'http://localhost:{}'.format(t.port) + yield "http://localhost:{}".format(t.port) t.httpd.shutdown() t.join() -def get_meshes(count=np.inf, - raise_error=False, - split=False, - min_volume=None, - only_watertight=True): +def get_meshes( + count=np.inf, raise_error=False, split=False, min_volume=None, only_watertight=True +): """ Get meshes to test with. @@ -358,8 +347,7 @@ def check(item): extension = trimesh.util.split_extension(file_name).lower() if extension in trimesh.available_formats(): try: - loaded = trimesh.load( - os.path.join(dir_models, file_name)) + loaded = trimesh.load(os.path.join(dir_models, file_name)) except BaseException as E: if raise_error: raise E @@ -367,21 +355,21 @@ def check(item): batched = [] if isinstance(loaded, trimesh.Scene): - batched.extend(m for m in loaded.geometry.values() - if isinstance(m, trimesh.Trimesh)) + batched.extend( + m for m in loaded.geometry.values() if isinstance(m, trimesh.Trimesh) + ) elif isinstance(loaded, trimesh.Trimesh): batched.append(loaded) for mesh in batched: - mesh.metadata['file_name'] = file_name + mesh.metadata["file_name"] = file_name # only return our limit if returned[0] >= count: return # previous checks should ensure only trimesh assert isinstance(mesh, trimesh.Trimesh) if split: - for submesh in mesh.split( - only_watertight=only_watertight): + for submesh in mesh.split(only_watertight=only_watertight): checked = check(submesh) if checked is not None: yield checked @@ -390,8 +378,7 @@ def check(item): if checked is not None: yield checked else: - log.warning('%s has no loader, not running test on!', - file_name) + log.warning("%s has no loader, not running test on!", file_name) def get_2D(count=None): @@ -429,8 +416,7 @@ def get_2D(count=None): try: paths.append(trimesh.load(location)) except BaseException as E: - log.error('failed on: {}'.format(file_name), - exc_info=True) + log.error("failed on: {}".format(file_name), exc_info=True) raise E yield paths[-1] @@ -448,18 +434,17 @@ def check_path2D(path): assert len(path.root) == len(path.polygons_full) # make sure polygons are really polygons - assert all(type(i).__name__ == 'Polygon' - for i in path.polygons_full) - assert all(type(i).__name__ == 'Polygon' - for i in path.polygons_closed) + assert all(type(i).__name__ == "Polygon" for i in path.polygons_full) + assert all(type(i).__name__ == "Polygon" for i in path.polygons_closed) # these should all correspond to each other assert len(path.discrete) == len(path.polygons_closed) assert len(path.discrete) == len(path.paths) # make sure None polygons are not referenced in graph - assert all(path.polygons_closed[i] is not None - for i in path.enclosure_directed.nodes()) + assert all( + path.polygons_closed[i] is not None for i in path.enclosure_directed.nodes() + ) if any(e.color is not None for e in path.entities): assert path.colors.shape == (len(path.entities), 4) @@ -482,8 +467,7 @@ def scene_equal(a, b): for k, m in a.geometry.items(): # each mesh should correspond by name # and have the same volume - assert np.isclose( - m.volume, b.geometry[k].volume, rtol=0.001) + assert np.isclose(m.volume, b.geometry[k].volume, rtol=0.001) # the 
axis aligned bounding box should be the same assert np.allclose(a.bounds, b.bounds) @@ -503,14 +487,12 @@ def texture_equal(a, b): try: from scipy.spatial import cKDTree except BaseException: - log.error('no scipy for check!', exc_info=True) + log.error("no scipy for check!", exc_info=True) return # an ordered position-face-UV blob to check - pa = np.hstack((a.vertices, a.visual.uv))[ - a.faces].reshape((-1, 15)) - pb = np.hstack((b.vertices, b.visual.uv))[ - b.faces].reshape((-1, 15)) + pa = np.hstack((a.vertices, a.visual.uv))[a.faces].reshape((-1, 15)) + pb = np.hstack((b.vertices, b.visual.uv))[b.faces].reshape((-1, 15)) # query their actual ordered values against each other q = cKDTree(pa).query_ball_tree(cKDTree(pb), r=1e-4) assert all(i in match for i, match in enumerate(q)) @@ -521,8 +503,7 @@ def check_fuze(fuze): Check the classic textured mesh: a fuze bottle """ # these loaded fuze bottles should have textures - assert isinstance( - fuze.visual, trimesh.visual.TextureVisuals) + assert isinstance(fuze.visual, trimesh.visual.TextureVisuals) # image should be loaded with correct resolution assert fuze.visual.material.image.size == (1024, 1024) # UV coordinates should be unmerged correctly @@ -533,9 +514,11 @@ def check_fuze(fuze): assert fuze.visual.uv.min() > -tol.merge assert fuze.visual.uv.max() < 1 + tol.merge # check color factors - factors = [fuze.visual.material.ambient, - fuze.visual.material.diffuse, - fuze.visual.material.specular] + factors = [ + fuze.visual.material.ambient, + fuze.visual.material.diffuse, + fuze.visual.material.specular, + ] for f in factors: # should be RGBA assert len(f) == 4 @@ -550,7 +533,7 @@ def check_fuze(fuze): assert fuze.vertices.shape == (664, 3) # convert TextureVisuals to ColorVisuals viz = fuze.visual.to_color() - assert viz.kind == 'vertex' + assert viz.kind == "vertex" # should be actual colors defined assert viz.vertex_colors.ptp(axis=0).ptp() != 0 # shouldn't crash @@ -576,9 +559,9 @@ def wrapload(exported, file_type, **kwargs): loaded : trimesh.Trimesh Loaded result """ - return trimesh.load(file_obj=trimesh.util.wrap_as_stream(exported), - file_type=file_type, - **kwargs) + return trimesh.load( + file_obj=trimesh.util.wrap_as_stream(exported), file_type=file_type, **kwargs + ) TemporaryDirectory = trimesh.util.TemporaryDirectory @@ -590,8 +573,28 @@ def wrapload(exported, file_type, **kwargs): # formats supported by meshlab for export tests try: import pymeshlab - meshlab_formats = ['3ds', 'ply', 'stl', 'obj', 'qobj', 'off', 'ptx', 'vmi', - 'bre', 'dae', 'ctm', 'pts', 'apts', 'gts', 'pdb', - 'tri', 'asc', 'x3d', 'x3dv', 'wrl'] + + meshlab_formats = [ + "3ds", + "ply", + "stl", + "obj", + "qobj", + "off", + "ptx", + "vmi", + "bre", + "dae", + "ctm", + "pts", + "apts", + "gts", + "pdb", + "tri", + "asc", + "x3d", + "x3dv", + "wrl", + ] except BaseException: meshlab_formats = [] diff --git a/tests/test_util.py b/tests/test_util.py index 398dae83a..60d88c610 100644 --- a/tests/test_util.py +++ b/tests/test_util.py @@ -14,12 +14,11 @@ TOL_ZERO = 1e-9 TOL_CHECK = 1e-2 -log = logging.getLogger('trimesh') +log = logging.getLogger("trimesh") log.addHandler(logging.NullHandler()) class VectorTests(unittest.TestCase): - def setUp(self): self.test_dim = TEST_DIM @@ -31,14 +30,14 @@ def test_unitize_multi(self): assert not valid[0] assert valid[1:].all() - length = np.sum(vectors[1:] ** 2, axis=1) ** .5 + length = np.sum(vectors[1:] ** 2, axis=1) ** 0.5 assert np.allclose(length, 1.0) def test_align(self): - log.info('Testing vector alignment') 
+ log.info("Testing vector alignment") target = np.array([0, 0, 1]) for _i in range(100): - vector = trimesh.unitize(np.random.random(3) - .5) + vector = trimesh.unitize(np.random.random(3) - 0.5) T = trimesh.geometry.align_vectors(vector, target) result = np.dot(T, np.append(vector, 1))[0:3] aligned = np.abs(result - target).sum() < TOL_ZERO @@ -46,7 +45,6 @@ def test_align(self): class UtilTests(unittest.TestCase): - def test_bounds_tree(self): for _attempt in range(3): for dimension in [2, 3]: @@ -78,9 +76,8 @@ def test_stack(self): pass def test_has_module(self): - - assert g.trimesh.util.has_module('collections') - assert not g.trimesh.util.has_module('foobarrionananan') + assert g.trimesh.util.has_module("collections") + assert not g.trimesh.util.has_module("foobarrionananan") def test_strips(self): """ @@ -95,8 +92,7 @@ def strips_to_faces(strips): for s in strips: s = g.np.asanyarray(s, dtype=g.np.int64) # each triangle is defined by one new vertex - tri = g.np.column_stack([g.np.roll(s, -i) - for i in range(3)])[:-2] + tri = g.np.column_stack([g.np.roll(s, -i) for i in range(3)])[:-2] # we need to flip ever other triangle idx = (g.np.arange(len(tri)) % 2).astype(bool) tri[idx] = g.np.fliplr(tri[idx]) @@ -108,10 +104,7 @@ def strips_to_faces(strips): # test 4- triangle strip s = [g.np.arange(6)] f = g.trimesh.util.triangle_strips_to_faces(s) - assert (f == g.np.array([[0, 1, 2], - [3, 2, 1], - [2, 3, 4], - [5, 4, 3]])).all() + assert (f == g.np.array([[0, 1, 2], [3, 2, 1], [2, 3, 4], [5, 4, 3]])).all() assert len(f) + 2 == len(s[0]) assert (f == strips_to_faces(s)).all() @@ -141,9 +134,8 @@ def test_pairwise(self): assert all(len(i) == 2 for i in pa) def test_concat(self): - - a = g.get_mesh('ballA.off') - b = g.get_mesh('ballB.off') + a = g.get_mesh("ballA.off") + b = g.get_mesh("ballB.off") hA = a.__hash__() hB = b.__hash__() @@ -151,8 +143,7 @@ def test_concat(self): # make sure we're not mutating original mesh for _i in range(4): c = a + b - assert g.np.isclose(c.volume, - a.volume + b.volume) + assert g.np.isclose(c.volume, a.volume + b.volume) assert a.__hash__() == hA assert b.__hash__() == hB @@ -165,10 +156,48 @@ def test_concat(self): # do a multimesh concatenate r = g.trimesh.util.concatenate(meshes) - assert g.np.isclose(r.volume, - a.volume * count) + assert g.np.isclose(r.volume, a.volume * count) assert a.__hash__() == hA + def test_concat_vertex_normals(self): + # vertex normals should only be included if they already exist + + a = g.trimesh.creation.icosphere().apply_translation([1, 0, 0]) + assert "vertex_normals" not in a._cache + + b = g.trimesh.creation.icosphere().apply_translation([-1, 0, 0]) + assert "vertex_normals" not in b._cache + + c = g.trimesh.util.concatenate([a, b]) + assert "vertex_normals" not in c._cache + + rando = g.trimesh.unitize(g.random(a.vertices.shape)) + a.vertex_normals = rando + assert "vertex_normals" in a._cache + + c = g.trimesh.util.concatenate([a, b]) + assert "vertex_normals" in c._cache + # should have included the rando normals + assert g.np.allclose(c.vertex_normals[: len(a.vertices)], rando) + + def test_concat_face_normals(self): + # face normals should only be included if they already exist + a = g.trimesh.creation.icosphere().apply_translation([1, 0, 0]) + assert "face_normals" not in a._cache + + b = g.trimesh.creation.icosphere().apply_translation([-1, 0, 0]) + assert "face_normals" not in b._cache + + c = g.trimesh.util.concatenate([a, b]) + assert "face_normals" not in c._cache + + # will generate normals + _ = 
a.face_normals + assert "face_normals" in a._cache + + c = g.trimesh.util.concatenate([a, b]) + assert "face_normals" in c._cache + def test_unique_id(self): num_ids = 10000 @@ -194,17 +223,17 @@ def test_unique_name(self): from trimesh.util import unique_name assert len(unique_name(None, {})) > 0 - assert len(unique_name('', {})) > 0 + assert len(unique_name("", {})) > 0 count = 10 names = set() for _i in range(count): - names.add(unique_name('hi', names)) + names.add(unique_name("hi", names)) assert len(names) == count names = set() for _i in range(count): - names.add(unique_name('', names)) + names.add(unique_name("", names)) assert len(names) == count # Try with a larger set of names @@ -213,7 +242,7 @@ def test_unique_name(self): # make it a whole lotta duplicates names = names * 1000 # add a non-int postfix to test - names.extend(['suppp_hi'] * 10) + names.extend(["suppp_hi"] * 10) assigned = set() with g.Profiler() as P: @@ -226,10 +255,7 @@ def test_unique_name(self): counts = {} with g.Profiler() as P: for name in names: - assigned_new.add(unique_name( - name, - contains=assigned_new, - counts=counts)) + assigned_new.add(unique_name(name, contains=assigned_new, counts=counts)) g.log.debug(P.output_text()) # new scheme should match the old one @@ -239,57 +265,51 @@ def test_unique_name(self): class ContainsTest(unittest.TestCase): - def test_inside(self): sphere = g.trimesh.primitives.Sphere(radius=1.0, subdivisions=4) - g.log.info('Testing contains function with sphere') - samples = (np.random.random((1000, 3)) - .5) * 5 + g.log.info("Testing contains function with sphere") + samples = (np.random.random((1000, 3)) - 0.5) * 5 radius = np.linalg.norm(samples, axis=1) - margin = .05 + margin = 0.05 truth_in = radius < (1.0 - margin) truth_out = radius > (1.0 + margin) contains = sphere.contains(samples) if not contains[truth_in].all(): - raise ValueError('contains test does not match truth!') + raise ValueError("contains test does not match truth!") if contains[truth_out].any(): - raise ValueError('contains test does not match truth!') + raise ValueError("contains test does not match truth!") class IOWrapTests(unittest.TestCase): - def test_io_wrap(self): - util = g.trimesh.util # check wrap_as_stream test_b = g.random(1).tobytes() - test_s = 'this is a test yo' + test_s = "this is a test yo" res_b = util.wrap_as_stream(test_b).read() res_s = util.wrap_as_stream(test_s).read() assert res_b == test_b assert res_s == test_s # check __enter__ and __exit__ - hi = b'hi' + hi = b"hi" with util.BytesIO(hi) as f: assert f.read() == hi # check __enter__ and __exit__ - hi = 'hi' + hi = "hi" with util.StringIO(hi) as f: assert f.read() == hi class CompressTests(unittest.TestCase): - def test_compress(self): - - source = {'hey': 'sup', - 'naa': '2002211'} + source = {"hey": "sup", "naa": "2002211"} # will return bytes c = g.trimesh.util.compress(source) @@ -297,23 +317,23 @@ def test_compress(self): # wrap bytes as file- like object f = g.trimesh.util.wrap_as_stream(c) # try to decompress file- like object - d = g.trimesh.util.decompress(f, file_type='zip') + d = g.trimesh.util.decompress(f, file_type="zip") # make sure compressed- decompressed items # are the same after a cycle for key, value in source.items(): - result = d[key].read().decode('utf-8') + result = d[key].read().decode("utf-8") assert result == value class UniqueTests(unittest.TestCase): - def test_unique(self): - - options = [np.array([0, 1, 2, 3, 1, 3, 10, 20]), - np.arange(100), - np.array([], dtype=np.int64), - 
(np.random.random(1000) * 10).astype(int)] + options = [ + np.array([0, 1, 2, 3, 1, 3, 10, 20]), + np.arange(100), + np.array([], dtype=np.int64), + (np.random.random(1000) * 10).astype(int), + ] for values in options: if len(values) > 0: @@ -323,21 +343,19 @@ def test_unique(self): # try our unique bincount function unique, inverse, counts = g.trimesh.grouping.unique_bincount( - values, - minlength=minlength, - return_inverse=True, - return_counts=True) + values, minlength=minlength, return_inverse=True, return_counts=True + ) # make sure inverse is correct assert (unique[inverse] == values).all() # make sure that the number of counts matches # the number of unique values - assert (len(unique) == len(counts)) + assert len(unique) == len(counts) # get the truth - truth_unique, truth_inverse, truth_counts = np.unique(values, - return_inverse=True, - return_counts=True) + truth_unique, truth_inverse, truth_counts = np.unique( + values, return_inverse=True, return_counts=True + ) # make sure truth is doing what we think assert (truth_unique[truth_inverse] == values).all() @@ -352,47 +370,43 @@ def test_unique(self): class CommentTests(unittest.TestCase): - def test_comment(self): # test our comment stripping logic f = g.trimesh.util.comment_strip - text = 'hey whats up' + text = "hey whats up" assert f(text) == text - text = '#hey whats up' - assert f(text) == '' + text = "#hey whats up" + assert f(text) == "" - text = ' # hey whats up ' - assert f(text) == '' + text = " # hey whats up " + assert f(text) == "" - text = '# naahah\nhey whats up' - assert f(text) == 'hey whats up' + text = "# naahah\nhey whats up" + assert f(text) == "hey whats up" - text = '#naahah\nhey whats up\nhi' - assert f(text) == 'hey whats up\nhi' + text = "#naahah\nhey whats up\nhi" + assert f(text) == "hey whats up\nhi" - text = '#naahah\nhey whats up\n hi' - assert f(text) == 'hey whats up\n hi' + text = "#naahah\nhey whats up\n hi" + assert f(text) == "hey whats up\n hi" - text = '#naahah\nhey whats up\n hi#' - assert f(text) == 'hey whats up\n hi' + text = "#naahah\nhey whats up\n hi#" + assert f(text) == "hey whats up\n hi" - text = 'hey whats up# see here\n hi#' - assert f(text) == 'hey whats up\n hi' + text = "hey whats up# see here\n hi#" + assert f(text) == "hey whats up\n hi" class ArrayToString(unittest.TestCase): def test_converts_an_unstructured_1d_array(self): - self.assertEqual( - g.trimesh.util.array_to_string(np.array([1, 2, 3])), - '1 2 3' - ) + self.assertEqual(g.trimesh.util.array_to_string(np.array([1, 2, 3])), "1 2 3") def test_converts_an_unstructured_int_array(self): self.assertEqual( g.trimesh.util.array_to_string(np.array([[1, 2, 3], [4, 5, 6]])), - '1 2 3\n4 5 6' + "1 2 3\n4 5 6", ) def test_converts_an_unstructured_float_array(self): @@ -400,51 +414,54 @@ def test_converts_an_unstructured_float_array(self): g.trimesh.util.array_to_string( np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float64) ), - '1.00000000 2.00000000 3.00000000\n4.00000000 5.00000000 6.00000000' + "1.00000000 2.00000000 3.00000000\n4.00000000 5.00000000 6.00000000", ) def test_uses_the_specified_column_delimiter(self): self.assertEqual( g.trimesh.util.array_to_string( - np.array([[1, 2, 3], [4, 5, 6]]), col_delim='col'), - '1col2col3\n4col5col6' + np.array([[1, 2, 3], [4, 5, 6]]), col_delim="col" + ), + "1col2col3\n4col5col6", ) def test_uses_the_specified_row_delimiter(self): self.assertEqual( g.trimesh.util.array_to_string( - np.array([[1, 2, 3], [4, 5, 6]]), row_delim='row'), - '1 2 3row4 5 6' + np.array([[1, 2, 3], [4, 5, 
6]]), row_delim="row" + ), + "1 2 3row4 5 6", ) def test_uses_the_specified_value_format(self): self.assertEqual( g.trimesh.util.array_to_string( - np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float64), - value_format='{:.1f}'), - '1.0 2.0 3.0\n4.0 5.0 6.0' + np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float64), value_format="{:.1f}" + ), + "1.0 2.0 3.0\n4.0 5.0 6.0", ) def test_supports_uints(self): self.assertEqual( - g.trimesh.util.array_to_string( - np.array([1, 2, 3], dtype=np.uint8)), - '1 2 3' + g.trimesh.util.array_to_string(np.array([1, 2, 3], dtype=np.uint8)), "1 2 3" ) def test_supports_repeat_format(self): self.assertEqual( g.trimesh.util.array_to_string( - np.array([[1, 2, 3], [4, 5, 6]]), value_format='{} {}'), - '1 1 2 2 3 3\n4 4 5 5 6 6' + np.array([[1, 2, 3], [4, 5, 6]]), value_format="{} {}" + ), + "1 1 2 2 3 3\n4 4 5 5 6 6", ) def test_raises_if_array_is_structured(self): with self.assertRaises(ValueError): - g.trimesh.util.array_to_string(np.array( - [(1, 1.1), (2, 2.2)], - dtype=[('some_int', np.int64), ('some_float', np.float64)] - )) + g.trimesh.util.array_to_string( + np.array( + [(1, 1.1), (2, 2.2)], + dtype=[("some_int", np.int64), ("some_float", np.float64)], + ) + ) def test_raises_if_array_is_not_flat(self): with self.assertRaises(ValueError): @@ -452,16 +469,15 @@ def test_raises_if_array_is_not_flat(self): class StructuredArrayToString(unittest.TestCase): - def test_converts_a_structured_array_with_1d_elements(self): self.assertEqual( g.trimesh.util.structured_array_to_string( np.array( [(1, 1.1), (2, 2.2)], - dtype=[('some_int', np.int64), ('some_float', np.float64)] + dtype=[("some_int", np.int64), ("some_float", np.float64)], ) ), - '1 1.10000000\n2 2.20000000' + "1 1.10000000\n2 2.20000000", ) def test_converts_a_structured_array_with_2d_elements(self): @@ -469,11 +485,10 @@ def test_converts_a_structured_array_with_2d_elements(self): g.trimesh.util.structured_array_to_string( np.array( [([1, 2], 1.1), ([3, 4], 2.2)], - dtype=[('some_int', np.int64, 2), - ('some_float', np.float64)] + dtype=[("some_int", np.int64, 2), ("some_float", np.float64)], ) ), - '1 2 1.10000000\n3 4 2.20000000' + "1 2 1.10000000\n3 4 2.20000000", ) def test_uses_the_specified_column_delimiter(self): @@ -481,11 +496,11 @@ def test_uses_the_specified_column_delimiter(self): g.trimesh.util.structured_array_to_string( np.array( [(1, 1.1), (2, 2.2)], - dtype=[('some_int', np.int64), ('some_float', np.float64)] + dtype=[("some_int", np.int64), ("some_float", np.float64)], ), - col_delim='col' + col_delim="col", ), - '1col1.10000000\n2col2.20000000' + "1col1.10000000\n2col2.20000000", ) def test_uses_the_specified_row_delimiter(self): @@ -493,11 +508,11 @@ def test_uses_the_specified_row_delimiter(self): g.trimesh.util.structured_array_to_string( np.array( [(1, 1.1), (2, 2.2)], - dtype=[('some_int', np.int64), ('some_float', np.float64)] + dtype=[("some_int", np.int64), ("some_float", np.float64)], ), - row_delim='row' + row_delim="row", ), - '1 1.10000000row2 2.20000000' + "1 1.10000000row2 2.20000000", ) def test_uses_the_specified_value_format(self): @@ -505,11 +520,11 @@ def test_uses_the_specified_value_format(self): g.trimesh.util.structured_array_to_string( np.array( [(1, 1.1), (2, 2.2)], - dtype=[('some_int', np.int64), ('some_float', np.float64)] + dtype=[("some_int", np.int64), ("some_float", np.float64)], ), - value_format='{:.1f}' + value_format="{:.1f}", ), - '1.0 1.1\n2.0 2.2' + "1.0 1.1\n2.0 2.2", ) def test_supports_uints(self): @@ -517,10 +532,10 @@ def 
test_supports_uints(self): g.trimesh.util.structured_array_to_string( np.array( [(1, 1.1), (2, 2.2)], - dtype=[('some_int', np.uint8), ('some_float', np.float64)] + dtype=[("some_int", np.uint8), ("some_float", np.float64)], ) ), - '1 1.10000000\n2 2.20000000' + "1 1.10000000\n2 2.20000000", ) def test_raises_if_array_is_unstructured(self): @@ -532,9 +547,9 @@ def test_raises_if_value_format_specifies_repeats(self): g.trimesh.util.structured_array_to_string( np.array( [(1, 1.1), (2, 2.2)], - dtype=[('some_int', np.int64), ('some_float', np.float64)] + dtype=[("some_int", np.int64), ("some_float", np.float64)], ), - value_format='{} {}' + value_format="{} {}", ) def test_raises_if_array_is_not_flat(self): @@ -542,11 +557,11 @@ def test_raises_if_array_is_not_flat(self): g.trimesh.util.structured_array_to_string( np.array( [[(1, 1.1), (2, 2.2)], [(1, 1.1), (2, 2.2)]], - dtype=[('some_int', np.int64), ('some_float', np.float64)] + dtype=[("some_int", np.int64), ("some_float", np.float64)], ) ) -if __name__ == '__main__': +if __name__ == "__main__": trimesh.util.attach_to_log() unittest.main() diff --git a/trimesh/base.py b/trimesh/base.py index 32fb01078..5ff980828 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -435,6 +435,7 @@ def face_normals(self, values): if not np.allclose(compare, values[:20]): log.debug("face_normals didn't match triangles, ignoring!") return + # otherwise store face normals self._cache["face_normals"] = values diff --git a/trimesh/util.py b/trimesh/util.py index 698ae1b6d..088f2985e 100644 --- a/trimesh/util.py +++ b/trimesh/util.py @@ -17,44 +17,23 @@ import logging import random import shutil -import sys import tempfile +import time import uuid import zipfile -import numpy as np - -ABC = abc.ABC - -# a flag we can check elsewhere for Python 3 -PY3 = sys.version_info.major >= 3 +# for type checking +from collections.abc import Mapping +from io import BytesIO, StringIO -if PY3: - # for type checking - basestring = str - # Python 3 - from io import BytesIO, StringIO - from shutil import which - from time import perf_counter as now -else: - # Python 2 - from distutils.spawn import find_executable as which # noqa - - from StringIO import StringIO - # monkey patch StringIO so `with` statements work - StringIO.__enter__ = lambda a: a - StringIO.__exit__ = lambda a, b, c, d: a.close() - BytesIO = StringIO - from time import time as now # noqa - - -try: - from collections.abc import Mapping -except ImportError: - from collections.abc import Mapping +import numpy as np # create a default logger -log = logging.getLogger('trimesh') +log = logging.getLogger("trimesh") + +ABC = abc.ABC +now = time.time +which = shutil.which # include constants here so we don't have to import # a floating point threshold for 0.0 @@ -67,7 +46,7 @@ _STRICT = False _IDENTITY = np.eye(4, dtype=np.float64) -_IDENTITY.flags['WRITEABLE'] = False +_IDENTITY.flags["WRITEABLE"] = False def has_module(name): @@ -87,12 +66,11 @@ def has_module(name): """ # this should work on Python 2.7 and 3.4+ import pkgutil + return pkgutil.find_loader(name) is not None -def unitize(vectors, - check_valid=False, - threshold=None): +def unitize(vectors, check_valid=False, threshold=None): """ Unitize a vector or an array or row-vectors. 
@@ -123,8 +101,7 @@ def unitize,
     # for (m, d) arrays take the per-row unit vector
     # using sqrt and avoiding exponents is slightly faster
     # also dot with ones is faster than .sum(axis=1)
-    norm = np.sqrt(np.dot(vectors * vectors,
-                          [1.0] * vectors.shape[1]))
+    norm = np.sqrt(np.dot(vectors * vectors, [1.0] * vectors.shape[1]))
     # non-zero norms
     valid = norm > threshold
     # in-place reciprocal of nonzero norms
@@ -141,7 +118,7 @@ def unitize,
         else:
             unit = vectors.copy()
     else:
-        raise ValueError('vectors must be (n, ) or (n, d)!')
+        raise ValueError("vectors must be (n, ) or (n, d)!")

     if check_valid:
         return unit[valid], valid
@@ -183,7 +160,7 @@ def is_file(obj):
     is_file : bool
       True if object is a file
     """
-    return hasattr(obj, 'read') or hasattr(obj, 'write')
+    return hasattr(obj, "read") or hasattr(obj, "write")


 def is_pathlib(obj):
@@ -202,7 +179,7 @@ def is_pathlib(obj):
     """
     # check class name rather than a pathlib import
     name = obj.__class__.__name__
-    return hasattr(obj, 'absolute') and name.endswith('Path')
+    return hasattr(obj, "absolute") and name.endswith("Path")


 def is_string(obj):
@@ -219,7 +196,7 @@ def is_string(obj):
     is_string : bool
       True if obj is a string
     """
-    return isinstance(obj, basestring)
+    return isinstance(obj, str)


 def is_none(obj):
@@ -240,9 +217,7 @@ def is_none(obj):
     """
     if obj is None:
         return True
-    if (is_sequence(obj) and
-            len(obj) == 1 and
-            obj[0] is None):
+    if is_sequence(obj) and len(obj) == 1 and obj[0] is None:
         return True
     return False
@@ -261,21 +236,21 @@ def is_sequence(obj):
     is_sequence : bool
       True if object is sequence
     """
-    seq = (not hasattr(obj, "strip") and
-           hasattr(obj, "__getitem__") or
-           hasattr(obj, "__iter__"))
+    seq = (
+        not hasattr(obj, "strip")
+        and hasattr(obj, "__getitem__")
+        or hasattr(obj, "__iter__")
+    )

     # check to make sure it is not a set, string, or dictionary
-    seq = seq and all(not isinstance(obj, i) for i in (dict,
-                                                       set,
-                                                       basestring))
+    seq = seq and all(not isinstance(obj, i) for i in (dict, set, str))

     # PointCloud objects can look like an array but are not
-    seq = seq and type(obj).__name__ not in ['PointCloud']
+    seq = seq and type(obj).__name__ not in ["PointCloud"]

     # numpy sometimes returns objects that are single float64 values
     # but sure look like sequences, so we check the shape
-    if hasattr(obj, 'shape'):
+    if hasattr(obj, "shape"):
         seq = seq and obj.shape != ()

     return seq
@@ -330,8 +305,7 @@ def is_shape(obj, shape, allow_zeros=False):
     # if the obj.shape is different length than
     # the goal shape it means they have different number
     # of dimensions and thus the obj is not the query shape
-    if (not hasattr(obj, 'shape') or
-            len(obj.shape) != len(shape)):
+    if not hasattr(obj, "shape") or len(obj.shape) != len(shape):
         return False

     # empty lists with any flexible dimensions match
@@ -428,8 +402,7 @@ def vector_hemisphere(vectors, return_sign=False):
         # check the Y value and reverse vector
         # direction if negative.
negative = vectors < -TOL_ZERO - zero = np.logical_not( - np.logical_or(negative, vectors > TOL_ZERO)) + zero = np.logical_not(np.logical_or(negative, vectors > TOL_ZERO)) signs = np.ones(len(vectors), dtype=np.float64) # negative Y values are reversed @@ -441,8 +414,7 @@ def vector_hemisphere(vectors, return_sign=False): elif is_shape(vectors, (-1, 3)): # 3D vector case negative = vectors < -TOL_ZERO - zero = np.logical_not( - np.logical_or(negative, vectors > TOL_ZERO)) + zero = np.logical_not(np.logical_or(negative, vectors > TOL_ZERO)) # move all negative Z to positive # then for zero Z vectors, move all negative Y to positive # then for zero Y vectors, move all negative X to positive @@ -453,12 +425,12 @@ def vector_hemisphere(vectors, return_sign=False): signs[np.logical_and(zero[:, 2], negative[:, 1])] = -1.0 # all on-plane vectors with zero Y values # and negative X values - signs[np.logical_and(np.logical_and(zero[:, 2], - zero[:, 1]), - negative[:, 0])] = -1.0 + signs[ + np.logical_and(np.logical_and(zero[:, 2], zero[:, 1]), negative[:, 0]) + ] = -1.0 else: - raise ValueError('vectors must be (n, 3)!') + raise ValueError("vectors must be (n, 3)!") # apply the signs to the vectors oriented = vectors * signs.reshape((-1, 1)) @@ -486,15 +458,14 @@ def vector_to_spherical(cartesian): """ cartesian = np.asanyarray(cartesian, dtype=np.float64) if not is_shape(cartesian, (-1, 3)): - raise ValueError('Cartesian points must be (n, 3)!') + raise ValueError("Cartesian points must be (n, 3)!") unit, valid = unitize(cartesian, check_valid=True) unit[np.abs(unit) < TOL_MERGE] = 0.0 x, y, z = unit.T spherical = np.zeros((len(cartesian), 2), dtype=np.float64) - spherical[valid] = np.column_stack((np.arctan2(y, x), - np.arccos(z))) + spherical[valid] = np.column_stack((np.arctan2(y, x), np.arccos(z))) return spherical @@ -514,14 +485,12 @@ def spherical_to_vector(spherical): """ spherical = np.asanyarray(spherical, dtype=np.float64) if not is_shape(spherical, (-1, 2)): - raise ValueError('spherical coordinates must be (n, 2)!') + raise ValueError("spherical coordinates must be (n, 2)!") theta, phi = spherical.T st, ct = np.sin(theta), np.cos(theta) sp, cp = np.sin(phi), np.cos(phi) - vectors = np.column_stack((ct * sp, - st * sp, - cp)) + vectors = np.column_stack((ct * sp, st * sp, cp)) return vectors @@ -558,6 +527,7 @@ def pairwise(iterable): # if we have a normal iterable use itertools import itertools + a, b = itertools.tee(iterable) # pop the first element of the second item next(b) @@ -570,7 +540,7 @@ def pairwise(iterable): # only included in recent-ish version of numpy multi_dot = np.linalg.multi_dot except AttributeError: - log.debug('np.linalg.multi_dot not available, using fallback') + log.debug("np.linalg.multi_dot not available, using fallback") def multi_dot(arrays): """ @@ -662,7 +632,7 @@ def row_norm(data): norm : (n,) float Norm of each row of input array """ - return np.sqrt(np.dot(data ** 2, [1] * data.shape[1])) + return np.sqrt(np.dot(data**2, [1] * data.shape[1])) def stack_3D(points, return_2D=False): @@ -690,15 +660,14 @@ def stack_3D(points, return_2D=False): if shape == (0,): is_2D = False elif len(shape) != 2: - raise ValueError('Points must be 2D array!') + raise ValueError("Points must be 2D array!") elif shape[1] == 2: - points = np.column_stack(( - points, np.zeros(len(points)))) + points = np.column_stack((points, np.zeros(len(points)))) is_2D = True elif shape[1] == 3: is_2D = False else: - raise ValueError('Points must be (n, 2) or (n, 3)!') + raise 
ValueError("Points must be (n, 2) or (n, 3)!") if return_2D: return points, is_2D @@ -721,7 +690,7 @@ def grid_arange(bounds, step): """ bounds = np.asanyarray(bounds, dtype=np.float64) if len(bounds) != 2: - raise ValueError('bounds must be (2, dimension!') + raise ValueError("bounds must be (2, dimension!") # allow single float or per-dimension spacing step = np.asanyarray(step, dtype=np.float64) @@ -729,8 +698,11 @@ def grid_arange(bounds, step): step = np.tile(step, bounds.shape[1]) grid_elements = [np.arange(*b, step=s) for b, s in zip(bounds.T, step)] - grid = np.vstack(np.meshgrid(*grid_elements, indexing='ij') - ).reshape(bounds.shape[1], -1).T + grid = ( + np.vstack(np.meshgrid(*grid_elements, indexing="ij")) + .reshape(bounds.shape[1], -1) + .T + ) return grid @@ -749,15 +721,18 @@ def grid_linspace(bounds, count): """ bounds = np.asanyarray(bounds, dtype=np.float64) if len(bounds) != 2: - raise ValueError('bounds must be (2, dimension!') + raise ValueError("bounds must be (2, dimension!") count = np.asanyarray(count, dtype=np.int64) if count.shape == (): count = np.tile(count, bounds.shape[1]) grid_elements = [np.linspace(*b, num=c) for b, c in zip(bounds.T, count)] - grid = np.vstack(np.meshgrid(*grid_elements, indexing='ij') - ).reshape(bounds.shape[1], -1).T + grid = ( + np.vstack(np.meshgrid(*grid_elements, indexing="ij")) + .reshape(bounds.shape[1], -1) + .T + ) return grid @@ -861,12 +836,14 @@ def decimal_to_digits(decimal, min_digits=None): return digits -def attach_to_log(level=logging.DEBUG, - handler=None, - loggers=None, - colors=True, - capture_warnings=True, - blacklist=None): +def attach_to_log( + level=logging.DEBUG, + handler=None, + loggers=None, + colors=True, + capture_warnings=True, + blacklist=None, +): """ Attach a stream handler to all loggers. 
@@ -886,12 +863,14 @@ def attach_to_log(level=logging.DEBUG, # default blacklist includes ipython debugging stuff if blacklist is None: - blacklist = ['TerminalIPythonApp', - 'PYREADLINE', - 'pyembree', - 'shapely', - 'matplotlib', - 'parso'] + blacklist = [ + "TerminalIPythonApp", + "PYREADLINE", + "pyembree", + "shapely", + "matplotlib", + "parso", + ] # make sure we log warnings from the warnings module logging.captureWarnings(capture_warnings) @@ -899,20 +878,27 @@ def attach_to_log(level=logging.DEBUG, # create a basic formatter formatter = logging.Formatter( "[%(asctime)s] %(levelname)-7s (%(filename)s:%(lineno)3s) %(message)s", - "%Y-%m-%d %H:%M:%S") + "%Y-%m-%d %H:%M:%S", + ) if colors: try: from colorlog import ColoredFormatter + formatter = ColoredFormatter( - ("%(log_color)s%(levelname)-8s%(reset)s " + - "%(filename)17s:%(lineno)-4s %(blue)4s%(message)s"), + ( + "%(log_color)s%(levelname)-8s%(reset)s " + + "%(filename)17s:%(lineno)-4s %(blue)4s%(message)s" + ), datefmt=None, reset=True, - log_colors={'DEBUG': 'cyan', - 'INFO': 'green', - 'WARNING': 'yellow', - 'ERROR': 'red', - 'CRITICAL': 'red'}) + log_colors={ + "DEBUG": "cyan", + "INFO": "green", + "WARNING": "yellow", + "ERROR": "red", + "CRITICAL": "red", + }, + ) except ImportError: pass @@ -929,16 +915,17 @@ def attach_to_log(level=logging.DEBUG, # de-duplicate loggers using a set loggers = set(logging.Logger.manager.loggerDict.values()) # add the warnings logging - loggers.add(logging.getLogger('py.warnings')) + loggers.add(logging.getLogger("py.warnings")) # disable pyembree warnings - logging.getLogger('pyembree').disabled = True + logging.getLogger("pyembree").disabled = True # loop through all available loggers for logger in loggers: # skip loggers on the blacklist - if (logger.__class__.__name__ != 'Logger' or - any(logger.name.startswith(b) for b in blacklist)): + if logger.__class__.__name__ != "Logger" or any( + logger.name.startswith(b) for b in blacklist + ): continue logger.addHandler(handler) logger.setLevel(level) @@ -993,8 +980,7 @@ def stack_lines(indices): shape = (-1, len(indices[0])) else: shape = (-1, 2) - return np.column_stack((indices[:-1], - indices[1:])).reshape(shape) + return np.column_stack((indices[:-1], indices[1:])).reshape(shape) def append_faces(vertices_seq, faces_seq): @@ -1036,11 +1022,7 @@ def append_faces(vertices_seq, faces_seq): return vertices, faces -def array_to_string(array, - col_delim=' ', - row_delim='\n', - digits=8, - value_format='{}'): +def array_to_string(array, col_delim=" ", row_delim="\n", digits=8, value_format="{}"): """ Convert a 1 or 2D array into a string with a specified number of digits and delimiter. 
The reason this exists is that the
@@ -1076,27 +1058,25 @@ def array_to_string,

     # abort for non-flat arrays
     if len(array.shape) > 2:
-        raise ValueError('conversion only works on 1D/2D arrays not %s!',
-                         str(array.shape))
+        raise ValueError(
+            "conversion only works on 1D/2D arrays not %s!", str(array.shape)
+        )

     # abort for structured arrays
     if array.dtype.names is not None:
-        raise ValueError(
-            'array is structured, use structured_array_to_string instead')
+        raise ValueError("array is structured, use structured_array_to_string instead")

     # allow a value to be repeated in a value format
-    repeats = value_format.count('{')
+    repeats = value_format.count("{")

-    if array.dtype.kind in ['i', 'u']:
+    if array.dtype.kind in ["i", "u"]:
         # integer types don't need a specified precision
         format_str = value_format + col_delim
-    elif array.dtype.kind == 'f':
+    elif array.dtype.kind == "f":
         # add the digits formatting to floats
-        format_str = value_format.replace(
-            '{}', '{:.' + str(digits) + 'f}') + col_delim
+        format_str = value_format.replace("{}", "{:." + str(digits) + "f}") + col_delim
     else:
-        raise ValueError('dtype %s not convertible!',
-                         array.dtype.name)
+        raise ValueError("dtype %s not convertible!", array.dtype.name)

     # length of extra delimiters at the end
     end_junk = len(col_delim)
@@ -1104,7 +1084,7 @@
     if len(array.shape) == 2:
         format_str *= array.shape[1]
         # cut off the last column delimiter and add a row delimiter
-        format_str = format_str[:-len(col_delim)] + row_delim
+        format_str = format_str[: -len(col_delim)] + row_delim
         end_junk = len(row_delim)

     # expand format string to whole array
@@ -1112,8 +1092,7 @@

     # if an array is repeated in the value format
     # do the shaping here so we don't need to specify indexes
-    shaped = np.tile(array.reshape((-1, 1)),
-                     (1, repeats)).reshape(-1)
+    shaped = np.tile(array.reshape((-1, 1)), (1, repeats)).reshape(-1)

     # run the format operation and remove the extra delimiters
     formatted = format_str.format(*shaped)[:-end_junk]

     return formatted


-def structured_array_to_string(array,
-                               col_delim=' ',
-                               row_delim='\n',
-                               digits=8,
-                               value_format='{}'):
+def structured_array_to_string(
+    array, col_delim=" ", row_delim="\n", digits=8, value_format="{}"
+):
     """
     Convert a structured array into a string
     with a specified number of digits and delimiter.
The reason this exists is
@@ -1162,40 +1139,40 @@ def structured_array_to_string,

     # abort for non-flat arrays
     if len(array.shape) > 1:
-        raise ValueError('conversion only works on 1D/2D arrays not %s!',
-                         str(array.shape))
+        raise ValueError(
+            "conversion only works on 1D/2D arrays not %s!", str(array.shape)
+        )

     # abort for unstructured arrays
     if array.dtype.names is None:
-        raise ValueError(
-            'array is not structured, use array_to_string instead')
+        raise ValueError("array is not structured, use array_to_string instead")

     # do not allow a value to be repeated in a value format
-    if value_format.count('{') > 1:
+    if value_format.count("{") > 1:
         raise ValueError(
-            'value_format %s is invalid, repeating unstructured array '
-            + 'values is unsupported', value_format)
+            "value_format %s is invalid, repeating unstructured array "
+            + "values is unsupported",
+            value_format,
+        )

-    format_str = ''
+    format_str = ""
     for name in array.dtype.names:
         kind = array[name].dtype.kind
-        element_row_length = (
-            array[name].shape[1] if len(array[name].shape) == 2 else 1)
-        if kind in ['i', 'u']:
+        element_row_length = array[name].shape[1] if len(array[name].shape) == 2 else 1
+        if kind in ["i", "u"]:
             # integer types need a no-decimal formatting
-            element_format_str = value_format.replace(
-                '{}', '{:0.0f}') + col_delim
-        elif kind == 'f':
+            element_format_str = value_format.replace("{}", "{:0.0f}") + col_delim
+        elif kind == "f":
             # add the digits formatting to floats
-            element_format_str = value_format.replace(
-                '{}', '{:.' + str(digits) + 'f}') + col_delim
+            element_format_str = (
+                value_format.replace("{}", "{:." + str(digits) + "f}") + col_delim
+            )
         else:
-            raise ValueError('dtype %s not convertible!',
-                             array.dtype)
+            raise ValueError("dtype %s not convertible!", array.dtype)
         format_str += element_row_length * element_format_str

     # length of extra delimiters at the end
-    format_str = format_str[:-len(col_delim)] + row_delim
+    format_str = format_str[: -len(col_delim)] + row_delim

     # expand format string to whole array
     format_str *= len(array)
@@ -1203,16 +1180,16 @@
     count = len(array)
     # will upgrade everything to a float
     flattened = np.hstack(
-        [array[k].reshape((count, -1))
-         for k in array.dtype.names]).reshape(-1)
+        [array[k].reshape((count, -1)) for k in array.dtype.names]
+    ).reshape(-1)

     # run the format operation and remove the extra delimiters
-    formatted = format_str.format(*flattened)[:-len(row_delim)]
+    formatted = format_str.format(*flattened)[: -len(row_delim)]

     return formatted


-def array_to_encoded(array, dtype=None, encoding='base64'):
+def array_to_encoded(array, dtype=None, encoding="base64"):
     """
     Export a numpy array to a compact serializable dictionary.
@@ -1240,21 +1217,20 @@ def array_to_encoded(array, dtype=None, encoding='base64'): if dtype is None: dtype = array.dtype - encoded = {'dtype': np.dtype(dtype).str, - 'shape': shape} - if encoding in ['base64', 'dict64']: + encoded = {"dtype": np.dtype(dtype).str, "shape": shape} + if encoding in ["base64", "dict64"]: packed = base64.b64encode(flat.astype(dtype).tobytes()) - if hasattr(packed, 'decode'): - packed = packed.decode('utf-8') - encoded['base64'] = packed - elif encoding == 'binary': - encoded['binary'] = array.tobytes(order='C') + if hasattr(packed, "decode"): + packed = packed.decode("utf-8") + encoded["base64"] = packed + elif encoding == "binary": + encoded["binary"] = array.tobytes(order="C") else: - raise ValueError(f'encoding {encoding} is not available!') + raise ValueError(f"encoding {encoding} is not available!") return encoded -def decode_keys(store, encoding='utf-8'): +def decode_keys(store, encoding="utf-8"): """ If a dictionary has keys that are bytes decode them to a str. @@ -1279,7 +1255,7 @@ def decode_keys(store, encoding='utf-8'): """ keys = store.keys() for key in keys: - if hasattr(key, 'decode'): + if hasattr(key, "decode"): decoded = key.decode(encoding) if key != decoded: store[key.decode(encoding)] = store[key] @@ -1287,7 +1263,7 @@ def decode_keys(store, encoding='utf-8'): return store -def comment_strip(text, starts_with='#', new_line='\n'): +def comment_strip(text, starts_with="#", new_line="\n"): """ Strip comments from a text block. @@ -1314,16 +1290,18 @@ def comment_strip(text, starts_with='#', new_line='\n'): # special case files that start with a comment if text.startswith(starts_with): - lead = '' + lead = "" else: lead = split[0] # take each comment up until the newline removed = [i.split(new_line, 1) for i in split] # add the leading string back on - result = lead + new_line + new_line.join( - i[1] for i in removed - if len(i) > 1 and len(i[1]) > 0) + result = ( + lead + + new_line + + new_line.join(i[1] for i in removed if len(i) > 1 and len(i[1]) > 0) + ) # strip leading and trailing whitespace result = result.strip() @@ -1353,19 +1331,17 @@ def encoded_to_array(encoded): as_array = np.asanyarray(encoded) return as_array else: - raise ValueError('Unable to extract numpy array from input') + raise ValueError("Unable to extract numpy array from input") encoded = decode_keys(encoded) - dtype = np.dtype(encoded['dtype']) - if 'base64' in encoded: - array = np.frombuffer(base64.b64decode(encoded['base64']), - dtype) - elif 'binary' in encoded: - array = np.frombuffer(encoded['binary'], - dtype=dtype) - if 'shape' in encoded: - array = array.reshape(encoded['shape']) + dtype = np.dtype(encoded["dtype"]) + if "base64" in encoded: + array = np.frombuffer(base64.b64decode(encoded["base64"]), dtype) + elif "binary" in encoded: + array = np.frombuffer(encoded["binary"], dtype=dtype) + if "shape" in encoded: + array = array.reshape(encoded["shape"]) return array @@ -1407,7 +1383,7 @@ def type_bases(obj, depth=4): bases = np.hstack(bases) except IndexError: bases = [] - return [i for i in bases if hasattr(i, '__name__')] + return [i for i in bases if hasattr(i, "__name__")] def type_named(obj, name): @@ -1434,7 +1410,7 @@ class : Optional[Callable] for base in type_bases(obj): if base.__name__ == name: return base - raise ValueError('Unable to extract class of name ' + name) + raise ValueError("Unable to extract class of name " + name) def concatenate(a, b=None): @@ -1475,11 +1451,12 @@ def concatenate(a, b=None): # if there are no meshes return an 
empty list return [] - is_mesh = [f for f in flat if is_instance_named(f, 'Trimesh')] - is_path = [f for f in flat if is_instance_named(f, 'Path')] + is_mesh = [f for f in flat if is_instance_named(f, "Trimesh")] + is_path = [f for f in flat if is_instance_named(f, "Path")] if len(is_path) > len(is_mesh): from .path.util import concatenate as concatenate_path + return concatenate_path(is_path) if len(is_mesh) == 0: @@ -1487,46 +1464,44 @@ def concatenate(a, b=None): # extract the trimesh type to avoid a circular import # and assert that all inputs are Trimesh objects - trimesh_type = type_named(is_mesh[0], 'Trimesh') + trimesh_type = type_named(is_mesh[0], "Trimesh") # append faces and vertices of meshes vertices, faces = append_faces( - [m.vertices.copy() for m in is_mesh], - [m.faces.copy() for m in is_mesh]) + [m.vertices.copy() for m in is_mesh], [m.faces.copy() for m in is_mesh] + ) - # only save face normals if already calculated + # save face normals if already calculated face_normals = None - if all('face_normals' in m._cache for m in is_mesh): - face_normals = np.vstack( - [m.face_normals for m in is_mesh]) - - # always save vertex normals - vertex_normals = vstack_empty( - [m.vertex_normals.copy() for m in is_mesh]) + if any("face_normals" in m._cache for m in is_mesh): + face_normals = np.vstack([m.face_normals for m in is_mesh]) + + # save vertex normals if any mesh has them + vertex_normals = None + if any("vertex_normals" in m._cache for m in is_mesh): + vertex_normals = vstack_empty([m.vertex_normals for m in is_mesh]) try: # concatenate visuals - visual = is_mesh[0].visual.concatenate( - [m.visual for m in is_mesh[1:]]) + visual = is_mesh[0].visual.concatenate([m.visual for m in is_mesh[1:]]) except BaseException: - log.debug('failed to combine visuals', exc_info=True) + log.debug("failed to combine visuals", exc_info=True) visual = None # create the mesh object - return trimesh_type(vertices=vertices, - faces=faces, - face_normals=face_normals, - vertex_normals=vertex_normals, - visual=visual, - process=False) + return trimesh_type( + vertices=vertices, + faces=faces, + face_normals=face_normals, + vertex_normals=vertex_normals, + visual=visual, + process=False, + ) -def submesh(mesh, - faces_sequence, - repair=True, - only_watertight=False, - min_faces=None, - append=False): +def submesh( + mesh, faces_sequence, repair=True, only_watertight=False, min_faces=None, append=False +): """ Return a subset of a mesh. 
@@ -1573,7 +1548,7 @@ def submesh(mesh, if len(index) == 0: # regardless of type empty arrays are useless continue - if index.dtype.kind == 'b': + if index.dtype.kind == "b": # if passed a bool with no true continue if not index.any(): continue @@ -1602,7 +1577,7 @@ def submesh(mesh, # we use type(mesh) rather than importing Trimesh from base # to avoid a circular import - trimesh_type = type_named(mesh, 'Trimesh') + trimesh_type = type_named(mesh, "Trimesh") if append: visual = None try: @@ -1617,29 +1592,30 @@ def submesh(mesh, faces=faces, face_normals=np.vstack(normals), visual=visual, - process=False) + process=False, + ) return appended if visuals is None: visuals = [None] * len(vertices) # generate a list of Trimesh objects - result = [trimesh_type( - vertices=v, - faces=f, - face_normals=n, - visual=c, - metadata=copy.deepcopy(mesh.metadata), - process=False) for v, f, n, c in zip(vertices, - faces, - normals, - visuals)] + result = [ + trimesh_type( + vertices=v, + faces=f, + face_normals=n, + visual=c, + metadata=copy.deepcopy(mesh.metadata), + process=False, + ) + for v, f, n, c in zip(vertices, faces, normals, visuals) + ] result = np.array(result) if only_watertight or repair: # fill_holes will attempt a repair and returns the # watertight status at the end of the repair attempt - watertight = np.array([i.fill_holes() and len(i.faces) >= 4 - for i in result]) + watertight = np.array([i.fill_holes() and len(i.faces) >= 4 for i in result]) if only_watertight: # remove unrepairable meshes result = result[watertight] @@ -1666,9 +1642,9 @@ def zero_pad(data, count, right=True): elif len(data) < count: padded = np.zeros(count) if right: - padded[-len(data):] = data + padded[-len(data) :] = data else: - padded[:len(data)] = data + padded[: len(data)] = data return padded else: return np.asanyarray(data) @@ -1691,15 +1667,17 @@ def jsonify(obj, **kwargs): dumped : str JSON dump of obj """ + class EdgeEncoder(json.JSONEncoder): def default(self, obj): # will work for numpy.ndarrays # as well as their int64/etc objects - if hasattr(obj, 'tolist'): + if hasattr(obj, "tolist"): return obj.tolist() - elif hasattr(obj, 'timestamp'): + elif hasattr(obj, "timestamp"): return obj.timestamp() return json.JSONEncoder.default(self, obj) + # run the dumps using our encoder return json.dumps(obj, cls=EdgeEncoder, **kwargs) @@ -1729,16 +1707,18 @@ def convert_like(item, like): return item # if it's an array with one item return it - if (is_sequence(item) and len(item) == 1 and - isinstance(item[0], like.__class__)): + if is_sequence(item) and len(item) == 1 and isinstance(item[0], like.__class__): return item[0] - if (isinstance(item, str) and - like.__class__.__name__ == 'Polygon' and - item.startswith('POLYGON')): + if ( + isinstance(item, str) + and like.__class__.__name__ == "Polygon" + and item.startswith("POLYGON") + ): # break our rule on imports but only a little bit # the import was a WKT serialized polygon from shapely import wkt + return wkt.loads(item) # otherwise just run the conversion @@ -1772,16 +1752,16 @@ def bounds_tree(bounds): if len(bounds.shape) == 3: # should be min-max per bound if bounds.shape[1] != 2: - raise ValueError('bounds not (n, 2, dimension)!') + raise ValueError("bounds not (n, 2, dimension)!") # reshape to one-row-per-hyperrectangle bounds = bounds.reshape((len(bounds), -1)) elif len(bounds.shape) != 2 or bounds.size == 0: - raise ValueError('Bounds must be (n, dimension * 2)!') + raise ValueError("Bounds must be (n, dimension * 2)!") # check to make sure we have 
correct shape dimension = bounds.shape[1] if (dimension % 2) != 0: - raise ValueError('Bounds must be (n,dimension*2)!') + raise ValueError("Bounds must be (n,dimension*2)!") dimension = int(dimension / 2) # some versions of rtree screw up indexes on stream loading @@ -1789,20 +1769,20 @@ def bounds_tree(bounds): # or if we have to do a loop to insert things which is 5x slower rtree_test = rtree.index.Index( [(1564, [0, 0, 0, 10, 10, 10], None)], - properties=rtree.index.Property(dimension=3)) - rtree_stream_ok = next(rtree_test.intersection( - [1, 1, 1, 2, 2, 2])) == 1564 + properties=rtree.index.Property(dimension=3), + ) + rtree_stream_ok = next(rtree_test.intersection([1, 1, 1, 2, 2, 2])) == 1564 properties = rtree.index.Property(dimension=dimension) if rtree_stream_ok: # stream load was verified working on import above - tree = rtree.index.Index(zip(np.arange(len(bounds)), - bounds, - [None] * len(bounds)), - properties=properties) + tree = rtree.index.Index( + zip(np.arange(len(bounds)), bounds, [None] * len(bounds)), + properties=properties, + ) else: # in some rtree versions stream loading goofs the index - log.warning('rtree stream loading broken! Try upgrading rtree!') + log.warning("rtree stream loading broken! Try upgrading rtree!") tree = rtree.index.Index(properties=properties) for i, b in enumerate(bounds): tree.insert(i, b) @@ -1823,14 +1803,11 @@ def wrap_as_stream(item): wrapped : file-like object Contains data from item """ - if not PY3: - # in python 2 StringIO handles bytes and str - return StringIO(item) if isinstance(item, str): return StringIO(item) elif isinstance(item, bytes): return BytesIO(item) - raise ValueError(f'{type(item).__name__} is not wrappable!') + raise ValueError(f"{type(item).__name__} is not wrappable!") def sigfig_round(values, sigfig=1): @@ -1862,7 +1839,7 @@ def sigfig_round(values, sigfig=1): Out[3]: 0.0001405 """ as_int, multiplier = sigfig_int(values, sigfig) - rounded = as_int * (10 ** multiplier) + rounded = as_int * (10**multiplier) return rounded @@ -1892,7 +1869,7 @@ def sigfig_int(values, sigfig): sigfig = np.asanyarray(sigfig, dtype=np.int64).reshape(-1) if sigfig.shape != values.shape: - raise ValueError('sigfig must match identifier') + raise ValueError("sigfig must match identifier") exponent = np.zeros(len(values)) nonzero = np.abs(values) > TOL_ZERO @@ -1926,16 +1903,15 @@ def decompress(file_obj, file_type): if isinstance(file_obj, bytes): file_obj = wrap_as_stream(file_obj) - if file_type.endswith('zip'): + if file_type.endswith("zip"): archive = zipfile.ZipFile(file_obj) - return {name: wrap_as_stream(archive.read(name)) - for name in archive.namelist()} - if 'tar' in file_type[-6:]: + return {name: wrap_as_stream(archive.read(name)) for name in archive.namelist()} + if "tar" in file_type[-6:]: import tarfile - archive = tarfile.open(fileobj=file_obj, mode='r') - return {name: archive.extractfile(name) - for name in archive.getnames()} - raise ValueError('Unsupported type passed!') + + archive = tarfile.open(fileobj=file_obj, mode="r") + return {name: archive.extractfile(name) for name in archive.getnames()} + raise ValueError("Unsupported type passed!") def compress(info, **kwargs): @@ -1954,17 +1930,12 @@ def compress(info, **kwargs): compressed : bytes Compressed file data """ - if PY3: - file_obj = BytesIO() - else: - file_obj = StringIO() - + file_obj = BytesIO() with zipfile.ZipFile( - file_obj, - mode='w', - compression=zipfile.ZIP_DEFLATED, **kwargs) as zipper: + file_obj, mode="w", compression=zipfile.ZIP_DEFLATED, 
**kwargs + ) as zipper: for name, data in info.items(): - if hasattr(data, 'read'): + if hasattr(data, "read"): # if we were passed a file object, read it data = data.read() zipper.writestr(name, data) @@ -1995,12 +1966,12 @@ def split_extension(file_name, special=None): file_name = str(file_name) if special is None: - special = ['tar.bz2', 'tar.gz'] + special = ["tar.bz2", "tar.gz"] if file_name.endswith(tuple(special)): for end in special: if file_name.endswith(end): return end - return file_name.split('.')[-1] + return file_name.split(".")[-1] def triangle_strips_to_faces(strips): @@ -2039,7 +2010,7 @@ def triangle_strips_to_faces(strips): # preallocate and slice the blob into rough triangles tri = np.zeros((len(blob) - 2, 3), dtype=np.int64) for i in range(3): - tri[:len(blob) - 3, i] = blob[i:-3 + i] + tri[: len(blob) - 3, i] = blob[i : -3 + i] # the last triangle is left off from the slicing, add it back tri[-1] = blob[-3:] @@ -2055,7 +2026,7 @@ def triangle_strips_to_faces(strips): length_index = np.append(0, np.cumsum(lengths - 2)) flip = np.zeros(length_index[-1], dtype=bool) for i in range(len(length_index) - 1): - flip[length_index[i] + 1:length_index[i + 1]][::2] = True + flip[length_index[i] + 1 : length_index[i + 1]][::2] = True tri[flip] = np.fliplr(tri[flip]) return tri @@ -2076,11 +2047,10 @@ def triangle_fans_to_faces(fans): Vertex indices representing triangles """ - faces = [np.transpose([ - fan[0] * np.ones(len(fan) - 2, dtype=int), - fan[1:-1], - fan[2:] - ]) for fan in fans] + faces = [ + np.transpose([fan[0] * np.ones(len(fan) - 2, dtype=int), fan[1:-1], fan[2:]]) + for fan in fans + ] return np.concatenate(faces, axis=1) @@ -2111,9 +2081,7 @@ def vstack_empty(tup): return np.vstack(stackable) -def write_encoded(file_obj, - stuff, - encoding='utf-8'): +def write_encoded(file_obj, stuff, encoding="utf-8"): """ If a file is open in binary mode and a string is passed, encode and write. 
@@ -2133,13 +2101,11 @@ def write_encoded(file_obj, encoding : str Encoding of text """ - binary_file = 'b' in getattr(file_obj, 'mode', 'b') - string_stuff = isinstance(stuff, basestring) + binary_file = "b" in getattr(file_obj, "mode", "b") + string_stuff = isinstance(stuff, str) binary_stuff = isinstance(stuff, bytes) - if not PY3: - file_obj.write(stuff) - elif binary_file and string_stuff: + if binary_file and string_stuff: file_obj.write(stuff.encode(encoding)) elif not binary_file and binary_stuff: file_obj.write(stuff.decode(encoding)) @@ -2164,8 +2130,7 @@ def unique_id(length=12): unique : str Unique alphanumeric identifier """ - return uuid.UUID(int=random.getrandbits(128), - version=4).hex[:length] + return uuid.UUID(int=random.getrandbits(128), version=4).hex[:length] def generate_basis(z, epsilon=1e-12): @@ -2193,7 +2158,7 @@ def generate_basis(z, epsilon=1e-12): z = np.array(z, dtype=np.float64, copy=True) # must be a 3D vector if z.shape != (3,): - raise ValueError('z must be (3,) float!') + raise ValueError("z must be (3,) float!") z_norm = np.linalg.norm(z) if z_norm < epsilon: @@ -2299,11 +2264,11 @@ def __getitem__(self, key): def __setitem__(self, key, value): if not isinstance(key, str): - raise ValueError('key must be a string, got %s' % str(key)) + raise ValueError("key must be a string, got %s" % str(key)) if key in self: - raise KeyError('Cannot set new value to existing key %s' % key) + raise KeyError("Cannot set new value to existing key %s" % key) if not callable(value): - raise ValueError('Cannot set value which is not callable.') + raise ValueError("Cannot set value which is not callable.") self._dict[key] = value def __iter__(self): @@ -2340,7 +2305,7 @@ def __exit__(self, *args, **kwargs): shutil.rmtree(self.path) -def decode_text(text, initial='utf-8'): +def decode_text(text, initial="utf-8"): """ Try to decode byte input as a string. @@ -2360,7 +2325,7 @@ def decode_text(text, initial='utf-8'): Data as a string """ # if not bytes just return input - if not hasattr(text, 'decode'): + if not hasattr(text, "decode"): return text try: # initially guess file is UTF-8 or specified encoding @@ -2368,18 +2333,19 @@ def decode_text(text, initial='utf-8'): except UnicodeDecodeError: # detect different file encodings import chardet + # try to detect the encoding of the file # only look at the first 1000 characters otherwise # for big files chardet looks at everything and is slow detect = chardet.detect(text[:1000]) # warn on files that aren't UTF-8 log.debug( - 'Data not {}! Trying {} (confidence {})'.format( - initial, - detect['encoding'], - detect['confidence'])) + "Data not {}! 
Trying {} (confidence {})".format( + initial, detect["encoding"], detect["confidence"] + ) + ) # try to decode again, unwrap in try - text = text.decode(detect['encoding'], errors='ignore') + text = text.decode(detect["encoding"], errors="ignore") return text @@ -2397,13 +2363,12 @@ def to_ascii(text): ascii : str Input as an ASCII string """ - if hasattr(text, 'encode'): + if hasattr(text, "encode"): # case for existing strings - return text.encode( - 'ascii', errors='ignore').decode('ascii') - elif hasattr(text, 'decode'): + return text.encode("ascii", errors="ignore").decode("ascii") + elif hasattr(text, "decode"): # case for bytes - return text.decode('ascii', errors='ignore') + return text.decode("ascii", errors="ignore") # otherwise just wrap as a string return str(text) @@ -2431,7 +2396,7 @@ def is_ccw(points, return_all=False): points = np.array(points, dtype=np.float64) if len(points.shape) != 2 or points.shape[1] != 2: - raise ValueError('only defined for `(n, 2)` points') + raise ValueError("only defined for `(n, 2)` points") # the "shoelace formula" product = np.subtract(*(points[:-1, [1, 0]] * points[1:]).T) @@ -2444,8 +2409,9 @@ def is_ccw(points, return_all=False): return ccw # the centroid of the polygon uses the same formula - centroid = ((points[:-1] + points[1:]) * - product.reshape((-1, 1))).sum(axis=0) / (6.0 * area) + centroid = ((points[:-1] + points[1:]) * product.reshape((-1, 1))).sum(axis=0) / ( + 6.0 * area + ) return ccw, area, centroid @@ -2477,9 +2443,7 @@ def unique_name(start, contains, counts=None): A name that is not contained in `contains` """ # exit early if name is not in bundle - if (start is not None and - len(start) > 0 and - start not in contains): + if start is not None and len(start) > 0 and start not in contains: return start # start checking with zero index unless found @@ -2488,9 +2452,9 @@ def unique_name(start, contains, counts=None): else: increment = counts.get(start, 0) if start is not None and len(start) > 0: - formatter = start + '_{}' + formatter = start + "_{}" # split by our delimiter once - split = start.rsplit('_', 1) + split = start.rsplit("_", 1) if len(split) == 2 and increment == 0: try: # start incrementing from the existing @@ -2498,11 +2462,11 @@ def unique_name(start, contains, counts=None): # if it is not an integer this will fail increment = int(split[1]) # include the first split value - formatter = split[0] + '_{}' + formatter = split[0] + "_{}" except BaseException: pass else: - formatter = 'geometry_{}' + formatter = "geometry_{}" # if contains is empty we will only need to check once for i in range(increment + 1, 2 + increment + len(contains)): @@ -2514,4 +2478,4 @@ def unique_name(start, contains, counts=None): # this should really never happen since we looped # through the full length of contains - raise ValueError('Unable to establish unique name!') + raise ValueError("Unable to establish unique name!") From a393abba772d8f379570eca76d13664e8a3defab Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 15:01:20 -0400 Subject: [PATCH 087/144] add in-process check --- tests/test_gltf.py | 24 ++++++++---------------- trimesh/util.py | 4 +++- trimesh/voxel/encoding.py | 16 ---------------- 3 files changed, 11 insertions(+), 33 deletions(-) diff --git a/tests/test_gltf.py b/tests/test_gltf.py index 825c95b1d..018e39f19 100644 --- a/tests/test_gltf.py +++ b/tests/test_gltf.py @@ -42,22 +42,14 @@ def validate_glb(data, name=None): capture_output=True) # -o prints JSON to stdout content = 
report.stdout.decode('utf-8') - # log the GLTF validator report if - # there are any warnings or hints - decode = g.json.loads(content) - - if (decode['issues']['numErrors'] > 0 or - report.returncode != 0): - # log the whole error report - g.log.error(content) - if name is not None: - g.log.error('failed on: %s', name) - raise ValueError(content) - - # print all warnings: extremely verbose - # if any(decode['issues'][i] > 0 for i in - # ['numWarnings', 'numInfos', 'numHints']): - # g.log.debug(content) + returncode = report.returncode + + if returncode != 0: + g.log.error(f'failed on: `{name}`') + g.log.error(f'validator: `{content}`') + g.log.error(f'stderr: `{report.stderr}`') + + raise ValueError("gltf_validator failed") class GLTFTest(g.unittest.TestCase): diff --git a/trimesh/util.py b/trimesh/util.py index 088f2985e..0da9aa344 100644 --- a/trimesh/util.py +++ b/trimesh/util.py @@ -1474,12 +1474,14 @@ def concatenate(a, b=None): # save face normals if already calculated face_normals = None if any("face_normals" in m._cache for m in is_mesh): - face_normals = np.vstack([m.face_normals for m in is_mesh]) + face_normals = vstack_empty([m.face_normals for m in is_mesh]) + assert face_normals.shape == faces.shape # save vertex normals if any mesh has them vertex_normals = None if any("vertex_normals" in m._cache for m in is_mesh): vertex_normals = vstack_empty([m.vertex_normals for m in is_mesh]) + assert vertex_normals.shape == vertices.shape try: # concatenate visuals diff --git a/trimesh/voxel/encoding.py b/trimesh/voxel/encoding.py index 3942a27e0..1f21d6c0d 100644 --- a/trimesh/voxel/encoding.py +++ b/trimesh/voxel/encoding.py @@ -118,22 +118,6 @@ def stripped(self): def _flip(self, axes): return FlippedEncoding(self, axes) - def crc(self): - log.warning( - "`geometry.crc()` is deprecated and will " - + "be removed in October 2023: replace " - + "with `geometry.__hash__()` or `hash(geometry)`" - ) - return self.__hash__() - - def hash(self): - log.warning( - "`geometry.hash()` is deprecated and will " - + "be removed in October 2023: replace " - + "with `geometry.__hash__()` or `hash(geometry)`" - ) - return self.__hash__() - def __hash__(self): """ Get the hash of the current transformation matrix. 
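The `concatenate` change above stacks cached normals with `vstack_empty` and then asserts that the stacked shapes match `faces` and `vertices`: that assertion is the in-process check named in the commit message, catching misaligned normals at concatenation time instead of far downstream. A minimal sketch of the surrounding cache-guard pattern, assuming a dict-backed lazy cache like trimesh's (`Mesh` and `concat_face_normals` are illustrative names for this sketch, not trimesh's API):

    import numpy as np


    class Mesh:
        """Toy stand-in for a mesh whose face normals are lazily
        computed on first access and stored in `_cache` by name."""

        def __init__(self, vertices, faces):
            self.vertices = np.asanyarray(vertices, dtype=np.float64)
            self.faces = np.asanyarray(faces, dtype=np.int64)
            self._cache = {}

        @property
        def face_normals(self):
            # expensive: computed once, then served from the cache
            if "face_normals" not in self._cache:
                tri = self.vertices[self.faces]
                raw = np.cross(tri[:, 1] - tri[:, 0], tri[:, 2] - tri[:, 0])
                lengths = np.linalg.norm(raw, axis=1).reshape((-1, 1))
                self._cache["face_normals"] = raw / lengths
            return self._cache["face_normals"]


    def concat_face_normals(meshes):
        # check cache membership instead of touching the property:
        # if no input ever computed normals, skip the stack entirely
        # so concatenation never triggers the computation
        if not any("face_normals" in m._cache for m in meshes):
            return None
        stacked = np.vstack([m.face_normals for m in meshes])
        # the in-process check: exactly one unit normal per face
        assert stacked.shape == (sum(len(m.faces) for m in meshes), 3)
        return stacked

The trade-off is deliberate: if even one input has cached normals, the remaining meshes are forced to compute theirs so the stacked array stays aligned with the concatenated faces; if none do, the result keeps its normals lazy.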
From f7121ca78438b3a90efa3a80dc40b3ed003b751c Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 15:54:01 -0400 Subject: [PATCH 088/144] fix subtle caching bug --- trimesh/caching.py | 174 +++++++++++++++++++------------------- trimesh/voxel/base.py | 33 +++----- trimesh/voxel/creation.py | 6 +- trimesh/voxel/encoding.py | 22 +---- 4 files changed, 100 insertions(+), 135 deletions(-) diff --git a/trimesh/caching.py b/trimesh/caching.py index a41396a9c..2a1c11484 100644 --- a/trimesh/caching.py +++ b/trimesh/caching.py @@ -48,6 +48,7 @@ def sha256(item): def hash_fallback(item): return int(_blake2b(item).hexdigest(), 16) + except BaseException: # fallback to sha256 hash_fallback = sha256 @@ -63,9 +64,11 @@ def hash_fallback(item): from xxhash import xxh64_intdigest as hash_fast except BaseException: # use hashlib as a fallback hashing library - log.debug('falling back to hashlib ' + - 'hashing: `pip install xxhash`' + - 'for 50x faster cache checks') + log.debug( + "falling back to hashlib " + + "hashing: `pip install xxhash`" + + "for 50x faster cache checks" + ) hash_fast = hash_fallback @@ -92,10 +95,9 @@ def tracked_array(array, dtype=None): if array is None: array = [] # make sure it is contiguous then view it as our subclass - tracked = np.ascontiguousarray( - array, dtype=dtype).view(TrackedArray) + tracked = np.ascontiguousarray(array, dtype=dtype).view(TrackedArray) # should always be contiguous here - assert tracked.flags['C_CONTIGUOUS'] + assert tracked.flags["C_CONTIGUOUS"] return tracked @@ -138,8 +140,11 @@ def get_cached(*args, **kwargs): # value not in cache so execute the function value = function(*args, **kwargs) # store the value - if self._cache.force_immutable and hasattr( - value, 'flags') and len(value.shape) > 0: + if ( + self._cache.force_immutable + and hasattr(value, "flags") + and len(value.shape) > 0 + ): value.flags.writeable = False self._cache.cache[name] = value @@ -189,14 +194,13 @@ def __array_wrap__(self, out_arr, context=None): See https://github.com/numpy/numpy/issues/5819 """ if out_arr.ndim: - return np.ndarray.__array_wrap__( - self, out_arr, context) + return np.ndarray.__array_wrap__(self, out_arr, context) # Match numpy's behavior and return a numpy dtype scalar return out_arr[()] @property def mutable(self): - return self.flags['WRITEABLE'] + return self.flags["WRITEABLE"] @mutable.setter def mutable(self, value): @@ -212,12 +216,12 @@ def __hash__(self): A hash of the array contents. """ # repeat the bookkeeping to get a contiguous array - if not self._dirty_hash and hasattr(self, '_hashed'): + if not self._dirty_hash and hasattr(self, "_hashed"): # we have a valid hash without recomputing. 
return self._hashed # run a hashing function on the C-order bytes copy - hashed = hash_fast(self.tobytes(order='C')) + hashed = hash_fast(self.tobytes(order="C")) # assign the value and set the flag self._hashed = hashed @@ -234,118 +238,95 @@ def __iadd__(self, *args, **kwargs): """ self._dirty_hash = True - return super(self.__class__, self).__iadd__( - *args, **kwargs) + return super(self.__class__, self).__iadd__(*args, **kwargs) def __isub__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__isub__( - *args, **kwargs) + return super(self.__class__, self).__isub__(*args, **kwargs) def fill(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).fill( - *args, **kwargs) + return super(self.__class__, self).fill(*args, **kwargs) def partition(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).partition( - *args, **kwargs) + return super(self.__class__, self).partition(*args, **kwargs) def put(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).put( - *args, **kwargs) + return super(self.__class__, self).put(*args, **kwargs) def byteswap(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).byteswap( - *args, **kwargs) + return super(self.__class__, self).byteswap(*args, **kwargs) def itemset(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).itemset( - *args, **kwargs) + return super(self.__class__, self).itemset(*args, **kwargs) def sort(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).sort( - *args, **kwargs) + return super(self.__class__, self).sort(*args, **kwargs) def setflags(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).setflags( - *args, **kwargs) + return super(self.__class__, self).setflags(*args, **kwargs) def __imul__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__imul__( - *args, **kwargs) + return super(self.__class__, self).__imul__(*args, **kwargs) def __idiv__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__idiv__( - *args, **kwargs) + return super(self.__class__, self).__idiv__(*args, **kwargs) def __itruediv__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__itruediv__( - *args, **kwargs) + return super(self.__class__, self).__itruediv__(*args, **kwargs) def __imatmul__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__imatmul__( - *args, **kwargs) + return super(self.__class__, self).__imatmul__(*args, **kwargs) def __ipow__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__ipow__( - *args, **kwargs) + return super(self.__class__, self).__ipow__(*args, **kwargs) def __imod__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__imod__( - *args, **kwargs) + return super(self.__class__, self).__imod__(*args, **kwargs) def __ifloordiv__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__ifloordiv__( - *args, **kwargs) + return super(self.__class__, self).__ifloordiv__(*args, **kwargs) def __ilshift__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__ilshift__( - *args, **kwargs) + return super(self.__class__, self).__ilshift__(*args, **kwargs) def __irshift__(self, *args, **kwargs): self._dirty_hash = True - 
return super(self.__class__, self).__irshift__( - *args, **kwargs) + return super(self.__class__, self).__irshift__(*args, **kwargs) def __iand__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__iand__( - *args, **kwargs) + return super(self.__class__, self).__iand__(*args, **kwargs) def __ixor__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__ixor__( - *args, **kwargs) + return super(self.__class__, self).__ixor__(*args, **kwargs) def __ior__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__ior__( - *args, **kwargs) + return super(self.__class__, self).__ior__(*args, **kwargs) def __setitem__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__setitem__( - *args, **kwargs) + return super(self.__class__, self).__setitem__(*args, **kwargs) def __setslice__(self, *args, **kwargs): self._dirty_hash = True - return super(self.__class__, self).__setslice__( - *args, **kwargs) + return super(self.__class__, self).__setslice__(*args, **kwargs) class Cache: @@ -398,9 +379,11 @@ def verify(self): # things changed if id_new != self.id_current: if len(self.cache) > 0: - log.debug('%d items cleared from cache: %s', - len(self.cache), - str(list(self.cache.keys()))) + log.debug( + "%d items cleared from cache: %s", + len(self.cache), + str(list(self.cache.keys())), + ) # hash changed, so dump the cache # do it manually rather than calling clear() # as we are internal logic and can avoid function calls @@ -420,8 +403,7 @@ def clear(self, exclude=None): if exclude is None: self.cache = {} else: - self.cache = {k: v for k, v in self.cache.items() - if k in exclude} + self.cache = {k: v for k, v in self.cache.items() if k in exclude} def update(self, items): """ @@ -432,7 +414,7 @@ def update(self, items): if self.force_immutable: for v in self.cache.values(): - if hasattr(v, 'flags') and len(v.shape) > 0: + if hasattr(v, "flags") and len(v.shape) > 0: v.flags.writeable = False self.id_set() @@ -476,8 +458,7 @@ def __setitem__(self, key, value): # dumpy cache if ID function has changed self.verify() # make numpy arrays read-only if asked to - if self.force_immutable and hasattr( - value, 'flags') and len(value.shape) > 0: + if self.force_immutable and hasattr(value, "flags") and len(value.shape) > 0: value.flags.writeable = False # assign data to dict self.cache[key] = value @@ -523,8 +504,7 @@ def __init__(self, path, expire_days=30): # store how old we allow results to be self.expire_days = expire_days # store the location for saving results - self.path = os.path.abspath( - os.path.expanduser(path)) + self.path = os.path.abspath(os.path.expanduser(path)) # make sure the specified path exists os.makedirs(self.path, exist_ok=True) @@ -541,7 +521,7 @@ def get(self, key, fetch): function and store its result on disk. 
""" # hash the key so we have a fixed length string - key_hash = _sha256(key.encode('utf-8')).hexdigest() + key_hash = _sha256(key.encode("utf-8")).hexdigest() # full path of result on local disk path = os.path.join(self.path, key_hash) @@ -553,15 +533,15 @@ def get(self, key, fetch): # this nested condition means that # the file both exists and is recent # enough, so just return its contents - with open(path, 'rb') as f: + with open(path, "rb") as f: return f.read() - log.debug(f'not in cache fetching: `{key}`') + log.debug(f"not in cache fetching: `{key}`") # since we made it here our data isn't cached # run the expensive function to fetch the file raw = fetch() # write the data so we can save it - with open(path, 'wb') as f: + with open(path, "wb") as f: f.write(raw) # return the data @@ -598,7 +578,7 @@ def mutable(self): is_mutable : bool Can data be altered in the DataStore """ - return getattr(self, '_mutable', True) + return getattr(self, "_mutable", True) @mutable.setter def mutable(self, value): @@ -650,21 +630,36 @@ def __getitem__(self, key): def __setitem__(self, key, data): """ - Store an item in the DataStore + Store an item in the DataStore. + + Parameters + ------------- + key + A hashable key to store under + data + Usually a numpy array which will be subclassed + but anything hashable should be able to be stored. """ # we shouldn't allow setting on immutable datastores if not self.mutable: - raise ValueError('DataStore is configured immutable!') + raise ValueError("DataStore is configured immutable!") - if hasattr(data, 'hash'): + if isinstance(data, TrackedArray): # don't bother to re-track TrackedArray tracked = data - else: - # otherwise wrap data + elif isinstance(data, (np.ndarray, list, set, tuple)): + # wrap data if it is array-like tracked = tracked_array(data) - # apply our mutability setting + else: + try: + # will raise if this is not a hashable type + hash(data) + except BaseException: + raise ValueError("unhashable `{key}:{type(data)}`") + tracked = data - if hasattr(self, '_mutable'): + # apply our mutability setting + if hasattr(self, "_mutable"): # apply our mutability setting only if it was explicitly set tracked.mutable = self.mutable # store data @@ -678,7 +673,7 @@ def __len__(self): def update(self, values): if not isinstance(values, dict): - raise ValueError('Update only implemented for dicts') + raise ValueError("Update only implemented for dicts") for key, value in values.items(): self[key] = value @@ -693,8 +688,13 @@ def __hash__(self): """ # only hash values that aren't None # or if they are arrays require length greater than zero - return hash_fast(np.array( - [hash(v) for v in self.data.values() - if v is not None and - (not hasattr(v, '__len__') or len(v) > 0)], - dtype=np.int64).tobytes()) + return hash_fast( + np.array( + [ + hash(v) + for v in self.data.values() + if v is not None and (not hasattr(v, "__len__") or len(v) > 0) + ], + dtype=np.int64, + ).tobytes() + ) diff --git a/trimesh/voxel/base.py b/trimesh/voxel/base.py index 5d2f361c2..e4e6f10f5 100644 --- a/trimesh/voxel/base.py +++ b/trimesh/voxel/base.py @@ -12,39 +12,33 @@ from ..constants import log from ..exchange.binvox import export_binvox from ..parent import Geometry -from ..typed import NDArray, float64 from . import morphology, ops, transforms from .encoding import DenseEncoding, Encoding class VoxelGrid(Geometry): - def __init__(self, encoding, transform=None, metadata=None): - """ - Store 3D voxels. + """ + Store 3D voxels. 
+ """ - Parameters - -------------- - encoding - A numpy array of voxels, or an encoding object - """ + def __init__(self, encoding, transform=None, metadata=None): if transform is None: transform = np.eye(4) if isinstance(encoding, np.ndarray): encoding = DenseEncoding(encoding.astype(bool)) if encoding.dtype != bool: raise ValueError("encoding must have dtype bool") - self._data = caching.DataStore() - self._cache = caching.Cache(id_function=self._data.__hash__) - self._transform = transforms.Transform(transform, datastore=self._data) self.encoding = encoding - self.metadata = {} + self._transform = transforms.Transform(transform, datastore=self._data) + self._cache = caching.Cache(id_function=self._data.__hash__) + self.metadata = {} # update the mesh metadata with passed metadata if isinstance(metadata, dict): self.metadata.update(metadata) elif metadata is not None: - raise ValueError(f"metadata should be a dict or None, not {type(metadata)}") + raise ValueError("metadata should be a dict or None, got %s" % str(metadata)) def __hash__(self): """ @@ -81,7 +75,7 @@ def encoding(self, encoding): self._data["encoding"] = encoding @property - def transform(self) -> NDArray[float64]: + def transform(self): """4x4 homogeneous transformation matrix.""" return self._transform.matrix @@ -95,12 +89,6 @@ def translation(self): """Location of voxel at [0, 0, 0].""" return self._transform.translation - @property - def origin(self): - """Deprecated. Use `self.translation`.""" - # DEPRECATED. Use translation instead - return self.translation - @property def scale(self): """ @@ -204,8 +192,7 @@ def is_filled(self, point): point = np.asanyarray(point) indices = self.points_to_indices(point) in_range = np.logical_and( - np.all(indices < np.array(self.shape), axis=-1), - np.all(indices >= 0, axis=-1), + np.all(indices < np.array(self.shape), axis=-1), np.all(indices >= 0, axis=-1) ) is_filled = np.zeros_like(in_range) diff --git a/trimesh/voxel/creation.py b/trimesh/voxel/creation.py index 85ee37507..2acc8bc88 100644 --- a/trimesh/voxel/creation.py +++ b/trimesh/voxel/creation.py @@ -39,11 +39,7 @@ def voxelize_subdivide(mesh, pitch, max_iter=10, edge_factor=2.0): # get the same mesh sudivided so every edge is shorter # than a factor of our pitch v, f, idx = remesh.subdivide_to_size( - mesh.vertices, - mesh.faces, - max_edge=max_edge, - max_iter=max_iter, - return_index=True, + mesh.vertices, mesh.faces, max_edge=max_edge, max_iter=max_iter, return_index=True ) # convert the vertices to their voxel grid position diff --git a/trimesh/voxel/encoding.py b/trimesh/voxel/encoding.py index 1f21d6c0d..f350962f9 100644 --- a/trimesh/voxel/encoding.py +++ b/trimesh/voxel/encoding.py @@ -4,7 +4,7 @@ import numpy as np from .. import caching -from ..util import ABC, log +from ..util import ABC from . 
import runlength try: @@ -283,9 +283,7 @@ def __init__(self, indices, values, shape=None): raise ValueError("indices must be 2D, got shaped %s" % str(indices.shape)) if data["values"].shape != (indices.shape[0],): raise ValueError( - "values and indices shapes inconsistent: {} and {}".format( - data["values"], data["indices"] - ) + "values and indices shapes inconsistent: {} and {}".format(data["values"], data["indices"]) ) if shape is None: self._shape = tuple(data["indices"].max(axis=0) + 1) @@ -454,22 +452,6 @@ def shape(self): def dtype(self): return self._dtype - def crc(self): - log.warning( - "`geometry.crc()` is deprecated and will " - + "be removed in October 2023: replace " - + "with `geometry.__hash__()` or `hash(geometry)`" - ) - return self.__hash__() - - def hash(self): - log.warning( - "`geometry.hash()` is deprecated and will " - + "be removed in October 2023: replace " - + "with `geometry.__hash__()` or `hash(geometry)`" - ) - return self.__hash__() - def __hash__(self): """ Get the hash of the current transformation matrix. From a22222356dac5ee2333270bbf92133862883fe97 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 15:59:57 -0400 Subject: [PATCH 089/144] run black on trimesh-setup --- docker/trimesh-setup | 133 ++++++++++++++++++++++--------------------- 1 file changed, 67 insertions(+), 66 deletions(-) mode change 100644 => 100755 docker/trimesh-setup diff --git a/docker/trimesh-setup b/docker/trimesh-setup old mode 100644 new mode 100755 index 53a70c289..7646d27b3 --- a/docker/trimesh-setup +++ b/docker/trimesh-setup @@ -6,17 +6,17 @@ environment for `trimesh` in a Debian Docker image. It probably isn't useful for most people unless you are running this exact configuration. """ -import os -import sys +import argparse import json +import logging +import os import shutil +import subprocess +import sys import tarfile import tempfile -import logging -import argparse -import subprocess -from io import BytesIO from fnmatch import fnmatch +from io import BytesIO # define system packages for our debian docker image # someday possibly add this to the `pyproject.toml` config @@ -74,7 +74,7 @@ config_json = """ """ -log = logging.getLogger('trimesh') +log = logging.getLogger("trimesh") log.setLevel(logging.DEBUG) log.addHandler(logging.StreamHandler(sys.stdout)) @@ -94,20 +94,18 @@ def apt(packages): return # start with updating the sources - log.debug(subprocess.check_output( - 'apt-get update -qq'.split()).decode('utf-8')) + log.debug(subprocess.check_output("apt-get update -qq".split()).decode("utf-8")) # the install command - install = 'apt-get install -qq --no-install-recommends'.split() + install = "apt-get install -qq --no-install-recommends".split() # de-duplicate package list install.extend(set(packages)) # call the install command - log.debug(subprocess.check_output(install).decode('utf-8')) + log.debug(subprocess.check_output(install).decode("utf-8")) # delete any temporary files - subprocess.check_output( - 'rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*'.split()) + subprocess.check_output("rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*".split()) def argsort(items): @@ -125,8 +123,7 @@ def argsort(items): index : int Index such `items[index] == min(items)` """ - return [i for (v, i) in sorted( - (v, i) for (i, v) in enumerate(items))] + return [i for (v, i) in sorted((v, i) for (i, v) in enumerate(items))] def fetch(url, sha256): @@ -152,13 +149,13 @@ def fetch(url, sha256): data = urlopen(url).read() hashed = 
hashlib.sha256(data).hexdigest() if hashed != sha256: - log.error(f'`{hashed}` != `{sha256}`') - raise ValueError('sha256 hash does not match!') + log.error(f"`{hashed}` != `{sha256}`") + raise ValueError("sha256 hash does not match!") return data -def copy_to_path(file_path, prefix='~'): +def copy_to_path(file_path, prefix="~"): """ Copy an executable file onto `PATH`, typically one of the options in the current user's home directory. @@ -172,44 +169,50 @@ def copy_to_path(file_path, prefix='~'): typically `~` for `/home/{current_user}`. """ # get the full path of the requested file - source = os.path.abspath( - os.path.expanduser(file_path)) + source = os.path.abspath(os.path.expanduser(file_path)) # get the file name file_name = os.path.split(source)[-1] # make sure the source file is readable and not empty - with open(source, 'rb') as f: + with open(source, "rb") as f: file_data = f.read() # check for empty files if len(file_data) == 0: - raise ValueError('empty file: {}'.format(file_path)) + raise ValueError(f"empty file: {file_path}") # get all locations in PATH - candidates = [os.path.abspath(os.path.expanduser(i)) - for i in os.environ['PATH'].split(':')] + candidates = [ + os.path.abspath(os.path.expanduser(i)) for i in os.environ["PATH"].split(":") + ] # cull candidates that don't start with our prefix if prefix is not None: # expand shortcut for user's home directory prefix = os.path.abspath(os.path.expanduser(prefix)) # if we are the root user don't cull the available copy locations - if not prefix.endswith('root'): + if not prefix.endswith("root"): # cull non-prefixed path entries candidates = [c for c in candidates if c.startswith(prefix)] + # we want to encourage it to put stuff in the home directory + encourage = [os.path.expanduser("~"), ".local"] + + # rank the candidate paths + scores = [len(c) - sum(len(e) for e in encourage if e in c) for c in candidates] + # try writing to the shortest paths first - for index in argsort([len(c) for c in candidates]): + for index in argsort(scores): path = os.path.join(candidates[index], file_name) try: shutil.copy(source, path) - print('wrote `{}`'.format(path)) + print(f"wrote `{path}`") return path except BaseException: pass # none of our candidates worked - raise ValueError('unable to write to file') + raise ValueError("unable to write to file") def extract(tar, member, path, chmod): @@ -219,7 +222,7 @@ def extract(tar, member, path, chmod): if os.path.isdir(path): return data = tar.extractfile(member=member) - if not hasattr(data, 'read'): + if not hasattr(data, "read"): return data = data.read() if len(data) == 0: @@ -228,7 +231,7 @@ def extract(tar, member, path, chmod): # make sure root path exists os.makedirs(os.path.dirname(path), exist_ok=True) - with open(path, 'wb') as f: + with open(path, "wb") as f: f.write(data) if chmod is not None: @@ -236,13 +239,15 @@ def extract(tar, member, path, chmod): os.chmod(path, int(str(chmod), base=8)) -def handle_fetch(url, - sha256, - target, - chmod=None, - extract_skip=None, - extract_only=None, - strip_components=0): +def handle_fetch( + url, + sha256, + target, + chmod=None, + extract_skip=None, + extract_only=None, + strip_components=0, +): """ A macro to fetch a remote resource (usually an executable) and move it somewhere on the file system. @@ -267,14 +272,14 @@ def handle_fetch(url, in the archive, i.e. 
at `1`, `a/b/c` is extracted to `target/b/c` """ # get the raw bytes - log.debug(f'fetching: `{url}`') + log.debug(f"fetching: `{url}`") raw = fetch(url=url, sha256=sha256) if len(raw) == 0: - raise ValueError(f'{url} is empty!') + raise ValueError(f"{url} is empty!") # if we have an archive that tar supports - if url.endswith(('.tar.gz', '.tar.xz', 'tar.bz2')): + if url.endswith((".tar.gz", ".tar.xz", "tar.bz2")): # mode needs to know what type of compression mode = f'r:{url.split(".")[-1]}' # get the archive @@ -285,46 +290,45 @@ def handle_fetch(url, for member in tar.getmembers(): # final name after stripping components - name = '/'.join(member.name.split('/')[strip_components:]) + name = "/".join(member.name.split("/")[strip_components:]) # if any of the skip patterns match continue if any(fnmatch(name, p) for p in extract_skip): - log.debug(f'skipping: `{name}`') + log.debug(f"skipping: `{name}`") continue if extract_only is None: path = os.path.join(target, name) - log.debug(f'extracting: `{path}`') + log.debug(f"extracting: `{path}`") extract(tar=tar, member=member, path=path, chmod=chmod) else: - name = name.split('/')[-1] + name = name.split("/")[-1] if name == extract_only: - if target.lower() == '$path': + if target.lower() == "$path": with tempfile.TemporaryDirectory() as D: path = os.path.join(D, name) - log.debug(f'extracting `{path}`') - extract( - tar=tar, member=member, path=path, chmod=chmod) + log.debug(f"extracting `{path}`") + extract(tar=tar, member=member, path=path, chmod=chmod) copy_to_path(path) return path = os.path.join(target, name) - log.debug(f'extracting `{path}`') + log.debug(f"extracting `{path}`") extract(tar=tar, member=member, path=path, chmod=chmod) return else: # a single file - name = url.split('/')[-1].strip() - if target.lower() == '$path': + name = url.split("/")[-1].strip() + if target.lower() == "$path": with tempfile.TemporaryDirectory() as D: temp_path = os.path.join(D, name) - with open(temp_path, 'wb') as f: + with open(temp_path, "wb") as f: f.write(raw) # move the file somewhere on the path path = copy_to_path(temp_path) else: path = target - with open(path, 'wb') as f: + with open(path, "wb") as f: f.write(raw) # apply chmod if requested @@ -334,39 +338,36 @@ def handle_fetch(url, def load_config(): - """ - """ + """ """ return json.loads(config_json) -if __name__ == '__main__': - +if __name__ == "__main__": config = load_config() options = set() for v in config.values(): options.update(v.keys()) - parser = argparse.ArgumentParser( - description='Install system packages for trimesh.') + parser = argparse.ArgumentParser(description="Install system packages for trimesh.") parser.add_argument( - '--install', - type=str, - action='append', - help=f'Install metapackages: {options}') + "--install", type=str, action="append", help=f"Install metapackages: {options}" + ) args = parser.parse_args() # collect `apt-get install`-able package apt_select = [] - handlers = {'fetch': lambda x: handle_fetch(**x), - 'apt': lambda x: apt_select.extend(x)} + handlers = { + "fetch": lambda x: handle_fetch(**x), + "apt": lambda x: apt_select.extend(x), + } # allow comma delimeters and de-duplicate if args.install is None: parser.print_help() exit() else: - select = set(' '.join(args.install).replace(',', ' ').split()) + select = set(" ".join(args.install).replace(",", " ").split()) log.debug(f'installing metapackages: `{", ".join(select)}`') From 40b468987a05f9a8878155a2282c1ea99fe183ae Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 
2023 16:03:39 -0400 Subject: [PATCH 090/144] simplify arraylike --- trimesh/base.py | 2 +- trimesh/path/arc.py | 4 ++-- trimesh/typed.py | 30 ++++-------------------------- 3 files changed, 7 insertions(+), 29 deletions(-) diff --git a/trimesh/base.py b/trimesh/base.py index 5ff980828..25c3d93d5 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -2987,7 +2987,7 @@ def intersection( ) return result - def contains(self, points: ArrayLike[float64]) -> NDArray[bool]: + def contains(self, points: ArrayLike) -> NDArray[bool]: """ Given an array of points determine whether or not they are inside the mesh. This raises an error if called on a diff --git a/trimesh/path/arc.py b/trimesh/path/arc.py index 271d0e807..fff7c1a59 100644 --- a/trimesh/path/arc.py +++ b/trimesh/path/arc.py @@ -6,7 +6,7 @@ from ..constants import log from ..constants import res_path as res from ..constants import tol_path as tol -from ..typed import ArrayLike, FloatLike, NDArray, Optional, float64 +from ..typed import ArrayLike, NDArray, Optional, float64 # floating point zero _TOL_ZERO = 1e-12 @@ -36,7 +36,7 @@ def __getitem__(self, item): def arc_center( - points: ArrayLike[FloatLike], return_normal: bool = True, return_angle: bool = True + points: ArrayLike, return_normal: bool = True, return_angle: bool = True ) -> ArcInfo: """ Given three points on a 2D or 3D arc find the center, diff --git a/trimesh/typed.py b/trimesh/typed.py index 435b2049f..3226760c1 100644 --- a/trimesh/typed.py +++ b/trimesh/typed.py @@ -1,35 +1,13 @@ -from typing import List, Optional, Sequence, Tuple, Union - -import numpy as np +from typing import List, Optional, Sequence, Tuple # our default integer and floating point types from numpy import float64, int64 try: - from numpy.typing import NDArray + from numpy.typing import ArrayLike, NDArray except BaseException: NDArray = Sequence - -# for input arrays we want to say "list[int], ndarray[int64], etc" -# all the integer types -IntLike = Union[ - int, - np.int8, - np.int16, - np.int32, - int64, - np.intc, - np.intp, - np.uint8, - np.uint16, - np.uint32, - np.uint64, -] - -FloatLike = Union[float, np.float16, np.float32, float64, np.float_] -BoolLike = Union[bool, np.bool_] - -ArrayLike = Sequence + ArrayLike = Sequence -__all__ = ["NDArray", "ArrayLike", "Optional", "FloatLike", "IntLike", "BoolLike", "List", "Tuple"] +__all__ = ["NDArray", "ArrayLike", "Optional", "List", "Tuple", "float64", "int64"] From e95ccad2196eefb8c88df61cb4d17c54b4759fde Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 16:06:49 -0400 Subject: [PATCH 091/144] fix test_bounds --- trimesh/base.py | 3 +-- trimesh/caching.py | 3 ++- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/trimesh/base.py b/trimesh/base.py index 25c3d93d5..ef28adad3 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -2732,9 +2732,8 @@ def mass_properties(self) -> Dict: 'center_mass' : Center of mass location, in global coordinate system """ # if the density or center of mass was overridden they will be put into data - density = self._data.data.get("density", [None])[0] + density = self._data.data.get("density", None) center_mass = self._data.data.get("center_mass", None) - mass = triangles.mass_properties( triangles=self.triangles, crosses=self.triangles_cross, diff --git a/trimesh/caching.py b/trimesh/caching.py index 2a1c11484..c05f6d871 100644 --- a/trimesh/caching.py +++ b/trimesh/caching.py @@ -594,7 +594,8 @@ def mutable(self, value): is_mutable = bool(value) # apply the flag to any data stored for 
v in self.data.values(): - v.mutable = value + if isinstance(v, TrackedArray): + v.mutable = value # save the mutable setting self._mutable = is_mutable From 19a7301b2f439849e5194127e6617aa8342f0e39 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 16:14:30 -0400 Subject: [PATCH 092/144] update points --- trimesh/base.py | 38 +++++++------- trimesh/points.py | 128 ++++++++++++++++++++++++---------------------- 2 files changed, 86 insertions(+), 80 deletions(-) diff --git a/trimesh/base.py b/trimesh/base.py index ef28adad3..555198d02 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -307,7 +307,7 @@ def faces(self) -> NDArray[int64]: faces : (n, 3) int64 References for `self.vertices` for triangles. """ - return self._data.get("faces", np.empty(shape=(0, 3), dtype=np.int64)) + return self._data.get("faces", np.empty(shape=(0, 3), dtype=int64)) @faces.setter def faces(self, values: Union[List[List[int]], NDArray[int64]]): @@ -321,8 +321,8 @@ def faces(self, values: Union[List[List[int]], NDArray[int64]]): """ if values is None or len(values) == 0: return self._data.data.pop("faces", None) - if not (isinstance(values, np.ndarray) and values.dtype == np.int64): - values = np.asanyarray(values, dtype=np.int64) + if not (isinstance(values, np.ndarray) and values.dtype == int64): + values = np.asanyarray(values, dtype=int64) # automatically triangulate quad faces if len(values.shape) == 2 and values.shape[1] != 3: @@ -355,7 +355,7 @@ def face_normals(self): Returns ----------- - normals : (len(self.faces), 3) np.float64 + normals : (len(self.faces), 3) float64 Normal vectors of each face """ # check shape of cached normals @@ -368,7 +368,7 @@ def face_normals(self): # if we have no faces exit early if faces is None or len(faces) == 0: - return np.array([], dtype=np.int64).reshape((0, 3)) + return np.array([], dtype=int64).reshape((0, 3)) # if the shape of cached normals equals the shape of faces return if np.shape(cached) == np.shape(faces): @@ -389,7 +389,7 @@ def face_normals(self): return normals # make a padded list of normals for correct shape - padded = np.zeros((len(self.triangles), 3), dtype=np.float64) + padded = np.zeros((len(self.triangles), 3), dtype=float64) padded[valid] = normals # put calculated face normals into cache manually @@ -411,7 +411,7 @@ def face_normals(self, values): if values is None: return # make sure candidate face normals are C-contiguous float - values = np.asanyarray(values, order="C", dtype=np.float64) + values = np.asanyarray(values, order="C", dtype=float64) # face normals need to correspond to faces if len(values) == 0 or values.shape != self.faces.shape: log.debug("face_normals incorrect shape, ignoring!") @@ -454,7 +454,7 @@ def vertices(self): vertices : (n, 3) float Points in cartesian space referenced by self.faces """ - return self._data.get("vertices", np.empty(shape=(0, 3), dtype=np.float64)) + return self._data.get("vertices", np.empty(shape=(0, 3), dtype=float64)) @vertices.setter def vertices(self, values): @@ -466,7 +466,9 @@ def vertices(self, values): values : (n, 3) float Points in space """ - self._data["vertices"] = np.asanyarray(values, order="C", dtype=np.float64) + if values is None or len(values) == 0: + return self._data.data.pop("vertices", None) + self._data["vertices"] = np.asanyarray(values, order="C", dtype=float64) @caching.cache_decorator def vertex_normals(self): @@ -505,7 +507,7 @@ def vertex_normals(self, values: NDArray[float64]): Unit normal vectors for each vertex """ if values is not None: - 
values = np.asanyarray(values, order="C", dtype=np.float64) + values = np.asanyarray(values, order="C", dtype=float64) if values.shape == self.vertices.shape: # check to see if they assigned all zeros if values.ptp() < tol.merge: @@ -635,7 +637,7 @@ def center_mass(self, value): center_mass : (3, ) float Volumetric center of mass of the mesh. """ - value = np.array(value, dtype=np.float64) + value = np.array(value, dtype=float64) if value.shape != (3,): raise ValueError("shape must be (3,) float!") self._data["center_mass"] = value @@ -1220,7 +1222,7 @@ def update_vertices( # create the inverse mask if not passed if inverse is None: - inverse = np.zeros(len(self.vertices), dtype=np.int64) + inverse = np.zeros(len(self.vertices), dtype=int64) if mask.dtype.kind == "b": inverse[mask] = np.arange(mask.sum()) elif mask.dtype.kind == "i": @@ -1784,7 +1786,7 @@ def facets_area(self) -> NDArray[float64]: # use native python sum in tight loop as opposed to array.sum() # as in this case the lower function call overhead of # native sum provides roughly a 50% speedup - areas = np.array([sum(area_faces[i]) for i in self.facets], dtype=np.float64) + areas = np.array([sum(area_faces[i]) for i in self.facets], dtype=float64) return areas @caching.cache_decorator @@ -2367,12 +2369,12 @@ def unwrap(self, image=None): export = result.export(file_type="obj") uv_recon = np.array( [L[3:].split() for L in str.splitlines(export) if L.startswith("vt ")], - dtype=np.float64, + dtype=float64, ) assert np.allclose(uv_recon, uv) v_recon = np.array( [L[2:].split() for L in str.splitlines(export) if L.startswith("v ")], - dtype=np.float64, + dtype=float64, ) assert np.allclose(v_recon, self.vertices[vmap]) @@ -2430,7 +2432,7 @@ def remove_unreferenced_vertices(self) -> None: referenced = np.zeros(len(self.vertices), dtype=bool) referenced[self.faces] = True - inverse = np.zeros(len(self.vertices), dtype=np.int64) + inverse = np.zeros(len(self.vertices), dtype=int64) inverse[referenced] = np.arange(referenced.sum()) self.update_vertices(mask=referenced, inverse=inverse) @@ -2441,7 +2443,7 @@ def unmerge_vertices(self) -> None: three unique vertex indices and no faces are adjacent. """ # new faces are incrementing so every vertex is unique - faces = np.arange(len(self.faces) * 3, dtype=np.int64).reshape((-1, 3)) + faces = np.arange(len(self.faces) * 3, dtype=int64).reshape((-1, 3)) # use update_vertices to apply mask to # all properties that are per-vertex @@ -2465,7 +2467,7 @@ def apply_transform(self, matrix: NDArray[float64]) -> "Trimesh": Homogeneous transformation matrix """ # get c-order float64 matrix - matrix = np.asanyarray(matrix, order="C", dtype=np.float64) + matrix = np.asanyarray(matrix, order="C", dtype=float64) # only support homogeneous transformations if matrix.shape != (4, 4): diff --git a/trimesh/points.py b/trimesh/points.py index 76909ec13..b03fd01af 100644 --- a/trimesh/points.py +++ b/trimesh/points.py @@ -7,6 +7,7 @@ import copy import numpy as np +from numpy import float64 from . import caching, grouping, transformations, util from .constants import tol @@ -15,9 +16,7 @@ from .visual.color import VertexColor -def point_plane_distance(points, - plane_normal, - plane_origin=None): +def point_plane_distance(points, plane_normal, plane_origin=None): """ The minimum perpendicular distance of a point to a plane. 
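
For reference while reading the next hunk: the whole function reduces to
projecting the offset vectors onto the plane normal. A minimal sketch,
assuming a unit-length `plane_normal`; the final projection line is not
visible in this patch, so treat it as illustrative rather than the
library's exact code:

    import numpy as np

    def point_plane_distance(points, plane_normal, plane_origin=None):
        points = np.asanyarray(points, dtype=np.float64)
        # offset of every point from a point known to lie on the plane
        w = points if plane_origin is None else points - plane_origin
        # signed distance is the component of the offset along the normal
        return np.dot(w, plane_normal)
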
@@ -35,7 +34,7 @@ def point_plane_distance(points, distances : (n,) float Distance from point to plane """ - points = np.asanyarray(points, dtype=np.float64) + points = np.asanyarray(points, dtype=float64) if plane_origin is None: w = points else: @@ -83,7 +82,7 @@ def plane_fit(points): Unit normal vector of plane """ # make sure input is numpy array - points = np.asanyarray(points, dtype=np.float64) + points = np.asanyarray(points, dtype=float64) assert points.ndim == 2 or points.ndim == 3 # with only one point set, np.dot is faster if points.ndim == 2: @@ -99,17 +98,14 @@ def plane_fit(points): # points offset by the plane origin x = points - C[:, None, :] # create a (p, 3, 3) matrix - M = np.einsum('pnd, pnm->pdm', x, x) + M = np.einsum("pnd, pnm->pdm", x, x) # run SVD N = np.linalg.svd(M)[0][..., -1] # return the centroid(s) and normal(s) return C, N -def radial_sort(points, - origin, - normal, - start=None): +def radial_sort(points, origin, normal, start=None): """ Sorts a set of points radially (by angle) around an axis specified by origin and normal vector. @@ -141,23 +137,24 @@ def radial_sort(points, else: normal, start = util.unitize([normal, start]) if np.abs(1 - np.abs(np.dot(normal, start))) < tol.zero: - raise ValueError('start must not parallel with normal') + raise ValueError("start must not parallel with normal") axis0 = np.cross(start, normal) axis1 = np.cross(axis0, normal) vectors = points - origin # calculate the angles of the points on the axis - angles = np.arctan2(np.dot(vectors, axis0), - np.dot(vectors, axis1)) + angles = np.arctan2(np.dot(vectors, axis0), np.dot(vectors, axis1)) # return the points sorted by angle return points[angles.argsort()[::-1]] -def project_to_plane(points, - plane_normal, - plane_origin, - transform=None, - return_transform=False, - return_planar=True): +def project_to_plane( + points, + plane_normal, + plane_origin, + transform=None, + return_transform=False, + return_planar=True, +): """ Project (n, 3) points onto a plane. 
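
The body in the hunk below reduces to building a plane-to-XY transform and
moving the points into that frame. A standalone sketch, assuming
`plane_transform` lives in `trimesh.geometry` (true of current trimesh,
but not shown in this patch) and that the normal is unit length:

    import numpy as np
    from trimesh.geometry import plane_transform
    from trimesh.transformations import transform_points

    points = np.random.random((10, 3))
    # 4x4 transform taking the plane through the origin with a +Z normal
    # onto the XY frame; points already on the plane land at z == 0
    T = plane_transform([0, 0, 0], [0, 0, 1])
    # dropping the Z column mirrors the `return_planar=True` path below
    planar = transform_points(points, T)[:, :2]
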
@@ -178,13 +175,13 @@ def project_to_plane(points, """ if np.all(np.abs(plane_normal) < tol.zero): - raise NameError('Normal must be nonzero!') + raise NameError("Normal must be nonzero!") if transform is None: transform = plane_transform(plane_origin, plane_normal) transformed = transformations.transform_points(points, transform) - transformed = transformed[:, 0:(3 - int(return_planar))] + transformed = transformed[:, 0 : (3 - int(return_planar))] if return_transform: polygon_to_3D = np.linalg.inv(transform) @@ -215,7 +212,7 @@ def remove_close(points, radius): tree = cKDTree(points) # get the index of every pair of points closer than our radius - pairs = tree.query_pairs(radius, output_type='ndarray') + pairs = tree.query_pairs(radius, output_type="ndarray") # how often each vertex index appears in a pair # this is essentially a cheaply computed "vertex degree" @@ -267,7 +264,7 @@ def k_means(points, k, **kwargs): from scipy.cluster.vq import kmeans from scipy.spatial import cKDTree - points = np.asanyarray(points, dtype=np.float64) + points = np.asanyarray(points, dtype=float64) points_std = points.std(axis=0) points_std[points_std < tol.zero] = 1 whitened = points / points_std @@ -310,10 +307,10 @@ def tsp(points, start=0): The euclidean distance between points in traversal """ # points should be float - points = np.asanyarray(points, dtype=np.float64) + points = np.asanyarray(points, dtype=float64) if len(points.shape) != 2: - raise ValueError('points must be (n, dimension)!') + raise ValueError("points must be (n, dimension)!") # start should be an index start = int(start) @@ -326,7 +323,7 @@ def tsp(points, start=0): traversal = np.zeros(len(points), dtype=np.int64) - 1 traversal[0] = start # list of distances - distances = np.zeros(len(points) - 1, dtype=np.float64) + distances = np.zeros(len(points) - 1, dtype=float64) # a mask of indexes in order index_mask = np.arange(len(points), dtype=np.int64) @@ -343,8 +340,7 @@ def tsp(points, start=0): # do NlogN distance query # use dot instead of .sum(axis=1) or np.linalg.norm # as it is faster, also don't square root here - dist = np.dot((points[unvisited] - current) ** 2, - sum_ones) + dist = np.dot((points[unvisited] - current) ** 2, sum_ones) # minimum distance index min_index = dist.argmin() @@ -377,19 +373,19 @@ def plot_points(points, show=True): import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D # NOQA - points = np.asanyarray(points, dtype=np.float64) + points = np.asanyarray(points, dtype=float64) if len(points.shape) != 2: - raise ValueError('Points must be (n, 2|3)!') + raise ValueError("Points must be (n, 2|3)!") if points.shape[1] == 3: fig = plt.figure() - ax = fig.add_subplot(111, projection='3d') + ax = fig.add_subplot(111, projection="3d") ax.scatter(*points.T) elif points.shape[1] == 2: plt.scatter(*points.T) else: - raise ValueError(f'points not 2D/3D: {points.shape}') + raise ValueError(f"points not 2D/3D: {points.shape}") if show: plt.show() @@ -424,8 +420,8 @@ def __init__(self, vertices, colors=None, metadata=None, **kwargs): # load vertices self.vertices = vertices - if 'vertex_colors' in kwargs and colors is None: - colors = kwargs['vertex_colors'] + if "vertex_colors" in kwargs and colors is None: + colors = kwargs["vertex_colors"] # save visual data to vertex color object self.visual = VertexColor(colors=colors, obj=self) @@ -520,8 +516,7 @@ def merge_vertices(self): self.vertices = self.vertices[unique] # apply unique mask to colors - if (self.colors is not None and - len(self.colors) == 
len(inverse)): + if self.colors is not None and len(self.colors) == len(inverse): self.colors = self.colors[unique] def apply_transform(self, transform): @@ -534,8 +529,7 @@ def apply_transform(self, transform): transform : (4, 4) float Homogeneous transformation to apply to PointCloud """ - self.vertices = transformations.transform_points( - self.vertices, matrix=transform) + self.vertices = transformations.transform_points(self.vertices, matrix=transform) return self @property @@ -548,8 +542,7 @@ def bounds(self): bounds : (2, 3) float Minimum, Maximum verteex """ - return np.array([self.vertices.min(axis=0), - self.vertices.max(axis=0)]) + return np.array([self.vertices.min(axis=0), self.vertices.max(axis=0)]) @property def extents(self): @@ -585,18 +578,21 @@ def vertices(self): vertices : (n, 3) float Points in the PointCloud """ - return self._data['vertices'] + return self._data.get("vertices", np.empty(shape=(0, 3), dtype=float64)) @vertices.setter - def vertices(self, data): - if data is None: - self._data['vertices'] = None - else: - # we want to copy data for new object - data = np.array(data, dtype=np.float64, copy=True) - if not util.is_shape(data, (-1, 3)): - raise ValueError('Point clouds must be (n, 3)!') - self._data['vertices'] = data + def vertices(self, values): + """ + Assign vertex values to the point cloud. + + Parameters + -------------- + values : (n, 3) float + Points in space + """ + if values is None or len(values) == 0: + return self._data.data.pop("vertices", None) + self._data["vertices"] = np.asanyarray(values, order="C", dtype=float64) @property def colors(self): @@ -627,6 +623,7 @@ def kdtree(self): """ from scipy.spatial import cKDTree + tree = cKDTree(self.vertices.view(np.ndarray)) return tree @@ -641,6 +638,7 @@ def convex_hull(self): A watertight mesh of the hull of the points """ from . import convex + return convex.convex_hull(self.vertices) def scene(self): @@ -653,6 +651,7 @@ def scene(self): Scene object containing this PointCloud """ from .scene.scene import Scene + return Scene(self) def show(self, **kwargs): @@ -676,10 +675,8 @@ def export(self, file_obj=None, file_type=None, **kwargs): If file name is passed this is not required """ from .exchange.export import export_mesh - return export_mesh(self, - file_obj=file_obj, - file_type=file_type, - **kwargs) + + return export_mesh(self, file_obj=file_obj, file_type=file_type, **kwargs) def query(self, input_points, **kwargs): """ @@ -694,8 +691,8 @@ def query(self, input_points, **kwargs): Result of the query. 
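
    A usage sketch (hedged: `query` delegates to
    `trimesh.proximity.query_from_points` and builds `self.kdtree`,
    a `scipy.spatial.cKDTree`, so `scipy` must be installed):

        import numpy as np
        import trimesh

        cloud = trimesh.PointCloud(np.random.random((100, 3)))
        result = cloud.query(np.random.random((5, 3)))
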
""" from .proximity import query_from_points - return query_from_points( - self.vertices, input_points, self.kdtree, **kwargs) + + return query_from_points(self.vertices, input_points, self.kdtree, **kwargs) def __add__(self, other): if len(other.colors) == len(self.colors) == 0: @@ -703,10 +700,17 @@ def __add__(self, other): else: # preserve colors # if one point cloud has no color property use black - other_colors = [[0, 0, 0, 255]] * \ - len(other.vertices) if len(other.colors) == 0 else other.colors - self_colors = [[0, 0, 0, 255]] * \ - len(self.vertices) if len(self.colors) == 0 else self.colors + other_colors = ( + [[0, 0, 0, 255]] * len(other.vertices) + if len(other.colors) == 0 + else other.colors + ) + self_colors = ( + [[0, 0, 0, 255]] * len(self.vertices) + if len(self.colors) == 0 + else self.colors + ) colors = np.vstack((self_colors, other_colors)) - return PointCloud(vertices=np.vstack( - (self.vertices, other.vertices)), colors=colors) + return PointCloud( + vertices=np.vstack((self.vertices, other.vertices)), colors=colors + ) From 7c251e02bfc2f4625338622ee200214c0b1eb573 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 16:15:36 -0400 Subject: [PATCH 093/144] remove floatlike --- tests/test_typed.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_typed.py b/tests/test_typed.py index 677bb7869..bd50c835d 100644 --- a/tests/test_typed.py +++ b/tests/test_typed.py @@ -1,10 +1,10 @@ import numpy as np -from trimesh.typed import ArrayLike, FloatLike, NDArray, float64, int64 +from trimesh.typed import ArrayLike, NDArray, float64, int64 # see if we pass mypy -def _check(values: ArrayLike[FloatLike]) -> NDArray[int64]: +def _check(values: ArrayLike) -> NDArray[int64]: return (np.array(values, dtype=float64) * 100).astype(int64) def _run() -> NDArray[int64]: From 3efcbf159a1c6463454b1529d83a30e6a39f7f8c Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 16:51:19 -0400 Subject: [PATCH 094/144] try embed --- tests/test_gltf.py | 716 ++++++++++++++---------------- trimesh/exchange/gltf.py | 920 ++++++++++++++++++++------------------- trimesh/util.py | 2 +- 3 files changed, 797 insertions(+), 841 deletions(-) diff --git a/tests/test_gltf.py b/tests/test_gltf.py index 018e39f19..da3491fca 100644 --- a/tests/test_gltf.py +++ b/tests/test_gltf.py @@ -6,7 +6,7 @@ # Khronos' official file validator # can be installed with the helper script: # `trimesh/docker/builds/gltf_validator.bash` -_gltf_validator = g.trimesh.util.which('gltf_validator') +_gltf_validator = g.trimesh.util.which("gltf_validator") def validate_glb(data, name=None): @@ -26,36 +26,33 @@ def validate_glb(data, name=None): ValueError If Khronos validator reports errors. 
""" - # subprocess options not in old python - if g.PY_VER < (3, 7): - return if _gltf_validator is None: - g.log.warning('no gltf_validator!') + g.log.warning("no gltf_validator!") return - with g.tempfile.NamedTemporaryFile(suffix='.glb') as f: + with g.tempfile.NamedTemporaryFile(suffix=".glb") as f: f.write(data) f.flush() # run the khronos gltf-validator - report = g.subprocess.run( - [_gltf_validator, f.name, '-o'], - capture_output=True) + report = g.subprocess.run([_gltf_validator, f.name, "-o"], capture_output=True) # -o prints JSON to stdout - content = report.stdout.decode('utf-8') + content = report.stdout.decode("utf-8") returncode = report.returncode if returncode != 0: - g.log.error(f'failed on: `{name}`') - g.log.error(f'validator: `{content}`') - g.log.error(f'stderr: `{report.stderr}`') + from IPython import embed + + embed() + g.log.error(f"failed on: `{name}`") + g.log.error(f"validator: `{content}`") + g.log.error(f"stderr: `{report.stderr}`") raise ValueError("gltf_validator failed") class GLTFTest(g.unittest.TestCase): - def test_duck(self): - scene = g.get_mesh('Duck.glb', process=False) + scene = g.get_mesh("Duck.glb", process=False) # should have one mesh assert len(scene.geometry) == 1 @@ -64,31 +61,28 @@ def test_duck(self): geom = next(iter(scene.geometry.values())) # vertex normals should have been loaded - assert 'vertex_normals' in geom._cache.cache + assert "vertex_normals" in geom._cache.cache # should not be watertight assert not geom.is_volume # make sure export doesn't crash - export = scene.export(file_type='glb') - validate_glb(export) + export = scene.export(file_type="glb") + validate_glb(export, "Duck.glb") # check a roundtrip - reloaded = g.trimesh.load( - g.trimesh.util.wrap_as_stream(export), - file_type='glb') + reloaded = g.trimesh.load(g.trimesh.util.wrap_as_stream(export), file_type="glb") # make basic assertions g.scene_equal(scene, reloaded) # if we merge ugly it should now be watertight - geom.merge_vertices( - merge_tex=True, merge_norm=True) + geom.merge_vertices(merge_tex=True, merge_norm=True) assert geom.is_volume def test_strips(self): - a = g.get_mesh('mode5.gltf') + a = g.get_mesh("mode5.gltf") assert len(a.geometry) > 0 - b = g.get_mesh('mode5.gltf', merge_primitives=True) + b = g.get_mesh("mode5.gltf", merge_primitives=True) assert len(b.geometry) > 0 def test_buffer_dedupe(self): @@ -99,102 +93,87 @@ def test_buffer_dedupe(self): box_3.visual.face_colors = [0, 255, 0, 255] tm = g.trimesh.transformations.translation_matrix - scene.add_geometry( - box_1, 'box_1', - transform=tm((1, 1, 1))) - scene.add_geometry( - box_2, 'box_2', - transform=tm((-1, -1, -1))) - scene.add_geometry( - box_3, 'box_3', - transform=tm((-1, 20, -1))) - a = g.json.loads(scene.export( - file_type='gltf')['model.gltf'].decode('utf-8')) - assert len(a['buffers']) <= 3 + scene.add_geometry(box_1, "box_1", transform=tm((1, 1, 1))) + scene.add_geometry(box_2, "box_2", transform=tm((-1, -1, -1))) + scene.add_geometry(box_3, "box_3", transform=tm((-1, 20, -1))) + a = g.json.loads(scene.export(file_type="gltf")["model.gltf"].decode("utf-8")) + assert len(a["buffers"]) <= 3 def test_tex_export(self): # load textured PLY - mesh = g.get_mesh('fuze.ply') - assert hasattr(mesh.visual, 'uv') + mesh = g.get_mesh("fuze.ply") + assert hasattr(mesh.visual, "uv") # make sure export as GLB doesn't crash on scenes - export = mesh.scene().export(file_type='glb', unitize_normals=True) - validate_glb(export) + export = mesh.scene().export(file_type="glb", unitize_normals=True) + 
validate_glb(export, "fuze.ply") # make sure it works on meshes - export = mesh.export(file_type='glb', unitize_normals=True) - validate_glb(export) + export = mesh.export(file_type="glb", unitize_normals=True) + validate_glb(export, "fuze.ply") def test_cesium(self): # A GLTF with a multi- primitive mesh - s = g.get_mesh('CesiumMilkTruck.glb') + s = g.get_mesh("CesiumMilkTruck.glb") # should be one Trimesh object per GLTF "primitive" assert len(s.geometry) == 4 # every geometry displayed once, except wheels twice assert len(s.graph.nodes_geometry) == 5 # make sure export doesn't crash - export = s.export(file_type='glb') + export = s.export(file_type="glb") validate_glb(export) - reloaded = g.trimesh.load( - g.trimesh.util.wrap_as_stream(export), - file_type='glb') + reloaded = g.trimesh.load(g.trimesh.util.wrap_as_stream(export), file_type="glb") # make basic assertions g.scene_equal(s, reloaded) def test_alphamode(self): # A GLTF with combinations of AlphaMode and AlphaCutoff - s = g.get_mesh('AlphaBlendModeTest.glb') + s = g.get_mesh("AlphaBlendModeTest.glb") # should be 5 test geometries - assert len([geom for geom in - s.geometry if geom.startswith('Test')]) == 5 - assert s.geometry['TestCutoffDefaultMesh'].visual.material.alphaMode == 'MASK' - assert s.geometry['TestCutoff25Mesh'].visual.material.alphaMode == 'MASK' - assert s.geometry['TestCutoff25Mesh'].visual.material.alphaCutoff == 0.25 - assert s.geometry['TestCutoff75Mesh'].visual.material.alphaMode == 'MASK' - assert s.geometry['TestCutoff75Mesh'].visual.material.alphaCutoff == 0.75 - assert s.geometry['TestBlendMesh'].visual.material.alphaMode == 'BLEND' + assert len([geom for geom in s.geometry if geom.startswith("Test")]) == 5 + assert s.geometry["TestCutoffDefaultMesh"].visual.material.alphaMode == "MASK" + assert s.geometry["TestCutoff25Mesh"].visual.material.alphaMode == "MASK" + assert s.geometry["TestCutoff25Mesh"].visual.material.alphaCutoff == 0.25 + assert s.geometry["TestCutoff75Mesh"].visual.material.alphaMode == "MASK" + assert s.geometry["TestCutoff75Mesh"].visual.material.alphaCutoff == 0.75 + assert s.geometry["TestBlendMesh"].visual.material.alphaMode == "BLEND" # defaults OPAQUE - assert s.geometry['TestOpaqueMesh'].visual.material.alphaMode is None + assert s.geometry["TestOpaqueMesh"].visual.material.alphaMode is None - export = s.export(file_type='glb') + export = s.export(file_type="glb") validate_glb(export) # roundtrip it - rs = g.trimesh.load( - g.trimesh.util.wrap_as_stream(export), - file_type='glb') + rs = g.trimesh.load(g.trimesh.util.wrap_as_stream(export), file_type="glb") # make basic assertions g.scene_equal(s, rs) # make sure export keeps alpha modes # should be the same - assert len( - [geom for geom in rs.geometry if geom.startswith('Test')]) == 5 - assert rs.geometry['TestCutoffDefaultMesh'].visual.material.alphaMode == 'MASK' - assert rs.geometry['TestCutoff25Mesh'].visual.material.alphaMode == 'MASK' - assert rs.geometry['TestCutoff25Mesh'].visual.material.alphaCutoff == 0.25 - assert rs.geometry['TestCutoff75Mesh'].visual.material.alphaMode == 'MASK' - assert rs.geometry['TestCutoff75Mesh'].visual.material.alphaCutoff == 0.75 - assert rs.geometry['TestBlendMesh'].visual.material.alphaMode == 'BLEND' + assert len([geom for geom in rs.geometry if geom.startswith("Test")]) == 5 + assert rs.geometry["TestCutoffDefaultMesh"].visual.material.alphaMode == "MASK" + assert rs.geometry["TestCutoff25Mesh"].visual.material.alphaMode == "MASK" + assert 
rs.geometry["TestCutoff25Mesh"].visual.material.alphaCutoff == 0.25 + assert rs.geometry["TestCutoff75Mesh"].visual.material.alphaMode == "MASK" + assert rs.geometry["TestCutoff75Mesh"].visual.material.alphaCutoff == 0.75 + assert rs.geometry["TestBlendMesh"].visual.material.alphaMode == "BLEND" # defaults OPAQUE - assert rs.geometry['TestOpaqueMesh'].visual.material.alphaMode is None + assert rs.geometry["TestOpaqueMesh"].visual.material.alphaMode is None def test_units(self): - # Trimesh will store units as a GLTF extra if they # are defined so check that. - original = g.get_mesh('pins.glb') + original = g.get_mesh("pins.glb") # export it as a a GLB file - export = original.export(file_type='glb') + export = original.export(file_type="glb") validate_glb(export) - kwargs = g.trimesh.exchange.gltf.load_glb( - g.trimesh.util.wrap_as_stream(export)) + kwargs = g.trimesh.exchange.gltf.load_glb(g.trimesh.util.wrap_as_stream(export)) # roundtrip it reloaded = g.trimesh.exchange.load.load_kwargs(kwargs) # make basic assertions @@ -203,7 +182,7 @@ def test_units(self): # make assertions on original and reloaded for scene in [original, reloaded]: # units should be stored as an extra - assert scene.units == 'mm' + assert scene.units == "mm" # make sure we have two unique geometries assert len(scene.geometry) == 2 @@ -211,32 +190,27 @@ def test_units(self): assert len(scene.graph.nodes_geometry) == 7 # all meshes should be well constructed - assert all(m.is_volume for m in - scene.geometry.values()) + assert all(m.is_volume for m in scene.geometry.values()) # check unit conversions for fun extents = scene.extents.copy() - as_in = scene.convert_units('in') + as_in = scene.convert_units("in") # should all be exactly mm -> in conversion factor - assert g.np.allclose( - extents / as_in.extents, 25.4, atol=.001) + assert g.np.allclose(extents / as_in.extents, 25.4, atol=0.001) - m = g.get_mesh('testplate.glb') - assert m.units == 'meters' + m = g.get_mesh("testplate.glb") + assert m.units == "meters" def test_basic(self): # split a multibody mesh into a scene - scene = g.trimesh.scene.split_scene( - g.get_mesh('cycloidal.ply')) + scene = g.trimesh.scene.split_scene(g.get_mesh("cycloidal.ply")) # should be 117 geometries assert len(scene.geometry) >= 117 # a dict with {file name: str} - export = scene.export(file_type='gltf') + export = scene.export(file_type="gltf") # load from just resolver - r = g.trimesh.load(file_obj=None, - file_type='gltf', - resolver=export) + r = g.trimesh.load(file_obj=None, file_type="gltf", resolver=export) # will assert round trip is roughly equal g.scene_equal(r, scene) @@ -244,28 +218,26 @@ def test_basic(self): # try loading from a ZIP archive zipped = g.trimesh.util.compress(export) r = g.trimesh.load( - file_obj=g.trimesh.util.wrap_as_stream(zipped), - file_type='zip') + file_obj=g.trimesh.util.wrap_as_stream(zipped), file_type="zip" + ) # try loading from a file name # will require a file path resolver with g.TemporaryDirectory() as d: for file_name, data in export.items(): - with open(g.os.path.join(d, file_name), 'wb') as f: + with open(g.os.path.join(d, file_name), "wb") as f: f.write(data) # load from file path of header GLTF - rd = g.trimesh.load( - g.os.path.join(d, 'model.gltf')) + rd = g.trimesh.load(g.os.path.join(d, "model.gltf")) # will assert round trip is roughly equal g.scene_equal(rd, scene) def test_merge_buffers(self): # split a multibody mesh into a scene - scene = g.trimesh.scene.split_scene( - g.get_mesh('cycloidal.ply')) + scene = 
g.trimesh.scene.split_scene(g.get_mesh("cycloidal.ply")) # export a gltf with the merge_buffers option set to true - export = scene.export(file_type='gltf', merge_buffers=True) + export = scene.export(file_type="gltf", merge_buffers=True) # We should end up with a single .bin and scene.gltf assert len(export.keys()) == 2 @@ -273,28 +245,28 @@ def test_merge_buffers(self): # reload the export reloaded = g.trimesh.exchange.load.load_kwargs( g.trimesh.exchange.gltf.load_gltf( - file_obj=None, - resolver=g.trimesh.visual.resolvers.ZipResolver(export))) + file_obj=None, resolver=g.trimesh.visual.resolvers.ZipResolver(export) + ) + ) # check to make sure the geometry keys are the same assert set(reloaded.geometry.keys()) == set(scene.geometry.keys()) def test_merge_primitives(self): # test to see if the `merge_primitives` logic is working - a = g.get_mesh('CesiumMilkTruck.glb') + a = g.get_mesh("CesiumMilkTruck.glb") assert len(a.geometry) == 4 # should combine the multiple primitives into a single mesh - b = g.get_mesh( - 'CesiumMilkTruck.glb', merge_primitives=True) + b = g.get_mesh("CesiumMilkTruck.glb", merge_primitives=True) assert len(b.geometry) == 2 def test_specular_glossiness(self): - s = g.get_mesh('pyramid.zip') + s = g.get_mesh("pyramid.zip") assert len(s.geometry) > 0 - assert 'GLTF' in s.geometry + assert "GLTF" in s.geometry - mat = s.geometry['GLTF'].visual.material + mat = s.geometry["GLTF"].visual.material assert isinstance(mat, g.trimesh.visual.material.PBRMaterial) color = g.np.array(mat.baseColorTexture)[:, :, :3] @@ -309,9 +281,9 @@ def test_specular_glossiness(self): assert color.dtype == g.np.uint8 assert g.np.allclose(color, [255, 255, 255, 255]) - metallic_roughness = g.np.array( - mat.metallicRoughnessTexture, - dtype=g.np.float32) / 255.0 + metallic_roughness = ( + g.np.array(mat.metallicRoughnessTexture, dtype=g.np.float32) / 255.0 + ) assert metallic_roughness.shape[0] == 84 and metallic_roughness.shape[1] == 71 metallic = metallic_roughness[:, :, 0] @@ -332,11 +304,11 @@ def test_specular_glossiness(self): def test_write_dir(self): # try loading from a file name # will require a file path resolver - original = g.get_mesh('fuze.obj') + original = g.get_mesh("fuze.obj") assert isinstance(original, g.trimesh.Trimesh) s = original.scene() with g.TemporaryDirectory() as d: - path = g.os.path.join(d, 'heyy.gltf') + path = g.os.path.join(d, "heyy.gltf") s.export(file_obj=path) r = g.trimesh.load(path) assert isinstance(r, g.trimesh.Scene) @@ -346,72 +318,60 @@ def test_write_dir(self): def test_merge_primitives_materials(self): # test to see if the `merge_primitives` logic is working - a = g.get_mesh('rgb_cube_with_primitives.gltf', - merge_primitives=True) - assert len(a.geometry['Cube'].visual.material) == 3 + a = g.get_mesh("rgb_cube_with_primitives.gltf", merge_primitives=True) + assert len(a.geometry["Cube"].visual.material) == 3 # what the face materials should be - truth = [0, 0, 0, 0, 1, 1, - 1, 1, 2, 2, 2, 2] - assert g.np.allclose( - a.geometry['Cube'].visual.face_materials, - truth) + truth = [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2] + assert g.np.allclose(a.geometry["Cube"].visual.face_materials, truth) # make sure copying did the things correctly c = a.copy() - assert g.np.allclose( - c.geometry['Cube'].visual.face_materials, - truth) + assert g.np.allclose(c.geometry["Cube"].visual.face_materials, truth) def test_merge_primitives_materials_roundtrip(self): # test to see if gltf loaded with `merge_primitives` # and then exported back # to gltf, produces a valid 
gltf. - a = g.get_mesh('rgb_cube_with_primitives.gltf', - merge_primitives=True) - result = a.export(file_type='gltf', merge_buffers=True) + a = g.get_mesh("rgb_cube_with_primitives.gltf", merge_primitives=True) + result = a.export(file_type="gltf", merge_buffers=True) with g.TemporaryDirectory() as d: for file_name, data in result.items(): - with open(g.os.path.join(d, file_name), 'wb') as f: + with open(g.os.path.join(d, file_name), "wb") as f: f.write(data) - rd = g.trimesh.load( - g.os.path.join(d, 'model.gltf'), merge_primitives=True) + rd = g.trimesh.load(g.os.path.join(d, "model.gltf"), merge_primitives=True) assert isinstance(rd, g.trimesh.Scene) # will assert round trip is roughly equal # TODO : restore # g.scene_equal(rd, a) def test_optional_camera(self): - gltf_cameras_key = 'cameras' + gltf_cameras_key = "cameras" # if there's no camera in the scene, then it shouldn't be added to the # gltf box = g.trimesh.creation.box([1, 1, 1]) scene = g.trimesh.Scene(box) - export = scene.export(file_type='gltf') - assert gltf_cameras_key not in g.json.loads( - export['model.gltf'].decode('utf8')) + export = scene.export(file_type="gltf") + assert gltf_cameras_key not in g.json.loads(export["model.gltf"].decode("utf8")) # `scene.camera` creates a camera if it does not exist. # once in the scene, it should be added to the gltf. box = g.trimesh.creation.box([1, 1, 1]) scene = g.trimesh.Scene(box) scene.set_camera() - export = scene.export(file_type='gltf') - assert gltf_cameras_key in g.json.loads( - export['model.gltf'].decode('utf8')) + export = scene.export(file_type="gltf") + assert gltf_cameras_key in g.json.loads(export["model.gltf"].decode("utf8")) def test_gltf_pole(self): - scene = g.get_mesh('simple_pole.glb') + scene = g.get_mesh("simple_pole.glb") # should have multiple primitives assert len(scene.geometry) == 11 - export = scene.export(file_type='glb') + export = scene.export(file_type="glb") validate_glb(export) # check a roundtrip - reloaded = g.trimesh.load( - g.trimesh.util.wrap_as_stream(export), - file_type='glb') + reloaded = g.trimesh.load(g.trimesh.util.wrap_as_stream(export), file_type="glb") # make basic assertions g.scene_equal(scene, reloaded) @@ -423,26 +383,22 @@ def test_material_primary_colors(self): scene = g.trimesh.Scene([sphere]) def to_integer(args): - args['materials'][0]['pbrMetallicRoughness']['baseColorFactor'] = [ - 1, 0, 0, 1] + args["materials"][0]["pbrMetallicRoughness"]["baseColorFactor"] = [1, 0, 0, 1] - export = scene.export(file_type='glb', tree_postprocessor=to_integer) + export = scene.export(file_type="glb", tree_postprocessor=to_integer) validate_glb(export) reloaded = g.trimesh.load( - file_obj=g.trimesh.util.wrap_as_stream(export), - file_type='glb') + file_obj=g.trimesh.util.wrap_as_stream(export), file_type="glb" + ) assert len(reloaded.geometry) == 1 # get meshes back sphere_b = list(reloaded.geometry.values())[0] - assert ( - sphere_b.visual.material.baseColorFactor == ( - 255, 0, 0, 255)).all() + assert (sphere_b.visual.material.baseColorFactor == (255, 0, 0, 255)).all() def test_material_hash(self): - # load mesh twice independently - a = g.get_mesh('fuze.obj') - b = g.get_mesh('fuze.obj') + a = g.get_mesh("fuze.obj") + b = g.get_mesh("fuze.obj") # move one of the meshes away from the other a.apply_translation([a.scale, 0, 0]) @@ -454,19 +410,22 @@ def test_material_hash(self): # create a scene with two meshes scene = g.trimesh.Scene([a, b]) # get the exported GLTF header of a scene with both meshes - header = 
g.json.loads(scene.export( - file_type='gltf', unitize_normals=True)['model.gltf'].decode('utf-8')) + header = g.json.loads( + scene.export(file_type="gltf", unitize_normals=True)["model.gltf"].decode( + "utf-8" + ) + ) # header should contain exactly one material - assert len(header['materials']) == 1 + assert len(header["materials"]) == 1 # both meshes should be contained in the export - assert len(header['meshes']) == 2 + assert len(header["meshes"]) == 2 # get a reloaded version - export = scene.export(file_type='glb', unitize_normals=True) + export = scene.export(file_type="glb", unitize_normals=True) validate_glb(export) reloaded = g.trimesh.load( - file_obj=g.trimesh.util.wrap_as_stream(export), - file_type='glb') + file_obj=g.trimesh.util.wrap_as_stream(export), file_type="glb" + ) # meshes should have survived assert len(reloaded.geometry) == 2 @@ -474,8 +433,7 @@ def test_material_hash(self): ar, br = reloaded.geometry.values() # should have been loaded as a PBR material - assert isinstance(ar.visual.material, - g.trimesh.visual.material.PBRMaterial) + assert isinstance(ar.visual.material, g.trimesh.visual.material.PBRMaterial) # materials should have the same memory location assert id(ar.visual.material) == id(br.visual.material) @@ -491,144 +449,138 @@ def test_node_name(self): # an export-import cycle. # a scene - s = g.get_mesh('cycloidal.3DXML') + s = g.get_mesh("cycloidal.3DXML") # export as GLB then re-load - export = s.export(file_type='glb') + export = s.export(file_type="glb") validate_glb(export) - r = g.trimesh.load( - g.trimesh.util.wrap_as_stream(export), - file_type='glb') + r = g.trimesh.load(g.trimesh.util.wrap_as_stream(export), file_type="glb") # make sure we have the same geometries before and after assert set(s.geometry.keys()) == set(r.geometry.keys()) # make sure the node names are the same before and after - assert (set(s.graph.nodes_geometry) == - set(r.graph.nodes_geometry)) + assert set(s.graph.nodes_geometry) == set(r.graph.nodes_geometry) def test_nested_scale(self): # nested transforms with scale - s = g.get_mesh('nested.glb') + s = g.get_mesh("nested.glb") assert len(s.graph.nodes_geometry) == 3 assert g.np.allclose( - [[-1.16701, -2.3366, -0.26938], - [0.26938, 1., 0.26938]], - s.bounds, atol=1e-4) + [[-1.16701, -2.3366, -0.26938], [0.26938, 1.0, 0.26938]], s.bounds, atol=1e-4 + ) def test_schema(self): # get a copy of the GLTF schema and do simple checks s = g.trimesh.exchange.gltf.get_schema() # make sure it has at least the keys we expect - assert set(s['properties'].keys()).issuperset( - {'accessors', - 'animations', - 'asset', - 'buffers', - 'bufferViews', - 'cameras', - 'images', - 'materials', - 'meshes', - 'nodes', - 'samplers', - 'scene', - 'scenes', - 'skins', - 'textures', - 'extensions', - 'extras'}) + assert set(s["properties"].keys()).issuperset( + { + "accessors", + "animations", + "asset", + "buffers", + "bufferViews", + "cameras", + "images", + "materials", + "meshes", + "nodes", + "samplers", + "scene", + "scenes", + "skins", + "textures", + "extensions", + "extras", + } + ) # lightly check to see that no references exist - assert '$ref' not in g.json.dumps(s) + assert "$ref" not in g.json.dumps(s) def test_export_custom_attributes(self): # Write and read custom vertex attributes to gltf sphere = g.trimesh.primitives.Sphere() v_count, _ = sphere.vertices.shape - sphere.vertex_attributes[ - '_CustomFloat32Scalar'] = g.np.random.rand( - v_count, 1).astype(g.np.float32) - sphere.vertex_attributes[ - '_CustomFloat32Vec3'] = 
g.np.random.rand( - v_count, 3).astype(g.np.float32) - sphere.vertex_attributes[ - '_CustomFloat32Mat4'] = g.np.random.rand( - v_count, 4, 4).astype(g.np.float32) + sphere.vertex_attributes["_CustomFloat32Scalar"] = g.np.random.rand( + v_count, 1 + ).astype(g.np.float32) + sphere.vertex_attributes["_CustomFloat32Vec3"] = g.np.random.rand( + v_count, 3 + ).astype(g.np.float32) + sphere.vertex_attributes["_CustomFloat32Mat4"] = g.np.random.rand( + v_count, 4, 4 + ).astype(g.np.float32) # export as GLB bytes - export = sphere.export(file_type='glb') + export = sphere.export(file_type="glb") # this should validate just fine validate_glb(export) # uint32 is slightly off-label and may cause # validators to fail but if you're a bad larry who # doesn't follow the rules it should be fine - sphere.vertex_attributes[ - '_CustomUInt32Scalar'] = g.np.random.randint( - 0, 1000, size=(v_count, 1)).astype(g.np.uint32) + sphere.vertex_attributes["_CustomUInt32Scalar"] = g.np.random.randint( + 0, 1000, size=(v_count, 1) + ).astype(g.np.uint32) # when you add a uint16/int16 the gltf-validator # complains about the 4-byte boundaries even though # all their lengths and offsets mod 4 are zero # not sure if that's a validator bug or what - sphere.vertex_attributes[ - '_CustomUInt16Scalar'] = g.np.random.randint( - 0, 1000, size=(v_count, 1)).astype(g.np.uint16) - sphere.vertex_attributes[ - '_CustomInt16Scalar'] = g.np.random.randint( - 0, 1000, size=(v_count, 1)).astype(g.np.int16) + sphere.vertex_attributes["_CustomUInt16Scalar"] = g.np.random.randint( + 0, 1000, size=(v_count, 1) + ).astype(g.np.uint16) + sphere.vertex_attributes["_CustomInt16Scalar"] = g.np.random.randint( + 0, 1000, size=(v_count, 1) + ).astype(g.np.int16) # export as GLB then re-load - export = sphere.export(file_type='glb') + export = sphere.export(file_type="glb") - r = g.trimesh.load( - g.trimesh.util.wrap_as_stream(export), - file_type='glb') + r = g.trimesh.load(g.trimesh.util.wrap_as_stream(export), file_type="glb") for _, val in r.geometry.items(): - assert set( - val.vertex_attributes.keys()) == set( - sphere.vertex_attributes.keys()) + assert set(val.vertex_attributes.keys()) == set( + sphere.vertex_attributes.keys() + ) for key in val.vertex_attributes: is_same = g.np.array_equal( - val.vertex_attributes[key], - sphere.vertex_attributes[key]) + val.vertex_attributes[key], sphere.vertex_attributes[key] + ) assert is_same is True def test_extras(self): # if GLTF extras are defined, make sure they survive a round trip - s = g.get_mesh('cycloidal.3DXML') + s = g.get_mesh("cycloidal.3DXML") - scene_extensions = {'mesh_ext': {'ext_data': 1.23}} + scene_extensions = {"mesh_ext": {"ext_data": 1.23}} # some dummy data dummy = { - 'who': 'likes cheese', - 'potatoes': 25, - 'gtlf_extensions': scene_extensions} + "who": "likes cheese", + "potatoes": 25, + "gtlf_extensions": scene_extensions, + } # export as GLB with extras passed to the exporter then re-load s.metadata = dummy - export = s.export(file_type='glb') + export = s.export(file_type="glb") validate_glb(export) - r = g.trimesh.load( - g.trimesh.util.wrap_as_stream(export), - file_type='glb') + r = g.trimesh.load(g.trimesh.util.wrap_as_stream(export), file_type="glb") # make sure extras survived a round trip - assert all(r.metadata[k] == v - for k, v in dummy.items()) + assert all(r.metadata[k] == v for k, v in dummy.items()) def test_extras_nodes(self): - - mesh_extensions = {'mesh_ext': {'ext_data': 1.23}} + mesh_extensions = {"mesh_ext": {"ext_data": 1.23}} test_metadata = { - 
'test_str': 'test_value', - 'test_int': 1, - 'test_float': 0.123456789, - 'test_bool': True, - 'test_array': [1, 2, 3], - 'test_dict': {'a': 1, 'b': 2}, - 'gltf_extensions': mesh_extensions + "test_str": "test_value", + "test_int": 1, + "test_float": 0.123456789, + "test_bool": True, + "test_array": [1, 2, 3], + "test_dict": {"a": 1, "b": 2}, + "gltf_extensions": mesh_extensions, } sphere1 = g.trimesh.primitives.Sphere(radius=1.0) @@ -645,66 +597,64 @@ def test_extras_nodes(self): node_name="Sphere1", geom_name="Geom Sphere1", transform=tf1, - metadata={'field': 'extra_data1'}) - node_extensions = {'mesh_ext': {'ext_data': 1.23}} - sphere2_metadata = { - 'field': 'extra_data2', - 'gltf_extensions': node_extensions} - s.add_geometry(sphere2, - node_name="Sphere2", - geom_name="Geom Sphere2", - parent_node_name="Sphere1", - transform=tf2, - metadata=sphere2_metadata) + metadata={"field": "extra_data1"}, + ) + node_extensions = {"mesh_ext": {"ext_data": 1.23}} + sphere2_metadata = {"field": "extra_data2", "gltf_extensions": node_extensions} + s.add_geometry( + sphere2, + node_name="Sphere2", + geom_name="Geom Sphere2", + parent_node_name="Sphere1", + transform=tf2, + metadata=sphere2_metadata, + ) # Test extras appear in the exported model nodes files = s.export(None, "gltf") gltf_data = files["model.gltf"] - assert 'test_value' in gltf_data.decode('utf8') + assert "test_value" in gltf_data.decode("utf8") # Check node extras survive a round trip - export = s.export(file_type='glb') + export = s.export(file_type="glb") validate_glb(export) - r = g.trimesh.load( - g.trimesh.util.wrap_as_stream(export), - file_type='glb') + r = g.trimesh.load(g.trimesh.util.wrap_as_stream(export), file_type="glb") files = r.export(None, "gltf") gltf_data = files["model.gltf"] # Check that the mesh and node metadata/extras survived - assert 'test_value' in gltf_data.decode('utf8') - assert 'extra_data1' in gltf_data.decode('utf8') + assert "test_value" in gltf_data.decode("utf8") + assert "extra_data1" in gltf_data.decode("utf8") # Check that the extensions were removed from the metadata; # they should be saved as 'extensions' in the gltf file - assert 'gltf_extensions' not in gltf_data.decode('utf8') + assert "gltf_extensions" not in gltf_data.decode("utf8") # Check that the node transforms and metadata/extras survived edge = r.graph.transforms.edge_data[("world", "Sphere1")] - assert g.np.allclose(edge['matrix'], tf1) - assert edge['metadata']['field'] == 'extra_data1' + assert g.np.allclose(edge["matrix"], tf1) + assert edge["metadata"]["field"] == "extra_data1" edge = r.graph.transforms.edge_data[("Sphere1", "Sphere2")] - assert g.np.allclose(edge['matrix'], tf2) - assert edge['metadata']['field'] == 'extra_data2' + assert g.np.allclose(edge["matrix"], tf2) + assert edge["metadata"]["field"] == "extra_data2" # Check that the node's extensions survived - assert edge['metadata']['gltf_extensions'] == node_extensions + assert edge["metadata"]["gltf_extensions"] == node_extensions # Check that the mesh extensions survived for mesh in r.geometry.values(): - assert mesh.metadata['gltf_extensions'] == mesh_extensions + assert mesh.metadata["gltf_extensions"] == mesh_extensions # all geometry should be the same assert set(r.geometry.keys()) == set(s.geometry.keys()) for mesh in r.geometry.values(): # metadata should have all survived - assert all(mesh.metadata[k] == v - for k, v in test_metadata.items()) + assert all(mesh.metadata[k] == v for k, v in test_metadata.items()) def test_read_scene_extras(self): # loads a 
glb with scene extras - scene = g.get_mesh('monkey.glb', process=False) + scene = g.get_mesh("monkey.glb", process=False) # expected data - check = {'name': 'monkey', 'age': 32, 'height': 0.987} + check = {"name": "monkey", "age": 32, "height": 0.987} meta = scene.metadata for key in check: @@ -714,21 +664,29 @@ def test_read_scene_extras(self): def test_load_empty_nodes(self): # loads a glb with no meshes - scene = g.get_mesh('empty_nodes.glb', process=False) + scene = g.get_mesh("empty_nodes.glb", process=False) # expected data - check = {"parent": [[1.0, 0.0, 0.0, 0.0], - [0.0, 1.0, 0.0, 0.0], - [0.0, 0.0, 1.0, 0.0], - [0.0, 0.0, 0.0, 1.0]], - "children_1": [[1.0, 0.0, 0.0, -5.0], - [0.0, 1.0, 0.0, 5.0], - [0.0, 0.0, 1.0, 0.0], - [0.0, 0.0, 0.0, 1.0]], - "children_2": [[1.0, 0.0, 0.0, 5.0], - [0.0, 1.0, 0.0, 5.0], - [0.0, 0.0, 1.0, 0.0], - [0.0, 0.0, 0.0, 1.0]]} + check = { + "parent": [ + [1.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + ], + "children_1": [ + [1.0, 0.0, 0.0, -5.0], + [0.0, 1.0, 0.0, 5.0], + [0.0, 0.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + ], + "children_2": [ + [1.0, 0.0, 0.0, 5.0], + [0.0, 1.0, 0.0, 5.0], + [0.0, 0.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + ], + } # get the scene nodes objs = scene.graph.to_flattened() @@ -741,7 +699,7 @@ def test_load_empty_nodes(self): assert objs[key]["transform"] == check[key] def test_same_name(self): - s = g.get_mesh('TestScene.gltf') + s = g.get_mesh("TestScene.gltf") # hardcode correct bounds to check against bounds = s.dump(concatenate=True).bounds @@ -752,118 +710,105 @@ def test_same_name(self): assert g.np.allclose(s.bounds, bounds, atol=1e-3) # if merged should have combined the icosahedrons - s = g.get_mesh('TestScene.gltf', merge_primitives=True) + s = g.get_mesh("TestScene.gltf", merge_primitives=True) assert len(s.graph.nodes_geometry) == 7 assert len(s.geometry) == 6 assert g.np.allclose(s.bounds, bounds, atol=1e-3) def test_vertex_colors(self): # get a mesh with face colors - m = g.get_mesh('machinist.XAML') + m = g.get_mesh("machinist.XAML") # export as GLB then re-import - export = m.export(file_type='glb') + export = m.export(file_type="glb") validate_glb(export) - r = next(iter( - g.trimesh.load(g.trimesh.util.wrap_as_stream( - export), - file_type='glb').geometry.values())) + r = next( + iter( + g.trimesh.load( + g.trimesh.util.wrap_as_stream(export), file_type="glb" + ).geometry.values() + ) + ) # original mesh should have vertex colors - assert m.visual.kind == 'face' + assert m.visual.kind == "face" assert m.visual.vertex_colors.ptp(axis=0).ptp() > 0 # vertex colors should have survived import-export - assert g.np.allclose(m.visual.vertex_colors, - r.visual.vertex_colors) + assert g.np.allclose(m.visual.vertex_colors, r.visual.vertex_colors) def test_vertex_attrib(self): # test concatenation with texture - m = g.get_mesh('fuze.obj') + m = g.get_mesh("fuze.obj") - colors = (g.random( - (len(m.vertices), 4)) * 255).astype(g.np.uint8) + colors = (g.random((len(m.vertices), 4)) * 255).astype(g.np.uint8) # set the color vertex attribute - m.visual.vertex_attributes['color'] = colors - export = m.export(file_type='glb', unitize_normals=True) + m.visual.vertex_attributes["color"] = colors + export = m.export(file_type="glb", unitize_normals=True) validate_glb(export) - r = next(iter( - g.trimesh.load( - g.trimesh.util.wrap_as_stream(export), - file_type='glb').geometry.values())) + r = next( + iter( + g.trimesh.load( + g.trimesh.util.wrap_as_stream(export), file_type="glb" 
+ ).geometry.values() + ) + ) # make sure the color vertex attributes survived the roundtrip - assert g.np.allclose( - r.visual.vertex_attributes['color'], colors) + assert g.np.allclose(r.visual.vertex_attributes["color"], colors) def test_export_postprocess(self): scene = g.trimesh.Scene() sphere = g.trimesh.primitives.Sphere() - sphere.visual.material = g.trimesh.visual.material.PBRMaterial( - name='unlit_test') + sphere.visual.material = g.trimesh.visual.material.PBRMaterial(name="unlit_test") scene.add_geometry(sphere) def add_unlit(gltf_tree): - for material_dict in gltf_tree['materials']: - if 'unlit' in material_dict.get('name', '').lower(): - material_dict["extensions"] = { - "KHR_materials_unlit": {} - } + for material_dict in gltf_tree["materials"]: + if "unlit" in material_dict.get("name", "").lower(): + material_dict["extensions"] = {"KHR_materials_unlit": {}} gltf_tree["extensionsUsed"] = ["KHR_materials_unlit"] gltf_1 = g.trimesh.exchange.gltf.export_gltf(scene) - gltf_2 = g.trimesh.exchange.gltf.export_gltf( - scene, tree_postprocessor=add_unlit) + gltf_2 = g.trimesh.exchange.gltf.export_gltf(scene, tree_postprocessor=add_unlit) def extract_materials(gltf_files): - return g.json.loads(gltf_files['model.gltf'].decode('utf8'))[ - 'materials'] + return g.json.loads(gltf_files["model.gltf"].decode("utf8"))["materials"] assert "extensions" not in extract_materials(gltf_1)[-1] assert "extensions" in extract_materials(gltf_2)[-1] def test_primitive_geometry_meta(self): # Model with primitives - s = g.get_mesh('CesiumMilkTruck.glb') + s = g.get_mesh("CesiumMilkTruck.glb") # check to see if names are somewhat sane assert set(s.geometry.keys()) == { - 'Cesium_Milk_Truck', - 'Cesium_Milk_Truck_1', - 'Cesium_Milk_Truck_2', - 'Wheels'} + "Cesium_Milk_Truck", + "Cesium_Milk_Truck_1", + "Cesium_Milk_Truck_2", + "Wheels", + } # Assert that primitive geometries are marked as such - assert s.geometry['Cesium_Milk_Truck'].metadata[ - 'from_gltf_primitive'] - assert s.geometry['Cesium_Milk_Truck_1'].metadata[ - 'from_gltf_primitive'] - assert s.geometry['Cesium_Milk_Truck_2'].metadata[ - 'from_gltf_primitive'] + assert s.geometry["Cesium_Milk_Truck"].metadata["from_gltf_primitive"] + assert s.geometry["Cesium_Milk_Truck_1"].metadata["from_gltf_primitive"] + assert s.geometry["Cesium_Milk_Truck_2"].metadata["from_gltf_primitive"] # Assert that geometries that are not primitives # are not marked as such - assert not s.geometry['Wheels'].metadata[ - 'from_gltf_primitive'] + assert not s.geometry["Wheels"].metadata["from_gltf_primitive"] # make sure the flags survive being merged - m = g.get_mesh('CesiumMilkTruck.glb', - merge_primitives=True) + m = g.get_mesh("CesiumMilkTruck.glb", merge_primitives=True) # names should be non-insane - assert set(m.geometry.keys()) == { - 'Cesium_Milk_Truck', 'Wheels'} - assert not s.geometry['Wheels'].metadata[ - 'from_gltf_primitive'] - assert s.geometry['Cesium_Milk_Truck'].metadata[ - 'from_gltf_primitive'] + assert set(m.geometry.keys()) == {"Cesium_Milk_Truck", "Wheels"} + assert not s.geometry["Wheels"].metadata["from_gltf_primitive"] + assert s.geometry["Cesium_Milk_Truck"].metadata["from_gltf_primitive"] def test_points(self): # test a simple pointcloud export-import cycle points = g.np.arange(30).reshape((-1, 3)) - export = g.trimesh.Scene( - g.trimesh.PointCloud(points)).export(file_type='glb') + export = g.trimesh.Scene(g.trimesh.PointCloud(points)).export(file_type="glb") validate_glb(export) - reloaded = g.trimesh.load( - 
g.trimesh.util.wrap_as_stream(export), - file_type='glb') + reloaded = g.trimesh.load(g.trimesh.util.wrap_as_stream(export), file_type="glb") # make sure points survived export and reload - assert g.np.allclose(next(iter( - reloaded.geometry.values())).vertices, points) + assert g.np.allclose(next(iter(reloaded.geometry.values())).vertices, points) def test_bulk(self): # Try exporting every loadable model to GLTF and checking @@ -873,7 +818,7 @@ def test_bulk(self): assert g.trimesh.tol.strict # check mesh, path, pointcloud exports - for root in [g.dir_models, g.os.path.join(g.dir_models, '2D')]: + for root in [g.dir_models, g.os.path.join(g.dir_models, "2D")]: for fn in g.os.listdir(root): path_in = g.os.path.join(root, fn) try: @@ -886,31 +831,30 @@ def test_bulk(self): # voxels don't have an export to gltf mode if isinstance(geom, g.trimesh.voxel.VoxelGrid): try: - geom.export(file_type='glb') + geom.export(file_type="glb") except ValueError: # should have raised so all good continue - raise ValueError( - 'voxel was allowed to export wrong GLB!') - if hasattr(geom, 'vertices') and len(geom.vertices) == 0: + raise ValueError("voxel was allowed to export wrong GLB!") + if hasattr(geom, "vertices") and len(geom.vertices) == 0: continue - if hasattr(geom, 'geometry') and len(geom.geometry) == 0: + if hasattr(geom, "geometry") and len(geom.geometry) == 0: continue - g.log.info(f'Testing: {fn}') + g.log.info(f"Testing: {fn}") # check a roundtrip which will validate on export # and crash on reload if we've done anything screwey # unitize normals will unitize any normals to comply with # the validator although there are probably reasons you'd # want to roundtrip non-unit normals for things, stuff, and # activities - export = geom.export(file_type='glb', unitize_normals=True) + export = geom.export(file_type="glb", unitize_normals=True) validate_glb(export, name=fn) # shouldn't crash on a reload reloaded = g.trimesh.load( - file_obj=g.trimesh.util.wrap_as_stream(export), - file_type='glb') + file_obj=g.trimesh.util.wrap_as_stream(export), file_type="glb" + ) if isinstance(geom, g.trimesh.Trimesh): assert g.np.isclose(geom.area, reloaded.area) @@ -921,38 +865,34 @@ def test_bulk(self): def test_interleaved(self): # do a quick check on a mesh that uses byte stride - with open(g.get_path('BoxInterleaved.glb'), 'rb') as f: + with open(g.get_path("BoxInterleaved.glb"), "rb") as f: k = g.trimesh.exchange.gltf.load_glb(f) # get the kwargs for the mesh constructor - c = k['geometry']['Mesh'] + c = k["geometry"]["Mesh"] # should have vertex normals - assert c['vertex_normals'].shape == c['vertices'].shape + assert c["vertex_normals"].shape == c["vertices"].shape # interleaved vertex normals should all be unit vectors - assert g.np.allclose( - 1.0, g.np.linalg.norm(c['vertex_normals'], axis=1)) + assert g.np.allclose(1.0, g.np.linalg.norm(c["vertex_normals"], axis=1)) # should also load as a box - m = g.get_mesh('BoxInterleaved.glb').geometry['Mesh'] + m = g.get_mesh("BoxInterleaved.glb").geometry["Mesh"] assert g.np.isclose(m.volume, 1.0) def test_equal_by_default(self): # all things being equal we shouldn't be moving things # for the usual load-export loop - s = g.get_mesh('fuze.obj') + s = g.get_mesh("fuze.obj") # export as GLB then re-load - export = s.export(file_type='glb', unitize_normals=True) + export = s.export(file_type="glb", unitize_normals=True) validate_glb(export) reloaded = g.trimesh.load( - g.trimesh.util.wrap_as_stream(export), - file_type='glb', process=False) + 
g.trimesh.util.wrap_as_stream(export), file_type="glb", process=False + ) assert len(reloaded.geometry) == 1 m = next(iter(reloaded.geometry.values())) - assert g.np.allclose(m.visual.uv, - s.visual.uv) - assert g.np.allclose(m.vertices, - s.vertices) - assert g.np.allclose(m.faces, - s.faces) + assert g.np.allclose(m.visual.uv, s.visual.uv) + assert g.np.allclose(m.vertices, s.vertices) + assert g.np.allclose(m.faces, s.faces) # will run a kdtree check g.texture_equal(s, m) @@ -962,7 +902,7 @@ def test_gltf_by_name(self): with g.TemporaryDirectory() as d: # export the GLTF file by name - file_path = g.os.path.join(d, 'hi.gltf') + file_path = g.os.path.join(d, "hi.gltf") # export the file by path m.export(file_path) # reload the gltf from the file path @@ -970,20 +910,18 @@ def test_gltf_by_name(self): assert isinstance(r, g.trimesh.Scene) assert len(r.geometry) == 1 - assert g.np.isclose( - next( - iter( - r.geometry.values())).volume, - m.volume) + assert g.np.isclose(next(iter(r.geometry.values())).volume, m.volume) def test_embed_buffer(self): - - scene = g.trimesh.Scene({ - 'thing': g.trimesh.primitives.Sphere(), - 'other': g.trimesh.creation.capsule()}) + scene = g.trimesh.Scene( + { + "thing": g.trimesh.primitives.Sphere(), + "other": g.trimesh.creation.capsule(), + } + ) with g.trimesh.util.TemporaryDirectory() as D: - path = g.os.path.join(D, 'hi.gltf') + path = g.os.path.join(D, "hi.gltf") scene.export(path) # should export with separate buffers @@ -993,7 +931,7 @@ def test_embed_buffer(self): assert set(reloaded.geometry.keys()) == set(scene.geometry.keys()) with g.trimesh.util.TemporaryDirectory() as D: - path = g.os.path.join(D, 'hi.gltf') + path = g.os.path.join(D, "hi.gltf") scene.export(path, embed_buffers=True) # should export with embeded bufferes @@ -1004,8 +942,8 @@ def test_embed_buffer(self): def test_webp(self): # load textured file - mesh = g.get_mesh('fuze.ply') - assert hasattr(mesh.visual, 'uv') + mesh = g.get_mesh("fuze.ply") + assert hasattr(mesh.visual, "uv") for extension in ["glb"]: export = mesh.export(file_type=extension, extension_webp=True) @@ -1013,30 +951,30 @@ def test_webp(self): # roundtrip reloaded = g.trimesh.load( - g.trimesh.util.wrap_as_stream(export), - file_type=extension) + g.trimesh.util.wrap_as_stream(export), file_type=extension + ) g.scene_equal(g.trimesh.Scene(mesh), reloaded) def test_relative_paths(self): # try with a relative path with g.TemporaryDirectory() as d: - g.os.makedirs(g.os.path.join(d, 'fused')) + g.os.makedirs(g.os.path.join(d, "fused")) g.os.chdir(d) - g.trimesh.creation.box().export('fused/hi.gltf') - r = g.trimesh.load('fused/hi.gltf') + g.trimesh.creation.box().export("fused/hi.gltf") + r = g.trimesh.load("fused/hi.gltf") assert g.np.isclose(r.volume, 1.0) with g.TemporaryDirectory() as d: # now try it without chaging to that directory - full = g.os.path.join(d, 'hi', 'there', 'different', 'levels') - path = g.os.path.join(full, 'hey.gltf') + full = g.os.path.join(d, "hi", "there", "different", "levels") + path = g.os.path.join(full, "hey.gltf") g.os.makedirs(full) g.trimesh.creation.box().export(path) r = g.trimesh.load(path) assert g.np.isclose(r.volume, 1.0) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/trimesh/exchange/gltf.py b/trimesh/exchange/gltf.py index 209175716..33aafdfd8 100644 --- a/trimesh/exchange/gltf.py +++ b/trimesh/exchange/gltf.py @@ -20,17 +20,10 @@ # magic numbers which have meaning in GLTF # most are uint32's of UTF-8 text 
-_magic = {"gltf": 1179937895, - "json": 1313821514, - "bin": 5130562} +_magic = {"gltf": 1179937895, "json": 1313821514, "bin": 5130562} # GLTF data type codes: little endian numpy dtypes -_dtypes = {5120: " 0: tree["buffers"] = buffers tree["bufferViews"] = views # dump tree with compact separators - files["model.gltf"] = util.jsonify( - tree, separators=(',', ':')).encode("utf-8") + files["model.gltf"] = util.jsonify(tree, separators=(",", ":")).encode("utf-8") if tol.strict: validate(tree) @@ -165,12 +158,13 @@ def export_gltf(scene, def export_glb( - scene, - include_normals=None, - unitize_normals=False, - tree_postprocessor=None, - buffer_postprocessor=None, - extension_webp=False): + scene, + include_normals=None, + unitize_normals=False, + tree_postprocessor=None, + buffer_postprocessor=None, + extension_webp=False, +): """ Export a scene as a binary GLTF (GLB) file. @@ -194,8 +188,7 @@ def export_glb( Exported result in GLB 2.0 """ # if we were passed a bare Trimesh or Path3D object - if (not util.is_instance_named(scene, "Scene") and - hasattr(scene, "scene")): + if not util.is_instance_named(scene, "Scene") and hasattr(scene, "scene"): # generate a scene with just that mesh in it scene = scene.scene() @@ -204,7 +197,8 @@ def export_glb( unitize_normals=unitize_normals, include_normals=include_normals, buffer_postprocessor=buffer_postprocessor, - extension_webp=extension_webp) + extension_webp=extension_webp, + ) # allow custom postprocessing if tree_postprocessor is not None: @@ -222,7 +216,7 @@ def export_glb( tree["bufferViews"] = views # export the tree to JSON for the header - content = util.jsonify(tree, separators=(',', ':')) + content = util.jsonify(tree, separators=(",", ":")) # add spaces to content, so the start of the data # is 4 byte aligned as per spec content += (4 - ((len(content) + 20) % 4)) * " " @@ -232,28 +226,29 @@ def export_glb( # the initial header of the file header = _byte_pad( - np.array([_magic["gltf"], # magic, turns into glTF - 2, # GLTF version - # length is the total length of the Binary glTF - # including Header and all Chunks, in bytes. 
- len(content) + len(buffer_data) + 28, - # contentLength is the length, in bytes, - # of the glTF content (JSON) - len(content), - # magic number which is 'JSON' - _magic["json"]], - dtype=" 0: - tree['extensionsUsed'] = list(extensions_used) + tree["extensionsUsed"] = list(extensions_used) # Also add WebP to required (no fallback currently implemented) # 'extensionsRequired' aren't currently used so this doesn't overwrite if extension_webp: - tree['extensionsRequired'] = ["EXT_texture_webp"] + tree["extensionsRequired"] = ["EXT_texture_webp"] if buffer_postprocessor is not None: buffer_postprocessor(buffer_items, tree) # convert accessors back to a flat list - tree['accessors'] = list(tree['accessors'].values()) + tree["accessors"] = list(tree["accessors"].values()) # cull empty or unpopulated fields # check keys that might be empty so we can remove them - check = ['textures', 'materials', 'images', 'accessors', 'meshes'] + check = ["textures", "materials", "images", "accessors", "meshes"] # remove the keys with nothing stored in them [tree.pop(key) for key in check if len(tree[key]) == 0] return tree, buffer_items -def _append_mesh(mesh, - name, - tree, - buffer_items, - include_normals, - unitize_normals, - mat_hashes, - extension_webp): +def _append_mesh( + mesh, + name, + tree, + buffer_items, + include_normals, + unitize_normals, + mat_hashes, + extension_webp, +): """ Append a mesh to the scene structure and put the data into buffer_items. @@ -781,34 +771,40 @@ def _append_mesh(mesh, """ # return early from empty meshes to avoid crashing later if len(mesh.faces) == 0 or len(mesh.vertices) == 0: - log.debug('skipping empty mesh!') + log.debug("skipping empty mesh!") return # convert mesh data to the correct dtypes # faces: 5125 is an unsigned 32 bit integer # accessors refer to data locations # mesh faces are stored as flat list of integers - acc_face = _data_append(acc=tree['accessors'], - buff=buffer_items, - blob={"componentType": 5125, - "type": "SCALAR"}, - data=mesh.faces.astype(uint32)) + acc_face = _data_append( + acc=tree["accessors"], + buff=buffer_items, + blob={"componentType": 5125, "type": "SCALAR"}, + data=mesh.faces.astype(uint32), + ) # vertices: 5126 is a float32 # create or reuse an accessor for these vertices - acc_vertex = _data_append(acc=tree['accessors'], - buff=buffer_items, - blob={"componentType": 5126, - "type": "VEC3", - "byteOffset": 0}, - data=mesh.vertices.astype(float32)) + acc_vertex = _data_append( + acc=tree["accessors"], + buff=buffer_items, + blob={"componentType": 5126, "type": "VEC3", "byteOffset": 0}, + data=mesh.vertices.astype(float32), + ) # meshes reference accessor indexes - current = {"name": name, - "extras": {}, - "primitives": [{ - "attributes": {"POSITION": acc_vertex}, - "indices": acc_face, - "mode": _GL_TRIANGLES}]} + current = { + "name": name, + "extras": {}, + "primitives": [ + { + "attributes": {"POSITION": acc_vertex}, + "indices": acc_face, + "mode": _GL_TRIANGLES, + } + ], + } # if units are defined, store them as an extra # the GLTF spec says everything is implicit meters # we're not doing that as our unit conversions are expensive @@ -816,77 +812,83 @@ def _append_mesh(mesh, # https://github.com/KhronosGroup/glTF/tree/master/extensions try: # skip jsonify any metadata, skipping internal keys - current['extras'] = _jsonify(mesh.metadata) + current["extras"] = _jsonify(mesh.metadata) # extract extensions if any - extensions = current['extras'].pop('gltf_extensions', None) + extensions = 
current["extras"].pop("gltf_extensions", None) if isinstance(extensions, dict): - current['extensions'] = extensions + current["extensions"] = extensions - if mesh.units not in [None, 'm', 'meters', 'meter']: + if mesh.units not in [None, "m", "meters", "meter"]: current["extras"]["units"] = str(mesh.units) except BaseException: - log.debug('metadata not serializable, dropping!', - exc_info=True) + log.debug("metadata not serializable, dropping!", exc_info=True) # check to see if we have vertex or face colors # or if a TextureVisual has colors included as an attribute - if mesh.visual.kind in ['vertex', 'face']: + if mesh.visual.kind in ["vertex", "face"]: vertex_colors = mesh.visual.vertex_colors - elif (hasattr(mesh.visual, 'vertex_attributes') and - 'color' in mesh.visual.vertex_attributes): - vertex_colors = mesh.visual.vertex_attributes['color'] + elif ( + hasattr(mesh.visual, "vertex_attributes") + and "color" in mesh.visual.vertex_attributes + ): + vertex_colors = mesh.visual.vertex_attributes["color"] else: vertex_colors = None if vertex_colors is not None: # convert color data to bytes and append acc_color = _data_append( - acc=tree['accessors'], + acc=tree["accessors"], buff=buffer_items, - blob={"componentType": 5121, - "normalized": True, - "type": "VEC4", - "byteOffset": 0}, - data=vertex_colors.astype(uint8)) + blob={ + "componentType": 5121, + "normalized": True, + "type": "VEC4", + "byteOffset": 0, + }, + data=vertex_colors.astype(uint8), + ) # add the reference for vertex color - current["primitives"][0]["attributes"][ - "COLOR_0"] = acc_color + current["primitives"][0]["attributes"]["COLOR_0"] = acc_color - if hasattr(mesh.visual, 'material'): + if hasattr(mesh.visual, "material"): # append the material and then set from returned index current_material = _append_material( mat=mesh.visual.material, tree=tree, buffer_items=buffer_items, mat_hashes=mat_hashes, - extension_webp=extension_webp) + extension_webp=extension_webp, + ) # if mesh has UV coordinates defined export them - has_uv = (hasattr(mesh.visual, 'uv') and - mesh.visual.uv is not None and - len(mesh.visual.uv) == len(mesh.vertices)) + has_uv = ( + hasattr(mesh.visual, "uv") + and mesh.visual.uv is not None + and len(mesh.visual.uv) == len(mesh.vertices) + ) if has_uv: # slice off W if passed uv = mesh.visual.uv.copy()[:, :2] # reverse the Y for GLTF uv[:, 1] = 1.0 - uv[:, 1] # add an accessor describing the blob of UV's - acc_uv = _data_append(acc=tree['accessors'], - buff=buffer_items, - blob={"componentType": 5126, - "type": "VEC2", - "byteOffset": 0}, - data=uv.astype(float32)) + acc_uv = _data_append( + acc=tree["accessors"], + buff=buffer_items, + blob={"componentType": 5126, "type": "VEC2", "byteOffset": 0}, + data=uv.astype(float32), + ) # add the reference for UV coordinates current["primitives"][0]["attributes"]["TEXCOORD_0"] = acc_uv # only reference the material if we had UV coordinates current["primitives"][0]["material"] = current_material - if (include_normals or - (include_normals is None and - 'vertex_normals' in mesh._cache.cache)): + if include_normals or ( + include_normals is None and "vertex_normals" in mesh._cache.cache + ): # store vertex normals if requested if unitize_normals: normals = mesh.vertex_normals.copy() @@ -899,16 +901,18 @@ def _append_mesh(mesh, normals = mesh.vertex_normals acc_norm = _data_append( - acc=tree['accessors'], + acc=tree["accessors"], buff=buffer_items, - blob={"componentType": 5126, - "count": len(mesh.vertices), - "type": "VEC3", - "byteOffset": 0}, - 
data=normals.astype(float32)) + blob={ + "componentType": 5126, + "count": len(mesh.vertices), + "type": "VEC3", + "byteOffset": 0, + }, + data=normals.astype(float32), + ) # add the reference for vertex color - current["primitives"][0]["attributes"][ - "NORMAL"] = acc_norm + current["primitives"][0]["attributes"]["NORMAL"] = acc_norm # for each attribute with a leading underscore, assign them to trimesh # vertex_attributes @@ -920,18 +924,18 @@ def _append_mesh(mesh, # GLTF has no floating point type larger than 32 bits so clip # any float64 or larger to float32 - if attrib.dtype.kind == 'f' and attrib.dtype.itemsize > 4: + if attrib.dtype.kind == "f" and attrib.dtype.itemsize > 4: data = attrib.astype(np.float32) else: data = attrib # store custom vertex attributes - current["primitives"][0][ - "attributes"][key] = _data_append( - acc=tree['accessors'], - buff=buffer_items, - blob=_build_accessor(data), - data=data) + current["primitives"][0]["attributes"][key] = _data_append( + acc=tree["accessors"], + buff=buffer_items, + blob=_build_accessor(data), + data=data, + ) tree["meshes"].append(current) @@ -956,9 +960,8 @@ def _build_views(buffer_items): current_pos = 0 for current_item in buffer_items.values(): views.append( - {"buffer": 0, - "byteOffset": current_pos, - "byteLength": len(current_item)}) + {"buffer": 0, "byteOffset": current_pos, "byteLength": len(current_item)} + ) assert (current_pos % 4) == 0 assert (len(current_item) % 4) == 0 current_pos += len(current_item) @@ -984,8 +987,7 @@ def _build_accessor(array): if len(shape) == 2: vec_length = shape[1] if vec_length > 4: - raise ValueError( - "The GLTF spec does not support vectors larger than 4") + raise ValueError("The GLTF spec does not support vectors larger than 4") if vec_length > 1: data_type = "VEC%d" % vec_length else: @@ -997,20 +999,17 @@ def _build_accessor(array): data_type = "MAT%d" % shape[2] # get the array data type as a str stripping off endian - lookup = array.dtype.str.lstrip('<>') + lookup = array.dtype.str.lstrip("<>") - if lookup == 'u4': + if lookup == "u4": # spec: UNSIGNED_INT is only allowed when the accessor # contains indices i.e. the accessor is only referenced # by `primitive.indices` - log.debug('custom uint32 may cause validation failures') + log.debug("custom uint32 may cause validation failures") # map the numpy dtype to a GLTF code (i.e. 5121) componentType = _dtypes_lookup[lookup] - accessor = { - "componentType": componentType, - "type": data_type, - "byteOffset": 0} + accessor = {"componentType": componentType, "type": data_type, "byteOffset": 0} if len(shape) < 3: accessor["max"] = array.max(axis=0).tolist() @@ -1043,14 +1042,16 @@ def _byte_pad(data, bound=4): # extra bytes to pad with count = bound - (len(data) % bound) # bytes(count) only works on Python 3 - pad = (' ' * count).encode('utf-8') + pad = (" " * count).encode("utf-8") # combine the padding and data result = b"".join([data, pad]) # we should always divide evenly if tol.strict and (len(result) % bound) != 0: raise ValueError( - 'byte_pad failed! ori:{} res:{} pad:{} req:{}'.format( - len(data), len(result), count, bound)) + "byte_pad failed! 
ori:{} res:{} pad:{} req:{}".format( + len(data), len(result), count, bound + ) + ) return result return data @@ -1091,36 +1092,42 @@ def _append_path(path, name, tree, buffer_items): # data is the second value of the fifth field # which is a (data type, data) tuple acc_vertex = _data_append( - acc=tree['accessors'], + acc=tree["accessors"], buff=buffer_items, - blob={"componentType": 5126, - "type": "VEC3", - "byteOffset": 0}, - data=vxlist[4][1].astype(float32)) + blob={"componentType": 5126, "type": "VEC3", "byteOffset": 0}, + data=vxlist[4][1].astype(float32), + ) current = { "name": name, - "primitives": [{ - "attributes": {"POSITION": acc_vertex}, - "mode": _GL_LINES, # i.e. 1 - "material": material_idx}]} + "primitives": [ + { + "attributes": {"POSITION": acc_vertex}, + "mode": _GL_LINES, # i.e. 1 + "material": material_idx, + } + ], + } # if units are defined, store them as an extra: # https://github.com/KhronosGroup/glTF/tree/master/extensions try: current["extras"] = _jsonify(path.metadata) except BaseException: - log.debug('failed to serialize metadata, dropping!', - exc_info=True) + log.debug("failed to serialize metadata, dropping!", exc_info=True) if path.colors is not None: - acc_color = _data_append(acc=tree['accessors'], - buff=buffer_items, - blob={"componentType": 5121, - "normalized": True, - "type": "VEC4", - "byteOffset": 0}, - data=np.array(vxlist[5][1]).astype(uint8)) + acc_color = _data_append( + acc=tree["accessors"], + buff=buffer_items, + blob={ + "componentType": 5121, + "normalized": True, + "type": "VEC4", + "byteOffset": 0, + }, + data=np.array(vxlist[5][1]).astype(uint8), + ) # add color to attributes current["primitives"][0]["attributes"]["COLOR_0"] = acc_color @@ -1146,22 +1153,26 @@ def _append_point(points, name, tree, buffer_items): # convert the points to the unnamed args for # a pyglet vertex list - vxlist = rendering.points_to_vertexlist( - points=points.vertices, colors=points.colors) + vxlist = rendering.points_to_vertexlist(points=points.vertices, colors=points.colors) # data is the second value of the fifth field # which is a (data type, data) tuple - acc_vertex = _data_append(acc=tree['accessors'], - buff=buffer_items, - blob={"componentType": 5126, - "type": "VEC3", - "byteOffset": 0}, - data=vxlist[4][1].astype(float32)) - current = {"name": name, - "primitives": [{ - "attributes": {"POSITION": acc_vertex}, - "mode": _GL_POINTS, - "material": len(tree["materials"])}]} + acc_vertex = _data_append( + acc=tree["accessors"], + buff=buffer_items, + blob={"componentType": 5126, "type": "VEC3", "byteOffset": 0}, + data=vxlist[4][1].astype(float32), + ) + current = { + "name": name, + "primitives": [ + { + "attributes": {"POSITION": acc_vertex}, + "mode": _GL_POINTS, + "material": len(tree["materials"]), + } + ], + } # TODO add color support to Points object # this is just exporting everying as black @@ -1170,20 +1181,24 @@ def _append_point(points, name, tree, buffer_items): if len(np.shape(points.colors)) == 2: # colors may be returned as "c3f" or other RGBA color_type, color_data = vxlist[5] - if '3' in color_type: - kind = 'VEC3' - elif '4' in color_type: - kind = 'VEC4' + if "3" in color_type: + kind = "VEC3" + elif "4" in color_type: + kind = "VEC4" else: - raise ValueError('unknown color: %s', color_type) - acc_color = _data_append(acc=tree['accessors'], - buff=buffer_items, - blob={"componentType": 5121, - "count": vxlist[0], - "normalized": True, - "type": kind, - "byteOffset": 0}, - data=np.array(color_data).astype(uint8)) + raise 
ValueError("unknown color: %s", color_type) + acc_color = _data_append( + acc=tree["accessors"], + buff=buffer_items, + blob={ + "componentType": 5121, + "count": vxlist[0], + "normalized": True, + "type": kind, + "byteOffset": 0, + }, + data=np.array(color_data).astype(uint8), + ) # add color to attributes current["primitives"][0]["attributes"]["COLOR_0"] = acc_color tree["meshes"].append(current) @@ -1204,13 +1219,13 @@ def _parse_textures(header, views, resolver=None): # loop through images for i, img in enumerate(header["images"]): # get the bytes representing an image - if 'bufferView' in img: + if "bufferView" in img: blob = views[img["bufferView"]] - elif 'uri' in img: + elif "uri" in img: # will get bytes from filesystem or base64 URI - blob = _uri_to_bytes(uri=img['uri'], resolver=resolver) + blob = _uri_to_bytes(uri=img["uri"], resolver=resolver) else: - log.debug(f'unable to load image from: {img.keys()}') + log.debug(f"unable to load image from: {img.keys()}") continue # i.e. 'image/jpeg' # mime = img['mimeType'] @@ -1239,6 +1254,7 @@ def _parse_materials(header, views, resolver=None): materials : list List of trimesh.visual.texture.Material objects """ + def parse_values_and_textures(input_dict): result = {} for k, v in input_dict.items(): @@ -1255,9 +1271,11 @@ def parse_values_and_textures(input_dict): # check to see if this is using a webp extension texture # should this be case sensitive? - webp = texture.get( - 'extensions', {}).get( - 'EXT_texture_webp', {}).get('source') + webp = ( + texture.get("extensions", {}) + .get("EXT_texture_webp", {}) + .get("source") + ) if webp is not None: idx = webp else: @@ -1267,8 +1285,7 @@ def parse_values_and_textures(input_dict): # store the actual image as the value result[k] = images[idx] except BaseException: - log.debug('unable to store texture', - exc_info=True) + log.debug("unable to store texture", exc_info=True) return result images = _parse_textures(header, views, resolver) @@ -1284,8 +1301,9 @@ def parse_values_and_textures(input_dict): # add keys of keys to top level dict loopable.update(loopable.pop("pbrMetallicRoughness")) - ext = mat.get('extensions', {}).get( - 'KHR_materials_pbrSpecularGlossiness', None) + ext = mat.get("extensions", {}).get( + "KHR_materials_pbrSpecularGlossiness", None + ) if isinstance(ext, dict): ext_params = parse_values_and_textures(ext) loopable.update(specular_to_pbr(**ext_params)) @@ -1298,12 +1316,14 @@ def parse_values_and_textures(input_dict): return materials -def _read_buffers(header, - buffers, - mesh_kwargs, - ignore_broken=False, - merge_primitives=False, - resolver=None): +def _read_buffers( + header, + buffers, + mesh_kwargs, + ignore_broken=False, + merge_primitives=False, + resolver=None, +): """ Given binary data and a layout return the kwargs to create a scene object. @@ -1345,10 +1365,10 @@ def _read_buffers(header, # load data from buffers into numpy arrays # using the layout described by accessors - access = [None] * len(header['accessors']) + access = [None] * len(header["accessors"]) for index, a in enumerate(header["accessors"]): # number of items - count = a['count'] + count = a["count"] # what is the datatype dtype = np.dtype(_dtypes[a["componentType"]]) # basically how many columns @@ -1359,7 +1379,7 @@ def _read_buffers(header, # number of items when flattened # i.e. 
a (4, 4) MAT4 has 16 per_count = np.abs(np.prod(per_item)) - if 'bufferView' in a: + if "bufferView" in a: # data was stored in a buffer view so get raw bytes # load the bytes data into correct dtype and shape @@ -1373,7 +1393,7 @@ def _read_buffers(header, # both bufferView *and* accessors are allowed # to have a byteOffset - start = a.get('byteOffset', 0) + start = a.get("byteOffset", 0) if "byteStride" in buffer_view: # how many bytes for each chunk @@ -1386,25 +1406,24 @@ def _read_buffers(header, # and then pull chunks per-stride # do as a list comprehension as the numpy # buffer wangling was - raw = b''.join( - data[i:i + per_row] for i in - range(start, start + length, stride)) + raw = b"".join( + data[i : i + per_row] + for i in range(start, start + length, stride) + ) # the reshape should fail if we screwed up - access[index] = np.frombuffer( - raw, dtype=dtype).reshape(shape) + access[index] = np.frombuffer(raw, dtype=dtype).reshape(shape) else: # length is the number of bytes per item times total length = dtype.itemsize * count * per_count access[index] = np.frombuffer( - data[start:start + length], dtype=dtype).reshape(shape) + data[start : start + length], dtype=dtype + ).reshape(shape) else: # a "sparse" accessor should be initialized as zeros - access[index] = np.zeros( - count * per_count, dtype=dtype).reshape(shape) + access[index] = np.zeros(count * per_count, dtype=dtype).reshape(shape) # load images and textures into material objects - materials = _parse_materials( - header, views=views, resolver=resolver) + materials = _parse_materials(header, views=views, resolver=resolver) mesh_prim = collections.defaultdict(list) # load data from accessors into Trimesh objects @@ -1417,13 +1436,13 @@ def _read_buffers(header, for index, m in enumerate(header.get("meshes", [])): try: # GLTF spec indicates implicit units are meters - metadata = {'units': 'meters'} + metadata = {"units": "meters"} # try to load all mesh metadata - if isinstance(m.get('extras'), dict): - metadata.update(m['extras']) + if isinstance(m.get("extras"), dict): + metadata.update(m["extras"]) # put any mesh extensions in a field of the metadata - if 'extensions' in m: - metadata['gltf_extensions'] = m['extensions'] + if "extensions" in m: + metadata["gltf_extensions"] = m["extensions"] for p in m["primitives"]: # if we don't have a triangular mesh continue @@ -1433,50 +1452,48 @@ def _read_buffers(header, kwargs["metadata"].update(metadata) # i.e. 
GL_LINES, GL_TRIANGLES, etc # specification says the default mode is GL_TRIANGLES - mode = p.get('mode', _GL_TRIANGLES) + mode = p.get("mode", _GL_TRIANGLES) # colors, normals, etc - attr = p['attributes'] + attr = p["attributes"] # create a unique mesh name per- primitive - name = m.get('name', 'GLTF') + name = m.get("name", "GLTF") # make name unique across multiple meshes name = unique_name(name, meshes, counts=name_counts) if mode == _GL_LINES: # load GL_LINES into a Path object from ..path.entities import Line + kwargs["vertices"] = access[attr["POSITION"]] - kwargs['entities'] = [Line( - points=np.arange(len(kwargs['vertices'])))] + kwargs["entities"] = [Line(points=np.arange(len(kwargs["vertices"])))] elif mode == _GL_POINTS: kwargs["vertices"] = access[attr["POSITION"]] elif mode in (_GL_TRIANGLES, _GL_STRIP): # get vertices from accessors kwargs["vertices"] = access[attr["POSITION"]] # get faces from accessors - if 'indices' in p: + if "indices" in p: if mode == _GL_STRIP: # this is triangle strips - flat = access[p['indices']].reshape(-1) - kwargs['faces'] = util.triangle_strips_to_faces( - [flat]) + flat = access[p["indices"]].reshape(-1) + kwargs["faces"] = util.triangle_strips_to_faces([flat]) else: - kwargs["faces"] = access[p["indices"] - ].reshape((-1, 3)) + kwargs["faces"] = access[p["indices"]].reshape((-1, 3)) else: # indices are apparently optional and we are supposed to # do the same thing as webGL drawArrays? - kwargs['faces'] = np.arange( - len(kwargs['vertices']) * 3, - dtype=np.int64).reshape((-1, 3)) + kwargs["faces"] = np.arange( + len(kwargs["vertices"]) * 3, dtype=np.int64 + ).reshape((-1, 3)) - if 'NORMAL' in attr: + if "NORMAL" in attr: # vertex normals are specified - kwargs['vertex_normals'] = access[attr['NORMAL']] + kwargs["vertex_normals"] = access[attr["NORMAL"]] # do we have UV coordinates visuals = None if "material" in p: if materials is None: - log.debug('no materials! `pip install pillow`') + log.debug("no materials! 
`pip install pillow`") else: uv = None if "TEXCOORD_0" in attr: @@ -1486,41 +1503,43 @@ def _read_buffers(header, uv[:, 1] = 1.0 - uv[:, 1] # create a texture visual visuals = visual.texture.TextureVisuals( - uv=uv, material=materials[p["material"]]) + uv=uv, material=materials[p["material"]] + ) - if 'COLOR_0' in attr: + if "COLOR_0" in attr: try: # try to load vertex colors from the accessors - colors = access[attr['COLOR_0']] - if len(colors) == len(kwargs['vertices']): + colors = access[attr["COLOR_0"]] + if len(colors) == len(kwargs["vertices"]): if visuals is None: # just pass to mesh as vertex color - kwargs['vertex_colors'] = colors + kwargs["vertex_colors"] = colors else: # we ALSO have texture so save as vertex # attribute - visuals.vertex_attributes['color'] = colors + visuals.vertex_attributes["color"] = colors except BaseException: # survive failed colors - log.debug('failed to load colors', exc_info=True) + log.debug("failed to load colors", exc_info=True) if visuals is not None: - kwargs['visual'] = visuals + kwargs["visual"] = visuals # By default the created mesh is not from primitive, # in case it is the value will be updated # each primitive gets it's own Trimesh object if len(m["primitives"]) > 1: - kwargs['metadata']['from_gltf_primitive'] = True + kwargs["metadata"]["from_gltf_primitive"] = True else: - kwargs['metadata']['from_gltf_primitive'] = False + kwargs["metadata"]["from_gltf_primitive"] = False # custom attributes starting with a `_` - custom = {a: access[attr[a]] for a in attr.keys() - if a.startswith('_')} + custom = { + a: access[attr[a]] for a in attr.keys() if a.startswith("_") + } if len(custom) > 0: kwargs["vertex_attributes"] = custom else: - log.debug('skipping primitive with mode %s!', mode) + log.debug("skipping primitive with mode %s!", mode) continue # this should absolutely not be stomping on itself assert name not in meshes @@ -1528,8 +1547,7 @@ def _read_buffers(header, mesh_prim[index].append(name) except BaseException as E: if ignore_broken: - log.debug('failed to load mesh', - exc_info=True), + log.debug("failed to load mesh", exc_info=True), else: raise E @@ -1557,27 +1575,28 @@ def _read_buffers(header, # get all meshes for this group current = [meshes[n] for n in names] - v_seq = [p['vertices'] for p in current] - f_seq = [p['faces'] for p in current] + v_seq = [p["vertices"] for p in current] + f_seq = [p["faces"] for p in current] v, f = util.append_faces(v_seq, f_seq) - materials = [p['visual'].material for p in current] + materials = [p["visual"].material for p in current] face_materials = [] for i, p in enumerate(current): - face_materials += [i] * len(p['faces']) + face_materials += [i] * len(p["faces"]) visuals = visual.texture.TextureVisuals( - material=visual.material.MultiMaterial( - materials=materials), - face_materials=face_materials) - if 'metadata' in meshes[names[0]]: - metadata = meshes[names[0]]['metadata'] + material=visual.material.MultiMaterial(materials=materials), + face_materials=face_materials, + ) + if "metadata" in meshes[names[0]]: + metadata = meshes[names[0]]["metadata"] else: metadata = {} meshes[name] = { - 'vertices': v, - 'faces': f, - 'visual': visuals, - 'metadata': metadata, - 'process': False} + "vertices": v, + "faces": f, + "visual": visuals, + "metadata": metadata, + "process": False, + } mesh_prim_replace[mesh_index] = [name] # avoid altering inside loop mesh_prim = mesh_prim_replace @@ -1594,11 +1613,7 @@ def _read_buffers(header, name_index = {} name_counts = {} for i, n in enumerate(nodes): - 
name_index[unique_name( - n.get('name', str(i)), - name_index, - counts=name_counts - )] = i + name_index[unique_name(n.get("name", str(i)), name_index, counts=name_counts)] = i # invert the dict so we can look up by index # node index (int) : name (str) names = {v: k for k, v in name_index.items()} @@ -1614,14 +1629,14 @@ def _read_buffers(header, # unvisited, pairs of node indexes queue = collections.deque() - if 'scene' in header: + if "scene" in header: # specify the index of scenes if specified - scene_index = header['scene'] + scene_index = header["scene"] else: # otherwise just use the first index scene_index = 0 - if 'scenes' in header: + if "scenes" in header: # start the traversal from the base frame to the roots for root in header["scenes"][scene_index].get("nodes", []): # add transform from base frame to these root nodes @@ -1658,9 +1673,9 @@ def _read_buffers(header, # parent -> child relationships have matrix stored in child # for the transform from parent to child if "matrix" in child: - kwargs["matrix"] = np.array( - child["matrix"], - dtype=np.float64).reshape((4, 4)).T + kwargs["matrix"] = ( + np.array(child["matrix"], dtype=np.float64).reshape((4, 4)).T + ) else: # if no matrix set identity kwargs["matrix"] = _EYE @@ -1669,20 +1684,21 @@ def _read_buffers(header, # GLTF applies these in order: T * R * S if "translation" in child: kwargs["matrix"] = np.dot( - kwargs["matrix"], - transformations.translation_matrix(child["translation"])) + kwargs["matrix"], transformations.translation_matrix(child["translation"]) + ) if "rotation" in child: # GLTF rotations are stored as (4,) XYZW unit quaternions # we need to re- order to our quaternion style, WXYZ quat = np.reshape(child["rotation"], 4)[[3, 0, 1, 2]] # add the rotation to the matrix kwargs["matrix"] = np.dot( - kwargs["matrix"], transformations.quaternion_matrix(quat)) + kwargs["matrix"], transformations.quaternion_matrix(quat) + ) if "scale" in child: # add scale to the matrix kwargs["matrix"] = np.dot( - kwargs["matrix"], - np.diag(np.concatenate((child['scale'], [1.0])))) + kwargs["matrix"], np.diag(np.concatenate((child["scale"], [1.0]))) + ) # treat node metadata similarly to mesh metadata if isinstance(child.get("extras"), dict): @@ -1707,32 +1723,33 @@ def _read_buffers(header, kwargs["geometry"] = geom_name # no transformations kwargs["matrix"] = _EYE - kwargs['frame_from'] = names[b] + kwargs["frame_from"] = names[b] # if we have more than one primitive assign a new UUID # frame name for the primitives after the first one - frame_to = f'{names[b]}_{util.unique_id(length=6)}' - kwargs['frame_to'] = frame_to + frame_to = f"{names[b]}_{util.unique_id(length=6)}" + kwargs["frame_to"] = frame_to # append the edge with the mesh frame graph.append(kwargs.copy()) elif len(geometries) == 1: kwargs["geometry"] = geometries[0] - if 'name' in child: - kwargs['frame_to'] = names[b] + if "name" in child: + kwargs["frame_to"] = names[b] graph.append(kwargs.copy()) else: # if the node doesn't have any geometry just add graph.append(kwargs) # kwargs for load_kwargs - result = {"class": "Scene", - "geometry": meshes, - "graph": graph, - "base_frame": base_frame} + result = { + "class": "Scene", + "geometry": meshes, + "graph": graph, + "base_frame": base_frame, + } try: # load any scene extras into scene.metadata # use a try except to avoid nested key checks - result['metadata'] = header['scenes'][ - header['scene']]['extras'] + result["metadata"] = header["scenes"][header["scene"]]["extras"] except BaseException: pass try: @@ 
-1740,7 +1757,7 @@ def _read_buffers(header, # use a try except to avoid nested key checks if "metadata" not in result: result["metadata"] = {} - result['metadata']['gltf_extensions'] = header['extensions'] + result["metadata"]["gltf_extensions"] = header["extensions"] except BaseException: pass @@ -1767,7 +1784,9 @@ def _convert_camera(camera): "perspective": { "aspectRatio": camera.fov[0] / camera.fov[1], "yfov": np.radians(camera.fov[1]), - "znear": float(camera.z_near)}} + "znear": float(camera.z_near), + }, + } return result @@ -1793,18 +1812,18 @@ def _append_image(img, tree, buffer_items, extension_webp): None if image append failed for any reason """ # probably not a PIL image so exit - if not hasattr(img, 'format'): + if not hasattr(img, "format"): return None if extension_webp: # support WebP if extension is specified - save_as = 'WEBP' - elif img.format == 'JPEG': + save_as = "WEBP" + elif img.format == "JPEG": # don't re-encode JPEGs - save_as = 'JPEG' + save_as = "JPEG" else: # for everything else just use PNG - save_as = 'png' + save_as = "png" # get the image data into a bytes object with util.BytesIO() as f: @@ -1814,12 +1833,10 @@ def _append_image(img, tree, buffer_items, extension_webp): index = _buffer_append(buffer_items, data) # append buffer index and the GLTF-acceptable mimetype - tree['images'].append({ - 'bufferView': index, - 'mimeType': f'image/{save_as.lower()}'}) + tree["images"].append({"bufferView": index, "mimeType": f"image/{save_as.lower()}"}) # index is length minus one - return len(tree['images']) - 1 + return len(tree["images"]) - 1 def _append_material(mat, tree, buffer_items, mat_hashes, extension_webp): @@ -1857,7 +1874,7 @@ def _append_material(mat, tree, buffer_items, mat_hashes, extension_webp): return mat_hashes[hashed] # convert passed input to PBR if necessary - if hasattr(mat, 'to_pbr'): + if hasattr(mat, "to_pbr"): as_pbr = mat.to_pbr() else: as_pbr = mat @@ -1866,89 +1883,92 @@ def _append_material(mat, tree, buffer_items, mat_hashes, extension_webp): result = {"pbrMetallicRoughness": {}} try: # try to convert base color to (4,) float color - result['baseColorFactor'] = visual.color.to_float( - as_pbr.baseColorFactor).reshape(4).tolist() + result["baseColorFactor"] = ( + visual.color.to_float(as_pbr.baseColorFactor).reshape(4).tolist() + ) except BaseException: pass try: - result['emissiveFactor'] = as_pbr.emissiveFactor.reshape(3).tolist() + result["emissiveFactor"] = as_pbr.emissiveFactor.reshape(3).tolist() except BaseException: pass # if name is defined, export if isinstance(as_pbr.name, str): - result['name'] = as_pbr.name + result["name"] = as_pbr.name # if alphaMode is defined, export if isinstance(as_pbr.alphaMode, str): - result['alphaMode'] = as_pbr.alphaMode + result["alphaMode"] = as_pbr.alphaMode # if alphaCutoff is defined, export if isinstance(as_pbr.alphaCutoff, float): - result['alphaCutoff'] = as_pbr.alphaCutoff + result["alphaCutoff"] = as_pbr.alphaCutoff # if doubleSided is defined, export if isinstance(as_pbr.doubleSided, bool): - result['doubleSided'] = as_pbr.doubleSided + result["doubleSided"] = as_pbr.doubleSided # if scalars are defined correctly export if isinstance(as_pbr.metallicFactor, float): - result['metallicFactor'] = as_pbr.metallicFactor + result["metallicFactor"] = as_pbr.metallicFactor if isinstance(as_pbr.roughnessFactor, float): - result['roughnessFactor'] = as_pbr.roughnessFactor + result["roughnessFactor"] = as_pbr.roughnessFactor # which keys of the PBRMaterial are images image_mapping = { - 
'baseColorTexture': as_pbr.baseColorTexture, - 'emissiveTexture': as_pbr.emissiveTexture, - 'normalTexture': as_pbr.normalTexture, - 'occlusionTexture': as_pbr.occlusionTexture, - 'metallicRoughnessTexture': as_pbr.metallicRoughnessTexture} + "baseColorTexture": as_pbr.baseColorTexture, + "emissiveTexture": as_pbr.emissiveTexture, + "normalTexture": as_pbr.normalTexture, + "occlusionTexture": as_pbr.occlusionTexture, + "metallicRoughnessTexture": as_pbr.metallicRoughnessTexture, + } for key, img in image_mapping.items(): if img is None: continue # try adding the base image to the export object index = _append_image( - img=img, - tree=tree, - buffer_items=buffer_items, - extension_webp=extension_webp) + img=img, tree=tree, buffer_items=buffer_items, extension_webp=extension_webp + ) # if the image was added successfully it will return index # if it failed for any reason, it will return None if index is not None: # add a reference to the base color texture - result[key] = {'index': len(tree['textures'])} + result[key] = {"index": len(tree["textures"])} # add an object for the texture according to the WebP extension if extension_webp: - tree['textures'].append({'extensions': {'EXT_texture_webp': - {'source': index}}}) + tree["textures"].append( + {"extensions": {"EXT_texture_webp": {"source": index}}} + ) else: - tree['textures'].append({'source': index}) + tree["textures"].append({"source": index}) # for our PBRMaterial object we flatten all keys # however GLTF would like some of them under the # "pbrMetallicRoughness" key - pbr_subset = ['baseColorTexture', - 'baseColorFactor', - 'roughnessFactor', - 'metallicFactor', - 'metallicRoughnessTexture'] + pbr_subset = [ + "baseColorTexture", + "baseColorFactor", + "roughnessFactor", + "metallicFactor", + "metallicRoughnessTexture", + ] # move keys down a level for key in pbr_subset: if key in result: result["pbrMetallicRoughness"][key] = result.pop(key) # if we didn't have any PBR keys remove the empty key - if len(result['pbrMetallicRoughness']) == 0: - result.pop('pbrMetallicRoughness') + if len(result["pbrMetallicRoughness"]) == 0: + result.pop("pbrMetallicRoughness") # which index are we inserting material at - index = len(tree['materials']) + index = len(tree["materials"]) # add the material to the data structure - tree['materials'].append(result) + tree["materials"].append(result) # add the material index in-place mat_hashes[hashed] = index @@ -1974,6 +1994,7 @@ def validate(header): """ # a soft dependency import jsonschema + # will do the reference replacement schema = get_schema() # validate the passed header against the schema @@ -1997,15 +2018,13 @@ def get_schema(): from ..schemas import resolve # get a blob of a zip file including the GLTF 2.0 schema - blob = resources.get( - 'schema/gltf2.schema.zip', decode=False) + blob = resources.get("schema/gltf2.schema.zip", decode=False) # get the zip file as a dict keyed by file name - archive = util.decompress(util.wrap_as_stream(blob), 'zip') + archive = util.decompress(util.wrap_as_stream(blob), "zip") # get a resolver object for accessing the schema resolver = ZipResolver(archive) # get a loaded dict from the base file - unresolved = json.loads(util.decode_text( - resolver.get('glTF.schema.json'))) + unresolved = json.loads(util.decode_text(resolver.get("glTF.schema.json"))) # resolve `$ref` references to other files in the schema schema = resolve(unresolved, resolver=resolver) @@ -2013,5 +2032,4 @@ def get_schema(): # exporters -_gltf_loaders = {"glb": load_glb, - "gltf": load_gltf} 
+_gltf_loaders = {"glb": load_glb, "gltf": load_gltf} diff --git a/trimesh/util.py b/trimesh/util.py index 0da9aa344..443b46dc0 100644 --- a/trimesh/util.py +++ b/trimesh/util.py @@ -1482,7 +1482,7 @@ def concatenate(a, b=None): if any("vertex_normals" in m._cache for m in is_mesh): vertex_normals = vstack_empty([m.vertex_normals for m in is_mesh]) assert vertex_normals.shape == vertices.shape - + try: # concatenate visuals visual = is_mesh[0].visual.concatenate([m.visual for m in is_mesh[1:]]) From befc4c1047d707b3b054d4a9e76402c25ff353a9 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 18:25:30 -0400 Subject: [PATCH 095/144] partially revert #1895 --- tests/test_gltf.py | 21 ++++++++++++++------- trimesh/exchange/gltf.py | 11 ++++++----- trimesh/util.py | 2 +- 3 files changed, 21 insertions(+), 13 deletions(-) diff --git a/tests/test_gltf.py b/tests/test_gltf.py index da3491fca..5acb56ce0 100644 --- a/tests/test_gltf.py +++ b/tests/test_gltf.py @@ -6,7 +6,7 @@ # Khronos' official file validator # can be installed with the helper script: # `trimesh/docker/builds/gltf_validator.bash` -_gltf_validator = g.trimesh.util.which("gltf_validator") +_gltf_validator = g.shutil.which("gltf_validator") def validate_glb(data, name=None): @@ -33,16 +33,19 @@ def validate_glb(data, name=None): with g.tempfile.NamedTemporaryFile(suffix=".glb") as f: f.write(data) f.flush() - # run the khronos gltf-validator - report = g.subprocess.run([_gltf_validator, f.name, "-o"], capture_output=True) + + # gltf_validator has occasional bugs being run outside + # of the current working directory + temp_dir, file_name = g.os.path.split(f.name) + # run khronos gltf_validator + report = g.subprocess.run( + [_gltf_validator, file_name, "-o"], cwd=temp_dir, capture_output=True + ) # -o prints JSON to stdout content = report.stdout.decode("utf-8") returncode = report.returncode if returncode != 0: - from IPython import embed - - embed() g.log.error(f"failed on: `{name}`") g.log.error(f"validator: `{content}`") g.log.error(f"stderr: `{report.stderr}`") @@ -378,8 +381,12 @@ def test_gltf_pole(self): def test_material_primary_colors(self): primary_color_material = g.trimesh.visual.material.PBRMaterial() primary_color_material.baseColorFactor = (255, 0, 0, 255) - sphere = g.trimesh.primitives.Sphere() + sphere = g.trimesh.creation.icosphere() + sphere.visual = g.trimesh.visual.TextureVisuals(material=primary_color_material) sphere.visual.material = primary_color_material + # material will *not* export without uv coordinates to gltf + # as GLTF requires TEXCOORD_0 be defined if there is a material + sphere.visual.uv = g.np.zeros((len(sphere.vertices), 2)) scene = g.trimesh.Scene([sphere]) def to_integer(args): diff --git a/trimesh/exchange/gltf.py b/trimesh/exchange/gltf.py index 33aafdfd8..e06318990 100644 --- a/trimesh/exchange/gltf.py +++ b/trimesh/exchange/gltf.py @@ -739,10 +739,10 @@ def _append_mesh( name, tree, buffer_items, - include_normals, - unitize_normals, - mat_hashes, - extension_webp, + include_normals: bool, + unitize_normals: bool, + mat_hashes: dict, + extension_webp: bool, ): """ Append a mesh to the scene structure and put the @@ -883,8 +883,9 @@ def _append_mesh( ) # add the reference for UV coordinates current["primitives"][0]["attributes"]["TEXCOORD_0"] = acc_uv + # only reference the material if we had UV coordinates - current["primitives"][0]["material"] = current_material + current["primitives"][0]["material"] = current_material if include_normals or ( include_normals is 
None and "vertex_normals" in mesh._cache.cache diff --git a/trimesh/util.py b/trimesh/util.py index 443b46dc0..0da9aa344 100644 --- a/trimesh/util.py +++ b/trimesh/util.py @@ -1482,7 +1482,7 @@ def concatenate(a, b=None): if any("vertex_normals" in m._cache for m in is_mesh): vertex_normals = vstack_empty([m.vertex_normals for m in is_mesh]) assert vertex_normals.shape == vertices.shape - + try: # concatenate visuals visual = is_mesh[0].visual.concatenate([m.visual for m in is_mesh[1:]]) From 2ef582ad9582e490edb12dfdfb4ef88f8a559ff5 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 18:55:37 -0400 Subject: [PATCH 096/144] remove stub temporarydirectory --- tests/generic.py | 4 +--- trimesh/util.py | 21 --------------------- 2 files changed, 1 insertion(+), 24 deletions(-) diff --git a/tests/generic.py b/tests/generic.py index d766c66ab..53076a2d9 100644 --- a/tests/generic.py +++ b/tests/generic.py @@ -37,7 +37,7 @@ tf = trimesh.transformations - +TemporaryDirectory = tempfile.TemporaryDirectory # make a dummy profiler which does nothing class DummyProfiler(object): @@ -564,8 +564,6 @@ def wrapload(exported, file_type, **kwargs): ) -TemporaryDirectory = trimesh.util.TemporaryDirectory - # all the JSON files with truth data data = _load_data() diff --git a/trimesh/util.py b/trimesh/util.py index 0da9aa344..a2904829f 100644 --- a/trimesh/util.py +++ b/trimesh/util.py @@ -2286,27 +2286,6 @@ def __call__(self, key, *args, **kwargs): return self[key](*args, **kwargs) -class TemporaryDirectory: - """ - Same basic usage as tempfile.TemporaryDirectory - but functional in Python 2.7+. - - Example - --------- - ``` - with trimesh.util.TemporaryDirectory() as path: - writable = os.path.join(path, 'hi.txt') - ``` - """ - - def __enter__(self): - self.path = tempfile.mkdtemp() - return self.path - - def __exit__(self, *args, **kwargs): - shutil.rmtree(self.path) - - def decode_text(text, initial="utf-8"): """ Try to decode byte input as a string. 
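The `TemporaryDirectory` stub removed above existed only for Python 2.7 compatibility; the standard-library context manager covers the same use, and the follow-up patches migrate the remaining callers to it. A minimal sketch of that pattern, assuming nothing beyond `trimesh` and the standard library (the file name and tolerance are illustrative, not part of the patch):

import os
import tempfile

import trimesh

# tempfile.TemporaryDirectory removes the directory on context exit,
# which is exactly what the removed trimesh.util stub emulated
with tempfile.TemporaryDirectory() as path:
    file_path = os.path.join(path, "box.gltf")
    # export a unit box by file path
    trimesh.creation.box().export(file_path)
    # reload while the directory still exists
    reloaded = trimesh.load(file_path)
assert abs(reloaded.volume - 1.0) < 1e-6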
From 6210b25110d6c3eb06c696e1294fd326931635bd Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 18:55:57 -0400 Subject: [PATCH 097/144] remove unused import --- trimesh/util.py | 1 - 1 file changed, 1 deletion(-) diff --git a/trimesh/util.py b/trimesh/util.py index a2904829f..f252b7ad6 100644 --- a/trimesh/util.py +++ b/trimesh/util.py @@ -17,7 +17,6 @@ import logging import random import shutil -import tempfile import time import uuid import zipfile From ab6d61ed8d7490819d670ea8f6b3d65a273ede1c Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 19:07:40 -0400 Subject: [PATCH 098/144] use built-in temporarydirectory --- tests/test_gltf.py | 4 ++-- tests/test_obj.py | 4 ++-- trimesh/exchange/binvox.py | 3 ++- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/test_gltf.py b/tests/test_gltf.py index 5acb56ce0..0900f7b8e 100644 --- a/tests/test_gltf.py +++ b/tests/test_gltf.py @@ -927,7 +927,7 @@ def test_embed_buffer(self): } ) - with g.trimesh.util.TemporaryDirectory() as D: + with g.TemporaryDirectory() as D: path = g.os.path.join(D, "hi.gltf") scene.export(path) @@ -937,7 +937,7 @@ def test_embed_buffer(self): reloaded = g.trimesh.load(path) assert set(reloaded.geometry.keys()) == set(scene.geometry.keys()) - with g.trimesh.util.TemporaryDirectory() as D: + with g.TemporaryDirectory() as D: path = g.os.path.join(D, "hi.gltf") scene.export(path, embed_buffers=True) diff --git a/tests/test_obj.py b/tests/test_obj.py index cb5e940b4..2a0037ea6 100644 --- a/tests/test_obj.py +++ b/tests/test_obj.py @@ -180,7 +180,7 @@ def test_mtl(self): m = g.get_mesh("fuze.obj") # export the mesh including data obj, data = g.trimesh.exchange.export.export_obj(m, return_texture=True) - with g.trimesh.util.TemporaryDirectory() as path: + with g.TemporaryDirectory() as path: # where is the OBJ file going to be saved obj_path = g.os.path.join(path, "test.obj") with open(obj_path, "w") as f: @@ -308,7 +308,7 @@ def test_mtl_color_roundtrip(self): m.visual.material.diffuse = colors[2] m.visual.material.glossiness = 0.52622 - with g.trimesh.util.TemporaryDirectory() as d: + with g.TemporaryDirectory() as d: # exporting by filename will automatically # create a FilePathResolver which writes the # `mtl` file to the same directory diff --git a/trimesh/exchange/binvox.py b/trimesh/exchange/binvox.py index df5924113..8bcbd7b51 100644 --- a/trimesh/exchange/binvox.py +++ b/trimesh/exchange/binvox.py @@ -9,6 +9,7 @@ import collections import os import subprocess +from tempfile import TemporaryDirectory import numpy as np @@ -565,7 +566,7 @@ def voxelize_mesh(mesh, if binvoxer.file_type != 'binvox': raise ValueError( 'Only "binvox" binvoxer `file_type` currently supported') - with util.TemporaryDirectory() as folder: + with TemporaryDirectory() as folder: model_path = os.path.join(folder, 'model.%s' % export_type) with open(model_path, 'wb') as fp: mesh.export(fp, file_type=export_type) From 7c1c45d2e61ce5ab2d1fa43bf0edffa2b52306b5 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 20:24:16 -0400 Subject: [PATCH 099/144] exit temporary directory for windows --- tests/test_gltf.py | 4 + tests/test_graph.py | 112 ++++++++++++++------------- trimesh/graph.py | 180 +++++++++++++++++--------------------------- 3 files changed, 129 insertions(+), 167 deletions(-) diff --git a/tests/test_gltf.py b/tests/test_gltf.py index 0900f7b8e..3c8e07f9d 100644 --- a/tests/test_gltf.py +++ b/tests/test_gltf.py @@ -965,6 +965,7 @@ def 
test_webp(self):
     def test_relative_paths(self):
         # try with a relative path
+        cwd = g.os.path.abspath(g.os.path.expanduser("."))
         with g.TemporaryDirectory() as d:
             g.os.makedirs(g.os.path.join(d, "fused"))
             g.os.chdir(d)
@@ -972,6 +973,9 @@ def test_relative_paths(self):
             r = g.trimesh.load("fused/hi.gltf")
             assert g.np.isclose(r.volume, 1.0)
 
+        # avoid a windows file-access error
+        g.os.chdir(cwd)
+
         with g.TemporaryDirectory() as d:
             # now try it without changing to that directory
             full = g.os.path.join(d, "hi", "there", "different", "levels")
diff --git a/tests/test_graph.py b/tests/test_graph.py
index 402ab2f2c..a389e2383 100644
--- a/tests/test_graph.py
+++ b/tests/test_graph.py
@@ -5,13 +5,20 @@
 
 
 class GraphTest(g.unittest.TestCase):
-
     def setUp(self):
-        self.engines = ['scipy', 'networkx']
+        self.engines = []
+        try:
+            self.engines.append("scipy")
+        except BaseException:
+            pass
+        try:
+            self.engines.append("networkx")
+        except BaseException:
+            pass
 
     def test_soup(self):
         # a soup of random triangles, with no adjacent pairs
-        soup = g.get_mesh('soup.stl')
+        soup = g.get_mesh("soup.stl")
 
         assert len(soup.face_adjacency) == 0
         assert len(soup.face_adjacency_radius) == 0
@@ -23,13 +30,13 @@
 
     def test_components(self):
         # a soup of random triangles, with no adjacent pairs
-        soup = g.get_mesh('soup.stl')
+        soup = g.get_mesh("soup.stl")
         # a mesh with multiple watertight bodies
-        mult = g.get_mesh('cycloidal.ply')
+        mult = g.get_mesh("cycloidal.ply")
         # a mesh with a single watertight body
-        sing = g.get_mesh('featuretype.STL')
+        sing = g.get_mesh("featuretype.STL")
         # mesh with a single tetrahedron
-        tet = g.get_mesh('tet.ply')
+        tet = g.get_mesh("tet.ply")
 
         for engine in self.engines:
             # without requiring watertight the split should be into every face
@@ -77,7 +84,7 @@
 
     def test_vertex_adjacency_graph(self):
         f = g.trimesh.graph.vertex_adjacency_graph
 
         # a mesh with a single watertight body
-        sing = g.get_mesh('featuretype.STL')
+        sing = g.get_mesh("featuretype.STL")
         vert_adj_g = f(sing)
         assert len(sing.vertices) == len(vert_adj_g)
 
@@ -89,36 +96,35 @@ def test_engine_time(self):
                 g.trimesh.graph.facets(mesh=mesh, engine=engine)
                 tic.append(g.time.time())
 
-            tic_diff = g.np.diff(tic)
-            tic_min = tic_diff.min()
-            tic_diff /= tic_min
-            g.log.info('graph engine on %s (scale %f sec):\n%s',
-                       mesh.metadata['file_name'],
-                       tic_min,
-                       str(g.np.column_stack((self.engines,
-                                              tic_diff))))
+            diff = g.np.abs(g.np.diff(tic))
+            if diff.min() > 0.0:
+                diff /= diff.min()
+
+            g.log.info(
+                "graph engine on %s (scale %f sec):\n%s",
+                mesh.metadata["file_name"],
+                diff.min(),
+                str(g.np.column_stack((self.engines, diff))),
+            )
 
     def test_smoothed(self):
         # Make sure smoothing is keeping the same number
         # of faces.
- for name in ['ADIS16480.STL', 'featuretype.STL']: + for name in ["ADIS16480.STL", "featuretype.STL"]: mesh = g.get_mesh(name) assert len(mesh.faces) == len(mesh.smoothed().faces) def test_engines(self): edges = g.np.arange(10).reshape((-1, 2)) for i in range(0, 20): - check_engines(nodes=g.np.arange(i), - edges=edges) - edges = g.np.column_stack((g.np.arange(1, 11), - g.np.arange(0, 10))) + check_engines(nodes=g.np.arange(i), edges=edges) + edges = g.np.column_stack((g.np.arange(1, 11), g.np.arange(0, 10))) for i in range(0, 20): - check_engines(nodes=g.np.arange(i), - edges=edges) + check_engines(nodes=g.np.arange(i), edges=edges) def test_watertight(self): - m = g.get_mesh('shared.STL') # NOQA + m = g.get_mesh("shared.STL") # NOQA # assert m.is_watertight # assert m.is_winding_consistent # assert m.is_volume @@ -128,15 +134,12 @@ def test_traversals(self): # generate some simple test data simple_nodes = g.np.arange(20) - simple_edges = g.np.column_stack((simple_nodes[:-1], - simple_nodes[1:])) - simple_edges = g.np.vstack(( - simple_edges, - [[19, 0], - [10, 1000], - [500, 501]])).astype(g.np.int64) - - all_edges = g.data['edges'] + simple_edges = g.np.column_stack((simple_nodes[:-1], simple_nodes[1:])) + simple_edges = g.np.vstack( + (simple_edges, [[19, 0], [10, 1000], [500, 501]]) + ).astype(g.np.int64) + + all_edges = g.data["edges"] all_edges.append(simple_edges) for edges in all_edges: @@ -147,8 +150,8 @@ def test_traversals(self): nodes = g.np.unique(edges) # the basic BFS/DFS traversal - dfs_basic = g.trimesh.graph.traversals(edges, 'dfs') - bfs_basic = g.trimesh.graph.traversals(edges, 'bfs') + dfs_basic = g.trimesh.graph.traversals(edges, "dfs") + bfs_basic = g.trimesh.graph.traversals(edges, "bfs") # check return types assert all(i.dtype == g.np.int64 for i in dfs_basic) assert all(i.dtype == g.np.int64 for i in bfs_basic) @@ -169,15 +172,13 @@ def test_traversals(self): dfs = g.trimesh.graph.fill_traversals(traversal, edges) # edges that are included in the new separated traversal inc = g.trimesh.util.vstack_empty( - [g.np.column_stack((i[:-1], i[1:])) - for i in dfs]) + [g.np.column_stack((i[:-1], i[1:])) for i in dfs] + ) # make a set from edges included in the traversal - inc_set = set(g.trimesh.grouping.hashable_rows( - g.np.sort(inc, axis=1))) + inc_set = set(g.trimesh.grouping.hashable_rows(g.np.sort(inc, axis=1))) # make a set of the source edges we were supposed to include - edge_set = set(g.trimesh.grouping.hashable_rows( - g.np.sort(edges, axis=1))) + edge_set = set(g.trimesh.grouping.hashable_rows(g.np.sort(edges, axis=1))) # we should have exactly the same edges # after the filled traversal as we started with @@ -192,7 +193,7 @@ def test_traversals(self): def test_adjacency(self): for add_degen in [False, True]: - for name in ['featuretype.STL', 'soup.stl']: + for name in ["featuretype.STL", "soup.stl"]: m = g.get_mesh(name) if add_degen: # make the first face degenerate @@ -202,15 +203,13 @@ def test_adjacency(self): # check the various paths of calling face adjacency a = g.trimesh.graph.face_adjacency( - m.faces.view(g.np.ndarray).copy(), - return_edges=False) + m.faces.view(g.np.ndarray).copy(), return_edges=False + ) b, be = g.trimesh.graph.face_adjacency( - m.faces.view(g.np.ndarray).copy(), - return_edges=True) - c = g.trimesh.graph.face_adjacency( - mesh=m, return_edges=False) - c, ce = g.trimesh.graph.face_adjacency( - mesh=m, return_edges=True) + m.faces.view(g.np.ndarray).copy(), return_edges=True + ) + c = g.trimesh.graph.face_adjacency(mesh=m, 
return_edges=False)
+            c, ce = g.trimesh.graph.face_adjacency(mesh=m, return_edges=True)
             # make sure they all return the expected result
             assert g.np.allclose(a, b)
             assert g.np.allclose(a, c)
@@ -218,9 +217,9 @@
                 assert len(ce) == len(a)
 
                 # package properties to loop through
-                zips = zip(m.face_adjacency,
-                           m.face_adjacency_edges,
-                           m.face_adjacency_unshared)
+                zips = zip(
+                    m.face_adjacency, m.face_adjacency_edges, m.face_adjacency_unshared
+                )
                 for a, e, v in zips:
                     # get two adjacent faces as a set
                     fa = set(m.faces[a[0]])
@@ -249,11 +248,10 @@ def check_engines(edges, nodes):
    returning the exact same values
    """
    results = []
-    engines = [None, 'scipy', 'networkx']
+    engines = [None, "scipy", "networkx"]
 
    for engine in engines:
-        c = g.trimesh.graph.connected_components(
-            edges, nodes=nodes, engine=engine)
+        c = g.trimesh.graph.connected_components(edges, nodes=nodes, engine=engine)
 
        if len(c) > 0:
            # check to see if every resulting component
            # was in the passed set of nodes
@@ -270,6 +268,6 @@ def check_engines(edges, nodes):
        raise E
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
    g.trimesh.util.attach_to_log()
    g.unittest.main()
diff --git a/trimesh/graph.py b/trimesh/graph.py
index 6ce4f1652..a816402d6 100644
--- a/trimesh/graph.py
+++ b/trimesh/graph.py
@@ -31,9 +31,7 @@
     nx = exceptions.ExceptionWrapper(E)
 
 
-def face_adjacency(faces=None,
-                   mesh=None,
-                   return_edges=False):
+def face_adjacency(faces=None, mesh=None, return_edges=False):
     """
     Returns an (n, 2) list of face indices.
     Each pair of faces in the list shares an edge, making them adjacent.
@@ -89,7 +87,7 @@ def face_adjacency(faces=None,
     edge_groups = grouping.group_rows(edges, require_count=2)
 
     if len(edge_groups) == 0:
-        log.debug('No adjacent faces detected! Did you merge vertices?')
+        log.debug("No adjacent faces detected! Did you merge vertices?")
 
     # the pairs of all adjacent faces
     # so for every row in face_idx, self.faces[face_idx[*][0]] and
@@ -153,8 +151,7 @@ def face_adjacency_unshared(mesh):
 
     # the non-shared vertex index is the same shape
     # as face_adjacency holding vertex indices vs face indices
-    vid_unshared = np.zeros_like(mesh.face_adjacency,
-                                 dtype=np.int64) - 1
+    vid_unshared = np.zeros_like(mesh.face_adjacency, dtype=np.int64) - 1
     # get the shared edges between adjacent faces
     edges = mesh.face_adjacency_edges
 
@@ -164,9 +161,12 @@
     faces = mesh.faces[fid]
     # should have one True per row of (3,)
     # index of vertex not included in shared edge
-    unshared = np.logical_not(np.logical_or(
-        faces == edges[:, 0].reshape((-1, 1)),
-        faces == edges[:, 1].reshape((-1, 1))))
+    unshared = np.logical_not(
+        np.logical_or(
+            faces == edges[:, 0].reshape((-1, 1)),
+            faces == edges[:, 1].reshape((-1, 1)),
+        )
+    )
     # each row should have exactly one uncontained vertex
     row_ok = unshared.sum(axis=1) == 1
     # any degenerate row should be ignored
@@ -199,27 +199,23 @@ def face_adjacency_radius(mesh):
     #          distance
     # R = ---------------
     #     2 * sin(theta)
-    nonzero = mesh.face_adjacency_angles > np.radians(.01)
-    denominator = np.abs(
-        2.0 * np.sin(mesh.face_adjacency_angles[nonzero]))
+    nonzero = mesh.face_adjacency_angles > np.radians(0.01)
+    denominator = np.abs(2.0 * np.sin(mesh.face_adjacency_angles[nonzero]))
 
     # consider the distance between the non-shared vertices of the
     # face adjacency pair as the key distance
     point_pairs = mesh.vertices[mesh.face_adjacency_unshared]
-    vectors = np.diff(point_pairs,
-                      axis=1).reshape((-1, 3))
+    vectors = np.diff(point_pairs, axis=1).reshape((-1, 3))
 
     # the vertex indices of the shared edge for the adjacency pair
     edges = mesh.face_adjacency_edges
     # unit vector along the shared edge
-    edges_vec = util.unitize(np.diff(mesh.vertices[edges],
-                                     axis=1).reshape((-1, 3)))
+    edges_vec = util.unitize(np.diff(mesh.vertices[edges], axis=1).reshape((-1, 3)))
 
     # the vector of the perpendicular projection to the shared edge
     perp = np.subtract(
-        vectors, (util.diagonal_dot(
-            vectors, edges_vec).reshape(
-            (-1, 1)) * edges_vec))
+        vectors, (util.diagonal_dot(vectors, edges_vec).reshape((-1, 1)) * edges_vec)
+    )
     # the length of the perpendicular projection
     span = util.row_norm(perp)
 
@@ -276,8 +272,7 @@ def shared_edges(faces_a, faces_b):
     """
     e_a = np.sort(faces_to_edges(faces_a), axis=1)
     e_b = np.sort(faces_to_edges(faces_b), axis=1)
-    shared = grouping.boolean_rows(
-        e_a, e_b, operation=np.intersect1d)
+    shared = grouping.boolean_rows(e_a, e_b, operation=np.intersect1d)
     return shared
 
 
@@ -314,15 +309,15 @@ def facets(mesh, engine=None):
         # if span is zero we know faces are small/parallel
         nonzero = np.abs(span) > tol.zero
         # faces with a radii/span ratio larger than a threshold pass
-        parallel[nonzero] = (radii[nonzero] /
-                             span[nonzero]) ** 2 > tol.facet_threshold
+        parallel[nonzero] = (radii[nonzero] / span[nonzero]) ** 2 > tol.facet_threshold
 
     # run connected components on the parallel faces to group them
     components = connected_components(
         mesh.face_adjacency[parallel],
         nodes=np.arange(len(mesh.faces)),
         min_len=2,
-        engine=engine)
+        engine=engine,
+    )
 
     return components
 
@@ -361,19 +356,13 @@ def split(mesh, only_watertight=True, adjacency=None, engine=None, **kwargs):
         min_len = 1
 
     components = connected_components(
-        edges=adjacency,
-        nodes=np.arange(len(mesh.faces)),
-        min_len=min_len,
-        engine=engine)
-    meshes = mesh.submesh(
-        components, 
only_watertight=only_watertight, **kwargs) + edges=adjacency, nodes=np.arange(len(mesh.faces)), min_len=min_len, engine=engine + ) + meshes = mesh.submesh(components, only_watertight=only_watertight, **kwargs) return meshes -def connected_components(edges, - min_len=1, - nodes=None, - engine=None): +def connected_components(edges, min_len=1, nodes=None, engine=None): """ Find groups of connected nodes from an edge list. @@ -395,6 +384,7 @@ def connected_components(edges, components : (n,) sequence of (*,) int Nodes which are connected """ + def components_networkx(): """ Find connected components using networkx @@ -411,8 +401,7 @@ def components_csgraph(): Find connected components using scipy.sparse.csgraph """ # label each node - labels = connected_component_labels(edges, - node_count=node_count) + labels = connected_component_labels(edges, node_count=node_count) # we have to remove results that contain nodes outside # of the specified node set and reindex @@ -440,7 +429,7 @@ def components_csgraph(): return [] if not util.is_shape(edges, (-1, 2)): - raise ValueError('edges must be (n, 2)!') + raise ValueError("edges must be (n, 2)!") # find the maximum index referenced in either nodes or edges counts = [0] @@ -457,9 +446,9 @@ def components_csgraph(): edges = edges[edges_ok] # networkx is pure python and is usually 5-10x slower than scipy - engines = collections.OrderedDict(( - ('scipy', components_csgraph), - ('networkx', components_networkx))) + engines = collections.OrderedDict( + (("scipy", components_csgraph), ("networkx", components_networkx)) + ) # if a graph engine has explicitly been requested use it if engine in engines: @@ -473,7 +462,7 @@ def components_csgraph(): # will be raised if the library didn't import correctly above except BaseException: continue - raise ImportError('no graph engines available!') + raise ImportError("no graph engines available!") def connected_component_labels(edges, node_count=None): @@ -493,8 +482,7 @@ def connected_component_labels(edges, node_count=None): Component labels for each node """ matrix = edges_to_coo(edges, node_count) - body_count, labels = csgraph.connected_components( - matrix, directed=False) + body_count, labels = csgraph.connected_components(matrix, directed=False) if node_count is not None: assert len(labels) == node_count @@ -502,9 +490,7 @@ def connected_component_labels(edges, node_count=None): return labels -def split_traversal(traversal, - edges, - edges_hash=None): +def split_traversal(traversal, edges, edges_hash=None): """ Given a traversal as a list of nodes, split the traversal if a sequential index pair is not in the given edges. 
@@ -523,20 +509,16 @@ def split_traversal(traversal, --------------- split : sequence of (p,) int """ - traversal = np.asanyarray(traversal, - dtype=np.int64) + traversal = np.asanyarray(traversal, dtype=np.int64) # hash edge rows for contains checks if edges_hash is None: - edges_hash = grouping.hashable_rows( - np.sort(edges, axis=1)) + edges_hash = grouping.hashable_rows(np.sort(edges, axis=1)) # turn the (n,) traversal into (n-1, 2) edges - trav_edge = np.column_stack((traversal[:-1], - traversal[1:])) + trav_edge = np.column_stack((traversal[:-1], traversal[1:])) # hash each edge so we can compare to edge set - trav_hash = grouping.hashable_rows( - np.sort(trav_edge, axis=1)) + trav_hash = grouping.hashable_rows(np.sort(trav_edge, axis=1)) # check if each edge is contained in edge set contained = np.in1d(trav_hash, edges_hash) @@ -546,14 +528,10 @@ def split_traversal(traversal, split = [traversal] else: # find contiguous groups of contained edges - blocks = grouping.blocks(contained, - min_len=1, - only_nonzero=True) + blocks = grouping.blocks(contained, min_len=1, only_nonzero=True) # turn edges back in to sequence of traversals - split = [np.append(trav_edge[b][:, 0], - trav_edge[b[-1]][1]) - for b in blocks] + split = [np.append(trav_edge[b][:, 0], trav_edge[b[-1]][1]) for b in blocks] # close traversals if necessary for i, t in enumerate(split): @@ -612,22 +590,17 @@ def fill_traversals(traversals, edges, edges_hash=None): for nodes in traversals: # split traversals to remove edges # that don't actually exist - splits.extend(split_traversal( - traversal=nodes, - edges=edges, - edges_hash=edges_hash)) + splits.extend( + split_traversal(traversal=nodes, edges=edges, edges_hash=edges_hash) + ) # turn the split traversals back into (n, 2) edges - included = util.vstack_empty([np.column_stack((i[:-1], i[1:])) - for i in splits]) + included = util.vstack_empty([np.column_stack((i[:-1], i[1:])) for i in splits]) if len(included) > 0: # sort included edges in place included.sort(axis=1) # make sure any edges not included in split traversals # are just added as a length 2 traversal - splits.extend(grouping.boolean_rows( - edges, - included, - operation=np.setdiff1d)) + splits.extend(grouping.boolean_rows(edges, included, operation=np.setdiff1d)) else: # no edges were included, so our filled traversal # is just the original edges copied over @@ -636,7 +609,7 @@ def fill_traversals(traversals, edges, edges_hash=None): return splits -def traversals(edges, mode='bfs'): +def traversals(edges, mode="bfs"): """ Given an edge list generate a sequence of ordered depth first search traversals using scipy.csgraph routines. 
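To make the reformatted traversal helpers concrete, a small hedged sketch of calling them directly; the edge values here are illustrative, and the node order within each component depends on the csgraph search:

    import numpy as np
    import trimesh

    # a chain 0-1-2-3 plus a disconnected pair 10-11
    edges = np.array([[0, 1], [1, 2], [2, 3], [10, 11]], dtype=np.int64)

    # one ordered (n,) int64 array per connected region
    ordered = trimesh.graph.traversals(edges, mode="bfs")

    # the same nodes grouped into connected components
    components = trimesh.graph.connected_components(edges, min_len=1)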
@@ -657,16 +630,16 @@ def traversals(edges, mode='bfs'): if len(edges) == 0: return [] elif not util.is_shape(edges, (-1, 2)): - raise ValueError('edges are not (n, 2)!') + raise ValueError("edges are not (n, 2)!") # pick the traversal method mode = str(mode).lower().strip() - if mode == 'bfs': + if mode == "bfs": func = csgraph.breadth_first_order - elif mode == 'dfs': + elif mode == "dfs": func = csgraph.depth_first_order else: - raise ValueError('traversal mode must be either dfs or bfs') + raise ValueError("traversal mode must be either dfs or bfs") # make sure edges are sorted so we can query # an ordered pair later @@ -683,10 +656,9 @@ def traversals(edges, mode='bfs'): # starting at any node start = nodes.pop() # get an (n,) ordered traversal - ordered = func(graph, - i_start=start, - return_predecessors=False, - directed=False).astype(np.int64) + ordered = func( + graph, i_start=start, return_predecessors=False, directed=False + ).astype(np.int64) traversals.append(ordered) # remove the nodes we've consumed @@ -717,9 +689,8 @@ def edges_to_coo(edges, count=None, data=None): Sparse COO """ edges = np.asanyarray(edges, dtype=np.int64) - if not (len(edges) == 0 or - util.is_shape(edges, (-1, 2))): - raise ValueError('edges must be (n, 2)!') + if not (len(edges) == 0 or util.is_shape(edges, (-1, 2))): + raise ValueError("edges must be (n, 2)!") # if count isn't specified just set it to largest # value referenced in edges @@ -732,9 +703,7 @@ def edges_to_coo(edges, count=None, data=None): if data is None: data = np.ones(len(edges), dtype=bool) - matrix = coo_matrix((data, edges.T), - dtype=data.dtype, - shape=(count, count)) + matrix = coo_matrix((data, edges.T), dtype=data.dtype, shape=(count, count)) return matrix @@ -758,12 +727,12 @@ def neighbors(edges, max_index=None, directed=False): """ neighbors = collections.defaultdict(set) if directed: - [neighbors[edge[0]].add(edge[1]) - for edge in edges] + [neighbors[edge[0]].add(edge[1]) for edge in edges] else: - [(neighbors[edge[0]].add(edge[1]), - neighbors[edge[1]].add(edge[0])) - for edge in edges] + [ + (neighbors[edge[0]].add(edge[1]), neighbors[edge[1]].add(edge[0])) + for edge in edges + ] if max_index is None: max_index = edges.max() + 1 @@ -817,26 +786,20 @@ def smoothed(mesh, angle=None, facet_minarea=10): try: # we can survive not knowing facets # exclude facets with few faces - facets = [f for f in mesh.facets - if areas[f].sum() > min_area] + facets = [f for f in mesh.facets if areas[f].sum() > min_area] if len(facets) > 0: # mask for removing adjacency pairs where # one of the faces is contained in a facet - mask = np.ones(len(mesh.faces), - dtype=bool) + mask = np.ones(len(mesh.faces), dtype=bool) mask[np.hstack(facets)] = False # apply the mask to adjacency adjacency = adjacency[mask[adjacency].all(axis=1)] # nodes are no longer every faces nodes = np.unique(adjacency) except BaseException: - log.warning('failed to calculate facets', - exc_info=True) + log.warning("failed to calculate facets", exc_info=True) # run connected components on facet adjacency - components = connected_components( - adjacency, - min_len=2, - nodes=nodes) + components = connected_components(adjacency, min_len=2, nodes=nodes) # add back coplanar groups if any exist if len(facets) > 0: @@ -852,19 +815,16 @@ def smoothed(mesh, angle=None, facet_minarea=10): if len(unique) != len(mesh.faces): # things like single loose faces # or groups below facet_minlen - broke = np.setdiff1d( - np.arange(len(mesh.faces)), unique) + broke = 
np.setdiff1d(np.arange(len(mesh.faces)), unique) components.extend(broke.reshape((-1, 1))) # get a submesh as a single appended Trimesh - smooth = mesh.submesh(components, - only_watertight=False, - append=True) + smooth = mesh.submesh(components, only_watertight=False, append=True) # store face indices from original mesh - smooth.metadata['original_components'] = components + smooth.metadata["original_components"] = components # smoothed should have exactly the same number of faces if len(smooth.faces) != len(mesh.faces): - log.warning('face count in smooth wrong!') + log.warning("face count in smooth wrong!") return smooth @@ -890,8 +850,7 @@ def is_watertight(edges, edges_sorted=None): edges_sorted = np.sort(edges, axis=1) # group sorted edges - groups = grouping.group_rows( - edges_sorted, require_count=2) + groups = grouping.group_rows(edges_sorted, require_count=2) watertight = bool((len(groups) * 2) == len(edges)) # are opposing edges reversed @@ -918,9 +877,10 @@ def graph_to_svg(graph): import subprocess import tempfile + with tempfile.NamedTemporaryFile() as dot_file: nx.drawing.nx_agraph.write_dot(graph, dot_file.name) - svg = subprocess.check_output(['dot', dot_file.name, '-Tsvg']) + svg = subprocess.check_output(["dot", dot_file.name, "-Tsvg"]) return svg From 640ca9ef15a079bb53bd3fa2ad36ae9f5f6cd62e Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 21:21:47 -0400 Subject: [PATCH 100/144] fix version logic in CI --- .github/workflows/release.yml | 2 +- .github/workflows/test.yml | 4 ++-- Dockerfile | 5 ++--- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 46ab54f14..4b8787058 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -132,7 +132,7 @@ jobs: - name: Tag Version id: set_tag run: | - export VER=$(python trimesh/version.py) + export VER=$(python -c "print(eval(next(L.split('=')[1] for L in open('pyproject.toml') if 'version' in L)))") echo "::set-output name=tag_name::${VER}" - name: Create Release id: create_release diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 992b7f9d1..ddc3873ea 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -63,10 +63,10 @@ jobs: with: path: ~/.trimesh-cache key: trimesh-cache - - name: Set up Python 3.10 + - name: Set up Python 3.11 uses: actions/setup-python@v4 with: - python-version: "3.10" + python-version: "3.11" - name: Install Trimesh run: pip install .[easy,test] - name: Run Corpus Check diff --git a/Dockerfile b/Dockerfile index 29f2b4aa8..e2512e288 100644 --- a/Dockerfile +++ b/Dockerfile @@ -30,8 +30,7 @@ USER user # then delete any included test directories # and remove Cython after all the building is complete RUN pip install --user /home/user[easy] && \ - find /home/user/.local -type d -name tests -prune -exec rm -rf {} \; && \ - pip uninstall -y cython + find /home/user/.local -type d -name tests -prune -exec rm -rf {} \; #################################### ### Build output image most things should run on @@ -68,7 +67,7 @@ RUN trimesh-setup --install=test,gltf_validator,llvmpipe,binvox USER user # install things like pytest -RUN pip install -e .[all,easy,recommend,test] +RUN pip install -e .[all] # run pytest wrapped with xvfb for simple viewer tests RUN xvfb-run pytest --cov=trimesh \ From 642241eadfd776880dedd3340076c9d59e884a3c Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 19 Sep 2023 21:54:22 -0400 Subject: [PATCH 
101/144] make version.py more complicated

---
 .github/workflows/release.yml |  2 +-
 trimesh/version.py            | 68 ++++++++++++++++++++++++++++++----
 2 files changed, 61 insertions(+), 9 deletions(-)

diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 4b8787058..46ab54f14 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -132,7 +132,7 @@ jobs:
     - name: Tag Version
       id: set_tag
       run: |
-        export VER=$(python -c "print(eval(next(L.split('=')[1] for L in open('pyproject.toml') if 'version' in L)))")
+        export VER=$(python trimesh/version.py)
         echo "::set-output name=tag_name::${VER}"
     - name: Create Release
       id: create_release
diff --git a/trimesh/version.py b/trimesh/version.py
index 032e7c9d5..32f93f3f1 100644
--- a/trimesh/version.py
+++ b/trimesh/version.py
@@ -1,13 +1,65 @@
-# get the version trimesh was installed with from metadata
-try:
-    # Python >= 3.8
+"""
+# version.py
+
+Get the current version from package metadata or pyproject.toml
+if everything else fails.
+"""
+
+
+def _get_version():
+    """
+    Try all our methods to get the version.
+    """
+    for method in [_importlib, _pkgresources, _pyproject]:
+        try:
+            return method()
+        except BaseException:
+            pass
+    return None
+
+
+def _importlib() -> str:
+    """
+    Get the version string using package metadata on Python >= 3.8
+    """
+
     from importlib.metadata import version
-    __version__ = version('trimesh')
-except BaseException:
-    # Python < 3.8
+
+    return version("trimesh")
+
+
+def _pkgresources() -> str:
+    """
+    Get the version string using package metadata on Python < 3.8
+    """
     from pkg_resources import get_distribution
-    __version__ = get_distribution('trimesh').version
 
-if __name__ == '__main__':
+    return get_distribution("trimesh").version
+
+
+def _pyproject() -> str:
+    """
+    Get the version string from the pyproject.toml file.
+    """
+    import json
+    import os
+
+    # use a path relative to this file
+    pyproject = os.path.abspath(
+        os.path.join(
+            os.path.dirname(os.path.abspath(os.path.expanduser(__file__))),
+            "..",
+            "pyproject.toml",
+        )
+    )
+    with open(pyproject) as f:
+        # json.loads cleans up the string and removes the quotes
+        return next(json.loads(L.split("=")[1]) for L in f if "version" in L)
+
+
+# try all our tricks
+__version__ = _get_version()
+
+if __name__ == "__main__":
     # print version if run directly i.e. in a CI script
     print(__version__)

From a09d6fea186264d248b3513b0b8f0c67d6ec5fc2 Mon Sep 17 00:00:00 2001
From: Michael Dawson-Haggerty
Date: Wed, 20 Sep 2023 14:27:05 -0400
Subject: [PATCH 102/144] update docs

---
 README.md                         |  4 ++--
 docs/conf.py                      | 12 ++---------
 docs/examples.py                  | 35 +++++++++++++++----------------
 docs/{ => guides}/contributing.md |  0
 docs/{ => guides}/docker.md       |  0
 docs/{ => guides}/install.md      |  0
 docs/index.rst                    |  9 +++-----
 docs/requirements.txt             |  6 +++---
 8 files changed, 27 insertions(+), 39 deletions(-)
 rename docs/{ => guides}/contributing.md (100%)
 rename docs/{ => guides}/docker.md (100%)
 rename docs/{ => guides}/install.md (100%)

diff --git a/README.md b/README.md
index 104ae2b04..edc805641 100644
--- a/README.md
+++ b/README.md
@@ -7,9 +7,9 @@
 
 |  :warning: WARNING |
 |---------------------------|
-| `trimesh >= 4.0.0` on `main` makes minimum Python 3.7 and is in pre-release |
+| `trimesh >= 4.0.0` on `release-candidate` makes the minimum Python 3.7 and is in pre-release |
 | Testing with `pip install --pre trimesh` would be much appreciated!
| -| Projects that support Python<3.7 should update requirement to `trimesh<4` | +| Projects that support `python<3.7` should update requirement to `trimesh<4` | Trimesh is a pure Python 3.7+ library for loading and using [triangular meshes](https://en.wikipedia.org/wiki/Triangle_mesh) with an emphasis on watertight surfaces. The goal of the library is to provide a full featured and well tested Trimesh object which allows for easy manipulation and analysis, in the style of the Polygon object in the [Shapely library](https://github.com/Toblerity/Shapely). diff --git a/docs/conf.py b/docs/conf.py index 62aa3a568..8143a655b 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -61,7 +61,7 @@ def abspath(rel): # for a list of supported languages. # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = None +language = "en" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. @@ -79,17 +79,9 @@ def abspath(rel): # The theme to use for HTML and HTML Help pages html_theme = "furo" -# options for rtd-theme +# options for furo html_theme_options = { "display_version": True, - "prev_next_buttons_location": "bottom", - "style_external_links": False, - # toc options - "collapse_navigation": True, - "sticky_navigation": True, - "navigation_depth": 4, - "includehidden": True, - "titles_only": False, } # Add any paths that contain custom static files (such as style sheets) here, diff --git a/docs/examples.py b/docs/examples.py index a1e90570a..4106e6525 100644 --- a/docs/examples.py +++ b/docs/examples.py @@ -11,21 +11,18 @@ import os import sys -log = logging.getLogger('trimesh') +log = logging.getLogger("trimesh") log.addHandler(logging.StreamHandler(sys.stdout)) log.setLevel(logging.DEBUG) # current working directory -pwd = os.path.abspath(os.path.expanduser( - os.path.dirname(__file__))) +pwd = os.path.abspath(os.path.expanduser(os.path.dirname(__file__))) # where are our notebooks to render -source = os.path.abspath(os.path.join( - pwd, '..', 'examples')) +source = os.path.abspath(os.path.join(pwd, "..", "examples")) # which index file are we generating -target = os.path.abspath(os.path.join( - pwd, "examples.md")) +target = os.path.abspath(os.path.join(pwd, "examples.md")) def extract_docstring(loaded): @@ -45,21 +42,23 @@ def extract_docstring(loaded): Cleaned up docstring. 
""" - source = loaded['cells'][0]['source'] + source = loaded["cells"][0]["source"] assert source[0].strip() == '"""' assert source[-1].strip() == '"""' - return ' '.join(i.strip() for i in source[1:-1]) + return " ".join(i.strip() for i in source[1:-1]) -if __name__ == '__main__': - - markdown = ['# Examples', - 'Several examples are available as rendered IPython notebooks.', '', ] +if __name__ == "__main__": + markdown = [ + "# Examples", + "Several examples are available as rendered IPython notebooks.", + "", + ] for fn in os.listdir(source): - if not fn.lower().endswith('.ipynb'): + if not fn.lower().endswith(".ipynb"): continue path = os.path.join(source, fn) with open(path) as f: @@ -68,10 +67,10 @@ def extract_docstring(loaded): log.info(f'`{fn}`: "{doc}"\n') link = f'examples.{fn.split(".")[0]}.html' - markdown.append(f'### [{fn}]({link})') + markdown.append(f"### [{fn}]({link})") markdown.append(doc) - markdown.append('') + markdown.append("") - final = '\n'.join(markdown) - with open(target, 'w') as f: + final = "\n".join(markdown) + with open(target, "w") as f: f.write(final) diff --git a/docs/contributing.md b/docs/guides/contributing.md similarity index 100% rename from docs/contributing.md rename to docs/guides/contributing.md diff --git a/docs/docker.md b/docs/guides/docker.md similarity index 100% rename from docs/docker.md rename to docs/guides/docker.md diff --git a/docs/install.md b/docs/guides/install.md similarity index 100% rename from docs/install.md rename to docs/guides/install.md diff --git a/docs/index.rst b/docs/index.rst index 32e6a3a71..3b7888f2c 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,6 +1,3 @@ - .. toctree:: - :maxdepth: 2 - .. include:: README.rst Links @@ -13,7 +10,7 @@ Install .. toctree:: :maxdepth: 2 - install.md + guides/install.md Examples ========== @@ -27,14 +24,14 @@ Contributing .. toctree:: :maxdepth: 1 - Contributing + Contributing Docker ========== .. toctree:: :maxdepth: 1 - Docker + Docker API Reference ============= diff --git a/docs/requirements.txt b/docs/requirements.txt index 2121f623c..c27590be5 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -3,11 +3,11 @@ recommonmark==0.7.1 jupyter==1.0.0 # get sphinx version range from furo install -furo==2023.8.19 +furo==2023.9.10 myst-parser==2.0.0 pyopenssl==23.2.0 autodocsumm==0.2.11 jinja2==3.1.2 -matplotlib==3.7.2 -nbconvert==7.7.4 +matplotlib==3.8.0 +nbconvert==7.8.0 From b1d4b5f383b4cad504f07d1842137ccfa50976b0 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Wed, 20 Sep 2023 14:51:15 -0400 Subject: [PATCH 103/144] ruff on merges --- pyproject.toml | 2 +- trimesh/path/packing.py | 4 ++-- trimesh/visual/material.py | 46 +++++++++++++++++++------------------- 3 files changed, 26 insertions(+), 26 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index d7096938d..fcfe6a9bc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ requires = ["setuptools >= 61.0", "wheel"] [project] name = "trimesh" requires-python = ">=3.7" -version = "4.0.0.rc0" +version = "4.0.0.rc1" authors = [{name = "Michael Dawson-Haggerty", email = "mikedh@kerfed.com"}] license = {text = "MIT"} description = "Import, export, process, analyze and view triangular meshes." 
diff --git a/trimesh/path/packing.py b/trimesh/path/packing.py
index 2b917adea..9c6de3e8e 100644
--- a/trimesh/path/packing.py
+++ b/trimesh/path/packing.py
@@ -459,8 +459,8 @@ def rectangles(extents,
         # run a single insertion order
         # don't shuffle the first run, shuffle subsequent runs
         bounds, insert = rectangles_single(
-            extents=extents, 
-            size=size, 
+            extents=extents,
+            size=size,
             shuffle=(i != 0),
             rotate=rotate
         )
diff --git a/trimesh/visual/material.py b/trimesh/visual/material.py
index 9d07911d1..fd270cefc 100644
--- a/trimesh/visual/material.py
+++ b/trimesh/visual/material.py
@@ -724,7 +724,7 @@ def empty_material(color=None):
     return SimpleMaterial(image=image)
 
 
-def pack(materials, uvs, deduplicate=True, padding=1, 
+def pack(materials, uvs, deduplicate=True, padding=1,
          max_tex_size_individual=8192, max_tex_size_fused=8192):
     """
     Pack multiple materials with texture into a single material.
@@ -746,7 +746,7 @@ def pack(materials, uvs, deduplicate=True, padding=1,
     max_tex_size_individual : int
       Maximum size of each individual texture.
     max_tex_size_fused : int | None
-      Maximum size of the combined texture. 
+      Maximum size of the combined texture.
       Individual texture size will be reduced to fit.
       Set to None to allow infinite size.
@@ -825,23 +825,23 @@ def get_metallic_roughness_texture(mat):
             img = np.array(mat.metallicRoughnessTexture.convert("RGB"))
         else:
             img = np.array(mat.metallicRoughnessTexture)
-        
+
         if len(img.shape) == 2 or img.shape[-1] == 1:
             img = img.reshape(*img.shape[:2], 1)
-            img = np.concatenate([img, 
-                                  np.ones_like(img[..., :1])*255, 
-                                  np.zeros_like(img[..., :1])], 
+            img = np.concatenate([img,
+                                  np.ones_like(img[..., :1])*255,
+                                  np.zeros_like(img[..., :1])],
                                  axis=-1)
         elif img.shape[-1] == 2:
             img = np.concatenate([img, np.zeros_like(img[..., :1])], axis=-1)
 
         if mat.metallicFactor is not None:
-            img[..., 0] = np.round(img[..., 0].astype(np.float64) * 
+            img[..., 0] = np.round(img[..., 0].astype(np.float64) *
                                    mat.metallicFactor).astype(np.uint8)
         if mat.roughnessFactor is not None:
-            img[..., 1] = np.round(img[..., 1].astype(np.float64) * 
+            img[..., 1] = np.round(img[..., 1].astype(np.float64) *
                                    mat.roughnessFactor).astype(np.uint8)
-        img = Image.fromarray(img, mode='RGB') 
+        img = Image.fromarray(img, mode='RGB')
     else:
         metallic = 0.0 if mat.metallicFactor is None else mat.metallicFactor
         roughness = 1.0 if mat.roughnessFactor is None else mat.roughnessFactor
@@ -868,11 +868,11 @@ def get_emissive_texture(mat):
                 [0, 0, 0], (1, 1, 3)).astype(np.uint8))
         # make sure we're always returning in RGB mode
         return img.convert('RGB')
-    
+
     def get_normal_texture(mat):
         # there is no default normal texture
         return getattr(mat, 'normalTexture', None)
-    
+
     def get_occlusion_texture(mat):
         occlusion_texture = getattr(mat, 'occlusionTexture', None)
         if occlusion_texture is None:
@@ -882,7 +882,7 @@ def get_occlusion_texture(mat):
         return occlusion_texture
 
     def pad_image(src, padding=1):
-        # uses replication padding on all 4 sides 
+        # uses replication padding on all 4 sides
         if isinstance(padding, int):
             padding = (padding, padding)
 
@@ -895,7 +895,7 @@ def pad_image(src, padding=1):
             result = src[y, x]
 
         return result
-    
+
     def resize_images(images, sizes):
         resized = []
         for img, size in zip(images, sizes):
@@ -905,10 +905,10 @@ def resize_images(images, sizes):
                 img = img.resize(size)
             resized.append(img)
         return resized
-    
+
     def pack_images(images, power_resize=True, random_seed=42):
         # random seed needs to be identical to achieve same results
-        # TODO: we could alternatively reuse the offsets from the first packing call 
# TODO: we could alternatively reuse the offsets from the first packing call np.random.seed(random_seed) return packing.images(images, power_resize=power_resize) @@ -938,9 +938,9 @@ def pack_images(images, power_resize=True, random_seed=42): while down_scale_iterations > 0: # collect the images from the materials images = [get_base_color_texture(materials[g[0]]) for g in mat_idx] - + if use_pbr: - # if we have PBR materials, collect all possible textures and + # if we have PBR materials, collect all possible textures and # determine the largest size per material metallic_roughness = [get_metallic_roughness_texture( materials[g[0]]) for g in mat_idx] @@ -960,7 +960,7 @@ def pack_images(images, power_resize=True, random_seed=42): unpadded_sizes.append(max_tex_size) - # use the same size for all of them to ensure + # use the same size for all of them to ensure # that texture atlassing is identical images = resize_images(images, unpadded_sizes) metallic_roughness = resize_images(metallic_roughness, unpadded_sizes) @@ -978,8 +978,8 @@ def pack_images(images, power_resize=True, random_seed=42): unpadded_sizes.append(tex_size) images = [ - Image.fromarray(pad_image(np.array(img), padding), img.mode) - for img in images + Image.fromarray(pad_image(np.array(img), padding), img.mode) + for img in images ] # pack the multiple images into a single large image @@ -999,10 +999,10 @@ def pack_images(images, power_resize=True, random_seed=42): pad_image( np.array(img), padding), img.mode) for img in metallic_roughness] - # even if we only need the first two channels, store RGB, because + # even if we only need the first two channels, store RGB, because # PIL 'LA' mode images are interpreted incorrectly in other 3D software final_metallic_roughness, _ = pack_images(metallic_roughness) - + if all(np.array(x).max() == 0 for x in emissive): # if all emissive textures are black, don't use emissive emissive = None @@ -1122,7 +1122,7 @@ def pack_images(images, power_resize=True, random_seed=42): doubleSided=False, # TODO how to handle this? normalTexture=final_normals, occlusionTexture=final_occlusion, - ), + ), stacked) else: return SimpleMaterial(image=final), stacked From 51b9a241c983b7f77aac3ed9a1998e2e841758df Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 21 Sep 2023 23:15:20 -0400 Subject: [PATCH 104/144] convert convex decomposition to pyVHACD --- docker/vhacd.bash | 14 ------- pyproject.toml | 3 +- tests/test_decomposition.py | 29 ++++++------- tests/test_integrate.py | 41 ------------------- tests/test_vhacd.py | 39 ------------------ trimesh/base.py | 71 ++++++-------------------------- trimesh/decomposition.py | 26 ++++++++---- trimesh/interfaces/__init__.py | 3 +- trimesh/interfaces/blender.py | 69 ++++++++++++++----------------- trimesh/interfaces/generic.py | 62 ++++++++++++---------------- trimesh/interfaces/gmsh.py | 75 ++++++++++++++++++++-------------- trimesh/interfaces/scad.py | 46 +++++++++++---------- trimesh/interfaces/vhacd.py | 66 ------------------------------ 13 files changed, 172 insertions(+), 372 deletions(-) delete mode 100644 docker/vhacd.bash delete mode 100644 tests/test_integrate.py delete mode 100644 tests/test_vhacd.py delete mode 100644 trimesh/interfaces/vhacd.py diff --git a/docker/vhacd.bash b/docker/vhacd.bash deleted file mode 100644 index 01c92ef83..000000000 --- a/docker/vhacd.bash +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash -set -xe - -# Set the installation path. -VHACD_PATH=/usr/local/bin/testVHACD - -# Grab the VHACD (convex segmenter) binary. 
-wget https://github.com/mikedh/v-hacd-1/raw/master/bin/linux/testVHACD -O "${VHACD_PATH}" -nv - -# Check the hash of the downloaded file -echo "e1e79b2c1b274a39950ffc48807ecb0c81a2192e7d0993c686da90bd33985130 ${VHACD_PATH}" | sha256sum --check - -# Make it executable. -chmod +x ${VHACD_PATH} diff --git a/pyproject.toml b/pyproject.toml index fcfe6a9bc..6da990a32 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,8 @@ recommend = [ "psutil", "xatlas", "scikit-image", - "python-fcl" + "python-fcl", + "pyVHACD", ] # requires pip >= 21.2 diff --git a/tests/test_decomposition.py b/tests/test_decomposition.py index 13a103bb4..5b064c808 100644 --- a/tests/test_decomposition.py +++ b/tests/test_decomposition.py @@ -5,27 +5,22 @@ class DecompositionTest(g.unittest.TestCase): - def test_convex_decomposition(self): - mesh = g.get_mesh('quadknot.obj') - - engines = [('vhacd', g.trimesh.interfaces.vhacd.exists)] - - for engine, exists in engines: - if not exists: - g.log.warning( - 'skipping convex decomposition engine %s', engine) - continue + try: + import pyVHACD # noqa + except ImportError: + return - g.log.info('Testing convex decomposition with engine %s', engine) - meshes = mesh.convex_decomposition(engine=engine) - self.assertTrue(len(meshes) > 1) - for m in meshes: - self.assertTrue(m.is_watertight) + mesh = g.get_mesh("quadknot.obj") + meshes = mesh.convex_decomposition() + assert len(meshes) > 1 + for m in meshes: + assert m.is_watertight + assert m.is_convex - g.log.info('convex decomposition succeeded with %s', engine) + g.trimesh.Scene(meshes).show() -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_integrate.py b/tests/test_integrate.py deleted file mode 100644 index d8d0222a8..000000000 --- a/tests/test_integrate.py +++ /dev/null @@ -1,41 +0,0 @@ -try: - from . import generic as g -except BaseException: - import generic as g - - -class IntegrateTest(g.unittest.TestCase): - - def test_integrate(self): - try: - import sympy as sp - - from trimesh.integrate import symbolic_barycentric - except BaseException: - g.log.warning('no sympy', exc_info=True) - return - - m = g.get_mesh('featuretype.STL') - - integrator, expr = symbolic_barycentric('1') - assert g.np.allclose(integrator(m).sum(), m.area) - x, y, z = sp.symbols('x y z') - functions = [x**2 + y**2, x + y + z] - - for f in functions: - integrator, expr = symbolic_barycentric(f) - integrator_p, expr_p = symbolic_barycentric(str(f)) - - g.log.debug('expression %s was integrated to %s', - str(f), - str(expr)) - - summed = integrator(m).sum() - summed_p = integrator_p(m).sum() - assert g.np.allclose(summed, summed_p) - assert not g.np.allclose(summed, 0.0) - - -if __name__ == '__main__': - g.trimesh.util.attach_to_log() - g.unittest.main() diff --git a/tests/test_vhacd.py b/tests/test_vhacd.py deleted file mode 100644 index 3a24a38c9..000000000 --- a/tests/test_vhacd.py +++ /dev/null @@ -1,39 +0,0 @@ -try: - from . 
import generic as g -except BaseException: - import generic as g - - -class VHACDTest(g.unittest.TestCase): - - def test_vhacd(self): - - # exit if no VHACD - if not g.trimesh.interfaces.vhacd.exists: - g.log.warning( - 'not testing convex decomposition (no vhacd)!') - return - - g.log.info('testing convex decomposition using vhacd') - # get a bunny - mesh = g.get_mesh('bunny.ply') - # run a convex decomposition using vhacd - decomposed = mesh.convex_decomposition( - maxhulls=10, debug=True) - - if len(decomposed) != 10: - # it should return the correct number of meshes - raise ValueError(f'{len(decomposed)} != 10') - - # make sure everything is convex - # also this will fail if the type is returned incorrectly - assert all(i.is_convex for i in decomposed) - - # make sure every result is actually a volume - # ie watertight, consistent winding, positive nonzero volume - assert all(i.is_volume for i in decomposed) - - -if __name__ == '__main__': - g.trimesh.util.attach_to_log() - g.unittest.main() diff --git a/trimesh/base.py b/trimesh/base.py index 555198d02..cdb3f5b62 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -139,9 +139,7 @@ def __init__( # regenerated from self._data, but may be slow to calculate. # In order to maintain consistency # the cache is cleared when self._data.__hash__() changes - self._cache = caching.Cache( - id_function=self._data.__hash__, force_immutable=True - ) + self._cache = caching.Cache(id_function=self._data.__hash__, force_immutable=True) self._cache.update(initial_cache) # check for None only to avoid warning messages in subclasses @@ -194,9 +192,7 @@ def __init__( if isinstance(metadata, dict): self.metadata.update(metadata) elif metadata is not None: - raise ValueError( - "metadata should be a dict or None, got %s" % str(metadata) - ) + raise ValueError("metadata should be a dict or None, got %s" % str(metadata)) # store per-face and per-vertex attributes which will # be updated when an update_faces call is made @@ -427,9 +423,7 @@ def face_normals(self, values): return # make sure the first few normals match the first few triangles - check, valid = triangles.normals( - self.vertices.view(np.ndarray)[self.faces[:20]] - ) + check, valid = triangles.normals(self.vertices.view(np.ndarray)[self.faces[:20]]) compare = np.zeros((len(valid), 3)) compare[valid] = check if not np.allclose(compare, values[:20]): @@ -607,9 +601,7 @@ def centroid(self) -> NDArray[float64]: # use the centroid of each triangle weighted by # the area of the triangle to find the overall centroid try: - centroid = np.average( - self.triangles_center, weights=self.area_faces, axis=0 - ) + centroid = np.average(self.triangles_center, weights=self.area_faces, axis=0) except BaseException: # if all triangles are zero-area weights will not work centroid = self.triangles_center.mean(axis=0) @@ -1212,11 +1204,7 @@ def update_vertices( # make sure mask is a numpy array mask = np.asanyarray(mask) - if ( - (mask.dtype.name == "bool" and mask.all()) - or len(mask) == 0 - or self.is_empty - ): + if (mask.dtype.name == "bool" and mask.all()) or len(mask) == 0 or self.is_empty: # mask doesn't remove any vertices so exit early return @@ -1534,9 +1522,7 @@ def face_adjacency_radius(self) -> NDArray[float64]: radii : (len(self.face_adjacency), ) float Approximate radius formed by triangle pair """ - radii, self._cache["face_adjacency_span"] = graph.face_adjacency_radius( - mesh=self - ) + radii, self._cache["face_adjacency_span"] = graph.face_adjacency_radius(mesh=self) return radii 
@caching.cache_decorator @@ -1842,9 +1828,7 @@ def facets_boundary(self) -> List[NDArray[int64]]: edges = self.edges_sorted.reshape((-1, 6)) # get the edges for each facet edges_facet = [edges[i].reshape((-1, 2)) for i in self.facets] - edges_boundary = [ - i[grouping.group_rows(i, require_count=1)] for i in edges_facet - ] + edges_boundary = [i[grouping.group_rows(i, require_count=1)] for i in edges_facet] return edges_boundary @caching.cache_decorator @@ -2886,46 +2870,17 @@ def to_dict(self) -> Dict[str, Union[str, List[List[float]], List[List[int]]]]: "faces": self.faces.tolist(), } - def convex_decomposition(self, maxhulls=20, **kwargs): - """ - Compute an approximate convex decomposition of a mesh. - - testVHACD Parameters which can be passed as kwargs: - - Name Default - ----------------------------------------------------- - resolution 100000 - max. concavity 0.001 - plane down-sampling 4 - convex-hull down-sampling 4 - alpha 0.05 - beta 0.05 - maxhulls 10 - pca 0 - mode 0 - max. vertices per convex-hull 64 - min. volume to add vertices to convex-hulls 0.0001 - convex-hull approximation 1 - OpenCL acceleration 1 - OpenCL platform ID 0 - OpenCL device ID 0 - output output.wrl - log log.txt - - - Parameters - ------------ - maxhulls : int - Maximum number of convex hulls to return - **kwargs : testVHACD keyword arguments + def convex_decomposition(self) -> List["Trimesh"]: + """ + Compute an approximate convex decomposition of a mesh + using `pip install pyVHACD`. Returns ------- - meshes : list of trimesh.Trimesh + meshes List of convex meshes that approximate the original """ - result = decomposition.convex_decomposition(self, maxhulls=maxhulls, **kwargs) - return result + return [Trimesh(**kwargs) for kwargs in decomposition.convex_decomposition(self)] def union( self, other: "Trimesh", engine: Optional[str] = None, **kwargs diff --git a/trimesh/decomposition.py b/trimesh/decomposition.py index b6ec8c328..12cab73bc 100644 --- a/trimesh/decomposition.py +++ b/trimesh/decomposition.py @@ -1,7 +1,9 @@ -from . import interfaces +from typing import Dict, List +import numpy as np -def convex_decomposition(mesh, **kwargs): + +def convex_decomposition(mesh) -> List[Dict]: """ Compute an approximate convex decomposition of a mesh. @@ -16,8 +18,18 @@ def convex_decomposition(mesh, **kwargs): List of **kwargs for Trimeshes that are nearly convex and approximate the original. """ - # decompositions require testVHACD - if interfaces.vhacd.exists: - return interfaces.vhacd.convex_decomposition(mesh, **kwargs) - else: - raise ValueError('convex compositions require testVHACD installed!') + from pyVHACD import compute_vhacd + + # the faces are triangulated in a (len(face), ...vertex-index) + # for vtkPolyData + # i.e. so if shaped to four columns the first column is all 3 + faces = ( + np.column_stack((np.ones(len(mesh.faces), dtype=np.int64) * 3, mesh.faces)) + .ravel() + .astype(np.uint32) + ) + + return [ + {"vertices": v, "faces": f.reshape((-1, 4))[:, 1:]} + for v, f in compute_vhacd(mesh.vertices, faces) + ] diff --git a/trimesh/interfaces/__init__.py b/trimesh/interfaces/__init__.py index 4c2bb091e..0878b76bb 100644 --- a/trimesh/interfaces/__init__.py +++ b/trimesh/interfaces/__init__.py @@ -3,7 +3,6 @@ from . import gmsh from . import scad from . import blender -from . 
import vhacd # add to __all__ as per pep8 -__all__ = ['scad', 'blender', 'vhacd'] +__all__ = ["scad", "blender"] diff --git a/trimesh/interfaces/blender.py b/trimesh/interfaces/blender.py index e640c3d14..610a9be92 100644 --- a/trimesh/interfaces/blender.py +++ b/trimesh/interfaces/blender.py @@ -5,53 +5,49 @@ from ..constants import log from .generic import MeshScript -_search_path = os.environ.get('PATH', '') -if platform.system() == 'Windows': +_search_path = os.environ.get("PATH", "") +if platform.system() == "Windows": # try to find Blender install on Windows # split existing path by delimiter - _search_path = [i for i in _search_path.split(';') if len(i) > 0] - for pf in [r'C:\Program Files', - r'C:\Program Files (x86)']: + _search_path = [i for i in _search_path.split(";") if len(i) > 0] + for pf in [r"C:\Program Files", r"C:\Program Files (x86)"]: pf = os.path.join(pf, "Blender Foundation") if os.path.exists(pf): for p in os.listdir(pf): if "Blender" in p: _search_path.append(os.path.join(pf, p)) - _search_path = ';'.join(_search_path) - log.debug('searching for blender in: %s', _search_path) + _search_path = ";".join(_search_path) + log.debug("searching for blender in: %s", _search_path) -if platform.system() == 'Darwin': +if platform.system() == "Darwin": # try to find Blender on Mac OSX - _search_path = [i for i in _search_path.split(':') if len(i) > 0] - _search_path.append('/Applications/blender.app/Contents/MacOS') - _search_path.append('/Applications/Blender.app/Contents/MacOS') - _search_path.append('/Applications/Blender/blender.app/Contents/MacOS') - _search_path = ':'.join(_search_path) - log.debug('searching for blender in: %s', _search_path) - -_blender_executable = util.which('blender', path=_search_path) + _search_path = [i for i in _search_path.split(":") if len(i) > 0] + _search_path.append("/Applications/blender.app/Contents/MacOS") + _search_path.append("/Applications/Blender.app/Contents/MacOS") + _search_path.append("/Applications/Blender/blender.app/Contents/MacOS") + _search_path = ":".join(_search_path) + log.debug("searching for blender in: %s", _search_path) + +_blender_executable = util.which("blender", path=_search_path) exists = _blender_executable is not None -def boolean(meshes, operation='difference', debug=False): +def boolean(meshes, operation="difference", debug=False): """ Run a boolean operation with multiple meshes using Blender. """ if not exists: - raise ValueError('No blender available!') + raise ValueError("No blender available!") operation = str.upper(operation) - if operation == 'INTERSECTION': - operation = 'INTERSECT' + if operation == "INTERSECTION": + operation = "INTERSECT" # get the template from our resources folder - template = resources.get('templates/blender_boolean.py.tmpl') - script = template.replace('$OPERATION', operation) + template = resources.get("templates/blender_boolean.py.tmpl") + script = template.replace("$OPERATION", operation) - with MeshScript(meshes=meshes, - script=script, - debug=debug) as blend: - result = blend.run(_blender_executable + - ' --background --python $SCRIPT') + with MeshScript(meshes=meshes, script=script, debug=debug) as blend: + result = blend.run(_blender_executable + " --background --python $SCRIPT") for m in util.make_sequence(result): # blender returns actively incorrect face normals @@ -65,19 +61,16 @@ def unwrap(mesh, angle_limit=66, island_margin=0.0, debug=False): Run an unwrap operation using blender. 
""" if not exists: - raise ValueError('No blender available!') + raise ValueError("No blender available!") # get the template from our resources folder - template = resources.get('templates/blender_unwrap.py') - script = template.replace('$ANGLE_LIMIT', "%.6f" % angle_limit - ).replace('$ISLAND_MARGIN', "%.6f" % island_margin) - - with MeshScript(meshes=[mesh], - script=script, - exchange="obj", - debug=debug) as blend: - result = blend.run(_blender_executable + - ' --background --python $SCRIPT') + template = resources.get("templates/blender_unwrap.py") + script = template.replace("$ANGLE_LIMIT", "%.6f" % angle_limit).replace( + "$ISLAND_MARGIN", "%.6f" % island_margin + ) + + with MeshScript(meshes=[mesh], script=script, exchange="obj", debug=debug) as blend: + result = blend.run(_blender_executable + " --background --python $SCRIPT") for m in util.make_sequence(result): # blender returns actively incorrect face normals diff --git a/trimesh/interfaces/generic.py b/trimesh/interfaces/generic.py index 0a7d9cff0..694129bf4 100644 --- a/trimesh/interfaces/generic.py +++ b/trimesh/interfaces/generic.py @@ -10,14 +10,7 @@ class MeshScript: - - def __init__(self, - meshes, - script, - exchange='stl', - debug=False, - **kwargs): - + def __init__(self, meshes, script, exchange="stl", debug=False, **kwargs): self.debug = debug self.kwargs = kwargs self.meshes = meshes @@ -32,32 +25,31 @@ def __enter__(self): digit_count = len(str(len(self.meshes))) self.mesh_pre = [ NamedTemporaryFile( - suffix=f'.{self.exchange}', - prefix=f'{str(i).zfill(digit_count)}_', - mode='wb', - delete=False) for i in range(len(self.meshes))] + suffix=f".{self.exchange}", + prefix=f"{str(i).zfill(digit_count)}_", + mode="wb", + delete=False, + ) + for i in range(len(self.meshes)) + ] self.mesh_post = NamedTemporaryFile( - suffix=f'.{self.exchange}', - mode='rb', - delete=False) - self.script_out = NamedTemporaryFile( - mode='wb', delete=False) + suffix=f".{self.exchange}", mode="rb", delete=False + ) + self.script_out = NamedTemporaryFile(mode="wb", delete=False) # export the meshes to a temporary STL container for mesh, file_obj in zip(self.meshes, self.mesh_pre): mesh.export(file_obj=file_obj.name) - self.replacement = {'MESH_' + str(i): m.name - for i, m in enumerate(self.mesh_pre)} - self.replacement['MESH_PRE'] = str( - [i.name for i in self.mesh_pre]) - self.replacement['MESH_POST'] = self.mesh_post.name - self.replacement['SCRIPT'] = self.script_out.name + self.replacement = {"MESH_" + str(i): m.name for i, m in enumerate(self.mesh_pre)} + self.replacement["MESH_PRE"] = str([i.name for i in self.mesh_pre]) + self.replacement["MESH_POST"] = self.mesh_post.name + self.replacement["SCRIPT"] = self.script_out.name script_text = Template(self.script).substitute(self.replacement) - if platform.system() == 'Windows': - script_text = script_text.replace('\\', '\\\\') - self.script_out.write(script_text.encode('utf-8')) + if platform.system() == "Windows": + script_text = script_text.replace("\\", "\\\\") + self.script_out.write(script_text.encode("utf-8")) # close all temporary files self.script_out.close() @@ -67,21 +59,20 @@ def __enter__(self): return self def run(self, command): - command_run = Template(command).substitute( - self.replacement).split() + command_run = Template(command).substitute(self.replacement).split() # run the binary startupinfo = None - if platform.system() == 'Windows': + if platform.system() == "Windows": startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= 
subprocess.STARTF_USESHOWWINDOW if self.debug: - log.info('executing: {}'.format(' '.join(command_run))) + log.info("executing: {}".format(" ".join(command_run))) try: - output = check_output(command_run, - stderr=subprocess.STDOUT, - startupinfo=startupinfo) + output = check_output( + command_run, stderr=subprocess.STDOUT, startupinfo=startupinfo + ) except CalledProcessError as e: # Log output if debug is enabled if self.debug: @@ -92,14 +83,13 @@ def run(self, command): log.info(output.decode()) # bring the binaries result back as a set of Trimesh kwargs - mesh_results = exchange.load.load_mesh( - self.mesh_post.name, **self.kwargs) + mesh_results = exchange.load.load_mesh(self.mesh_post.name, **self.kwargs) return mesh_results def __exit__(self, *args, **kwargs): if self.debug: - log.info(f'MeshScript.debug: not deleting {self.script_out.name}') + log.info(f"MeshScript.debug: not deleting {self.script_out.name}") return # delete all the temporary files by name # they are closed but their names are still available diff --git a/trimesh/interfaces/gmsh.py b/trimesh/interfaces/gmsh.py index 0a2f23f10..00eed2ee6 100644 --- a/trimesh/interfaces/gmsh.py +++ b/trimesh/interfaces/gmsh.py @@ -41,33 +41,45 @@ def load_gmsh(file_name, gmsh_args=None): # Mesh.Algorithm=2 MeshAdapt/Delaunay, there are others but they may include quads # With this planes are meshed using Delaunay and cylinders are meshed # using MeshAdapt - args = [("Mesh.Algorithm", 2), - ("Mesh.CharacteristicLengthFromCurvature", 1), - ("Mesh.MinimumCirclePoints", 32)] + args = [ + ("Mesh.Algorithm", 2), + ("Mesh.CharacteristicLengthFromCurvature", 1), + ("Mesh.MinimumCirclePoints", 32), + ] # add passed argument tuples last so we can override defaults if gmsh_args is not None: args.extend(gmsh_args) # formats GMSH can load - supported = ['.brep', '.stp', '.step', '.igs', '.iges', - '.bdf', '.msh', '.inp', '.diff', '.mesh'] + supported = [ + ".brep", + ".stp", + ".step", + ".igs", + ".iges", + ".bdf", + ".msh", + ".inp", + ".diff", + ".mesh", + ] # check extensions to make sure it is supported format if file_name is not None: - if not any(file_name.lower().endswith(e) - for e in supported): + if not any(file_name.lower().endswith(e) for e in supported): raise ValueError( - 'Supported formats are: BREP (.brep), STEP (.stp or .step), ' + - 'IGES (.igs or .iges), Nastran (.bdf), Gmsh (.msh), Abaqus (*.inp), ' + - 'Diffpack (*.diff), Inria Medit (*.mesh)') + "Supported formats are: BREP (.brep), STEP (.stp or .step), " + + "IGES (.igs or .iges), Nastran (.bdf), Gmsh (.msh), Abaqus (*.inp), " + + "Diffpack (*.diff), Inria Medit (*.mesh)" + ) else: - raise ValueError('No import since no file was provided!') + raise ValueError("No import since no file was provided!") # if we initialize with sys.argv it could be anything if not gmsh.isInitialized(): gmsh.initialize() gmsh.option.setNumber("General.Terminal", 1) - gmsh.model.add('Surface_Mesh_Generation') + gmsh.model.add("Surface_Mesh_Generation") # loop through our numbered args which do things, stuff for arg in args: gmsh.option.setNumber(*arg) @@ -75,13 +87,14 @@ def load_gmsh(file_name, gmsh_args=None): gmsh.open(file_name) # create a temporary file for the results - out_data = tempfile.NamedTemporaryFile(suffix='.stl', delete=False) + out_data = tempfile.NamedTemporaryFile(suffix=".stl", delete=False) # windows gets mad if two processes try to open the same file out_data.close() # we have to mesh the surface as these are analytic BREP formats - if any(file_name.lower().endswith(e) - 
for e in ['.brep', '.stp', '.step', '.igs', '.iges']): + if any( + file_name.lower().endswith(e) for e in [".brep", ".stp", ".step", ".igs", ".iges"] + ): gmsh.model.geo.synchronize() # generate the mesh gmsh.model.mesh.generate(2) @@ -93,7 +106,7 @@ def load_gmsh(file_name, gmsh_args=None): gmsh.view.write(1, out_data.name) # load the data from the temporary outfile - with open(out_data.name, 'rb') as f: + with open(out_data.name, "rb") as f: kwargs = load_stl(f) gmsh.finalize() @@ -101,10 +114,7 @@ def load_gmsh(file_name, gmsh_args=None): return kwargs -def to_volume(mesh, - file_name=None, - max_element=None, - mesher_id=1): +def to_volume(mesh, file_name=None, max_element=None, mesher_id=1): """ Convert a surface mesh to a 3D volume mesh generated by gmsh. @@ -150,7 +160,7 @@ def to_volume(mesh, # checks mesher selection if mesher_id not in [1, 3, 4, 7, 9, 10]: - raise ValueError('unavailable mesher selected!') + raise ValueError("unavailable mesher selected!") else: mesher_id = int(mesher_id) @@ -160,22 +170,25 @@ def to_volume(mesh, if file_name is not None: # check extensions to make sure it is supported format - if not any(file_name.lower().endswith(e) - for e in ['.bdf', '.msh', '.inp', '.diff', '.mesh']): + if not any( + file_name.lower().endswith(e) + for e in [".bdf", ".msh", ".inp", ".diff", ".mesh"] + ): raise ValueError( - 'Only Nastran (.bdf), Gmsh (.msh), Abaqus (*.inp), ' + - 'Diffpack (*.diff) and Inria Medit (*.mesh) formats ' + - 'are available!') + "Only Nastran (.bdf), Gmsh (.msh), Abaqus (*.inp), " + + "Diffpack (*.diff) and Inria Medit (*.mesh) formats " + + "are available!" + ) # exports to disk for gmsh to read using a temp file - mesh_file = tempfile.NamedTemporaryFile(suffix='.stl', delete=False) + mesh_file = tempfile.NamedTemporaryFile(suffix=".stl", delete=False) mesh_file.close() mesh.export(mesh_file.name) # starts Gmsh Python API script gmsh.initialize() gmsh.option.setNumber("General.Terminal", 1) - gmsh.model.add('Nastran_stl') + gmsh.model.add("Nastran_stl") gmsh.merge(mesh_file.name) dimtag = gmsh.model.getEntities()[0] @@ -195,16 +208,16 @@ def to_volume(mesh, dim2 = dimtag2[0] tag2 = dimtag2[1] p2 = gmsh.model.addPhysicalGroup(dim2, [tag2]) - gmsh.model.setPhysicalName(dim, p2, 'Nastran_bdf') + gmsh.model.setPhysicalName(dim, p2, "Nastran_bdf") data = None # if file name is None, return msh data using a tempfile if file_name is None: - out_data = tempfile.NamedTemporaryFile(suffix='.msh', delete=False) + out_data = tempfile.NamedTemporaryFile(suffix=".msh", delete=False) # windows gets mad if two processes try to open the same file out_data.close() gmsh.write(out_data.name) - with open(out_data.name, 'rb') as f: + with open(out_data.name, "rb") as f: data = f.read() else: gmsh.write(file_name) diff --git a/trimesh/interfaces/scad.py b/trimesh/interfaces/scad.py index d1e00d86d..4c97238e2 100644 --- a/trimesh/interfaces/scad.py +++ b/trimesh/interfaces/scad.py @@ -7,25 +7,25 @@ from .generic import MeshScript # start the search with the user's PATH -_search_path = os.environ.get('PATH', '') +_search_path = os.environ.get("PATH", "") # add additional search locations on windows -if platform.system() == 'Windows': +if platform.system() == "Windows": # split existing path by delimiter - _search_path = [i for i in _search_path.split(';') if len(i) > 0] - _search_path.append(os.path.normpath(r'C:\Program Files\OpenSCAD')) - _search_path.append(os.path.normpath(r'C:\Program Files (x86)\OpenSCAD')) - _search_path = ';'.join(_search_path) - 
log.debug('searching for scad in: %s', _search_path) + _search_path = [i for i in _search_path.split(";") if len(i) > 0] + _search_path.append(os.path.normpath(r"C:\Program Files\OpenSCAD")) + _search_path.append(os.path.normpath(r"C:\Program Files (x86)\OpenSCAD")) + _search_path = ";".join(_search_path) + log.debug("searching for scad in: %s", _search_path) # add mac-specific search locations -if platform.system() == 'Darwin': - _search_path = [i for i in _search_path.split(':') if len(i) > 0] - _search_path.append('/Applications/OpenSCAD.app/Contents/MacOS') - _search_path = ':'.join(_search_path) - log.debug('searching for scad in: %s', _search_path) +if platform.system() == "Darwin": + _search_path = [i for i in _search_path.split(":") if len(i) > 0] + _search_path.append("/Applications/OpenSCAD.app/Contents/MacOS") + _search_path = ":".join(_search_path) + log.debug("searching for scad in: %s", _search_path) # try to find the SCAD executable by name -_scad_executable = which('openscad', path=_search_path) +_scad_executable = which("openscad", path=_search_path) if _scad_executable is None: - _scad_executable = which('OpenSCAD', path=_search_path) + _scad_executable = which("OpenSCAD", path=_search_path) exists = _scad_executable is not None @@ -44,18 +44,20 @@ def interface_scad(meshes, script, debug=False, **kwargs): $mesh_0, $mesh_1, etc. """ if not exists: - raise ValueError('No SCAD available!') + raise ValueError("No SCAD available!") # OFF is a simple text format that references vertices by-index # making it slightly preferable to STL for this kind of exchange duty try: - with MeshScript(meshes=meshes, script=script, - debug=debug, exchange='off') as scad: - result = scad.run(_scad_executable + ' $SCRIPT -o $MESH_POST') + with MeshScript( + meshes=meshes, script=script, debug=debug, exchange="off" + ) as scad: + result = scad.run(_scad_executable + " $SCRIPT -o $MESH_POST") except CalledProcessError as e: # Check if scad is complaining about an empty top level geometry. # If so, just return an empty Trimesh object. if "Current top level object is empty." in e.output.decode(): from .. 
import Trimesh + return Trimesh() else: raise @@ -63,12 +65,12 @@ def interface_scad(meshes, script, debug=False, **kwargs): return result -def boolean(meshes, operation='difference', debug=False, **kwargs): +def boolean(meshes, operation="difference", debug=False, **kwargs): """ Run an operation on a set of meshes """ - script = operation + '(){' + script = operation + "(){" for i in range(len(meshes)): - script += 'import(\"$MESH_' + str(i) + '\");' - script += '}' + script += 'import("$MESH_' + str(i) + '");' + script += "}" return interface_scad(meshes, script, debug=debug, **kwargs) diff --git a/trimesh/interfaces/vhacd.py b/trimesh/interfaces/vhacd.py deleted file mode 100644 index 5c7a15549..000000000 --- a/trimesh/interfaces/vhacd.py +++ /dev/null @@ -1,66 +0,0 @@ -import os -import platform - -from ..constants import log -from ..util import which -from .generic import MeshScript - -_search_path = os.environ.get("PATH", "") - -if platform.system() == 'Windows': - # split existing path by delimiter - _search_path = [i for i in _search_path.split(';') if len(i) > 0] - _search_path.append(r'C:\Program Files') - _search_path.append(r'C:\Program Files (x86)') - _search_path = ';'.join(_search_path) - log.debug('searching for vhacd in: %s', _search_path) -else: - _search_path = ":".join( - [os.path.expanduser(os.path.expandvars(p)) for p in _search_path.split(":")] - ) - -_vhacd_executable = None -for _name in ['vhacd', 'testVHACD', 'TestVHACD']: - _vhacd_executable = which(_name, path=_search_path) - if _vhacd_executable is not None: - break -exists = _vhacd_executable is not None - - -def convex_decomposition(mesh, debug=False, **kwargs): - """ - Run VHACD to generate an approximate convex decomposition - of a single mesh. - - Parameters - -------------- - mesh : trimesh.Trimesh - Mesh to be decomposed into convex components - - Returns - ------------ - meshes : (n,) trimesh.Trimesh - List of convex meshes - """ - if not exists: - raise ValueError('No vhacd available!') - - argstring = ' --input $MESH_0 --output $MESH_POST --log $SCRIPT' - - # pass through extra arguments from the input dictionary - for key, value in kwargs.items(): - argstring += f' --{str(key)} {str(value)}' - - with MeshScript(meshes=[mesh], - script='', - exchange='obj', - group_material=False, - split_object=True, - debug=debug) as vhacd: - result = vhacd.run(_vhacd_executable + argstring) - - # if we got a scene back return a list of meshes - if hasattr(result, 'geometry') and isinstance(result.geometry, dict): - return list(result.geometry.values()) - - return result From 68af44b0a243a2881405bb5687a2a43a3e7b7a93 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Fri, 22 Sep 2023 01:09:01 -0400 Subject: [PATCH 105/144] remove show --- tests/test_decomposition.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/test_decomposition.py b/tests/test_decomposition.py index 5b064c808..75ae8cd49 100644 --- a/tests/test_decomposition.py +++ b/tests/test_decomposition.py @@ -18,8 +18,6 @@ def test_convex_decomposition(self): assert m.is_watertight assert m.is_convex - g.trimesh.Scene(meshes).show() - if __name__ == "__main__": g.trimesh.util.attach_to_log() From 1fa13c7d2a2398dfa3705c67dfe003903ae49b4f Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Fri, 22 Sep 2023 12:55:29 -0400 Subject: [PATCH 106/144] update decomposition --- trimesh/exchange/urdf.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/trimesh/exchange/urdf.py b/trimesh/exchange/urdf.py index 
b60072201..fbfc77b31 100644 --- a/trimesh/exchange/urdf.py +++ b/trimesh/exchange/urdf.py @@ -3,7 +3,6 @@ import numpy as np from ..constants import log, tol -from ..decomposition import convex_decomposition from ..version import __version__ @@ -53,9 +52,7 @@ def export_urdf(mesh, # Perform a convex decomposition try: - convex_pieces = convex_decomposition(mesh, **kwargs) - if not isinstance(convex_pieces, list): - convex_pieces = [convex_pieces] + convex_pieces = mesh.convex_decomposition() except BaseException: log.error('problem with convex decomposition, using hull', exc_info=True) From 9c5f1e64e1619ad7e12bfc4cb9ee313bff9949b2 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Fri, 22 Sep 2023 14:07:43 -0400 Subject: [PATCH 107/144] update docs --- docs/guides/contributing.md | 10 +- docs/guides/install.md | 36 ++-- trimesh/path/packing.py | 160 +++++++++------- trimesh/visual/__init__.py | 46 ++--- trimesh/visual/base.py | 1 + trimesh/visual/color.py | 226 +++++++++++----------- trimesh/visual/gloss.py | 141 ++++++++------ trimesh/visual/material.py | 361 +++++++++++++++++++----------------- trimesh/visual/objects.py | 17 +- trimesh/visual/texture.py | 23 +-- 10 files changed, 536 insertions(+), 485 deletions(-) diff --git a/docs/guides/contributing.md b/docs/guides/contributing.md index 690d63302..6eafd6d21 100644 --- a/docs/guides/contributing.md +++ b/docs/guides/contributing.md @@ -71,16 +71,12 @@ if __name__ == '__main__': When you remove the embed and see the profile result you can then tweak the lines that are slow before finishing the function. ### Automatic Formatting -Before opening a pull request I run some auto-formatting rules which will run autopep8 and yell at you about any `ruff` rule violations. There is a convenience script baked into `setup.py` to run all of these which you can run with: +The only check in that's required to pass in CI is `ruff`, which I usually run with: ``` -python setup.py --format +ruff . --fix ``` +It can fix a lot of formatting issues automatically. We also periodically run `black` to autoformat the codebase. -This is equivalent to running `codespell`, `autopep8`, and `flake8` on trimesh, examples, and tests. You can also run it yourself with these options: -``` -autopep8 --recursive --verbose --in-place --aggressive trimesh -flake8 trimesh -``` ## Docstrings diff --git a/docs/guides/install.md b/docs/guides/install.md index 8466fc01f..bd015a2bf 100644 --- a/docs/guides/install.md +++ b/docs/guides/install.md @@ -18,7 +18,7 @@ If you\'d like most soft dependencies which should install cleanly on Mac, Windo pip install trimesh[easy] ``` -Or if you want the full experience, you can try the `all` extra, where packages may only be available for Linux: +Or if you want the full experience, you can try the `all` extra which includes all the testing and recommended packages: ``` pip install trimesh[all] ``` @@ -42,7 +42,7 @@ Ubuntu-Debian Notes Blender and openSCAD are soft dependencies used for boolean operations with subprocess, you can get them with `apt`: ``` -sudo apt-get install openscad blender +sudo apt-get install blender ``` Dependency Overview @@ -54,31 +54,31 @@ Trimesh has a lot of soft-required upstream packages. We try to make sure they'r | Package | Description | Alternatives | Level | | ------ | --------- | ---------- | ----- | | `numpy` | The base container for fast array types. 
| | `required` | -| `scipy` | Provides convex hulls (`scipy.spatial.ConvexHull`), fast graph operations (`scipy.sparse.csgraph`), fast nearest-point queries (`scipy.spatial.cKDTree`), b-spline evaluation (`scipy.interpolate`). | Nothing comes to mind, it does a whole heck of a lot. | `easy` | +| `scipy` | Provides convex hulls (`scipy.spatial.ConvexHull`), fast graph operations (`scipy.sparse.csgraph`), fast nearest-point queries (`scipy.spatial.cKDTree`), b-spline evaluation (`scipy.interpolate`). | | `easy` | | `lxml` | Parse XML documents. We use this over the built-in ones as it was slightly faster, and there was a format implemented which was extremely annoying to handle without the ability to get parent nodes (which `lxml` has but built-in XML doesn't). | Standard library's XML | `easy` | -| `networkx` | A nice-to-use pure Python graph library that's faster than you'd think. It implements DFS, BFS, and the usual FAANG-interview-question algorithms. A lot of the commonly run stuff in trimesh has been re-written to use `scipy.sparse.csgraph` as it's also an easy install and is way faster in most cases. But if you have a small-ish graph the API for `networkx` is way easier to "grok". | `graph-tool`, `scipy.sparse.csgraph` | `easy` | -| `shapely` | Bindings to `GEOS` for 2D spatial stuff: "set-theoretic analysis and manipulation of planar features." It lets you offset, union, and query polygons nicely. | `clipper` maybe? | `easy` | +| `networkx` | Pure Python graph library that's reasonably fast and has a nice API. `scipy.sparse.csgraph` is way faster in most cases but is hard to understand and doesn't implement as many algorithms. | `graph-tool`, `scipy.sparse.csgraph` | `easy` | +| `shapely` | Bindings to `GEOS` for 2D spatial stuff: "set-theoretic analysis and manipulation of planar features" which lets you offset, union, and query polygons. | `clipper` | `easy` | | `rtree` | Query ND rectangles with a spatial tree for a "broad phase" intersection. Used in polygon generation ("given N closed curves which curve contains the other curve?") and as the broad-phase for the built-in-numpy slow ray query engine. | `fcl` maybe? | `easy` | |`requests`| Do network queries in `trimesh.exchange.load_remote`, will *only* make network requests when asked | | `easy`| -|`sympy`| Evaluate symbolic algebra | | `easy`| +|`sympy`| Evaluate symbolic algebra | | `recommend`| |`xxhash`| Quickly hash arrays, used for our cache checking | | `easy`| -|`msgpack`| A serialization method that supports bytes-blobs. | `protobuf` | `easy`| -|`chardet`| When we fail to decode text as UTF-8 we then check with chardet which guesses an encoding. This lets us load files even with weird encodings. | | `easy`| +|`chardet`| When we fail to decode text as UTF-8 we then check with chardet which guesses an encoding, letting us load files even with weird encodings. | | `easy`| |`colorlog`| Printing logs with colors. | | `easy`| -|`pillow`| Reading raster images for textures, and rendering polygons into raster images. | | `easy`| +|`pillow`| Reading raster images for textures and render polygons into raster images. | | `easy`| |`svg.path`| Parsing SVG path strings. | | `easy`| |`jsonschema`| Validating our exports for formats like GLTF. | | `easy`| |`pycollada`| Parse `dae` files. | | `easy`| -|`pyglet`| OpenGL bindings for our simple debug viewer. | | `easy`| -|`xatlas`| Unwrap meshes to generate UV coordinates quickly and well. 
| | `all`| -|`python-fcl`| Do collision queries between meshes | | `all`| -|`glooey`| Provide a viewer with widgets. | | `all`| -|`meshio`| Load additional mesh formats. | | `all`| -|`scikit-image`| Used in voxel ops | | `all`| -|`mapbox-earcut`| Triangulate 2D polygons | `triangle` which has an unusual license | `all`| -|`psutil`| Get current memory usage, useful for checking to see if we're going to run out of memory instantiating a giant array | | `all`| +|`pyglet<2`| OpenGL bindings for our simple debug viewer. | | `recommend`| +|`xatlas`| Unwrap meshes to generate UV coordinates quickly and well. | | `recommend`| +|`python-fcl`| Do collision queries between meshes | | `recommend`| +|`glooey`| Provide a viewer with widgets. | | `recommend`| +|`meshio`| Load additional mesh formats. | | `recommend`| +|`scikit-image`| Used in voxel ops | | `recommend`| +|`mapbox-earcut`| Triangulate 2D polygons | `triangle` which has an unusual license | `easy`| +|`psutil`| Get current memory usage, useful for checking to see if we're going to run out of memory instantiating a giant array | | `recommend`| |`ruff`| A static code analyzer that replaces `flake8`. | `flake8` | `test`| -|`autopep8`| A code formatter which fixes whitespace issues automatically. | | `test`| +|`black`| A code formatter which fixes whitespace issues automatically. | | `test`| |`pytest`| A test runner. | | `test`| |`pytest-cov`| A plugin to calculate test coverage. | | `test`| |`pyinstrument`| A sampling based profiler for performance tweaking. | | `test`| +|`pyvhacd`| A binding for VHACD which provides convex decompositions | | `recommend`| diff --git a/trimesh/path/packing.py b/trimesh/path/packing.py index 9c6de3e8e..fe9b194e2 100644 --- a/trimesh/path/packing.py +++ b/trimesh/path/packing.py @@ -205,8 +205,7 @@ def rectangles_single(extents, size=None, shuffle=False, rotate=True): # if no bounds are passed start it with the size of a large # rectangle exactly which will require re-rooting for # subsequent insertions - root_bounds = [[0.0] * dimension, - extents[extents.ptp(axis=1).argmax()]] + root_bounds = [[0.0] * dimension, extents[extents.ptp(axis=1).argmax()]] else: # restrict the bounds to passed size and disallow re-rooting root_bounds = [[0.0] * dimension, size] @@ -273,12 +272,10 @@ def rectangles_single(extents, size=None, shuffle=False, rotate=True): # this node has children so it is occupied new_root.occupied = True # create a bin for both bounds - new_root.child = [RectangleBin(bounds_ori), - RectangleBin(bounds_ins)] + new_root.child = [RectangleBin(bounds_ori), RectangleBin(bounds_ins)] # insert the original sheet into the new tree - root_offset = new_root.child[0].insert( - bounds.ptp(axis=0), rotate=rotate) + root_offset = new_root.child[0].insert(bounds.ptp(axis=0), rotate=rotate) # we sized the cells so original tree would fit assert root_offset is not None @@ -335,13 +332,12 @@ def paths(paths, **kwargs): packable = [] original = [] for index, path in enumerate(paths): - quantity = path.metadata.get('quantity', 1) + quantity = path.metadata.get("quantity", 1) original.extend([index] * quantity) packable.extend([path.polygons_closed[path.root[0]]] * quantity) # pack the polygons using rectangular bin packing - transforms, consume = polygons( - polygons=packable, **kwargs) + transforms, consume = polygons(polygons=packable, **kwargs) positioned = [] for index, matrix in zip(np.nonzero(consume)[0], transforms): @@ -385,34 +381,37 @@ def polygons(polygons, **kwargs): # run packing for a number of iterations 
bounds, consume = rectangles(extents=extents, **kwargs) - log.debug('%i/%i parts were packed successfully', - consume.sum(), len(polygons)) + log.debug("%i/%i parts were packed successfully", consume.sum(), len(polygons)) # transformations to packed positions roll = roll_transform(bounds=bounds, extents=extents[consume]) - transforms = np.array([np.dot(b, a) for a, b in - zip(obb[consume], roll)]) + transforms = np.array([np.dot(b, a) for a, b in zip(obb[consume], roll)]) if tol.strict: # original bounds should not overlap assert not bounds_overlap(bounds) # confirm transfor check_bound = np.array( - [polygon_bounds(polygons[index], matrix=m) - for index, m in zip(np.nonzero(consume)[0], transforms)]) + [ + polygon_bounds(polygons[index], matrix=m) + for index, m in zip(np.nonzero(consume)[0], transforms) + ] + ) assert not bounds_overlap(check_bound) return transforms, consume -def rectangles(extents, - size=None, - density_escape=0.99, - spacing=0.0, - iterations=50, - rotate=True, - quanta=None): +def rectangles( + extents, + size=None, + density_escape=0.99, + spacing=0.0, + iterations=50, + rotate=True, + quanta=None, +): """ Run multiple iterations of rectangle packing, this is the core function for all rectangular packing. @@ -459,11 +458,8 @@ def rectangles(extents, # run a single insertion order # don't shuffle the first run, shuffle subsequent runs bounds, insert = rectangles_single( - extents=extents, - size=size, - shuffle=(i != 0), - rotate=rotate - ) + extents=extents, size=size, shuffle=(i != 0), rotate=rotate + ) count = insert.sum() extents_all = bounds.reshape((-1, dim)).ptp(axis=0) @@ -490,12 +486,12 @@ def rectangles(extents, # shrink the bounds by spacing result[0] += [[[spacing], [-spacing]]] - log.debug(f'packed with density {best_density:0.5f}') + log.debug(f"packed with density {best_density:0.5f}") return result -def images(images, power_resize=False): +def images(images, power_resize: bool = False, deduplicate: bool = False): """ Pack a list of images and return result and offsets. @@ -507,6 +503,7 @@ def images(images, power_resize=False): Should the result image be upsized to the nearest power of two? Not every GPU supports materials that aren't a power of two size. 
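For context on the core packer reformatted above, a small sketch of calling `rectangles` directly; the random extents are illustrative, and the density expression mirrors the one used for the early exit:

```
import numpy as np
from trimesh.path import packing

# pack 50 random 2D rectangles into a single sheet
extents = np.random.random((50, 2))
bounds, consume = packing.rectangles(extents=extents)

# `bounds` holds one placed AABB per consumed rectangle
assert bounds.shape == (consume.sum(), 2, 2)
# same density metric the iteration loop checks against `density_escape`
density = extents[consume].prod(axis=1).sum() / bounds.reshape((-1, 2)).ptp(axis=0).prod()
```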
+ deduplicate Returns ----------- @@ -516,11 +513,23 @@ def images(images, power_resize=False): Offsets for original image to pack """ from PIL import Image - # use the number of pixels as the rectangle size - bounds, insert = rectangles( - extents=[i.size for i in images], rotate=False) - # really should have inserted all the rect - assert insert.all() + + if deduplicate: + # only pack duplicate images once + _, index, inverse = np.unique( + [hash(i.tobytes()) for i in images], return_index=True, return_inverse=True + ) + # use the number of pixels as the rectangle size + bounds, insert = rectangles(extents=[images[i].size for i in index], rotate=False) + # really should have inserted all the rect + assert insert.all() + # re-index back to original indexes + bounds = bounds[inverse] + else: + # use the number of pixels as the rectangle size + bounds, insert = rectangles(extents=[i.size for i in images], rotate=False) + # really should have inserted all the rect + assert insert.all() # offsets should be integer multiple of pizels offset = bounds[:, 0].round().astype(int) @@ -573,16 +582,22 @@ def meshes(meshes, **kwargs): # generate the transforms from an origin centered AABB # to the final placed and rotated AABB - transforms = np.array([ - np.dot(r, np.linalg.inv(o)) for - o, r in zip(obb_transform[consume], - roll_transform(bounds=bounds, - extents=obb_extent[consume]))], - dtype=np.float64) + transforms = np.array( + [ + np.dot(r, np.linalg.inv(o)) + for o, r in zip( + obb_transform[consume], + roll_transform(bounds=bounds, extents=obb_extent[consume]), + ) + ], + dtype=np.float64, + ) # copy the meshes and move into position - placed = [meshes[index].copy().apply_transform(T) - for index, T in zip(np.nonzero(consume)[0], transforms)] + placed = [ + meshes[index].copy().apply_transform(T) + for index, T in zip(np.nonzero(consume)[0], transforms) + ] return placed, transforms, consume @@ -640,7 +655,7 @@ def roll_transform(bounds, extents): into the position determined by `bounds`. 
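A sketch of the new `deduplicate` path, assuming `images` returns the packed sheet plus per-image pixel offsets as its docstring implies:

```
from PIL import Image
from trimesh.path import packing

# the first and last tile are identical and should pack to one slot
tiles = [Image.new("RGB", (32, 32), c) for c in ("red", "green", "red")]
sheet, offsets = packing.images(tiles, power_resize=True, deduplicate=True)

# duplicates map back to the same offset through the `inverse` indexing
assert (offsets[0] == offsets[2]).all()
```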
""" if len(bounds) != len(extents): - raise ValueError('`bounds` must match `extents`') + raise ValueError("`bounds` must match `extents`") if len(extents) == 0: return [] @@ -655,19 +670,28 @@ def roll_transform(bounds, extents): # a lookup table for rotations for rolling cuboiods # as `lookup[dimension - 2][roll]` # implemented for 2D and 3D - lookup = [np.array([np.eye(3), - np.array([[0., -1., 0.], - [1., 0., 0.], - [0., 0., 1.]])]), - np.array([np.eye(4), - [[-0., -0., -1., -0.], - [-1., -0., -0., -0.], - [0., 1., 0., 0.], - [0., 0., 0., 1.]], - [[-0., -1., -0., -0.], - [0., 0., 1., 0.], - [-1., -0., -0., -0.], - [0., 0., 0., 1.]]])] + lookup = [ + np.array( + [np.eye(3), np.array([[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])] + ), + np.array( + [ + np.eye(4), + [ + [-0.0, -0.0, -1.0, -0.0], + [-1.0, -0.0, -0.0, -0.0], + [0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + ], + [ + [-0.0, -1.0, -0.0, -0.0], + [0.0, 0.0, 1.0, 0.0], + [-1.0, -0.0, -0.0, -0.0], + [0.0, 0.0, 0.0, 1.0], + ], + ] + ), + ] # rectangular rotation involves rolling for roll in range(extents.shape[1]): @@ -691,17 +715,21 @@ def roll_transform(bounds, extents): if dimension == 3: # make sure bounds match inputs from ..creation import box - assert all(allclose(box(extents=e).apply_transform(m).bounds, b) - for b, e, m in zip(bounds, extents, result)) + + assert all( + allclose(box(extents=e).apply_transform(m).bounds, b) + for b, e, m in zip(bounds, extents, result) + ) elif dimension == 2: # in 2D check with a rectangle from .creation import rectangle + assert all( - allclose(rectangle( - bounds=[-e / 2, e / 2]).apply_transform(m).bounds, b) - for b, e, m in zip(bounds, extents, result)) + allclose(rectangle(bounds=[-e / 2, e / 2]).apply_transform(m).bounds, b) + for b, e, m in zip(bounds, extents, result) + ) else: - raise ValueError('unsupported dimension') + raise ValueError("unsupported dimension") return result @@ -725,9 +753,9 @@ def bounds_overlap(bounds, epsilon=1e-8): True if any bound intersects any other bound. 
""" # pad AABB by epsilon for deterministic intersections - padded = np.array(bounds) + np.reshape( - [epsilon, -epsilon], (1, 2, 1)) + padded = np.array(bounds) + np.reshape([epsilon, -epsilon], (1, 2, 1)) tree = bounds_tree(padded) # every returned AABB should not overlap with any other AABB - return any(set(tree.intersection(current.ravel())) != - {i} for i, current in enumerate(bounds)) + return any( + set(tree.intersection(current.ravel())) != {i} for i, current in enumerate(bounds) + ) diff --git a/trimesh/visual/__init__.py b/trimesh/visual/__init__.py index a99f48887..63a7f25aa 100644 --- a/trimesh/visual/__init__.py +++ b/trimesh/visual/__init__.py @@ -6,14 +6,16 @@ Handle visual properties for meshes, including color and texture """ -from .color import (ColorVisuals, - random_color, - to_rgba, - DEFAULT_COLOR, - interpolate, - uv_to_color, - uv_to_interpolated_color, - linear_color_map) +from .color import ( + ColorVisuals, + random_color, + to_rgba, + DEFAULT_COLOR, + interpolate, + uv_to_color, + uv_to_interpolated_color, + linear_color_map, +) from .texture import TextureVisuals from .objects import create_visual, concatenate @@ -27,16 +29,18 @@ # explicitly list imports in __all__ # as otherwise flake8 gets mad -__all__ = ['color', - 'texture', - 'resolvers', - 'TextureVisuals', - 'ColorVisuals', - 'random_color', - 'to_rgba', - 'create_visual', - 'DEFAULT_COLOR', - 'interpolate', - 'linear_color_map', - 'uv_to_color', - 'uv_to_interpolated_color'] +__all__ = [ + "color", + "texture", + "resolvers", + "TextureVisuals", + "ColorVisuals", + "random_color", + "to_rgba", + "create_visual", + "DEFAULT_COLOR", + "interpolate", + "linear_color_map", + "uv_to_color", + "uv_to_interpolated_color", +] diff --git a/trimesh/visual/base.py b/trimesh/visual/base.py index 38f567fc6..b07c060bf 100644 --- a/trimesh/visual/base.py +++ b/trimesh/visual/base.py @@ -13,6 +13,7 @@ class Visuals(ABC): """ Parent of Visual classes. """ + @abc.abstractproperty def kind(self): pass diff --git a/trimesh/visual/color.py b/trimesh/visual/color.py index 51b25199e..6b5ccae35 100644 --- a/trimesh/visual/color.py +++ b/trimesh/visual/color.py @@ -38,10 +38,7 @@ class ColorVisuals(Visuals): Store color information about a mesh. """ - def __init__(self, - mesh=None, - face_colors=None, - vertex_colors=None): + def __init__(self, mesh=None, face_colors=None, vertex_colors=None): """ Store color information about a mesh. 
@@ -57,17 +54,14 @@ def __init__(self, """ self.mesh = mesh self._data = caching.DataStore() - self._cache = caching.Cache( - id_function=self._data.__hash__) + self._cache = caching.Cache(id_function=self._data.__hash__) self.defaults = { - 'material_diffuse': np.array([102, 102, 102, 255], - dtype=np.uint8), - 'material_ambient': np.array([64, 64, 64, 255], - dtype=np.uint8), - 'material_specular': np.array([197, 197, 197, 255], - dtype=np.uint8), - 'material_shine': 77.0} + "material_diffuse": np.array([102, 102, 102, 255], dtype=np.uint8), + "material_ambient": np.array([64, 64, 64, 255], dtype=np.uint8), + "material_specular": np.array([197, 197, 197, 255], dtype=np.uint8), + "material_shine": 77.0, + } try: if face_colors is not None: @@ -75,7 +69,7 @@ def __init__(self, if vertex_colors is not None: self.vertex_colors = vertex_colors except ValueError: - util.log.warning('unable to convert colors!') + util.log.warning("unable to convert colors!") @caching.cache_decorator def transparency(self): @@ -86,10 +80,10 @@ def transparency(self): ---------- transparency: bool, does the current visual contain transparency """ - if 'vertex_colors' in self._data: - a_min = self._data['vertex_colors'][:, 3].min() - elif 'face_colors' in self._data: - a_min = self._data['face_colors'][:, 3].min() + if "vertex_colors" in self._data: + a_min = self._data["vertex_colors"][:, 3].min() + elif "face_colors" in self._data: + a_min = self._data["face_colors"][:, 3].min() else: return False @@ -125,10 +119,10 @@ def kind(self): self._verify_hash() # check modes in data - if 'vertex_colors' in self._data: - return 'vertex' - elif 'face_colors' in self._data: - return 'face' + if "vertex_colors" in self._data: + return "vertex" + elif "face_colors" in self._data: + return "face" return None @@ -166,7 +160,7 @@ def face_colors(self): colors : (len(mesh.faces), 4) uint8 RGBA color for each face """ - return self._get_colors(name='face') + return self._get_colors(name="face") @face_colors.setter def face_colors(self, values): @@ -184,20 +178,19 @@ def face_colors(self, values): (4,) int, set the whole mesh this color """ if values is None: - if 'face_colors' in self._data: - self._data.data.pop('face_colors') + if "face_colors" in self._data: + self._data.data.pop("face_colors") return colors = to_rgba(values) - if (self.mesh is not None and - colors.shape == (4,)): + if self.mesh is not None and colors.shape == (4,): count = len(self.mesh.faces) colors = np.tile(colors, (count, 1)) # if we set any color information, clear the others self._data.clear() - self._data['face_colors'] = colors + self._data["face_colors"] = colors self._cache.verify() @property @@ -209,7 +202,7 @@ def vertex_colors(self): ------------ colors: (len(mesh.vertices), 4) uint8, color for each vertex """ - return self._get_colors(name='vertex') + return self._get_colors(name="vertex") @vertex_colors.setter def vertex_colors(self, values): @@ -227,29 +220,29 @@ def vertex_colors(self, values): (4,) int, set the whole mesh this color """ if values is None: - if 'vertex_colors' in self._data: - self._data.data.pop('vertex_colors') + if "vertex_colors" in self._data: + self._data.data.pop("vertex_colors") return # make sure passed values are numpy array values = np.asanyarray(values) # Ensure the color shape is sane - if (self.mesh is not None and not - (values.shape == (len(self.mesh.vertices), 3) or - values.shape == (len(self.mesh.vertices), 4) or - values.shape == (3,) or - values.shape == (4,))): + if self.mesh is not None and not ( + 
values.shape == (len(self.mesh.vertices), 3) + or values.shape == (len(self.mesh.vertices), 4) + or values.shape == (3,) + or values.shape == (4,) + ): return colors = to_rgba(values) - if (self.mesh is not None and - colors.shape == (4,)): + if self.mesh is not None and colors.shape == (4,): count = len(self.mesh.vertices) colors = np.tile(colors, (count, 1)) # if we set any color information, clear the others self._data.clear() - self._data['vertex_colors'] = colors + self._data["vertex_colors"] = colors self._cache.verify() def _get_colors(self, name): @@ -279,17 +272,17 @@ def _get_colors(self, name): count = None try: - if name == 'face': + if name == "face": count = len(self.mesh.faces) - elif name == 'vertex': + elif name == "vertex": count = len(self.mesh.vertices) except BaseException: pass # the face or vertex colors - key_colors = str(name) + '_colors' + key_colors = str(name) + "_colors" # the initial hash of the colors - key_hash = key_colors + '_hash' + key_hash = key_colors + "_hash" if key_colors in self._data: # if a user has explicitly stored or changed the color it @@ -305,12 +298,12 @@ def _get_colors(self, name): if hash(colors) != self._cache[key_hash]: # call the setter on the property using exec # this avoids having to pass a setter to this function - if name == 'face': + if name == "face": self.face_colors = colors - elif name == 'vertex': + elif name == "vertex": self.vertex_colors = colors else: - raise ValueError('unsupported name!!!') + raise ValueError("unsupported name!!!") self._cache.verify() # return the stored copy of the colors return self._data[key_colors] @@ -319,24 +312,20 @@ def _get_colors(self, name): if self.kind is None: # no colors are defined, so create a (count, 4) tiled # copy of the default color - colors = np.tile(self.defaults['material_diffuse'], - (count, 1)) - elif (self.kind == 'vertex' and - name == 'face'): + colors = np.tile(self.defaults["material_diffuse"], (count, 1)) + elif self.kind == "vertex" and name == "face": colors = vertex_to_face_color( - vertex_colors=self.vertex_colors, - faces=self.mesh.faces) - elif (self.kind == 'face' and - name == 'vertex'): + vertex_colors=self.vertex_colors, faces=self.mesh.faces + ) + elif self.kind == "face" and name == "vertex": colors = face_to_vertex_color( - mesh=self.mesh, - face_colors=self.face_colors) + mesh=self.mesh, face_colors=self.face_colors + ) else: - raise ValueError('self.kind not accepted values!!') + raise ValueError("self.kind not accepted values!!") - if (count is not None and - colors.shape != (count, 4)): - raise ValueError('face colors incorrect shape!') + if count is not None and colors.shape != (count, 4): + raise ValueError("face colors incorrect shape!") # subclass the array to track for changes using a hash colors = caching.tracked_array(colors) @@ -356,14 +345,14 @@ def _verify_hash(self): the DataStore at self._data since the user action has made them user data. 
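The caching dance in `_get_colors` means the "other" color mode is derived on demand rather than stored; for example, setting vertex colors and then reading face colors:

```
import trimesh

m = trimesh.creation.icosphere()
m.visual.vertex_colors = trimesh.visual.random_color()
assert m.visual.kind == "vertex"

# face colors are not stored: they are derived from the vertex
# colors via `vertex_to_face_color` and cached until mutated
assert m.visual.face_colors.shape == (len(m.faces), 4)
```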
""" - if not hasattr(self, '_cache') or len(self._cache) == 0: + if not hasattr(self, "_cache") or len(self._cache) == 0: return - for name in ['face', 'vertex']: + for name in ["face", "vertex"]: # the face or vertex colors - key_colors = str(name) + '_colors' + key_colors = str(name) + "_colors" # the initial hash of the colors - key_hash = key_colors + '_hash' + key_hash = key_colors + "_hash" if key_colors not in self._cache: continue @@ -372,25 +361,25 @@ def _verify_hash(self): # if the cached colors have been changed since creation # move them to data if hash(colors) != self._cache[key_hash]: - if name == 'face': + if name == "face": self.face_colors = colors - elif name == 'vertex': + elif name == "vertex": self.vertex_colors = colors else: - raise ValueError('unsupported name!!!') + raise ValueError("unsupported name!!!") self._cache.verify() def update_vertices(self, mask): """ Apply a mask to remove or duplicate vertex properties. """ - self._update_key(mask, 'vertex_colors') + self._update_key(mask, "vertex_colors") def update_faces(self, mask): """ Apply a mask to remove or duplicate face properties """ - self._update_key(mask, 'face_colors') + self._update_key(mask, "face_colors") def face_subset(self, face_index): """ @@ -430,12 +419,12 @@ def main_color(self): """ if self.kind is None: return DEFAULT_COLOR - elif self.kind == 'face': + elif self.kind == "face": colors = self.face_colors - elif self.kind == 'vertex': + elif self.kind == "vertex": colors = self.vertex_colors else: - raise ValueError('color kind incorrect!') + raise ValueError("color kind incorrect!") # find the unique colors unique, inverse = unique_rows(colors) @@ -457,6 +446,7 @@ def to_texture(self): Copy of the current visuals as a texture. """ from .texture import TextureVisuals + mat, uv = color_to_uv(vertex_colors=self.vertex_colors) return TextureVisuals(material=mat, uv=uv) @@ -479,6 +469,7 @@ def concatenate(self, other, *args): """ # avoid a circular import from . 
import objects + result = objects.concatenate(self, other, *args) return result @@ -512,7 +503,7 @@ def __init__(self, colors=None, obj=None): @property def kind(self): - return 'vertex' + return "vertex" def update_vertices(self, mask): if self._colors is not None: @@ -558,9 +549,7 @@ def concatenate(self, other): concate : VertexColor Object with both colors """ - return VertexColor(colors=np.vstack( - self.vertex_colors, - other.vertex_colors)) + return VertexColor(colors=np.vstack(self.vertex_colors, other.vertex_colors)) def __hash__(self): return self._colors.__hash__() @@ -589,29 +578,26 @@ def to_rgba(colors, dtype=np.uint8): # integer value for opaque alpha given our datatype opaque = np.iinfo(dtype).max - if colors.dtype.kind == 'f': + if colors.dtype.kind == "f": # replace any `nan` or `inf` values with zero colors[~np.isfinite(colors)] = 0.0 - if (colors.dtype.kind == 'f' and colors.max() < (1.0 + 1e-8)): + if colors.dtype.kind == "f" and colors.max() < (1.0 + 1e-8): colors = (colors * opaque).round().astype(dtype) - elif (colors.max() <= opaque): + elif colors.max() <= opaque: colors = colors.astype(dtype) else: - raise ValueError('colors non-convertible!') + raise ValueError("colors non-convertible!") if util.is_shape(colors, (-1, 3)): # add an opaque alpha for RGB colors - colors = np.column_stack(( - colors, - opaque * np.ones(len(colors)))).astype(dtype) + colors = np.column_stack((colors, opaque * np.ones(len(colors)))).astype(dtype) elif util.is_shape(colors, (3,)): # if passed a single RGB color add an alpha colors = np.append(colors, opaque).astype(dtype) - if not (util.is_shape(colors, (4,)) or - util.is_shape(colors, (-1, 4))): - raise ValueError('Colors not of appropriate shape!') + if not (util.is_shape(colors, (4,)) or util.is_shape(colors, (-1, 4))): + raise ValueError("Colors not of appropriate shape!") return colors @@ -633,14 +619,14 @@ def to_float(colors): # colors as numpy array colors = np.asanyarray(colors) - if colors.dtype.kind == 'f': + if colors.dtype.kind == "f": return colors - elif colors.dtype.kind in 'iu': + elif colors.dtype.kind in "iu": # integer value for opaque alpha given our datatype opaque = np.iinfo(colors.dtype).max return colors.astype(np.float64) / opaque else: - raise ValueError('only works on int or float colors!') + raise ValueError("only works on int or float colors!") def hex_to_rgba(color): @@ -655,12 +641,12 @@ def hex_to_rgba(color): ----------- rgba: (4,) np.uint8, RGBA color """ - value = str(color).lstrip('#').strip() + value = str(color).lstrip("#").strip() if len(value) == 6: - rgb = [int(value[i:i + 2], 16) for i in (0, 2, 4)] + rgb = [int(value[i : i + 2], 16) for i in (0, 2, 4)] rgba = np.append(rgb, 255).astype(np.uint8) else: - raise ValueError('Only RGB supported') + raise ValueError("Only RGB supported") return rgba @@ -677,11 +663,11 @@ def random_color(dtype=np.uint8): ---------- color: (4,) dtype, random color that looks OK """ - hue = np.random.random() + .61803 + hue = np.random.random() + 0.61803 hue %= 1.0 - color = np.array(colorsys.hsv_to_rgb(hue, .99, .99)) - if np.dtype(dtype).kind in 'iu': - max_value = (2**(np.dtype(dtype).itemsize * 8)) - 1 + color = np.array(colorsys.hsv_to_rgb(hue, 0.99, 0.99)) + if np.dtype(dtype).kind in "iu": + max_value = (2 ** (np.dtype(dtype).itemsize * 8)) - 1 color *= max_value color = np.append(color, max_value).astype(dtype) return color @@ -705,10 +691,7 @@ def vertex_to_face_color(vertex_colors, faces): return face_colors.astype(np.uint8) -def face_to_vertex_color( - 
mesh, - face_colors, - dtype=np.uint8): +def face_to_vertex_color(mesh, face_colors, dtype=np.uint8): """ Convert face colors into vertex colors. @@ -769,7 +752,7 @@ def colors_to_materials(colors, count=None): unique, index = unique_rows(rgba) diffuse = rgba[unique] else: - raise ValueError('Colors not convertible!') + raise ValueError("Colors not convertible!") return diffuse, index @@ -795,24 +778,20 @@ def linear_color_map(values, color_range=None): """ if color_range is None: - color_range = np.array([[255, 0, 0, 255], - [0, 255, 0, 255]], - dtype=np.uint8) + color_range = np.array([[255, 0, 0, 255], [0, 255, 0, 255]], dtype=np.uint8) else: - color_range = np.asanyarray(color_range, - dtype=np.uint8) + color_range = np.asanyarray(color_range, dtype=np.uint8) if color_range.shape != (2, 4): - raise ValueError('color_range must be RGBA (2, 4)') + raise ValueError("color_range must be RGBA (2, 4)") # float 1D array clamped to 0.0 - 1.0 - values = np.clip(np.asanyarray( - values, dtype=np.float64).ravel(), - 0.0, 1.0).reshape((-1, 1)) + values = np.clip(np.asanyarray(values, dtype=np.float64).ravel(), 0.0, 1.0).reshape( + (-1, 1) + ) # the stacked component colors - color = [np.ones((len(values), 4)) * c - for c in color_range.astype(np.float64)] + color = [np.ones((len(values), 4)) * c for c in color_range.astype(np.float64)] # interpolated colors colors = (color[1] * values) + (color[0] * (1.0 - values)) @@ -848,6 +827,7 @@ def interpolate(values, color_map=None, dtype=np.uint8): cmap = linear_color_map else: from matplotlib.pyplot import get_cmap + cmap = get_cmap(color_map) # make input always float @@ -888,8 +868,9 @@ def uv_to_color(uv, image): # access colors from pixel locations # make sure image is RGBA before getting values - colors = np.asanyarray(image.convert('RGBA'))[ - y.round().astype(np.int64), x.round().astype(np.int64)] + colors = np.asanyarray(image.convert("RGBA"))[ + y.round().astype(np.int64), x.round().astype(np.int64) + ] # conversion to RGBA should have corrected shape assert colors.ndim == 2 and colors.shape[1] == 4 @@ -921,8 +902,8 @@ def uv_to_interpolated_color(uv, image): uv = np.asanyarray(uv, dtype=np.float64) # get texture image pixel positions of UV coordinates - x = (uv[:, 0] * (image.width - 1)) - y = ((1 - uv[:, 1]) * (image.height - 1)) + x = uv[:, 0] * (image.width - 1) + y = (1 - uv[:, 1]) * (image.height - 1) x_floor = np.floor(x).astype(np.int64) % image.width y_floor = np.floor(y).astype(np.int64) % image.height @@ -933,7 +914,7 @@ def uv_to_interpolated_color(uv, image): dx = x % image.width - x_floor dy = y % image.height - y_floor - img = np.asanyarray(image.convert('RGBA')) + img = np.asanyarray(image.convert("RGBA")) colors00 = img[y_floor, x_floor] colors01 = img[y_floor, x_ceil] @@ -951,8 +932,11 @@ def uv_to_interpolated_color(uv, image): a11 = np.repeat(a11[:, None], 4, axis=1) # interpolated colors as floating point then convert back to uint8 - colors = (a00 * colors00 + a01 * colors01 + - a10 * colors10 + a11 * colors11).round().astype(np.uint8) + colors = ( + (a00 * colors00 + a01 * colors01 + a10 * colors10 + a11 * colors11) + .round() + .astype(np.uint8) + ) # conversion to RGBA should have corrected shape assert colors.ndim == 2 and colors.shape[1] == 4 @@ -995,8 +979,8 @@ def color_to_uv(vertex_colors): size = int(np.ceil(np.sqrt(len(unique)))) ctype = vertex_colors.shape[1] - colors = np.zeros((size ** 2, ctype), dtype=vertex_colors.dtype) - colors[:len(unique)] = vertex_colors[unique] + colors = np.zeros((size**2, ctype), 
dtype=vertex_colors.dtype) + colors[: len(unique)] = vertex_colors[unique] # PIL has reversed x-y coordinates image = Image.fromarray(colors.reshape((size, size, ctype))[::-1]) diff --git a/trimesh/visual/gloss.py b/trimesh/visual/gloss.py index 2424c93c1..8aa42bc74 100644 --- a/trimesh/visual/gloss.py +++ b/trimesh/visual/gloss.py @@ -11,12 +11,13 @@ def specular_to_pbr( - specularFactor=None, - glossinessFactor=None, - specularGlossinessTexture=None, - diffuseTexture=None, - diffuseFactor=None, - **kwargs): + specularFactor=None, + glossinessFactor=None, + specularGlossinessTexture=None, + diffuseTexture=None, + diffuseFactor=None, + **kwargs, +): """ Convert the KHR_materials_pbrSpecularGlossiness to a metallicRoughness visual. @@ -52,12 +53,12 @@ def specular_to_pbr( # https://github.com/KhronosGroup/glTF/blob/89427b26fcac884385a2e6d5803d917ab5d1b04f/extensions/2.0/Archived/KHR_materials_pbrSpecularGlossiness/examples/convert-between-workflows-bjs/js/babylon.pbrUtilities.js#L33-L64 if isinstance(Image, ExceptionWrapper): - log.debug('unable to convert specular-glossy material without pillow!') + log.debug("unable to convert specular-glossy material without pillow!") result = {} if isinstance(diffuseTexture, dict): - result['baseColorTexture'] = diffuseTexture + result["baseColorTexture"] = diffuseTexture if diffuseFactor is not None: - result['baseColorFactor'] = diffuseFactor + result["baseColorFactor"] = diffuseFactor return result dielectric_specular = np.array([0.04, 0.04, 0.04], dtype=np.float32) @@ -73,8 +74,11 @@ def solve_metallic(diffuse, specular, one_minus_specular_strength): specular = specular[..., None] a = dielectric_specular[0] - b = diffuse * one_minus_specular_strength / \ - (1.0 - dielectric_specular[0]) + specular - 2.0 * dielectric_specular[0] + b = ( + diffuse * one_minus_specular_strength / (1.0 - dielectric_specular[0]) + + specular + - 2.0 * dielectric_specular[0] + ) c = dielectric_specular[0] - specular D = b * b - 4.0 * a * c D = np.clip(D, epsilon, None) @@ -84,7 +88,7 @@ def solve_metallic(diffuse, specular, one_minus_specular_strength): return metallic def get_perceived_brightness(rgb): - return np.sqrt(np.dot(rgb[..., :3]**2, [0.299, 0.587, 0.114])) + return np.sqrt(np.dot(rgb[..., :3] ** 2, [0.299, 0.587, 0.114])) def toPIL(img, mode=None): if isinstance(img, Image): @@ -101,15 +105,16 @@ def get_float(val): return val.tolist() def get_diffuse(diffuseFactor, diffuseTexture): - diffuseFactor = diffuseFactor if diffuseFactor is not None else [ - 1.0, 1.0, 1.0, 1.0] + diffuseFactor = ( + diffuseFactor if diffuseFactor is not None else [1.0, 1.0, 1.0, 1.0] + ) diffuseFactor = np.array(diffuseFactor, dtype=np.float32) if diffuseTexture is not None: - if diffuseTexture.mode == 'BGR': - diffuseTexture = diffuseTexture.convert('RGB') - elif diffuseTexture.mode == 'BGRA': - diffuseTexture = diffuseTexture.convert('RGBA') + if diffuseTexture.mode == "BGR": + diffuseTexture = diffuseTexture.convert("RGB") + elif diffuseTexture.mode == "BGRA": + diffuseTexture = diffuseTexture.convert("RGBA") diffuse = np.array(diffuseTexture) / 255.0 # diffuseFactor must be applied to linear scaled colors . 
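Before moving on to `gloss.py`, a couple of quick checks of the color helpers reformatted above (values chosen for illustration):

```
import numpy as np
from trimesh.visual.color import interpolate, to_rgba

# unit-range float RGB is scaled to uint8 and gets an opaque alpha
assert to_rgba([1.0, 0.0, 0.0]).tolist() == [255, 0, 0, 255]

# with no `color_map` this falls back to the linear red->green map
colors = interpolate(np.linspace(0.0, 1.0, 5))
assert colors.shape == (5, 4) and colors.dtype == np.uint8
```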
@@ -128,25 +133,27 @@ def get_diffuse(diffuseFactor, diffuseTexture): # this should actually not happen, but it seems like many materials are not complying with the spec diffuse = np.concatenate([diffuse, alpha], axis=-1) else: - diffuse[...,-1:] *= alpha + diffuse[..., -1:] *= alpha elif diffuse.shape[-1] == diffuseFactor.shape[-1]: diffuse = diffuse * diffuseFactor elif diffuse.shape[-1] == 3 and diffuseFactor.shape[-1] == 4: - diffuse = np.concatenate([diffuse, np.ones_like( - diffuse[..., :1])], axis=-1) * diffuseFactor + diffuse = ( + np.concatenate([diffuse, np.ones_like(diffuse[..., :1])], axis=-1) + * diffuseFactor + ) else: log.warning( - '`diffuseFactor` and `diffuseTexture` have incompatible shapes: ' + - f'{diffuseFactor.shape} and {diffuse.shape}') + "`diffuseFactor` and `diffuseTexture` have incompatible shapes: " + + f"{diffuseFactor.shape} and {diffuse.shape}" + ) else: diffuse = diffuseFactor if diffuseFactor is not None else [1, 1, 1, 1] diffuse = np.array(diffuse, dtype=np.float32) return diffuse def get_specular_glossiness( - specularFactor, - glossinessFactor, - specularGlossinessTexture): + specularFactor, glossinessFactor, specularGlossinessTexture + ): if specularFactor is None: specularFactor = [1.0, 1.0, 1.0] specularFactor = np.array(specularFactor, dtype=np.float32) @@ -160,19 +167,22 @@ def get_specular_glossiness( # be multiplied with the provided factors if specularGlossinessTexture is not None: - if specularGlossinessTexture.mode == 'BGR': - specularGlossinessTexture = specularGlossinessTexture.convert('RGB') - elif specularGlossinessTexture.mode == 'BGRA': - specularGlossinessTexture = specularGlossinessTexture.convert('RGBA') + if specularGlossinessTexture.mode == "BGR": + specularGlossinessTexture = specularGlossinessTexture.convert("RGB") + elif specularGlossinessTexture.mode == "BGRA": + specularGlossinessTexture = specularGlossinessTexture.convert("RGBA") specularGlossinessTexture = np.array(specularGlossinessTexture) / 255.0 specularTexture, glossinessTexture = None, None - if (len(specularGlossinessTexture.shape) == 2 or - specularGlossinessTexture.shape[-1]) == 1: + if ( + len(specularGlossinessTexture.shape) == 2 + or specularGlossinessTexture.shape[-1] + ) == 1: # use the one channel as a multiplier for specular and glossiness specularTexture = glossinessTexture = specularGlossinessTexture.reshape( - (-1, -1, 1)) + (-1, -1, 1) + ) elif specularGlossinessTexture.shape[-1] == 3: # all channels are specular, glossiness is only a factor specularTexture = specularGlossinessTexture[..., :3] @@ -210,13 +220,19 @@ def get_specular_glossiness( if diffuseTexture is not None and specularGlossinessTexture is not None: # reshape to the size of the largest texture - max_shape = [max(diffuseTexture.size[i], - specularGlossinessTexture.size[i]) for i in range(2)] - if (diffuseTexture.size[0] != max_shape[0] or - diffuseTexture.size[1] != max_shape[1]): + max_shape = [ + max(diffuseTexture.size[i], specularGlossinessTexture.size[i]) + for i in range(2) + ] + if ( + diffuseTexture.size[0] != max_shape[0] + or diffuseTexture.size[1] != max_shape[1] + ): diffuseTexture = diffuseTexture.resize(max_shape) - if (specularGlossinessTexture.size[0] != max_shape[0] or - specularGlossinessTexture.size[1] != max_shape[1]): + if ( + specularGlossinessTexture.size[0] != max_shape[0] + or specularGlossinessTexture.size[1] != max_shape[1] + ): specularGlossinessTexture = specularGlossinessTexture.resize(max_shape) def srgb2lin(s): @@ -241,10 +257,9 @@ def 
convert_texture_srgb2lin(texture): # only scale the color channels, not the alpha channel if color_channels == 4 or color_channels == 2: color_channels -= 1 - result[...,:color_channels] = srgb2lin(result[...,:color_channels]) + result[..., :color_channels] = srgb2lin(result[..., :color_channels]) return result - def lin2srgb(lin): """ Converts linear color values to sRGB color values. @@ -267,27 +282,32 @@ def convert_texture_lin2srgb(texture): # only scale the color channels, not the alpha channel if color_channels == 4 or color_channels == 2: color_channels -= 1 - result[...,:color_channels] = lin2srgb(result[...,:color_channels]) + result[..., :color_channels] = lin2srgb(result[..., :color_channels]) return result diffuse = get_diffuse(diffuseFactor, diffuseTexture) specular, glossiness, one_minus_specular_strength = get_specular_glossiness( - specularFactor, glossinessFactor, specularGlossinessTexture) + specularFactor, glossinessFactor, specularGlossinessTexture + ) metallic = solve_metallic( get_perceived_brightness(diffuse), get_perceived_brightness(specular), - one_minus_specular_strength) + one_minus_specular_strength, + ) if not isinstance(metallic, np.ndarray): metallic = np.array(metallic, dtype=np.float32) diffuse_rgb = diffuse[..., :3] opacity = diffuse[..., -1] if diffuse.shape[-1] == 4 else None - base_color_from_diffuse = diffuse_rgb * (one_minus_specular_strength / ( - 1.0 - dielectric_specular[0]) / np.clip((1.0 - metallic), epsilon, None)) - base_color_from_specular = ( - specular - dielectric_specular * - (1.0 - metallic)) * (1.0 / np.clip(metallic, epsilon, None)) + base_color_from_diffuse = diffuse_rgb * ( + one_minus_specular_strength + / (1.0 - dielectric_specular[0]) + / np.clip((1.0 - metallic), epsilon, None) + ) + base_color_from_specular = (specular - dielectric_specular * (1.0 - metallic)) * ( + 1.0 / np.clip(metallic, epsilon, None) + ) mm = metallic * metallic base_color = mm * base_color_from_specular + (1.0 - mm) * base_color_from_diffuse base_color = np.clip(base_color, 0.0, 1.0) @@ -298,9 +318,12 @@ def convert_texture_lin2srgb(texture): result = {} if len(base_color.shape) > 1: # convert back to sRGB - result['baseColorTexture'] = toPIL(convert_texture_lin2srgb(base_color), mode=('RGB' if base_color.shape[-1] == 3 else 'RGBA')) + result["baseColorTexture"] = toPIL( + convert_texture_lin2srgb(base_color), + mode=("RGB" if base_color.shape[-1] == 3 else "RGBA"), + ) else: - result['baseColorFactor'] = base_color.tolist() + result["baseColorFactor"] = base_color.tolist() if len(metallic.shape) > 1 or len(glossiness.shape) > 1: if len(glossiness.shape) == 1: @@ -309,12 +332,16 @@ def convert_texture_lin2srgb(texture): metallic = np.tile(metallic, (glossiness.shape[0], glossiness.shape[1], 1)) # we need to use RGB textures, because 2 channel textures can cause problems - result['metallicRoughnessTexture'] = toPIL( - np.concatenate([metallic, 1.0 - glossiness, np.zeros_like(metallic)], axis=-1), mode='RGB') - result['metallicFactor'] = 1.0 - result['roughnessFactor'] = 1.0 + result["metallicRoughnessTexture"] = toPIL( + np.concatenate( + [metallic, 1.0 - glossiness, np.zeros_like(metallic)], axis=-1 + ), + mode="RGB", + ) + result["metallicFactor"] = 1.0 + result["roughnessFactor"] = 1.0 else: - result['metallicFactor'] = get_float(metallic) - result['roughnessFactor'] = get_float(1.0 - glossiness) + result["metallicFactor"] = get_float(metallic) + result["roughnessFactor"] = get_float(1.0 - glossiness) return result diff --git a/trimesh/visual/material.py 
b/trimesh/visual/material.py index fd270cefc..c2c5ae70c 100644 --- a/trimesh/visual/material.py +++ b/trimesh/visual/material.py @@ -19,11 +19,11 @@ class Material(util.ABC): def __init__(self, *args, **kwargs): - raise NotImplementedError('must be subclassed!') + raise NotImplementedError("must be subclassed!") @abc.abstractmethod def __hash__(self): - raise NotImplementedError('must be subclassed!') + raise NotImplementedError("must be subclassed!") @abc.abstractproperty def main_color(self): @@ -38,9 +38,9 @@ def main_color(self): @property def name(self): - if hasattr(self, '_name'): + if hasattr(self, "_name"): return self._name - return 'material_0' + return "material_0" @name.setter def name(self, value): @@ -55,14 +55,15 @@ class SimpleMaterial(Material): Hold a single image texture. """ - def __init__(self, - image=None, - diffuse=None, - ambient=None, - specular=None, - glossiness=None, - **kwargs): - + def __init__( + self, + image=None, + diffuse=None, + ambient=None, + specular=None, + glossiness=None, + **kwargs, + ): # save image self.image = image @@ -108,11 +109,13 @@ def to_obj(self, name=None): name = self.name # create an MTL file - mtl = [f'newmtl {name}', - 'Ka {:0.8f} {:0.8f} {:0.8f}'.format(*Ka), - 'Kd {:0.8f} {:0.8f} {:0.8f}'.format(*Kd), - 'Ks {:0.8f} {:0.8f} {:0.8f}'.format(*Ks), - f'Ns {self.glossiness:0.8f}'] + mtl = [ + f"newmtl {name}", + "Ka {:0.8f} {:0.8f} {:0.8f}".format(*Ka), + "Kd {:0.8f} {:0.8f} {:0.8f}".format(*Kd), + "Ks {:0.8f} {:0.8f} {:0.8f}".format(*Ks), + f"Ns {self.glossiness:0.8f}", + ] # collect the OBJ data into files data = {} @@ -121,10 +124,10 @@ def to_obj(self, name=None): image_type = self.image.format # what is the name of the export image to save if image_type is None: - image_type = 'png' - image_name = f'{name}.{image_type.lower()}' + image_type = "png" + image_name = f"{name}.{image_type.lower()}" # save the reference to the image - mtl.append(f'map_Kd {image_name}') + mtl.append(f"map_Kd {image_name}") # save the image texture as bytes in the original format f_obj = util.BytesIO() @@ -132,7 +135,7 @@ def to_obj(self, name=None): f_obj.seek(0) data[image_name] = f_obj.read() - data[f'{name}.mtl'] = '\n'.join(mtl).encode('utf-8') + data[f"{name}.mtl"] = "\n".join(mtl).encode("utf-8") return data, name @@ -146,7 +149,7 @@ def __hash__(self): hash : int Hash of image and parameters """ - if hasattr(self.image, 'tobytes'): + if hasattr(self.image, "tobytes"): # start with hash of raw image bytes hashed = hash(self.image.tobytes()) else: @@ -155,11 +158,11 @@ def __hash__(self): # we will add additional parameters with # an in-place xor of the additional value # if stored as numpy arrays add parameters - if hasattr(self.ambient, 'tobytes'): + if hasattr(self.ambient, "tobytes"): hashed ^= hash(self.ambient.tobytes()) - if hasattr(self.diffuse, 'tobytes'): + if hasattr(self.diffuse, "tobytes"): hashed ^= hash(self.diffuse.tobytes()) - if hasattr(self.specular, 'tobytes'): + if hasattr(self.specular, "tobytes"): hashed ^= hash(self.specular.tobytes()) if isinstance(self.glossiness, float): hashed ^= hash(int(self.glossiness * 1000)) @@ -174,7 +177,7 @@ def main_color(self): @property def glossiness(self): - if hasattr(self, '_glossiness'): + if hasattr(self, "_glossiness"): return self._glossiness return 1.0 @@ -197,9 +200,11 @@ def to_pbr(self): # convert specular exponent to roughness roughness = (2 / (self.glossiness + 2)) ** (1.0 / 4.0) - return PBRMaterial(roughnessFactor=roughness, - baseColorTexture=self.image, - 
baseColorFactor=self.diffuse) + return PBRMaterial( + roughnessFactor=roughness, + baseColorTexture=self.image, + baseColorFactor=self.diffuse, + ) class MultiMaterial(Material): @@ -221,8 +226,7 @@ def to_pbr(self): """ TODO : IMPLEMENT """ - pbr = [m for m in self.materials - if isinstance(m, PBRMaterial)] + pbr = [m for m in self.materials if isinstance(m, PBRMaterial)] if len(pbr) == 0: return PBRMaterial() return pbr[0] @@ -237,8 +241,7 @@ def __hash__(self): """ Returns ------------ hash : int Xor hash of the contained materials. """ - hashed = int(np.bitwise_xor.reduce( - [hash(m) for m in self.materials])) + hashed = int(np.bitwise_xor.reduce([hash(m) for m in self.materials])) return hashed @@ -298,22 +301,23 @@ class PBRMaterial(Material): Parameters with `Texture` in them must be PIL.Image objects """ - def __init__(self, - name=None, - emissiveFactor=None, - emissiveTexture=None, - baseColorFactor=None, - metallicFactor=None, - roughnessFactor=None, - normalTexture=None, - occlusionTexture=None, - baseColorTexture=None, - metallicRoughnessTexture=None, - doubleSided=False, - alphaMode=None, - alphaCutoff=None, - **kwargs): - + def __init__( + self, + name=None, + emissiveFactor=None, + emissiveTexture=None, + baseColorFactor=None, + metallicFactor=None, + roughnessFactor=None, + normalTexture=None, + occlusionTexture=None, + baseColorTexture=None, + metallicRoughnessTexture=None, + doubleSided=False, + alphaMode=None, + alphaCutoff=None, + **kwargs, + ): # store values in an internal dict self._data = {} @@ -343,8 +347,8 @@ def __init__(self, if len(kwargs) > 0: util.log.debug( - 'unsupported material keys: {}'.format( - ', '.join(kwargs.keys()))) + "unsupported material keys: {}".format(", ".join(kwargs.keys())) + ) @property def emissiveFactor(self): @@ -359,19 +363,19 @@ def emissiveFactor(self): Each element in the array MUST be greater than or equal to 0 and less than or equal to 1. """ - return self._data.get('emissiveFactor') + return self._data.get("emissiveFactor") @emissiveFactor.setter def emissiveFactor(self, value): if value is None: # passing none effectively removes value - self._data.pop('emissiveFactor', None) + self._data.pop("emissiveFactor", None) else: # non-None values must be a floating point emissive = np.array(value, dtype=np.float64).reshape(3) if emissive.min() < -_eps or emissive.max() > (1 + _eps): - raise ValueError('all factors must be between 0.0-1.0') - self._data['emissiveFactor'] = emissive + raise ValueError("all factors must be between 0.0-1.0") + self._data["emissiveFactor"] = emissive @property def alphaMode(self): @@ -385,19 +389,19 @@ def alphaMode(self): alphaMode : str One of 'OPAQUE', 'MASK', 'BLEND' """ - return self._data.get('alphaMode') + return self._data.get("alphaMode") @alphaMode.setter def alphaMode(self, value): if value is None: # passing none effectively removes value - self._data.pop('alphaMode', None) + self._data.pop("alphaMode", None) else: # non-None values must be one of three values value = str(value).upper().strip() - if value not in ['OPAQUE', 'MASK', 'BLEND']: - raise ValueError('incorrect alphaMode: %s', value) - self._data['alphaMode'] = value + if value not in ["OPAQUE", "MASK", "BLEND"]: + raise ValueError("incorrect alphaMode: %s", value) + self._data["alphaMode"] = value @property def alphaCutoff(self): @@ -415,15 +419,15 @@ def alphaCutoff(self): alphaCutoff : float Value of cutoff. 
""" - return self._data.get('alphaCutoff') + return self._data.get("alphaCutoff") @alphaCutoff.setter def alphaCutoff(self, value): if value is None: # passing none effectively removes value - self._data.pop('alphaCutoff', None) + self._data.pop("alphaCutoff", None) else: - self._data['alphaCutoff'] = float(value) + self._data["alphaCutoff"] = float(value) @property def doubleSided(self): @@ -435,15 +439,15 @@ def doubleSided(self): doubleSided : bool Specifies whether the material is double sided. """ - return self._data.get('doubleSided') + return self._data.get("doubleSided") @doubleSided.setter def doubleSided(self, value): if value is None: # passing none effectively removes value - self._data.pop('doubleSided', None) + self._data.pop("doubleSided", None) else: - self._data['doubleSided'] = bool(value) + self._data["doubleSided"] = bool(value) @property def metallicFactor(self): @@ -458,15 +462,15 @@ def metallicFactor(self): metallicFactor : float How metally is the material """ - return self._data.get('metallicFactor') + return self._data.get("metallicFactor") @metallicFactor.setter def metallicFactor(self, value): if value is None: # passing none effectively removes value - self._data.pop('metallicFactor', None) + self._data.pop("metallicFactor", None) else: - self._data['metallicFactor'] = float(value) + self._data["metallicFactor"] = float(value) @property def roughnessFactor(self): @@ -480,15 +484,15 @@ def roughnessFactor(self): roughnessFactor : float Roughness of material. """ - return self._data.get('roughnessFactor') + return self._data.get("roughnessFactor") @roughnessFactor.setter def roughnessFactor(self, value): if value is None: # passing none effectively removes value - self._data.pop('roughnessFactor', None) + self._data.pop("roughnessFactor", None) else: - self._data['roughnessFactor'] = float(value) + self._data["roughnessFactor"] = float(value) @property def baseColorFactor(self): @@ -502,16 +506,16 @@ def baseColorFactor(self): color : (4,) uint8 RGBA color """ - return self._data.get('baseColorFactor') + return self._data.get("baseColorFactor") @baseColorFactor.setter def baseColorFactor(self, value): if value is None: # passing none effectively removes value - self._data.pop('baseColorFactor', None) + self._data.pop("baseColorFactor", None) else: # non-None values must be RGBA color - self._data['baseColorFactor'] = color.to_rgba(value) + self._data["baseColorFactor"] = color.to_rgba(value) @property def normalTexture(self): @@ -523,15 +527,15 @@ def normalTexture(self): image : PIL.Image Normal texture. """ - return self._data.get('normalTexture') + return self._data.get("normalTexture") @normalTexture.setter def normalTexture(self, value): if value is None: # passing none effectively removes value - self._data.pop('normalTexture', None) + self._data.pop("normalTexture", None) else: - self._data['normalTexture'] = value + self._data["normalTexture"] = value @property def emissiveTexture(self): @@ -543,15 +547,15 @@ def emissiveTexture(self): image : PIL.Image Emissive texture. 
""" - return self._data.get('emissiveTexture') + return self._data.get("emissiveTexture") @emissiveTexture.setter def emissiveTexture(self, value): if value is None: # passing none effectively removes value - self._data.pop('emissiveTexture', None) + self._data.pop("emissiveTexture", None) else: - self._data['emissiveTexture'] = value + self._data["emissiveTexture"] = value @property def occlusionTexture(self): @@ -563,15 +567,15 @@ def occlusionTexture(self): image : PIL.Image Occlusion texture. """ - return self._data.get('occlusionTexture') + return self._data.get("occlusionTexture") @occlusionTexture.setter def occlusionTexture(self, value): if value is None: # passing none effectively removes value - self._data.pop('occlusionTexture', None) + self._data.pop("occlusionTexture", None) else: - self._data['occlusionTexture'] = value + self._data["occlusionTexture"] = value @property def baseColorTexture(self): @@ -583,16 +587,16 @@ def baseColorTexture(self): image : PIL.Image Color texture. """ - return self._data.get('baseColorTexture') + return self._data.get("baseColorTexture") @baseColorTexture.setter def baseColorTexture(self, value): if value is None: # passing none effectively removes value - self._data.pop('baseColorTexture', None) + self._data.pop("baseColorTexture", None) else: # non-None values must be RGBA color - self._data['baseColorTexture'] = value + self._data["baseColorTexture"] = value @property def metallicRoughnessTexture(self): @@ -604,27 +608,27 @@ def metallicRoughnessTexture(self): image : PIL.Image Metallic-roughness texture. """ - return self._data.get('metallicRoughnessTexture') + return self._data.get("metallicRoughnessTexture") @metallicRoughnessTexture.setter def metallicRoughnessTexture(self, value): if value is None: # passing none effectively removes value - self._data.pop('metallicRoughnessTexture', None) + self._data.pop("metallicRoughnessTexture", None) else: - self._data['metallicRoughnessTexture'] = value + self._data["metallicRoughnessTexture"] = value @property def name(self): - return self._data.get('name') + return self._data.get("name") @name.setter def name(self, value): if value is None: # passing none effectively removes value - self._data.pop('name', None) + self._data.pop("name", None) else: - self._data['name'] = value + self._data["name"] = value def copy(self): # doing a straight deepcopy fails due to PIL images @@ -633,7 +637,7 @@ def copy(self): for k, v in self._data.items(): if v is None: continue - if hasattr(v, 'copy'): + if hasattr(v, "copy"): # use an objects explicit copy if available kwargs[k] = v.copy() else: @@ -655,8 +659,7 @@ def to_color(self, uv): ------------- colors """ - colors = color.uv_to_color( - uv=uv, image=self.baseColorTexture) + colors = color.uv_to_color(uv=uv, image=self.baseColorTexture) if colors is None and self.baseColorFactor is not None: colors = self.baseColorFactor.copy() return colors @@ -672,8 +675,7 @@ def to_simple(self): Contains material information in a simple manner """ - return SimpleMaterial(image=self.baseColorTexture, - diffuse=self.baseColorFactor) + return SimpleMaterial(image=self.baseColorTexture, diffuse=self.baseColorFactor) @property def main_color(self): @@ -691,9 +693,11 @@ def __hash__(self): hash : int Hash of image and parameters """ - return hash(b''.join( - np.asanyarray(v).tobytes() - for v in self._data.values() if v is not None)) + return hash( + b"".join( + np.asanyarray(v).tobytes() for v in self._data.values() if v is not None + ) + ) def empty_material(color=None): 
@@ -717,15 +721,21 @@ def empty_material(color=None): final = np.array([255, 255, 255, 255], dtype=np.uint8) if np.shape(color) in ((3,), (4,)): - final[:len(color)] = color + final[: len(color)] = color # create a one pixel RGB image image = Image.fromarray(final.reshape((1, 1, 4)).astype(np.uint8)) return SimpleMaterial(image=image) -def pack(materials, uvs, deduplicate=True, padding=1, - max_tex_size_individual=8192, max_tex_size_fused=8192): +def pack( + materials, + uvs, + deduplicate=True, + padding=1, + max_tex_size_individual=8192, + max_tex_size_fused=8192, +): """ Pack multiple materials with texture into a single material. @@ -794,24 +804,26 @@ def get_base_color_texture(mat): mode = img.mode img = np.array(img) if mat.alphaMode == "MASK": - img[...,3] = np.where(img[...,3] > mat.alphaCutoff*255, 255, 0) + img[..., 3] = np.where(img[..., 3] > mat.alphaCutoff * 255, 255, 0) elif mat.alphaMode == "OPAQUE" or mat.alphaMode is None: if "A" in mode: - img[...,3] = 255 + img[..., 3] = 255 img = Image.fromarray(img, mode) - elif getattr(mat, 'image', None) is not None: + elif getattr(mat, "image", None) is not None: img = mat.image - elif np.shape(getattr(mat, 'diffuse', [])) == (4,): + elif np.shape(getattr(mat, "diffuse", [])) == (4,): # return a one pixel image - img = Image.fromarray(np.reshape( - color.to_rgba(mat.diffuse), (1, 1, 4)).astype(np.uint8)) + img = Image.fromarray( + np.reshape(color.to_rgba(mat.diffuse), (1, 1, 4)).astype(np.uint8) + ) if img is None: # return a one pixel image - img = Image.fromarray(np.reshape( - [100, 100, 100, 255], (1, 1, 4)).astype(np.uint8)) + img = Image.fromarray( + np.reshape([100, 100, 100, 255], (1, 1, 4)).astype(np.uint8) + ) # make sure we're always returning in RGBA mode - return img.convert('RGBA') + return img.convert("RGBA") def get_metallic_roughness_texture(mat): """ @@ -828,27 +840,35 @@ def get_metallic_roughness_texture(mat): if len(img.shape) == 2 or img.shape[-1] == 1: img = img.reshape(*img.shape[:2], 1) - img = np.concatenate([img, - np.ones_like(img[..., :1])*255, - np.zeros_like(img[..., :1])], - axis=-1) + img = np.concatenate( + [ + img, + np.ones_like(img[..., :1]) * 255, + np.zeros_like(img[..., :1]), + ], + axis=-1, + ) elif img.shape[-1] == 2: img = np.concatenate([img, np.zeros_like(img[..., :1])], axis=-1) if mat.metallicFactor is not None: - img[..., 0] = np.round(img[..., 0].astype(np.float64) * - mat.metallicFactor).astype(np.uint8) + img[..., 0] = np.round( + img[..., 0].astype(np.float64) * mat.metallicFactor + ).astype(np.uint8) if mat.roughnessFactor is not None: - img[..., 1] = np.round(img[..., 1].astype(np.float64) * - mat.roughnessFactor).astype(np.uint8) - img = Image.fromarray(img, mode='RGB') + img[..., 1] = np.round( + img[..., 1].astype(np.float64) * mat.roughnessFactor + ).astype(np.uint8) + img = Image.fromarray(img, mode="RGB") else: metallic = 0.0 if mat.metallicFactor is None else mat.metallicFactor roughness = 1.0 if mat.roughnessFactor is None else mat.roughnessFactor metallic_roughnesss = np.round( - np.array([metallic, roughness, 0.0], dtype=np.float64) * 255) + np.array([metallic, roughness, 0.0], dtype=np.float64) * 255 + ) img = Image.fromarray( - metallic_roughnesss[None, None].astype(np.uint8), mode='RGB') + metallic_roughnesss[None, None].astype(np.uint8), mode="RGB" + ) return img def get_emissive_texture(mat): @@ -864,21 +884,20 @@ def get_emissive_texture(mat): c = color.to_rgba(mat.emissiveFactor) img = Image.fromarray(c.reshape((1, 1, -1))) else: - img = 
Image.fromarray(np.reshape( - [0, 0, 0], (1, 1, 3)).astype(np.uint8)) + img = Image.fromarray(np.reshape([0, 0, 0], (1, 1, 3)).astype(np.uint8)) # make sure we're always returning in RGB mode - return img.convert('RGB') + return img.convert("RGB") def get_normal_texture(mat): # there is no default normal texture - return getattr(mat, 'normalTexture', None) + return getattr(mat, "normalTexture", None) def get_occlusion_texture(mat): - occlusion_texture = getattr(mat, 'occlusionTexture', None) + occlusion_texture = getattr(mat, "occlusionTexture", None) if occlusion_texture is None: occlusion_texture = Image.fromarray(np.array([[255]], dtype=np.uint8)) else: - occlusion_texture = occlusion_texture.convert('L') + occlusion_texture = occlusion_texture.convert("L") return occlusion_texture def pad_image(src, padding=1): @@ -886,8 +905,10 @@ def pad_image(src, padding=1): if isinstance(padding, int): padding = (padding, padding) - x, y = np.meshgrid(np.arange( - src.shape[1] + 2 * padding[0]), np.arange(src.shape[0] + 2 * padding[1])) + x, y = np.meshgrid( + np.arange(src.shape[1] + 2 * padding[0]), + np.arange(src.shape[0] + 2 * padding[1]), + ) x -= padding[0] y -= padding[1] x = np.clip(x, 0, src.shape[1] - 1) @@ -910,13 +931,12 @@ def pack_images(images, power_resize=True, random_seed=42): # random seed needs to be identical to achieve same results # TODO: we could alternatively reuse the offsets from the first packing call np.random.seed(random_seed) - return packing.images(images, power_resize=power_resize) + return packing.images(images, deduplicate=True, power_resize=power_resize) if deduplicate: # start by collecting a list of indexes for each material hash unique_idx = collections.defaultdict(list) - [unique_idx[hash(m)].append(i) - for i, m in enumerate(materials)] + [unique_idx[hash(m)].append(i) for i, m in enumerate(materials)] # now we only need the indexes and don't care about the hashes mat_idx = list(unique_idx.values()) else: @@ -942,15 +962,16 @@ def pack_images(images, power_resize=True, random_seed=42): if use_pbr: # if we have PBR materials, collect all possible textures and # determine the largest size per material - metallic_roughness = [get_metallic_roughness_texture( - materials[g[0]]) for g in mat_idx] + metallic_roughness = [ + get_metallic_roughness_texture(materials[g[0]]) for g in mat_idx + ] emissive = [get_emissive_texture(materials[g[0]]) for g in mat_idx] normals = [get_normal_texture(materials[g[0]]) for g in mat_idx] occlusion = [get_occlusion_texture(materials[g[0]]) for g in mat_idx] unpadded_sizes = [] for textures in zip(images, metallic_roughness, emissive, normals, occlusion): - # remove None textures + # remove None textures textures = [tex for tex in textures if tex is not None] tex_sizes = np.stack([np.array(tex.size) for tex in textures]) max_tex_size = tex_sizes.max(axis=0) @@ -978,16 +999,17 @@ def pack_images(images, power_resize=True, random_seed=42): unpadded_sizes.append(tex_size) images = [ - Image.fromarray(pad_image(np.array(img), padding), img.mode) - for img in images + Image.fromarray(pad_image(np.array(img), padding), img.mode) for img in images ] # pack the multiple images into a single large image final, offsets = pack_images(images) # if the final image is too large, reduce the maximum texture size and repeat - if max_tex_size_fused is not None and \ - final.size[0] * final.size[1] > max_tex_size_fused**2: + if ( + max_tex_size_fused is not None + and final.size[0] * final.size[1] > max_tex_size_fused**2 + ): down_scale_iterations 
-= 1 max_tex_size_individual //= 2 else: @@ -995,10 +1017,9 @@ def pack_images(images, power_resize=True, random_seed=42): if use_pbr: metallic_roughness = [ - Image.fromarray( - pad_image( - np.array(img), - padding), img.mode) for img in metallic_roughness] + Image.fromarray(pad_image(np.array(img), padding), img.mode) + for img in metallic_roughness + ] # even if we only need the first two channels, store RGB, because # PIL 'LA' mode images are interpreted incorrectly in other 3D software final_metallic_roughness, _ = pack_images(metallic_roughness) @@ -1009,22 +1030,18 @@ def pack_images(images, power_resize=True, random_seed=42): final_emissive = None else: emissive = [ - Image.fromarray( - pad_image( - np.array(img), - padding), - mode=img.mode) for img in emissive] + Image.fromarray(pad_image(np.array(img), padding), mode=img.mode) + for img in emissive + ] final_emissive, _ = pack_images(emissive) if all(n is not None for n in normals): # only use normal texture if all materials use them # how else would you handle missing normals? normals = [ - Image.fromarray( - pad_image( - np.array(img), - padding), - mode=img.mode) for img in normals] + Image.fromarray(pad_image(np.array(img), padding), mode=img.mode) + for img in normals + ] final_normals, _ = pack_images(normals) else: final_normals = None @@ -1032,16 +1049,13 @@ def pack_images(images, power_resize=True, random_seed=42): if any(np.array(o).min() < 255 for o in occlusion): # only use occlusion texture if any material actually has an occlusion value occlusion = [ - Image.fromarray( - pad_image( - np.array(img), - padding), - mode=img.mode) for img in occlusion] + Image.fromarray(pad_image(np.array(img), padding), mode=img.mode) + for img in occlusion + ] final_occlusion, _ = pack_images(occlusion) else: final_occlusion = None - # the size of the final texture image final_size = np.array(final.size, dtype=np.float64) # collect scaled new UV coordinates by material index @@ -1060,10 +1074,12 @@ def pack_images(images, power_resize=True, random_seed=42): # the case of uv==1.0 half_pixel_width = 1.0 / (2 * img.size[0]) half_pixel_height = 1.0 / (2 * img.size[1]) - wrap_mask_u = ((g_uvs[:, 0] <= -half_pixel_width) | - (g_uvs[:, 0] >= (1.0 + half_pixel_width))) - wrap_mask_v = ((g_uvs[:, 1] <= -half_pixel_height) | - (g_uvs[:, 1] >= (1.0 + half_pixel_height))) + wrap_mask_u = (g_uvs[:, 0] <= -half_pixel_width) | ( + g_uvs[:, 0] >= (1.0 + half_pixel_width) + ) + wrap_mask_v = (g_uvs[:, 1] <= -half_pixel_height) | ( + g_uvs[:, 1] >= (1.0 + half_pixel_height) + ) wrap_mask = np.stack([wrap_mask_u, wrap_mask_v], axis=-1) g_uvs[wrap_mask] = g_uvs[wrap_mask] % 1.0 @@ -1079,7 +1095,8 @@ def pack_images(images, power_resize=True, random_seed=42): material_textures = [(get_base_color_texture, final)] if use_pbr: material_textures.append( - (get_metallic_roughness_texture, final_metallic_roughness)) + (get_metallic_roughness_texture, final_metallic_roughness) + ) if final_emissive: material_textures.append((get_emissive_texture, final_emissive)) if final_normals: @@ -1104,8 +1121,7 @@ def pack_images(images, power_resize=True, random_seed=42): for reference, (_, final_texture) in zip(check_flat, material_textures): # get the pixel color from the packed image - compare = color.uv_to_interpolated_color( - uv=stacked, image=final_texture) + compare = color.uv_to_interpolated_color(uv=stacked, image=final_texture) # should be exactly identical # note this is only true for simple colors # interpolation on complicated stuff can break this @@ 
-1118,11 +1134,12 @@ def pack_images(images, power_resize=True, random_seed=42): metallicRoughnessTexture=final_metallic_roughness, emissiveTexture=final_emissive, emissiveFactor=[1.0, 1.0, 1.0] if final_emissive else None, - alphaMode=None, # unfortunately, we can't handle alpha blending well - doubleSided=False, # TODO how to handle this? + alphaMode=None, # unfortunately, we can't handle alpha blending well + doubleSided=False, # TODO how to handle this? normalTexture=final_normals, occlusionTexture=final_occlusion, ), - stacked) + stacked, + ) else: return SimpleMaterial(image=final), stacked diff --git a/trimesh/visual/objects.py b/trimesh/visual/objects.py index 18ff302f3..8c6bb6029 100644 --- a/trimesh/visual/objects.py +++ b/trimesh/visual/objects.py @@ -56,12 +56,12 @@ def concatenate(visuals, *args): visuals = np.array(visuals) # if there are any texture visuals convert all to texture - if any(v.kind == 'texture' for v in visuals): + if any(v.kind == "texture" for v in visuals): # first collect materials and UV coordinates mats = [] uvs = [] for v in visuals: - if v.kind == 'texture': + if v.kind == "texture": mats.append(v.material) if v.uv is None: # otherwise use zeros @@ -72,8 +72,7 @@ def concatenate(visuals, *args): else: # create a material and UV coordinates from vertex colors - color_mat, color_uv = color_to_uv( - vertex_colors=v.vertex_colors) + color_mat, color_uv = color_to_uv(vertex_colors=v.vertex_colors) mats.append(color_mat) uvs.append(color_uv) # pack the materials and UV coordinates into one @@ -82,13 +81,11 @@ def concatenate(visuals, *args): # convert all visuals to the kind of the first kind = visuals[0].kind - if kind == 'face': - colors = np.vstack([ - v.face_colors for v in visuals]) + if kind == "face": + colors = np.vstack([v.face_colors for v in visuals]) return ColorVisuals(face_colors=colors) - elif kind == 'vertex': - colors = np.vstack([ - v.vertex_colors for v in visuals]) + elif kind == "vertex": + colors = np.vstack([v.vertex_colors for v in visuals]) return ColorVisuals(vertex_colors=colors) return ColorVisuals() diff --git a/trimesh/visual/texture.py b/trimesh/visual/texture.py index 10da4296a..22bde1a4d 100644 --- a/trimesh/visual/texture.py +++ b/trimesh/visual/texture.py @@ -9,11 +9,7 @@ class TextureVisuals(Visuals): - def __init__(self, - uv=None, - material=None, - image=None, - face_materials=None): + def __init__(self, uv=None, material=None, image=None, face_materials=None): """ Store a single material and per-vertex UV coordinates for a mesh. @@ -68,7 +64,7 @@ def kind(self): kind : str What type of visuals are defined """ - return 'texture' + return "texture" @property def defined(self): @@ -104,7 +100,7 @@ def uv(self): uv : (n, 2) float or None Pixel position per-vertex. 
""" - return self.vertex_attributes.get('uv', None) + return self.vertex_attributes.get("uv", None) @uv.setter def uv(self, values): @@ -117,10 +113,9 @@ def uv(self, values): Pixel locations on a texture per- vertex """ if values is None: - self.vertex_attributes.pop('uv') + self.vertex_attributes.pop("uv") else: - self.vertex_attributes['uv'] = np.asanyarray( - values, dtype=np.float64) + self.vertex_attributes["uv"] = np.asanyarray(values, dtype=np.float64) def copy(self, uv=None): """ @@ -138,7 +133,8 @@ def copy(self, uv=None): copied = TextureVisuals( uv=uv, material=self.material.copy(), - face_materials=copy.copy(self.face_materials)) + face_materials=copy.copy(self.face_materials), + ) return copied @@ -188,7 +184,7 @@ def update_vertices(self, mask): updates[key] = value[mask] except BaseException: # usual reason is an incorrect size or index - util.log.warning(f'failed to update visual: `{key}`') + util.log.warning(f"failed to update visual: `{key}`") # clear all values from the vertex attributes self.vertex_attributes.clear() # apply the updated values @@ -216,6 +212,7 @@ def concatenate(self, others): Concatenated visual objects """ from .objects import concatenate + return concatenate(self, others) @@ -251,7 +248,7 @@ def unmerge_faces(faces, *args, **kwargs): """ # unfortunately Python2 doesn't let us put named kwargs # after an `*args` sequence so we have to do this ugly get - maintain_faces = kwargs.get('maintain_faces', False) + maintain_faces = kwargs.get("maintain_faces", False) # don't alter faces if maintain_faces: From c184c172d85a85c25921ff4ad7b86d80ca9514f5 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Sat, 23 Sep 2023 14:08:26 -0400 Subject: [PATCH 108/144] add more options to image packing --- trimesh/path/__init__.py | 3 +- trimesh/path/creation.py | 124 +++----- trimesh/path/curve.py | 31 +- trimesh/path/entities.py | 191 +++++------ trimesh/path/exchange/dxf.py | 545 +++++++++++++++----------------- trimesh/path/exchange/export.py | 21 +- trimesh/path/exchange/load.py | 19 +- trimesh/path/exchange/misc.py | 56 ++-- trimesh/path/exchange/svg_io.py | 334 +++++++++---------- trimesh/path/intersections.py | 9 +- trimesh/path/packing.py | 78 ++++- trimesh/path/path.py | 353 ++++++++++----------- trimesh/path/raster.py | 29 +- trimesh/path/repair.py | 24 +- trimesh/path/segments.py | 100 +++--- trimesh/path/simplify.py | 86 +++-- trimesh/path/traversal.py | 103 +++--- trimesh/path/util.py | 8 +- trimesh/util.py | 47 ++- trimesh/visual/material.py | 64 ++-- 20 files changed, 1039 insertions(+), 1186 deletions(-) diff --git a/trimesh/path/__init__.py b/trimesh/path/__init__.py index 06e300ee0..242d1fe82 100644 --- a/trimesh/path/__init__.py +++ b/trimesh/path/__init__.py @@ -9,8 +9,9 @@ from .path import Path2D, Path3D except BaseException as E: from .. import exceptions + Path2D = exceptions.ExceptionWrapper(E) Path3D = exceptions.ExceptionWrapper(E) # explicitly add objects to all as per pep8 -__all__ = ['Path2D', 'Path3D'] +__all__ = ["Path2D", "Path3D"] diff --git a/trimesh/path/creation.py b/trimesh/path/creation.py index 6ab272d1b..1d8c73030 100644 --- a/trimesh/path/creation.py +++ b/trimesh/path/creation.py @@ -6,12 +6,9 @@ from .entities import Arc, Line -def circle_pattern(pattern_radius, - circle_radius, - count, - center=None, - angle=None, - **kwargs): +def circle_pattern( + pattern_radius, circle_radius, count, center=None, angle=None, **kwargs +): """ Create a Path2D representing a circle pattern. 
@@ -41,35 +38,29 @@ def circle_pattern(pattern_radius, elif isinstance(angle, float) or isinstance(angle, int): angles = np.linspace(0.0, angle, count) else: - raise ValueError('angle must be float or int!') + raise ValueError("angle must be float or int!") if center is None: center = [0.0, 0.0] # centers of circles - centers = np.column_stack(( - np.cos(angles), np.sin(angles))) * pattern_radius + centers = np.column_stack((np.cos(angles), np.sin(angles))) * pattern_radius vert = [] ents = [] for circle_center in centers: # (3,3) center points of arc - three = arc.to_threepoint(angles=[0, np.pi], - center=circle_center, - radius=circle_radius) + three = arc.to_threepoint( + angles=[0, np.pi], center=circle_center, radius=circle_radius + ) # add a single circle entity - ents.append( - Arc( - points=np.arange(3) + len(vert), - closed=True)) + ents.append(Arc(points=np.arange(3) + len(vert), closed=True)) # keep flat array by extend instead of append vert.extend(three) # translate vertices to pattern center vert = np.array(vert) + center - pattern = Path2D(entities=ents, - vertices=vert, - **kwargs) + pattern = Path2D(entities=ents, vertices=vert, **kwargs) return pattern @@ -102,14 +93,11 @@ def circle(radius, center=None, **kwargs): radius = float(radius) # (3, 2) float, points on arc - three = arc.to_threepoint(angles=[0, np.pi], - center=center, - radius=radius) + three = arc.to_threepoint(angles=[0, np.pi], center=center, radius=radius) # generate the path object result = Path2D( - entities=[Arc(points=np.arange(3), closed=True)], - vertices=three, - **kwargs) + entities=[Arc(points=np.arange(3), closed=True)], vertices=three, **kwargs + ) return result @@ -140,9 +128,8 @@ def rectangle(bounds, **kwargs): bounds = np.array([-half, half]) # should have one bounds or multiple bounds - if not (util.is_shape(bounds, (2, 2)) or - util.is_shape(bounds, (-1, 2, 2))): - raise ValueError('bounds must be (m, 2, 2) or (2, 2)') + if not (util.is_shape(bounds, (2, 2)) or util.is_shape(bounds, (-1, 2, 2))): + raise ValueError("bounds must be (m, 2, 2) or (2, 2)") # hold Line objects lines = [] @@ -152,15 +139,10 @@ def rectangle(bounds, **kwargs): # loop through each rectangle for lower, upper in bounds.reshape((-1, 2, 2)): lines.append(Line((np.arange(5) % 4) + len(vertices))) - vertices.extend([lower, - [upper[0], lower[1]], - upper, - [lower[0], upper[1]]]) + vertices.extend([lower, [upper[0], lower[1]], upper, [lower[0], upper[1]]]) # create the Path2D with specified rectangles - rect = Path2D(entities=lines, - vertices=vertices, - **kwargs) + rect = Path2D(entities=lines, vertices=vertices, **kwargs) return rect @@ -186,24 +168,20 @@ def box_outline(extents=None, transform=None, **kwargs): from .exchange.load import load_path # create vertices for the box - vertices = [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, - 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1] - vertices = np.array(vertices, - order='C', - dtype=np.float64).reshape((-1, 3)) + vertices = [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1] + vertices = np.array(vertices, order="C", dtype=np.float64).reshape((-1, 3)) vertices -= 0.5 # resize the vertices based on passed size if extents is not None: extents = np.asanyarray(extents, dtype=np.float64) if extents.shape != (3,): - raise ValueError('Extents must be (3,)!') + raise ValueError("Extents must be (3,)!") vertices *= extents # apply transform if passed if transform is not None: - vertices = transformations.transform_points( - vertices, transform) + vertices = 
transformations.transform_points(vertices, transform) # vertex indices indices = [0, 1, 3, 2, 0, 4, 5, 7, 6, 4, 0, 2, 6, 7, 3, 1, 5] @@ -212,13 +190,15 @@ return outline -def grid(side, - count=5, - transform=None, - plane_origin=None, - plane_normal=None, - include_circle=True, - sections_circle=32): +def grid( + side, + count=5, + transform=None, + plane_origin=None, + plane_normal=None, + include_circle=True, + sections_circle=32, +): """ Create a Path3D for a grid visualization of a plane. @@ -267,50 +247,48 @@ def grid(side, circle_res = int((r / radii[0]) * sections_circle) # generate a circle pattern theta = np.linspace(0.0, np.pi * 2, circle_res) - circle = np.column_stack((np.cos(theta), - np.sin(theta))) * r + circle = np.column_stack((np.cos(theta), np.sin(theta))) * r # append the circle pattern vertices.append(circle) - entities.append(Line( - points=np.arange(len(circle)) + current)) + entities.append(Line(points=np.arange(len(circle)) + current)) # keep the vertex count correct current += len(circle) # generate a series of grid lines - vertices.append([[-rmax, r], - [rmax, r], - [-rmax, -r], - [rmax, -r], - [r, -rmax], - [r, rmax], - [-r, -rmax], - [-r, rmax]]) + vertices.append( + [ + [-rmax, r], + [rmax, r], + [-rmax, -r], + [rmax, -r], + [r, -rmax], + [r, rmax], + [-r, -rmax], + [-r, rmax], + ] + ) # append an entity per grid line for i in [0, 2, 4, 6]: - entities.append(Line( - points=np.arange(2) + current + i)) + entities.append(Line(points=np.arange(2) + current + i)) current += len(vertices[-1]) # add the middle lines which were skipped - vertices.append([[0, rmax], - [0, -rmax], - [-rmax, 0], - [rmax, 0]]) + vertices.append([[0, rmax], [0, -rmax], [-rmax, 0], [rmax, 0]]) entities.append(Line(points=np.arange(2) + current)) entities.append(Line(points=np.arange(2) + current + 2)) # stack vertices into clean (n, 3) float vertices = np.vstack(vertices) # if plane was passed instead of transform create the matrix here - if (transform is None and plane_origin is not None and plane_normal is not None): - transform = np.linalg.inv(plane_transform( - origin=plane_origin, normal=plane_normal)) + if transform is None and plane_origin is not None and plane_normal is not None: + transform = np.linalg.inv( + plane_transform(origin=plane_origin, normal=plane_normal) + ) # stack vertices to 3D vertices = np.column_stack((vertices, np.zeros(len(vertices)))) # apply transform if passed if transform is not None: - vertices = transformations.transform_points( - vertices, matrix=transform) + vertices = transformations.transform_points(vertices, matrix=transform) # combine result into a Path3D object grid_path = Path3D(entities=entities, vertices=vertices) return grid_path diff --git a/trimesh/path/curve.py b/trimesh/path/curve.py index 35a888d42..90c05923d 100644 --- a/trimesh/path/curve.py +++ b/trimesh/path/curve.py @@ -28,10 +28,9 @@ def discretize_bezier(points, count=None, scale=1.0): # this is so we can figure out how finely we have to sample t norm = np.linalg.norm(np.diff(points, axis=0), axis=1).sum() count = np.ceil(norm / (res.seg_frac * scale)) - count = int(np.clip( - count, - res.min_sections * len(points), - res.max_sections * len(points))) + count = int( + np.clip(count, res.min_sections * len(points), res.max_sections * len(points)) + ) count = int(count) # parameterize incrementing 0.0 - 1.0 @@ -42,22 +41,20 @@ # binomial coefficients, i, and each point 
iterable = zip(binomial(n), np.arange(len(points)), points) # run the actual interpolation - stacked = [((t**i) * (t_d**(n - i))).reshape((-1, 1)) - * p * c for c, i, p in iterable] + stacked = [ + ((t**i) * (t_d ** (n - i))).reshape((-1, 1)) * p * c for c, i, p in iterable + ] result = np.sum(stacked, axis=0) # test to make sure end points are correct - test = np.sum((result[[0, -1]] - points[[0, -1]])**2, axis=1) + test = np.sum((result[[0, -1]] - points[[0, -1]]) ** 2, axis=1) assert (test < tol.merge).all() assert len(result) >= 2 return result -def discretize_bspline(control, - knots, - count=None, - scale=1.0): +def discretize_bspline(control, knots, count=None, scale=1.0): """ Given a B-Splines control points and knot vector, return a sampled version of the curve. @@ -80,14 +77,19 @@ def discretize_bspline(control, # evaluate the b-spline using scipy/fitpack from scipy.interpolate import splev + # (n, d) control points where d is the dimension of vertices control = np.asanyarray(control, dtype=np.float64) degree = len(knots) - len(control) - 1 if count is None: norm = np.linalg.norm(np.diff(control, axis=0), axis=1).sum() - count = int(np.clip(norm / (res.seg_frac * scale), - res.min_sections * len(control), - res.max_sections * len(control))) + count = int( + np.clip( + norm / (res.seg_frac * scale), + res.min_sections * len(control), + res.max_sections * len(control), + ) + ) ipl = np.linspace(knots[0], knots[-1], count) discrete = splev(ipl, [knots, control.T, degree]) @@ -124,4 +126,5 @@ def binomial(n): return [1, 5, 10, 10, 5, 1] else: from scipy.special import binom + return binom(n, np.arange(n + 1)) diff --git a/trimesh/path/entities.py b/trimesh/path/entities.py index 6106c29c5..586ea79a2 100644 --- a/trimesh/path/entities.py +++ b/trimesh/path/entities.py @@ -16,14 +16,9 @@ class Entity(ABC): - - def __init__(self, - points, - closed=None, - layer=None, - metadata=None, - color=None, - **kwargs): + def __init__( + self, points, closed=None, layer=None, metadata=None, color=None, **kwargs + ): # points always reference vertex indices and are int self.points = np.asanyarray(points, dtype=np.int64) # save explicit closed @@ -52,7 +47,7 @@ def metadata(self): metadata : dict Bag of properties. """ - if not hasattr(self, '_metadata'): + if not hasattr(self, "_metadata"): self._metadata = {} # note that we don't let a new dict be assigned return self._metadata @@ -68,7 +63,7 @@ def layer(self): layer : any Hashable layer identifier. """ - return self.metadata.get('layer') + return self.metadata.get("layer") @layer.setter def layer(self, value): @@ -80,7 +75,7 @@ def layer(self, value): layer : any Hashable layer indicator """ - self.metadata['layer'] = value + self.metadata["layer"] = value def to_dict(self): """ @@ -92,9 +87,11 @@ def to_dict(self): as_dict : dict Has keys 'type', 'points', 'closed' """ - return {'type': self.__class__.__name__, - 'points': self.points.tolist(), - 'closed': self.closed} + return { + "type": self.__class__.__name__, + "points": self.points.tolist(), + "closed": self.closed, + } @property def closed(self): @@ -107,8 +104,7 @@ def closed(self): closed : bool Is the entity closed or not? 
""" - closed = (len(self.points) > 2 and - self.points[0] == self.points[-1]) + closed = len(self.points) > 2 and self.points[0] == self.points[-1] return closed @property @@ -134,9 +130,9 @@ def nodes(self): self.points = [0,1,2] returns: [[0,1], [1,2]] """ - return np.column_stack((self.points, - self.points)).reshape( - -1)[1:-1].reshape((-1, 2)) + return ( + np.column_stack((self.points, self.points)).reshape(-1)[1:-1].reshape((-1, 2)) + ) @property def end_points(self): @@ -194,7 +190,7 @@ def _orient(self, curve): orient : (n, dimension) float Original curve, but possibly reversed """ - if hasattr(self, '_direction') and self._direction < 0: + if hasattr(self, "_direction") and self._direction < 0: return curve[::-1] return curve @@ -212,8 +208,9 @@ def bounds(self, vertices): bounds : (2, dimension) float Coordinates of AABB, in (min, max) form """ - bounds = np.array([vertices[self.points].min(axis=0), - vertices[self.points].max(axis=0)]) + bounds = np.array( + [vertices[self.points].min(axis=0), vertices[self.points].max(axis=0)] + ) return bounds def length(self, vertices): @@ -256,7 +253,7 @@ def copy(self): """ copied = deepcopy(self) # only copy metadata if set - if hasattr(self, '_metadata'): + if hasattr(self, "_metadata"): copied._metadata = deepcopy(self._metadata) # check for very annoying subtle copy failures assert id(copied._metadata) != id(self._metadata) @@ -285,11 +282,9 @@ def _bytes(self): """ # give consistent ordering of points for hash if self.points[0] > self.points[-1]: - return (self.__class__.__name__.encode('utf-8') + - self.points.tobytes()) + return self.__class__.__name__.encode("utf-8") + self.points.tobytes() else: - return (self.__class__.__name__.encode('utf-8') + - self.points[::-1].tobytes()) + return self.__class__.__name__.encode("utf-8") + self.points[::-1].tobytes() class Text(Entity): @@ -297,16 +292,18 @@ class Text(Entity): Text to annotate a 2D or 3D path. """ - def __init__(self, - origin, - text, - height=None, - vector=None, - normal=None, - align=None, - layer=None, - color=None, - metadata=None): + def __init__( + self, + origin, + text, + height=None, + vector=None, + normal=None, + align=None, + layer=None, + color=None, + metadata=None, + ): """ An entity for text labels. 
@@ -349,20 +346,20 @@ def __init__(self, # None or (2,) str if align is None: # if not set make everything centered - align = ['center', 'center'] + align = ["center", "center"] elif util.is_string(align): # if only one is passed set for both # horizontal and vertical align = [align, align] elif len(align) != 2: # otherwise raise error - raise ValueError('align must be (2,) str') + raise ValueError("align must be (2,) str") self.align = align # make sure text is a string - if hasattr(text, 'decode'): - self.text = text.decode('utf-8') + if hasattr(text, "decode"): + self.text = text.decode("utf-8") else: self.text = str(text) @@ -381,7 +378,7 @@ def origin(self): @origin.setter def origin(self, value): value = int(value) - if not hasattr(self, 'points') or self.points.ptp() == 0: + if not hasattr(self, "points") or self.points.ptp() == 0: self.points = np.ones(3, dtype=np.int64) * value else: self.points[0] = value @@ -436,7 +433,7 @@ def plot(self, vertices, show=False): If True, call plt.show() """ if vertices.shape[1] != 2: - raise ValueError('only for 2D points!') + raise ValueError("only for 2D points!") import matplotlib.pyplot as plt @@ -444,12 +441,14 @@ angle = np.degrees(self.angle(vertices)) # TODO: handle text size better - plt.text(*vertices[self.origin], - s=self.text, - rotation=angle, - ha=self.align[0], - va=self.align[1], - size=18) + plt.text( + *vertices[self.origin], + s=self.text, + rotation=angle, + ha=self.align[0], + va=self.align[1], + size=18, + ) if show: plt.show() @@ -470,7 +469,7 @@ def angle(self, vertices): """ if vertices.shape[1] != 2: - raise ValueError('angle only valid for 2D points!') + raise ValueError("angle only valid for 2D points!") # get the vector from origin direction = vertices[self.vector] - vertices[self.origin] @@ -502,9 +501,7 @@ def end_points(self): return np.array([]) def _bytes(self): - data = b''.join([b'Text', - self.points.tobytes(), - self.text.encode('utf-8')]) + data = b"".join([b"Text", self.points.tobytes(), self.text.encode("utf-8")]) return data @@ -555,22 +552,21 @@ def explode(self): """ # copy over the current layer layer = self.layer - points = np.column_stack(( - self.points, - self.points)).ravel()[1:-1].reshape((-1, 2)) + points = ( + np.column_stack((self.points, self.points)).ravel()[1:-1].reshape((-1, 2)) + ) exploded = [Line(i, layer=layer) for i in points] return exploded def _bytes(self): # give consistent ordering of points for hash if self.points[0] > self.points[-1]: - return b'Line' + self.points.tobytes() + return b"Line" + self.points.tobytes() else: - return b'Line' + self.points[::-1].tobytes() + return b"Line" + self.points[::-1].tobytes() class Arc(Entity): - @property def closed(self): """ A boolean flag for whether the arc is closed (a circle) or not. Returns ----------- closed : bool If set True, Arc will be a closed circle """ - return getattr(self, '_closed', False) + return getattr(self, "_closed", False) @closed.setter def closed(self, value): @@ -611,7 +607,7 @@ def is_valid(self): def _bytes(self): # give consistent ordering of points for hash order = int(self.points[0] > self.points[-1]) * 2 - 1 - return b'Arc' + bytes(self.closed) + self.points[::order].tobytes() + return b"Arc" + bytes(self.closed) + self.points[::order].tobytes() def length(self, vertices): """ Return the arc length of the circular arc. Parameters ------------ vertices : (n, dimension) float Vertices for overall drawing. Returns ------------ length : float The arc length of the circular arc. """ if self.closed: # we don't need the angular span as # it's indicated as a closed circle - fit = self.center( vertices, 
return_normal=False, return_angle=False) return np.pi * fit.radius * 4 # get the angular span of the circular arc - fit = self.center( - vertices, return_normal=False, return_angle=True) + fit = self.center(vertices, return_normal=False, return_angle=True) return fit.span * fit.radius * 2 def discrete(self, vertices, scale=1.0): @@ -656,10 +650,9 @@ def discrete(self, vertices, scale=1.0): Path in space made up of line segments """ - return self._orient(discretize_arc( - vertices[self.points], - close=self.closed, - scale=scale)) + return self._orient( + discretize_arc(vertices[self.points], close=self.closed, scale=scale) + ) def center(self, vertices, **kwargs): """ @@ -695,20 +688,17 @@ def bounds(self, vertices): # if we have a closed arc (a circle), we can return the actual bounds # this only works in two dimensions, otherwise this would return the # AABB of an sphere - info = self.center( - vertices, - return_normal=False, - return_angle=False) - bounds = np.array([info.center - info.radius, - info.center + info.radius], - dtype=np.float64) + info = self.center(vertices, return_normal=False, return_angle=False) + bounds = np.array( + [info.center - info.radius, info.center + info.radius], dtype=np.float64 + ) else: # since the AABB of a partial arc is hard, approximate # the bounds by just looking at the discrete values discrete = self.discrete(vertices) - bounds = np.array([discrete.min(axis=0), - discrete.max(axis=0)], - dtype=np.float64) + bounds = np.array( + [discrete.min(axis=0), discrete.max(axis=0)], dtype=np.float64 + ) return bounds @@ -716,12 +706,12 @@ class Curve(Entity): """ The parent class for all wild curves in space. """ + @property def nodes(self): # a point midway through the curve mid = self.points[len(self.points) // 2] - return [[self.points[0], mid], - [mid, self.points[-1]]] + return [[self.points[0], mid], [mid, self.points[-1]]] class Bezier(Curve): @@ -747,10 +737,9 @@ def discrete(self, vertices, scale=1.0, count=None): discrete : (m, 2) or (m, 3) float Curve as line segments """ - return self._orient(discretize_bezier( - vertices[self.points], - count=count, - scale=scale)) + return self._orient( + discretize_bezier(vertices[self.points], count=count, scale=scale) + ) class BSpline(Curve): @@ -758,13 +747,7 @@ class BSpline(Curve): An open or closed B- Spline. """ - def __init__(self, - points, - knots, - layer=None, - metadata=None, - color=None, - **kwargs): + def __init__(self, points, knots, layer=None, metadata=None, color=None, **kwargs): self.points = np.asanyarray(points, dtype=np.int64) self.knots = np.asanyarray(knots, dtype=np.float64) if layer is not None: @@ -794,29 +777,25 @@ def discrete(self, vertices, count=None, scale=1.0): Curve as line segments """ discrete = discretize_bspline( - control=vertices[self.points], - knots=self.knots, - count=count, - scale=scale) + control=vertices[self.points], knots=self.knots, count=count, scale=scale + ) return self._orient(discrete) def _bytes(self): # give consistent ordering of points for hash if self.points[0] > self.points[-1]: - return (b'BSpline' + - self.knots.tobytes() + - self.points.tobytes()) + return b"BSpline" + self.knots.tobytes() + self.points.tobytes() else: - return (b'BSpline' + - self.knots[::-1].tobytes() + - self.points[::-1].tobytes()) + return b"BSpline" + self.knots[::-1].tobytes() + self.points[::-1].tobytes() def to_dict(self): """ Returns a dictionary with all of the information about the entity. 
""" - return {'type': self.__class__.__name__, - 'points': self.points.tolist(), - 'knots': self.knots.tolist(), - 'closed': self.closed} + return { + "type": self.__class__.__name__, + "points": self.points.tolist(), + "knots": self.knots.tolist(), + "closed": self.closed, + } diff --git a/trimesh/path/exchange/dxf.py b/trimesh/path/exchange/dxf.py index 0a49fee20..4e53759cc 100644 --- a/trimesh/path/exchange/dxf.py +++ b/trimesh/path/exchange/dxf.py @@ -11,38 +11,40 @@ from ..entities import Arc, BSpline, Line, Text # unit codes -_DXF_UNITS = {1: 'inches', - 2: 'feet', - 3: 'miles', - 4: 'millimeters', - 5: 'centimeters', - 6: 'meters', - 7: 'kilometers', - 8: 'microinches', - 9: 'mils', - 10: 'yards', - 11: 'angstroms', - 12: 'nanometers', - 13: 'microns', - 14: 'decimeters', - 15: 'decameters', - 16: 'hectometers', - 17: 'gigameters', - 18: 'AU', - 19: 'light years', - 20: 'parsecs'} +_DXF_UNITS = { + 1: "inches", + 2: "feet", + 3: "miles", + 4: "millimeters", + 5: "centimeters", + 6: "meters", + 7: "kilometers", + 8: "microinches", + 9: "mils", + 10: "yards", + 11: "angstroms", + 12: "nanometers", + 13: "microns", + 14: "decimeters", + 15: "decameters", + 16: "hectometers", + 17: "gigameters", + 18: "AU", + 19: "light years", + 20: "parsecs", +} # backwards, for reference _UNITS_TO_DXF = {v: k for k, v in _DXF_UNITS.items()} # a string which we will replace spaces with temporarily -_SAFESPACE = '|<^>|' +_SAFESPACE = "|<^>|" # save metadata to a DXF Xrecord starting here # Valid values are 1-369 (except 5 and 105) XRECORD_METADATA = 134 # the sentinel string for trimesh metadata # this should be seen at XRECORD_METADATA -XRECORD_SENTINEL = 'TRIMESH_METADATA:' +XRECORD_SENTINEL = "TRIMESH_METADATA:" # the maximum line length before we split lines XRECORD_MAX_LINE = 200 # the maximum index of XRECORDS @@ -69,21 +71,21 @@ def load_dxf(file_obj, **kwargs): # splitlines function which uses the universal newline method raw = file_obj.read() # if we've been passed bytes - if hasattr(raw, 'decode'): + if hasattr(raw, "decode"): # search for the sentinel string indicating binary DXF # do it by encoding sentinel to bytes and subset searching - if raw[:22].find(b'AutoCAD Binary DXF') != -1: + if raw[:22].find(b"AutoCAD Binary DXF") != -1: # no converter to ASCII DXF available - raise ValueError('binary DXF not supported!') + raise ValueError("binary DXF not supported!") else: # we've been passed bytes that don't have the # header for binary DXF so try decoding as UTF-8 - raw = raw.decode('utf-8', errors='ignore') + raw = raw.decode("utf-8", errors="ignore") # remove trailing whitespace raw = str(raw).strip() # without any spaces and in upper case - cleaned = raw.replace(' ', '').strip().upper() + cleaned = raw.replace(" ", "").strip().upper() # blob with spaces and original case blob_raw = np.array(str.splitlines(raw)).reshape((-1, 2)) @@ -91,41 +93,40 @@ def load_dxf(file_obj, **kwargs): blob = np.array(str.splitlines(cleaned)).reshape((-1, 2)) # get the section which contains the header in the DXF file - endsec = np.nonzero(blob[:, 1] == 'ENDSEC')[0] + endsec = np.nonzero(blob[:, 1] == "ENDSEC")[0] # store metadata metadata = {} # try reading the header, which may be malformed - header_start = np.nonzero(blob[:, 1] == 'HEADER')[0] + header_start = np.nonzero(blob[:, 1] == "HEADER")[0] if len(header_start) > 0: header_end = endsec[np.searchsorted(endsec, header_start[0])] - header_blob = blob[header_start[0]:header_end] + header_blob = blob[header_start[0] : header_end] # store some properties 
from the DXF header - metadata['DXF_HEADER'] = {} - for key, group in [('$ACADVER', '1'), - ('$DIMSCALE', '40'), - ('$DIMALT', '70'), - ('$DIMALTF', '40'), - ('$DIMUNIT', '70'), - ('$INSUNITS', '70'), - ('$LUNITS', '70')]: - value = get_key(header_blob, - key, - group) + metadata["DXF_HEADER"] = {} + for key, group in [ + ("$ACADVER", "1"), + ("$DIMSCALE", "40"), + ("$DIMALT", "70"), + ("$DIMALTF", "40"), + ("$DIMUNIT", "70"), + ("$INSUNITS", "70"), + ("$LUNITS", "70"), + ]: + value = get_key(header_blob, key, group) if value is not None: - metadata['DXF_HEADER'][key] = value + metadata["DXF_HEADER"][key] = value # store unit data pulled from the header of the DXF # prefer LUNITS over INSUNITS # I couldn't find a table for LUNITS values but they # look like they are 0- indexed versions of # the INSUNITS keys, so for now offset the key value - for offset, key in [(-1, '$LUNITS'), - (0, '$INSUNITS')]: + for offset, key in [(-1, "$LUNITS"), (0, "$INSUNITS")]: # get the key from the header blob - units = get_key(header_blob, key, '70') + units = get_key(header_blob, key, "70") # if it exists add the offset if units is None: continue @@ -133,57 +134,49 @@ def load_dxf(file_obj, **kwargs): units += offset # if the key is in our list of units store it if units in _DXF_UNITS: - metadata['units'] = _DXF_UNITS[units] + metadata["units"] = _DXF_UNITS[units] # warn on drawings with no units - if 'units' not in metadata: - log.debug('DXF doesn\'t have units specified!') + if "units" not in metadata: + log.debug("DXF doesn't have units specified!") # get the section which contains entities in the DXF file - entity_start = np.nonzero(blob[:, 1] == 'ENTITIES')[0][0] + entity_start = np.nonzero(blob[:, 1] == "ENTITIES")[0][0] entity_end = endsec[np.searchsorted(endsec, entity_start)] blocks = None check_entity = blob[entity_start:entity_end][:, 1] # only load blocks if an entity references them via an INSERT - if 'INSERT' in check_entity or 'BLOCK' in check_entity: + if "INSERT" in check_entity or "BLOCK" in check_entity: try: # which part of the raw file contains blocks - block_start = np.nonzero(blob[:, 1] == 'BLOCKS')[0][0] + block_start = np.nonzero(blob[:, 1] == "BLOCKS")[0][0] block_end = endsec[np.searchsorted(endsec, block_start)] blob_block = blob[block_start:block_end] blob_block_raw = blob_raw[block_start:block_end] - block_infl = np.nonzero( - (blob_block == [ - '0', 'BLOCK']).all( - axis=1))[0] + block_infl = np.nonzero((blob_block == ["0", "BLOCK"]).all(axis=1))[0] # collect blocks by name blocks = {} - for index in np.array_split( - np.arange(len(blob_block)), block_infl): + for index in np.array_split(np.arange(len(blob_block)), block_infl): try: v, e, name = convert_entities( - blob_block[index], - blob_block_raw[index], - return_name=True) + blob_block[index], blob_block_raw[index], return_name=True + ) if len(e) > 0: blocks[name] = (v, e) except BaseException: pass except BaseException: - log.error('failed to parse blocks!', exc_info=True) + log.error("failed to parse blocks!", exc_info=True) # actually load referenced entities vertices, entities = convert_entities( - blob[entity_start:entity_end], - blob_raw[entity_start:entity_end], - blocks=blocks) + blob[entity_start:entity_end], blob_raw[entity_start:entity_end], blocks=blocks + ) # return result as kwargs for trimesh.path.Path2D constructor - result = {'vertices': vertices, - 'entities': entities, - 'metadata': metadata} + result = {"vertices": vertices, "entities": entities, "metadata": metadata} return result @@ -216,11 +209,10 
@@ def info(e):
        """
        # which keys should we extract from the entity data
        # DXF group code : our metadata key
-        get = {'8': 'layer', '2': 'name'}
+        get = {"8": "layer", "2": "name"}
        # replace group codes with names and only
        # take info from the entity dict if it is in cand
-        renamed = {get[k]: util.make_sequence(v)[0] for k,
-                   v in e.items() if k in get}
+        renamed = {get[k]: util.make_sequence(v)[0] for k, v in e.items() if k in get}
        return renamed

    def convert_line(e):
@@ -228,25 +220,22 @@ def convert_line(e):
        Convert DXF LINE entities into trimesh Line entities.
        """
        # create a single Line entity
-        entities.append(Line(points=len(vertices) + np.arange(2),
-                             **info(e)))
+        entities.append(Line(points=len(vertices) + np.arange(2), **info(e)))
        # add the vertices to our collection
-        vertices.extend(np.array([[e['10'], e['20']],
-                                  [e['11'], e['21']]],
-                                 dtype=np.float64))
+        vertices.extend(
+            np.array([[e["10"], e["20"]], [e["11"], e["21"]]], dtype=np.float64)
+        )

    def convert_circle(e):
        """
        Convert DXF CIRCLE entities into trimesh Circle entities
        """
-        R = float(e['40'])
-        C = np.array([e['10'],
-                      e['20']]).astype(np.float64)
-        points = to_threepoint(center=C[:2],
-                               radius=R)
-        entities.append(Arc(points=(len(vertices) + np.arange(3)),
-                            closed=True,
-                            **info(e)))
+        R = float(e["40"])
+        C = np.array([e["10"], e["20"]]).astype(np.float64)
+        points = to_threepoint(center=C[:2], radius=R)
+        entities.append(
+            Arc(points=(len(vertices) + np.arange(3)), closed=True, **info(e))
+        )
        vertices.extend(points)

    def convert_arc(e):
@@ -254,24 +243,17 @@ def convert_arc(e):
        Convert DXF ARC entities into trimesh Arc entities.
        """
        # the radius of the circle
-        R = float(e['40'])
+        R = float(e["40"])
        # the center point of the circle
-        C = np.array([e['10'],
-                      e['20']], dtype=np.float64)
+        C = np.array([e["10"], e["20"]], dtype=np.float64)
        # the start and end angle of the arc, in degrees
        # this may depend on an AUNITS header data
-        A = np.radians(np.array([e['50'],
-                                 e['51']], dtype=np.float64))
+        A = np.radians(np.array([e["50"], e["51"]], dtype=np.float64))
        # convert center/radius/angle representation
        # to three points on the arc representation
-        points = to_threepoint(
-            center=C[:2],
-            radius=R,
-            angles=A)
+        points = to_threepoint(center=C[:2], radius=R, angles=A)
        # add a single Arc entity
-        entities.append(Arc(points=len(vertices) + np.arange(3),
-                            closed=False,
-                            **info(e)))
+        entities.append(Arc(points=len(vertices) + np.arange(3), closed=False, **info(e)))
        # add the three vertices
        vertices.extend(points)

@@ -280,15 +262,14 @@ def convert_polyline(e):
        Convert DXF LWPOLYLINE entities into trimesh Line entities.
""" # load the points in the line - lines = np.column_stack(( - e['10'], e['20'])).astype(np.float64) + lines = np.column_stack((e["10"], e["20"])).astype(np.float64) # save entity info so we don't have to recompute polyinfo = info(e) # 70 is the closed flag for polylines # if the closed flag is set make sure to close - is_closed = '70' in e and int(e['70'][0]) & 1 + is_closed = "70" in e and int(e["70"][0]) & 1 if is_closed: lines = np.vstack((lines, lines[:1])) @@ -296,13 +277,13 @@ def convert_polyline(e): # "bulge" is autocad for "add a stupid arc using flags # in my otherwise normal polygon", it's like SVG arc # flags but somehow even more annoying - if '42' in e: + if "42" in e: # get the actual bulge float values - bulge = np.array(e['42'], dtype=np.float64) + bulge = np.array(e["42"], dtype=np.float64) # what position were vertices stored at - vid = np.nonzero(chunk[:, 0] == '10')[0] + vid = np.nonzero(chunk[:, 0] == "10")[0] # what position were bulges stored at in the chunk - bid = np.nonzero(chunk[:, 0] == '42')[0] + bid = np.nonzero(chunk[:, 0] == "42")[0] # filter out endpoint bulge if we're not closed if not is_closed: bid_ok = bid < vid.max() @@ -311,10 +292,9 @@ def convert_polyline(e): # which vertex index is bulge value associated with bulge_idx = np.searchsorted(vid, bid) # convert stupid bulge to Line/Arc entities - v, e = bulge_to_arcs(lines=lines, - bulge=bulge, - bulge_idx=bulge_idx, - is_closed=is_closed) + v, e = bulge_to_arcs( + lines=lines, bulge=bulge, bulge_idx=bulge_idx, is_closed=is_closed + ) for i in e: # offset added entities by current vertices length i.points += len(vertices) @@ -325,9 +305,7 @@ def convert_polyline(e): # we have a normal polyline so just add it # as single line entity and vertices - entities.append(Line( - points=np.arange(len(lines)) + len(vertices), - **polyinfo)) + entities.append(Line(points=np.arange(len(lines)) + len(vertices), **polyinfo)) vertices.extend(lines) def convert_bspline(e): @@ -337,16 +315,13 @@ def convert_bspline(e): # in the DXF there are n points and n ordered fields # with the same group code - points = np.column_stack((e['10'], - e['20'])).astype(np.float64) - knots = np.array(e['40']).astype(np.float64) + points = np.column_stack((e["10"], e["20"])).astype(np.float64) + knots = np.array(e["40"]).astype(np.float64) # if there are only two points, save it as a line if len(points) == 2: # create a single Line entity - entities.append(Line(points=len(vertices) + - np.arange(2), - **info(e))) + entities.append(Line(points=len(vertices) + np.arange(2), **info(e))) # add the vertices to our collection vertices.extend(points) return @@ -354,15 +329,17 @@ def convert_bspline(e): # check bit coded flag for closed # closed = bool(int(e['70'][0]) & 1) # check euclidean distance to see if closed - closed = np.linalg.norm(points[0] - - points[-1]) < tol.merge + closed = np.linalg.norm(points[0] - points[-1]) < tol.merge # create a BSpline entity - entities.append(BSpline( - points=np.arange(len(points)) + len(vertices), - knots=knots, - closed=closed, - **info(e))) + entities.append( + BSpline( + points=np.arange(len(points)) + len(vertices), + knots=knots, + closed=closed, + **info(e), + ) + ) # add the vertices vertices.extend(points) @@ -371,35 +348,38 @@ def convert_text(e): Convert a DXF TEXT entity into a native text entity. 
""" # text with leading and trailing whitespace removed - text = e['1'].strip() + text = e["1"].strip() # try getting optional height of text try: - height = float(e['40']) + height = float(e["40"]) except BaseException: height = None try: # rotation angle converted to radians - angle = np.radians(float(e['50'])) + angle = np.radians(float(e["50"])) except BaseException: # otherwise no rotation angle = 0.0 # origin point - origin = np.array( - [e['10'], e['20']], dtype=np.float64) + origin = np.array([e["10"], e["20"]], dtype=np.float64) # an origin-relative point (so transforms work) vector = origin + [np.cos(angle), np.sin(angle)] # try to extract a (horizontal, vertical) text alignment - align = ['center', 'center'] + align = ["center", "center"] try: - align[0] = ['left', 'center', 'right'][int(e['72'])] + align[0] = ["left", "center", "right"][int(e["72"])] except BaseException: pass # append the entity - entities.append(Text(origin=len(vertices), - vector=len(vertices) + 1, - height=height, - text=text, - align=align)) + entities.append( + Text( + origin=len(vertices), + vector=len(vertices) + 1, + height=height, + text=text, + align=align, + ) + ) # append the text origin and direction vertices.append(origin) vertices.append(vector) @@ -413,20 +393,16 @@ def convert_insert(e): return # name of block to insert - name = e['2'] + name = e["2"] # if we haven't loaded the block skip if name not in blocks: return # angle to rotate the block by - angle = float(e.get('50', 0.0)) + angle = float(e.get("50", 0.0)) # the insertion point of the block - offset = np.array([e.get('10', 0.0), - e.get('20', 0.0)], - dtype=np.float64) + offset = np.array([e.get("10", 0.0), e.get("20", 0.0)], dtype=np.float64) # what to scale the block by - scale = np.array([e.get('41', 1.0), - e.get('42', 1.0)], - dtype=np.float64) + scale = np.array([e.get("41", 1.0), e.get("42", 1.0)], dtype=np.float64) # the current entities and vertices of the referenced block. 
cv, ce = blocks[name] @@ -436,18 +412,23 @@ def convert_insert(e): # offset its vertices to the current index entities[-1].points += len(vertices) # transform the block's vertices based on the entity settings - vertices.extend(tf.transform_points(cv, tf.planar_matrix( - offset=offset, theta=np.radians(angle), scale=scale))) + vertices.extend( + tf.transform_points( + cv, tf.planar_matrix(offset=offset, theta=np.radians(angle), scale=scale) + ) + ) # find the start points of entities # DXF object to trimesh object converters - loaders = {'LINE': (dict, convert_line), - 'LWPOLYLINE': (multi_dict, convert_polyline), - 'ARC': (dict, convert_arc), - 'CIRCLE': (dict, convert_circle), - 'SPLINE': (multi_dict, convert_bspline), - 'INSERT': (dict, convert_insert), - 'BLOCK': (dict, convert_insert)} + loaders = { + "LINE": (dict, convert_line), + "LWPOLYLINE": (multi_dict, convert_polyline), + "ARC": (dict, convert_arc), + "CIRCLE": (dict, convert_circle), + "SPLINE": (multi_dict, convert_bspline), + "INSERT": (dict, convert_insert), + "BLOCK": (dict, convert_insert), + } # store loaded vertices vertices = [] @@ -457,13 +438,12 @@ def convert_insert(e): # multiple vertex entities like a real asshole polyline = None # chunks of entities are divided by group-code-0 - inflection = np.nonzero(blob[:, 0] == '0')[0] + inflection = np.nonzero(blob[:, 0] == "0")[0] unsupported = defaultdict(lambda: 0) # loop through chunks of entity information - for index in np.array_split( - np.arange(len(blob)), inflection): + for index in np.array_split(np.arange(len(blob)), inflection): # if there is only a header continue if len(index) < 1: continue @@ -474,53 +454,47 @@ def convert_insert(e): # if we are referencing a block or insert by name make # sure the name key is in the original case vs upper-case - if entity_type in ('BLOCK', 'INSERT'): + if entity_type in ("BLOCK", "INSERT"): try: - index_name = next( - i for i, v in enumerate(chunk) - if v[0] == '2') + index_name = next(i for i, v in enumerate(chunk) if v[0] == "2") chunk[index_name][1] = blob_raw[index][index_name][1] except StopIteration: pass # special case old- style polyline entities - if entity_type == 'POLYLINE': + if entity_type == "POLYLINE": polyline = [dict(chunk)] # if we are collecting vertex entities - elif polyline is not None and entity_type == 'VERTEX': + elif polyline is not None and entity_type == "VERTEX": polyline.append(dict(chunk)) # the end of a polyline - elif polyline is not None and entity_type == 'SEQEND': + elif polyline is not None and entity_type == "SEQEND": # pull the geometry information for the entity - lines = np.array([[i['10'], i['20']] - for i in polyline[1:]], - dtype=np.float64) + lines = np.array([[i["10"], i["20"]] for i in polyline[1:]], dtype=np.float64) is_closed = False # check for a closed flag on the polyline - if '70' in polyline[0]: + if "70" in polyline[0]: # flag is bit- coded integer - flag = int(polyline[0]['70']) + flag = int(polyline[0]["70"]) # first bit represents closed is_closed = bool(flag & 1) if is_closed: lines = np.vstack((lines, lines[:1])) # get the index of each bulged vertices - bulge_idx = np.array([i for i, e in enumerate(polyline) - if '42' in e], - dtype=np.int64) + bulge_idx = np.array( + [i for i, e in enumerate(polyline) if "42" in e], dtype=np.int64 + ) # get the actual bulge value - bulge = np.array([float(e['42']) - for i, e in enumerate(polyline) - if '42' in e], - dtype=np.float64) + bulge = np.array( + [float(e["42"]) for i, e in enumerate(polyline) if "42" in e], + 
dtype=np.float64, + ) # convert bulge to new entities cv, ce = bulge_to_arcs( - lines=lines, - bulge=bulge, - bulge_idx=bulge_idx, - is_closed=is_closed) + lines=lines, bulge=bulge, bulge_idx=bulge_idx, is_closed=is_closed + ) for i in ce: # offset entities by existing vertices i.points += len(vertices) @@ -528,7 +502,7 @@ def convert_insert(e): entities.extend(ce) # we no longer have an active polyline polyline = None - elif entity_type == 'TEXT': + elif entity_type == "TEXT": # text entities need spaces preserved so take # group codes from clean representation (0- column) # and data from the raw representation (1- column) @@ -540,8 +514,7 @@ def convert_insert(e): try: convert_text(dict(chunk_raw)) except BaseException: - log.debug('failed to load text entity!', - exc_info=True) + log.debug("failed to load text entity!", exc_info=True) # if the entity contains all relevant data we can # cleanly load it from inside a single function elif entity_type in loaders: @@ -551,16 +524,18 @@ def convert_insert(e): entity_data = chunker(chunk) # append data to the lists we're collecting loader(entity_data) - elif entity_type != 'ENTITIES': + elif entity_type != "ENTITIES": unsupported[entity_type] += 1 if len(unsupported) > 0: - log.debug('skipping dxf entities: {}'.format( - ', '.join(f'{k}: {v}' for k, v - in unsupported.items()))) + log.debug( + "skipping dxf entities: {}".format( + ", ".join(f"{k}: {v}" for k, v in unsupported.items()) + ) + ) # stack vertices into single array vertices = util.vstack_empty(vertices).astype(np.float64) if return_name: - name = blob_raw[blob[:, 0] == '2'][0][1] + name = blob_raw[blob[:, 0] == "2"][0][1] return vertices, entities, name return vertices, entities @@ -583,12 +558,9 @@ def export_dxf(path, only_layers=None): Path formatted as a DXF file """ # get the template for exporting DXF files - template = resources.get( - 'templates/dxf.json', decode_json=True) + template = resources.get("templates/dxf.json", decode_json=True) - def format_points(points, - as_2D=False, - increment=True): + def format_points(points, as_2D=False, increment=True): """ Format points into DXF- style point string. 
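# --- illustrative sketch, not part of the patch ---
# A standalone rewrite of the group-code encoding that `format_points`
# implements: DXF pairs every value with an integer group code, 10/20/30
# for X/Y/Z, optionally incremented per vertex. The function name and the
# 2D-only zero-padding here are assumptions made for brevity.
import numpy as np

def format_points_sketch(points, increment=True):
    # stack 2D input points into 3D with a zero Z column
    three = np.column_stack((points, np.zeros(len(points))))
    if increment:
        # group codes count up per vertex: 10/20/30, 11/21/31, ...
        group = np.tile(np.arange(len(three), dtype=np.int64).reshape((-1, 1)), (1, 3))
    else:
        group = np.zeros((len(three), 3), dtype=np.int64)
    group += [10, 20, 30]
    # interleave as "code\nvalue" lines, e.g. "10\n1\n20\n2\n30\n0"
    return "\n".join(f"{g:d}\n{v:.12g}" for g, v in zip(group.reshape(-1), three.reshape(-1)))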
@@ -616,9 +588,8 @@ def format_points(points, three = util.stack_3D(points) if increment: group = np.tile( - np.arange( - len(three), - dtype=np.int64).reshape((-1, 1)), (1, 3)) + np.arange(len(three), dtype=np.int64).reshape((-1, 1)), (1, 3) + ) else: group = np.zeros((len(three), 3), dtype=np.int64) group += [10, 20, 30] @@ -627,9 +598,9 @@ def format_points(points, group = group[:, :2] three = three[:, :2] # join into result string - packed = '\n'.join(f'{g:d}\n{v:.12g}' - for g, v in zip(group.reshape(-1), - three.reshape(-1))) + packed = "\n".join( + f"{g:d}\n{v:.12g}" for g, v in zip(group.reshape(-1), three.reshape(-1)) + ) return packed @@ -648,12 +619,14 @@ def entity_info(entity): Has keys 'COLOR', 'LAYER', 'NAME' """ # TODO : convert RGBA entity.color to index - subs = {'COLOR': 255, # default is ByLayer - 'LAYER': 0, - 'NAME': str(id(entity))[:16]} - if hasattr(entity, 'layer'): + subs = { + "COLOR": 255, # default is ByLayer + "LAYER": 0, + "NAME": str(id(entity))[:16], + } + if hasattr(entity, "layer"): # make sure layer name is forced into ASCII - subs['LAYER'] = util.to_ascii(entity.layer) + subs["LAYER"] = util.to_ascii(entity.layer) return subs def convert_line(line, vertices): @@ -676,83 +649,77 @@ def convert_line(line, vertices): points = line.discrete(vertices) # if one or fewer points return nothing if len(points) <= 1: - return '' + return "" # generate a substitution dictionary for template subs = entity_info(line) - subs['POINTS'] = format_points( - points, as_2D=True, increment=False) - subs['TYPE'] = 'LWPOLYLINE' - subs['VCOUNT'] = len(points) + subs["POINTS"] = format_points(points, as_2D=True, increment=False) + subs["TYPE"] = "LWPOLYLINE" + subs["VCOUNT"] = len(points) # 1 is closed # 0 is default (open) - subs['FLAG'] = int(bool(line.closed)) + subs["FLAG"] = int(bool(line.closed)) - result = template['line'].format(**subs) + result = template["line"].format(**subs) return result def convert_arc(arc, vertices): # get the center of arc and include span angles - info = arc.center( - vertices, return_angle=True, return_normal=False) + info = arc.center(vertices, return_angle=True, return_normal=False) subs = entity_info(arc) center = info.center if len(center) == 2: center = np.append(center, 0.0) - data = '10\n{:.12g}\n20\n{:.12g}\n30\n{:.12g}'.format(*center) - data += f'\n40\n{info.radius:.12g}' + data = "10\n{:.12g}\n20\n{:.12g}\n30\n{:.12g}".format(*center) + data += f"\n40\n{info.radius:.12g}" if arc.closed: - subs['TYPE'] = 'CIRCLE' + subs["TYPE"] = "CIRCLE" else: - subs['TYPE'] = 'ARC' + subs["TYPE"] = "ARC" # an arc is the same as a circle, with an added start # and end angle field - data += '\n100\nAcDbArc' - data += '\n50\n{:.12g}\n51\n{:.12g}'.format( - *np.degrees(info.angles)) - subs['DATA'] = data - result = template['arc'].format(**subs) + data += "\n100\nAcDbArc" + data += "\n50\n{:.12g}\n51\n{:.12g}".format(*np.degrees(info.angles)) + subs["DATA"] = data + result = template["arc"].format(**subs) return result def convert_bspline(spline, vertices): # points formatted with group code - points = format_points(vertices[spline.points], - increment=False) + points = format_points(vertices[spline.points], increment=False) # (n,) float knots, formatted with group code - knots = ('40\n{:.12g}\n' * len(spline.knots) - ).format(*spline.knots)[:-1] + knots = ("40\n{:.12g}\n" * len(spline.knots)).format(*spline.knots)[:-1] # bit coded - flags = {'closed': 1, - 'periodic': 2, - 'rational': 4, - 'planar': 8, - 'linear': 16} + flags = {"closed": 1, 
"periodic": 2, "rational": 4, "planar": 8, "linear": 16} - flag = flags['planar'] + flag = flags["planar"] if spline.closed: - flag = flag | flags['closed'] + flag = flag | flags["closed"] normal = [0.0, 0.0, 1.0] n_code = [210, 220, 230] - n_str = '\n'.join(f'{i:d}\n{j:.12g}' - for i, j in zip(n_code, normal)) + n_str = "\n".join(f"{i:d}\n{j:.12g}" for i, j in zip(n_code, normal)) subs = entity_info(spline) - subs.update({'TYPE': 'SPLINE', - 'POINTS': points, - 'KNOTS': knots, - 'NORMAL': n_str, - 'DEGREE': 3, - 'FLAG': flag, - 'FCOUNT': 0, - 'KCOUNT': len(spline.knots), - 'PCOUNT': len(spline.points)}) + subs.update( + { + "TYPE": "SPLINE", + "POINTS": points, + "KNOTS": knots, + "NORMAL": n_str, + "DEGREE": 3, + "FLAG": flag, + "FCOUNT": 0, + "KCOUNT": len(spline.knots), + "PCOUNT": len(spline.points), + } + ) # format into string template - result = template['bspline'].format(**subs) + result = template["bspline"].format(**subs) return result @@ -763,17 +730,19 @@ def convert_text(txt, vertices): # start with layer info sub = entity_info(txt) # get the origin point of the text - sub['ORIGIN'] = format_points( - vertices[[txt.origin]], increment=False) + sub["ORIGIN"] = format_points(vertices[[txt.origin]], increment=False) # rotation angle in degrees - sub['ANGLE'] = np.degrees(txt.angle(vertices)) + sub["ANGLE"] = np.degrees(txt.angle(vertices)) # actual string of text with spaces escaped # force into ASCII to avoid weird encoding issues - sub['TEXT'] = txt.text.replace(' ', _SAFESPACE).encode( - 'ascii', errors='ignore').decode('ascii') + sub["TEXT"] = ( + txt.text.replace(" ", _SAFESPACE) + .encode("ascii", errors="ignore") + .decode("ascii") + ) # height of text - sub['HEIGHT'] = txt.height - result = template['text'].format(**sub) + sub["HEIGHT"] = txt.height + result = template["text"].format(**sub) return result def convert_generic(entity, vertices): @@ -787,11 +756,13 @@ def convert_generic(entity, vertices): # precision in the string conversion np.set_printoptions(precision=12) # trimesh entity to DXF entity converters - conversions = {'Line': convert_line, - 'Text': convert_text, - 'Arc': convert_arc, - 'Bezier': convert_generic, - 'BSpline': convert_bspline} + conversions = { + "Line": convert_line, + "Text": convert_text, + "Arc": convert_arc, + "Bezier": convert_generic, + "BSpline": convert_bspline, + } collected = [] for e, layer in zip(path.entities, path.layers): name = type(e).__name__ @@ -804,32 +775,29 @@ def convert_generic(entity, vertices): # only save if we converted something collected.append(converted) else: - log.debug('Entity type %s not exported!', name) + log.debug("Entity type %s not exported!", name) # join all entities into one string - entities_str = '\n'.join(collected) + entities_str = "\n".join(collected) # add in the extents of the document as explicit XYZ lines - hsub = {f'EXTMIN_{k}': v for k, v in zip( - 'XYZ', np.append(path.bounds[0], 0.0))} - hsub.update({f'EXTMAX_{k}': v for k, v in zip( - 'XYZ', np.append(path.bounds[1], 0.0))}) + hsub = {f"EXTMIN_{k}": v for k, v in zip("XYZ", np.append(path.bounds[0], 0.0))} + hsub.update({f"EXTMAX_{k}": v for k, v in zip("XYZ", np.append(path.bounds[1], 0.0))}) # apply a units flag defaulting to `1` - hsub['LUNITS'] = _UNITS_TO_DXF.get(path.units, 1) + hsub["LUNITS"] = _UNITS_TO_DXF.get(path.units, 1) # run the format for the header - sections = [template['header'].format(**hsub).strip()] + sections = [template["header"].format(**hsub).strip()] # do the same for entities - 
sections.append(template['entities'].format( - ENTITIES=entities_str).strip()) + sections.append(template["entities"].format(ENTITIES=entities_str).strip()) # and the footer - sections.append(template['footer'].strip()) + sections.append(template["footer"].strip()) # filter out empty sections # random whitespace causes AutoCAD to fail to load # although Draftsight, LibreCAD, and Inkscape don't care # what a giant legacy piece of shit # create the joined string blob - blob = '\n'.join(sections).replace(_SAFESPACE, ' ') + blob = "\n".join(sections).replace(_SAFESPACE, " ") # run additional self- checks if tol.strict: # check that every line pair is (group code, value) @@ -839,16 +807,12 @@ def convert_generic(entity, vertices): # group codes should all be convertible to int and positive assert all(int(i) >= 0 for i in lines[::2]) # make sure we didn't slip any unicode in there - blob.encode('ascii') + blob.encode("ascii") return blob -def bulge_to_arcs(lines, - bulge, - bulge_idx, - is_closed=False, - metadata=None): +def bulge_to_arcs(lines, bulge, bulge_idx, is_closed=False, metadata=None): """ Polylines can have "vertex bulge," which means the polyline has an arc tangent to segments, rather than meeting at a @@ -916,12 +880,13 @@ def bulge_to_arcs(lines, vector = lines[tid[:, 0]] - lines[tid[:, 1]] # the length of the connector segment - length = (np.linalg.norm(vector, axis=1)) + length = np.linalg.norm(vector, axis=1) # perpendicular vectors by crossing vector with Z perp = np.cross( np.column_stack((vector, np.zeros(len(vector)))), - np.ones((len(vector), 3)) * [0, 0, 1]) + np.ones((len(vector), 3)) * [0, 0, 1], + ) # strip the zero Z perp = util.unitize(perp[:, :2]) @@ -936,31 +901,26 @@ def bulge_to_arcs(lines, # convert each arc to three points: # start, any point on arc, end - three = np.column_stack(( - lines[tid[:, 0]], - midpoint + perp * offset.reshape((-1, 1)), - lines[tid[:, 1]])).reshape((-1, 3, 2)) + three = np.column_stack( + (lines[tid[:, 0]], midpoint + perp * offset.reshape((-1, 1)), lines[tid[:, 1]]) + ).reshape((-1, 3, 2)) # if we're in strict mode make sure our arcs # have the same magnitude as the input data if tol.strict: from ..arc import arc_center - check_angle = [arc_center(i).span - for i in three] - assert np.allclose(np.abs(angle), - np.abs(check_angle)) - check_radii = [arc_center(i).radius - for i in three] + check_angle = [arc_center(i).span for i in three] + assert np.allclose(np.abs(angle), np.abs(check_angle)) + + check_radii = [arc_center(i).radius for i in three] assert np.allclose(check_radii, np.abs(radius)) # collect new entities and vertices entities, vertices = [], [] # add the entities for each new arc for arc_points in three: - entities.append(Arc( - points=np.arange(3) + len(vertices), - **metadata)) + entities.append(Arc(points=np.arange(3) + len(vertices), **metadata)) vertices.extend(arc_points) # if there are unconsumed line @@ -970,13 +930,10 @@ def bulge_to_arcs(lines, existing = util.stack_lines(np.arange(len(lines))) # remove line segments replaced with arcs for line_idx in grouping.boolean_rows( - existing, - np.sort(tid, axis=1), - np.setdiff1d): + existing, np.sort(tid, axis=1), np.setdiff1d + ): # add a single line entity and vertices - entities.append(Line( - points=np.arange(2) + len(vertices), - **metadata)) + entities.append(Line(points=np.arange(2) + len(vertices), **metadata)) vertices.extend(lines[line_idx].copy()) # make sure vertices are clean numpy array @@ -1004,4 +961,4 @@ def get_key(blob, field, code): # store the 
loaders we have available
-_dxf_loaders = {'dxf': load_dxf}
+_dxf_loaders = {"dxf": load_dxf}
diff --git a/trimesh/path/exchange/export.py b/trimesh/path/exchange/export.py
index 96b1eb0df..9a5db560f 100644
--- a/trimesh/path/exchange/export.py
+++ b/trimesh/path/exchange/export.py
@@ -4,10 +4,7 @@
 from . import dxf, svg_io


-def export_path(path,
-                file_type=None,
-                file_obj=None,
-                **kwargs):
+def export_path(path, file_type=None, file_obj=None, **kwargs):
     """
     Export a Path object to a file- like object, or to a filename

@@ -42,8 +39,7 @@ def export_dict(path):
     Export a path as a dict of kwargs for the Path constructor.
     """
     export_entities = [e.to_dict() for e in path.entities]
-    export_object = {'entities': export_entities,
-                     'vertices': path.vertices.tolist()}
+    export_object = {"entities": export_entities, "vertices": path.vertices.tolist()}
     return export_object


@@ -61,23 +57,20 @@ def _write_export(export, file_obj=None):

     if file_obj is None:
         return export
-    if hasattr(file_obj, 'write'):
+    if hasattr(file_obj, "write"):
         out_file = file_obj
     else:
         # expand user and relative paths
-        file_path = os.path.abspath(
-            os.path.expanduser(file_obj))
-        out_file = open(file_path, 'wb')
+        file_path = os.path.abspath(os.path.expanduser(file_obj))
+        out_file = open(file_path, "wb")

     try:
         out_file.write(export)
     except TypeError:
-        out_file.write(export.encode('utf-8'))
+        out_file.write(export.encode("utf-8"))
     out_file.close()

     return export


-_path_exporters = {'dxf': dxf.export_dxf,
-                   'svg': svg_io.export_svg,
-                   'dict': export_dict}
+_path_exporters = {"dxf": dxf.export_dxf, "svg": svg_io.export_svg, "dict": export_dict}
diff --git a/trimesh/path/exchange/load.py b/trimesh/path/exchange/load.py
index 3cfb18278..b7baaaab6 100644
--- a/trimesh/path/exchange/load.py
+++ b/trimesh/path/exchange/load.py
@@ -32,6 +32,7 @@ def load_path(file_obj, file_type=None, **kwargs):
     """
     # avoid a circular import
     from ...exchange.load import load_kwargs
+
     # record how long we took
     tic = util.now()

@@ -41,20 +42,18 @@ def load_path(file_obj, file_type=None, **kwargs):
         return file_obj
     elif util.is_file(file_obj):
         # for open file objects use loaders
-        kwargs.update(path_loaders[file_type](
-            file_obj, file_type=file_type))
+        kwargs.update(path_loaders[file_type](file_obj, file_type=file_type))
     elif util.is_string(file_obj):
         # strings passed are evaluated as file paths
-        with open(file_obj, 'rb') as f:
+        with open(file_obj, "rb") as f:
             # get the file type from the extension
             file_type = os.path.splitext(file_obj)[-1][1:].lower()
             # call the loader
-            kwargs.update(path_loaders[file_type](
-                f, file_type=file_type))
-    elif util.is_instance_named(file_obj, ['Polygon', 'MultiPolygon']):
+            kwargs.update(path_loaders[file_type](f, file_type=file_type))
+    elif util.is_instance_named(file_obj, ["Polygon", "MultiPolygon"]):
         # convert from shapely polygons to Path2D
         kwargs.update(misc.polygon_to_path(file_obj))
-    elif util.is_instance_named(file_obj, 'MultiLineString'):
+    elif util.is_instance_named(file_obj, "MultiLineString"):
         # convert from shapely LineStrings to Path2D
         kwargs.update(misc.linestrings_to_path(file_obj))
     elif isinstance(file_obj, dict):
@@ -64,10 +63,10 @@ def load_path(file_obj, file_type=None, **kwargs):
         # load as lines in space
         kwargs.update(misc.lines_to_path(file_obj))
     else:
-        raise ValueError('Not a supported object type!')
+        raise ValueError("Not a supported object type!")

     result = load_kwargs(kwargs)
-    util.log.debug(f'loaded {str(result)} in {util.now() - tic:0.4f}s')
+    util.log.debug(f"loaded 
{str(result)} in {util.now() - tic:0.4f}s") return result @@ -85,5 +84,5 @@ def path_formats(): return set(path_loaders.keys()) -path_loaders = {'svg': svg_to_path} +path_loaders = {"svg": svg_to_path} path_loaders.update(_dxf_loaders) diff --git a/trimesh/path/exchange/misc.py b/trimesh/path/exchange/misc.py index 57c958a7e..e99d56429 100644 --- a/trimesh/path/exchange/misc.py +++ b/trimesh/path/exchange/misc.py @@ -22,14 +22,15 @@ def dict_to_path(as_dict): # start kwargs with initial value result = as_dict.copy() # map of constructors - loaders = {'Arc': Arc, 'Line': Line} + loaders = {"Arc": Arc, "Line": Line} # pre- allocate entity array - entities = [None] * len(as_dict['entities']) + entities = [None] * len(as_dict["entities"]) # run constructor for dict kwargs - for entity_index, entity in enumerate(as_dict['entities']): - entities[entity_index] = loaders[entity['type']]( - points=entity['points'], closed=entity['closed']) - result['entities'] = entities + for entity_index, entity in enumerate(as_dict["entities"]): + entities[entity_index] = loaders[entity["type"]]( + points=entity["points"], closed=entity["closed"] + ) + result["entities"] = entities return result @@ -53,8 +54,7 @@ def lines_to_path(lines): if util.is_shape(lines, (-1, (2, 3))): # the case where we have a list of points # we are going to assume they are connected - result = {'entities': np.array([Line(np.arange(len(lines)))]), - 'vertices': lines} + result = {"entities": np.array([Line(np.arange(len(lines)))]), "vertices": lines} return result elif util.is_shape(lines, (-1, 2, (2, 3))): # case where we have line segments in 2D or 3D @@ -66,10 +66,9 @@ def lines_to_path(lines): # use scipy edges_to_path to skip creating # a bajillion individual line entities which # will be super slow vs. 
fewer polyline entities
-        return edges_to_path(edges=inverse.reshape((-1, 2)),
-                             vertices=lines[unique])
+        return edges_to_path(edges=inverse.reshape((-1, 2)), vertices=lines[unique])
    else:
-        raise ValueError('Lines must be (n,(2|3)) or (n,2,(2|3))')
+        raise ValueError("Lines must be (n,(2|3)) or (n,2,(2|3))")
    return result

@@ -92,21 +91,19 @@ def polygon_to_path(polygon):
    # start vertices
    vertices = []

-    if hasattr(polygon.boundary, 'geoms'):
+    if hasattr(polygon.boundary, "geoms"):
        boundaries = polygon.boundary.geoms
    else:
        boundaries = [polygon.boundary]

    # append interiors as single Line objects
    for boundary in boundaries:
-        entities.append(Line(np.arange(len(boundary.coords)) +
-                             len(vertices)))
+        entities.append(Line(np.arange(len(boundary.coords)) + len(vertices)))
        # append the new vertex array
        vertices.extend(boundary.coords)

    # make sure result arrays are numpy
-    kwargs = {'entities': entities,
-              'vertices': np.array(vertices)}
+    kwargs = {"entities": entities, "vertices": np.array(vertices)}
    return kwargs

@@ -134,16 +131,14 @@ def linestrings_to_path(multi):

    for line in multi:
        # only append geometry with points
-        if hasattr(line, 'coords'):
+        if hasattr(line, "coords"):
            coords = np.array(line.coords)
            if len(coords) < 2:
                continue
-            entities.append(Line(np.arange(len(coords)) +
-                                 len(vertices)))
+            entities.append(Line(np.arange(len(coords)) + len(vertices)))
            vertices.extend(coords)

-    kwargs = {'entities': np.array(entities),
-              'vertices': np.array(vertices)}
+    kwargs = {"entities": np.array(entities), "vertices": np.array(vertices)}
    return kwargs

@@ -168,21 +163,16 @@ def faces_to_path(mesh, face_ids=None, **kwargs):
        edges = mesh.edges_sorted
    else:
        # take advantage of edge ordering to index as single row
-        edges = mesh.edges_sorted.reshape(
-            (-1, 6))[face_ids].reshape((-1, 2))
+        edges = mesh.edges_sorted.reshape((-1, 6))[face_ids].reshape((-1, 2))

    # an edge which occurs only once is on the boundary
-    unique_edges = grouping.group_rows(
-        edges, require_count=1)
+    unique_edges = grouping.group_rows(edges, require_count=1)

    # add edges and vertices to kwargs
-    kwargs.update(edges_to_path(edges=edges[unique_edges],
-                                vertices=mesh.vertices))
+    kwargs.update(edges_to_path(edges=edges[unique_edges], vertices=mesh.vertices))

    return kwargs


-def edges_to_path(edges,
-                  vertices,
-                  **kwargs):
+def edges_to_path(edges, vertices, **kwargs):
    """
    Given an edge list of indices and associated vertices
    representing lines, generate kwargs for a Path object.
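# --- usage sketch, not part of the patch ---
# `edges_to_path` returns constructor kwargs rather than a Path object, so
# two connected segments become a Path2D like this; the edge and vertex
# arrays are made-up example data.
import numpy as np
import trimesh
from trimesh.path.exchange.misc import edges_to_path

kwargs = edges_to_path(
    edges=np.array([[0, 1], [1, 2]]),
    vertices=np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]]),
)
# kwargs contains 'entities', 'vertices' and 'process'
path = trimesh.path.Path2D(**kwargs)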
@@ -200,7 +190,7 @@ def edges_to_path(edges, Kwargs for Path constructor """ # sequence of ordered traversals - dfs = graph.traversals(edges, mode='dfs') + dfs = graph.traversals(edges, mode="dfs") # make sure every consecutive index in DFS # traversal is an edge in the source edge list dfs_connected = graph.fill_traversals(dfs, edges=edges) @@ -208,7 +198,5 @@ def edges_to_path(edges, # turn traversals into Line objects lines = [Line(d) for d in dfs_connected] - kwargs.update({'entities': lines, - 'vertices': vertices, - 'process': False}) + kwargs.update({"entities": lines, "vertices": vertices, "process": False}) return kwargs diff --git a/trimesh/path/exchange/svg_io.py b/trimesh/path/exchange/svg_io.py index 4f04c2877..8d2af6851 100644 --- a/trimesh/path/exchange/svg_io.py +++ b/trimesh/path/exchange/svg_io.py @@ -28,12 +28,12 @@ etree = exceptions.ExceptionWrapper(E) # store any additional properties using a trimesh namespace -_ns_name = 'trimesh' -_ns_url = 'https://github.com/mikedh/trimesh' -_ns = f'{{{_ns_url}}}' +_ns_name = "trimesh" +_ns_url = "https://github.com/mikedh/trimesh" +_ns = f"{{{_ns_url}}}" _IDENTITY = np.eye(3) -_IDENTITY.flags['WRITEABLE'] = False +_IDENTITY.flags["WRITEABLE"] = False def svg_to_path(file_obj=None, file_type=None, path_string=None): @@ -69,9 +69,8 @@ def element_transform(e, max_depth=10): matrices = [] current = e for _ in range(max_depth): - if 'transform' in current.attrib: - matrices.extend(transform_to_matrices( - current.attrib['transform'])) + if "transform" in current.attrib: + matrices.extend(transform_to_matrices(current.attrib["transform"])) current = current.getparent() if current is None: break @@ -89,52 +88,49 @@ def element_transform(e, max_depth=10): # store paths and transforms as # (path string, 3x3 matrix) paths = [] - for element in tree.iter('{*}path'): + for element in tree.iter("{*}path"): # store every path element attributes and transform - paths.append((element.attrib, - element_transform(element))) + paths.append((element.attrib, element_transform(element))) try: # see if the SVG should be reproduced as a scene - force = tree.attrib[_ns + 'class'] + force = tree.attrib[_ns + "class"] except BaseException: pass elif path_string is not None: # parse a single SVG path string - paths = [({'d': path_string}, np.eye(3))] + paths = [({"d": path_string}, np.eye(3))] else: - raise ValueError('`file_obj` or `pathstring` required') + raise ValueError("`file_obj` or `pathstring` required") result = _svg_path_convert(paths=paths, force=force) try: # get overall metadata from JSON string if it exists - result['metadata'] = _decode( - tree.attrib[_ns + 'metadata']) + result["metadata"] = _decode(tree.attrib[_ns + "metadata"]) except KeyError: # not in the trimesh ns pass except BaseException: # no metadata stored with trimesh ns - log.debug('failed metadata', exc_info=True) + log.debug("failed metadata", exc_info=True) # if the result is a scene try to get the metadata # for each subgeometry here - if 'geometry' in result: + if "geometry" in result: try: # get per-geometry metadata if available - bag = _decode( - tree.attrib[_ns + 'metadata_geometry']) + bag = _decode(tree.attrib[_ns + "metadata_geometry"]) for name, meta in bag.items(): - if name in result['geometry']: + if name in result["geometry"]: # assign this metadata to the geometry - result['geometry'][name]['metadata'] = meta + result["geometry"][name]["metadata"] = meta except KeyError: # no stored geometry metadata so ignore pass except BaseException: # failed to load 
existing metadata - log.debug('failed metadata', exc_info=True) + log.debug("failed metadata", exc_info=True) return result @@ -161,32 +157,32 @@ def transform_to_matrices(transform): # split the transform string in to components of: # (operation, args) i.e. (translate, '-1.0, 2.0') components = [ - [j.strip() for j in i.strip().split('(') if len(j) > 0] - for i in transform.lower().split(')') if len(i) > 0] + [j.strip() for j in i.strip().split("(") if len(j) > 0] + for i in transform.lower().split(")") + if len(i) > 0 + ] # store each matrix without dotting matrices = [] for line in components: if len(line) == 0: continue elif len(line) != 2: - raise ValueError('should always have two components!') + raise ValueError("should always have two components!") key, args = line # convert string args to array of floats # support either comma or space delimiter - values = np.array([float(i) for i in - args.replace(',', ' ').split()]) - if key == 'translate': + values = np.array([float(i) for i in args.replace(",", " ").split()]) + if key == "translate": # convert translation to a (3, 3) homogeneous matrix matrices.append(_IDENTITY.copy()) matrices[-1][:2, 2] = values - elif key == 'matrix': + elif key == "matrix": # [a b c d e f] -> # [[a c e], # [b d f], # [0 0 1]] - matrices.append(np.vstack(( - values.reshape((3, 2)).T, [0, 0, 1]))) - elif key == 'rotate': + matrices.append(np.vstack((values.reshape((3, 2)).T, [0, 0, 1]))) + elif key == "rotate": # SVG rotations are in degrees angle = np.degrees(values[0]) # if there are three values rotate around point @@ -194,15 +190,14 @@ def transform_to_matrices(transform): point = values[1:] else: point = None - matrices.append(planar_matrix(theta=angle, - point=point)) - elif key == 'scale': + matrices.append(planar_matrix(theta=angle, point=point)) + elif key == "scale": # supports (x_scale, y_scale) or (scale) mat = _IDENTITY.copy() mat[:2, :2] *= values matrices.append(mat) else: - log.debug(f'unknown SVG transform: {key}') + log.debug(f"unknown SVG transform: {key}") return matrices @@ -221,9 +216,9 @@ def _svg_path_convert(paths, force=None): drawing : dict Kwargs for Path2D constructor """ + def complex_to_float(values): - return np.array([[i.real, i.imag] for i in values], - dtype=np.float64) + return np.array([[i.real, i.imag] for i in values], dtype=np.float64) def load_multi(multi): # load a previously parsed multiline @@ -236,29 +231,28 @@ def load_multi(multi): def load_arc(svg_arc): # load an SVG arc into a trimesh arc - points = complex_to_float([svg_arc.start, - svg_arc.point(0.5), - svg_arc.end]) + points = complex_to_float([svg_arc.start, svg_arc.point(0.5), svg_arc.end]) # create an arc from the now numpy points - arc = Arc(points=np.arange(3) + counts[name], - # we may have monkey-patched the entity to - # indicate that it is a closed circle - closed=getattr(svg_arc, 'closed', False)) + arc = Arc( + points=np.arange(3) + counts[name], + # we may have monkey-patched the entity to + # indicate that it is a closed circle + closed=getattr(svg_arc, "closed", False), + ) return arc, points def load_quadratic(svg_quadratic): # load a quadratic bezier spline - points = complex_to_float([svg_quadratic.start, - svg_quadratic.control, - svg_quadratic.end]) + points = complex_to_float( + [svg_quadratic.start, svg_quadratic.control, svg_quadratic.end] + ) return Bezier(points=np.arange(3) + counts[name]), points def load_cubic(svg_cubic): # load a cubic bezier spline - points = complex_to_float([svg_cubic.start, - svg_cubic.control1, - 
svg_cubic.control2, - svg_cubic.end]) + points = complex_to_float( + [svg_cubic.start, svg_cubic.control1, svg_cubic.control2, svg_cubic.end] + ) return Bezier(np.arange(4) + counts[name]), points class MultiLine: @@ -266,22 +260,21 @@ class MultiLine: def __init__(self, lines): if tol.strict: # in unit tests make sure we only have lines - assert all(type(L).__name__ in ('Line', 'Close') - for L in lines) + assert all(type(L).__name__ in ("Line", "Close") for L in lines) # get the starting point of every line points = [L.start for L in lines] # append the endpoint points.append(lines[-1].end) # convert to (n, 2) float points - self.points = np.array([[i.real, i.imag] - for i in points], - dtype=np.float64) + self.points = np.array([[i.real, i.imag] for i in points], dtype=np.float64) # load functions for each entity - loaders = {'Arc': load_arc, - 'MultiLine': load_multi, - 'CubicBezier': load_cubic, - 'QuadraticBezier': load_quadratic} + loaders = { + "Arc": load_arc, + "MultiLine": load_multi, + "CubicBezier": load_cubic, + "QuadraticBezier": load_quadratic, + } entities = collections.defaultdict(list) vertices = collections.defaultdict(list) @@ -289,14 +282,14 @@ def __init__(self, lines): for attrib, matrix in paths: # the path string is stored under `d` - path_string = attrib.get('d', '') + path_string = attrib.get("d", "") if len(path_string) == 0: - log.debug('empty path string!') + log.debug("empty path string!") continue # get the name of the geometry if trimesh specified it # note that the get will by default return `None` - name = _decode(attrib.get(_ns + 'name')) + name = _decode(attrib.get(_ns + "name")) # get parsed entities from svg.path raw = np.array(list(parse_path(path_string))) @@ -305,19 +298,16 @@ def __init__(self, lines): continue # create an integer code for entities we can combine - kinds_lookup = {'Line': 1, 'Close': 1, 'Arc': 2} + kinds_lookup = {"Line": 1, "Close": 1, "Arc": 2} # get a code for each entity we parsed - kinds = np.array([kinds_lookup.get(type(i).__name__, 0) - for i in raw], dtype=int) + kinds = np.array([kinds_lookup.get(type(i).__name__, 0) for i in raw], dtype=int) # find groups of consecutive entities so we can combine - blocks = grouping.blocks( - kinds, min_len=1, only_nonzero=False) + blocks = grouping.blocks(kinds, min_len=1, only_nonzero=False) if tol.strict: # in unit tests make sure we didn't lose any entities - assert util.allclose(np.hstack(blocks), - np.arange(len(raw))) + assert util.allclose(np.hstack(blocks), np.arange(len(raw))) # Combine consecutive entities that can be represented # more concisely as a single trimesh entity. 
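# --- illustrative sketch, not part of the patch ---
# `grouping.blocks` returns index runs of consecutive identical codes,
# which is what lets a parsed path of Line, Line, Arc, Arc, CubicBezier
# (coded 1, 1, 2, 2, 0) collapse into one MultiLine, one candidate circle
# and one Bezier; the `kinds` array is made-up example data.
import numpy as np
from trimesh import grouping

kinds = np.array([1, 1, 2, 2, 0])
blocks = grouping.blocks(kinds, min_len=1, only_nonzero=False)
# -> [array([0, 1]), array([2, 3]), array([4])]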
@@ -325,23 +315,30 @@ def __init__(self, lines): for b in blocks: chunk = raw[b] current = type(raw[b[0]]).__name__ - if current in ('Line', 'Close'): + if current in ("Line", "Close"): # if entity consists of lines add a multiline parsed.append(MultiLine(chunk)) - elif len(b) > 1 and current == 'Arc': + elif len(b) > 1 and current == "Arc": # if we have multiple arcs check to see if they # actually represent a single closed circle # get a single array with the relevant arc points - verts = np.array([[a.start.real, - a.start.imag, - a.end.real, - a.end.imag, - a.center.real, - a.center.imag, - a.radius.real, - a.radius.imag, - a.rotation] for a in chunk], - dtype=np.float64) + verts = np.array( + [ + [ + a.start.real, + a.start.imag, + a.end.real, + a.end.imag, + a.center.real, + a.center.imag, + a.radius.real, + a.radius.imag, + a.rotation, + ] + for a in chunk + ], + dtype=np.float64, + ) # all arcs share the same center radius and rotation closed = False if verts[:, 4:].ptp(axis=0).mean() < 1e-3: @@ -367,7 +364,8 @@ def __init__(self, lines): entity_meta = { k.lstrip(_ns): _decode(v) for k, v in attrib.items() - if k[1:].startswith(_ns_url)} + if k[1:].startswith(_ns_url) + } except BaseException: entity_meta = {} @@ -386,13 +384,14 @@ def __init__(self, lines): counts[name] += len(v) if len(vertices) == 0: - return {'vertices': [], 'entities': []} - - geoms = {name: {'vertices': np.vstack(v), - 'entities': entities[name]} - for name, v in vertices.items()} - if len(geoms) > 1 or force == 'Scene': - kwargs = {'geometry': geoms} + return {"vertices": [], "entities": []} + + geoms = { + name: {"vertices": np.vstack(v), "entities": entities[name]} + for name, v in vertices.items() + } + if len(geoms) > 1 or force == "Scene": + kwargs = {"geometry": geoms} else: # return a single Path2D kwargs = next(iter(geoms.values())) @@ -400,11 +399,7 @@ def __init__(self, lines): return kwargs -def _entities_to_str(entities, - vertices, - name=None, - digits=None, - only_layers=None): +def _entities_to_str(entities, vertices, name=None, digits=None, only_layers=None): """ Convert the entities of a path to path strings. 
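# --- illustrative note, not part of the patch ---
# The format strings built below emit standard SVG path commands: one
# absolute move-to followed by line-to segments, so a unit square
# discretized to five points would render (with digits=1) roughly as
#   "M0.0,0.0L1.0,0.0L1.0,1.0L0.0,1.0L0.0,0.0"
# while closed arcs use the two-semicircle form produced by `temp_circle`.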
@@ -427,17 +422,19 @@ def _entities_to_str(entities, points = vertices.copy() # generate a format string with the requested digits - temp_digits = f'0.{int(digits)}f' + temp_digits = f"0.{int(digits)}f" # generate a format string for circles as two arc segments - temp_circle = ('M {x:DI},{y:DI}a{r:DI},{r:DI},0,1,0,{d:DI},' + - '0a{r:DI},{r:DI},0,1,0,-{d:DI},0Z').replace('DI', temp_digits) + temp_circle = ( + "M {x:DI},{y:DI}a{r:DI},{r:DI},0,1,0,{d:DI}," + "0a{r:DI},{r:DI},0,1,0,-{d:DI},0Z" + ).replace("DI", temp_digits) # generate a format string for an absolute move-to command - temp_move = 'M{:DI},{:DI}'.replace('DI', temp_digits) + temp_move = "M{:DI},{:DI}".replace("DI", temp_digits) # generate a format string for an absolute-line command - temp_line = 'L{:DI},{:DI}'.replace('DI', temp_digits) + temp_line = "L{:DI},{:DI}".replace("DI", temp_digits) # generate a format string for a single arc - temp_arc = 'M{SX:DI} {SY:DI}A{R},{R} 0 {L:d},{S:d} {EX:DI},{EY:DI}'.replace( - 'DI', temp_digits) + temp_arc = "M{SX:DI} {SY:DI}A{R},{R} 0 {L:d},{S:d} {EX:DI},{EY:DI}".replace( + "DI", temp_digits + ) def svg_arc(arc): """ @@ -446,26 +443,25 @@ def svg_arc(arc): sweep flag: direction (cw/ccw) """ vertices = points[arc.points] - info = arc_center( - vertices, return_normal=False, return_angle=True) + info = arc_center(vertices, return_normal=False, return_angle=True) C, R, angle = info.center, info.radius, info.span if arc.closed: - return temp_circle.format(x=C[0] - R, - y=C[1], - r=R, - d=2.0 * R) + return temp_circle.format(x=C[0] - R, y=C[1], r=R, d=2.0 * R) vertex_start, vertex_mid, vertex_end = vertices large_flag = int(angle > np.pi) - sweep_flag = int(np.cross(vertex_mid - vertex_start, - vertex_end - vertex_start) > 0.0) - return temp_arc.format(SX=vertex_start[0], - SY=vertex_start[1], - L=large_flag, - S=sweep_flag, - EX=vertex_end[0], - EY=vertex_end[1], - R=R) + sweep_flag = int( + np.cross(vertex_mid - vertex_start, vertex_end - vertex_start) > 0.0 + ) + return temp_arc.format( + SX=vertex_start[0], + SY=vertex_start[1], + L=large_flag, + S=sweep_flag, + EX=vertex_end[0], + EY=vertex_end[1], + R=R, + ) def svg_discrete(entity): """ @@ -475,10 +471,11 @@ def svg_discrete(entity): discrete = entity.discrete(points) # if entity contains no geometry return if len(discrete) == 0: - return '' + return "" # the format string for the SVG path result = (temp_move + (temp_line * (len(discrete) - 1))).format( - *discrete.reshape(-1)) + *discrete.reshape(-1) + ) return result # tuples of (metadata, path string) @@ -488,7 +485,7 @@ def svg_discrete(entity): if only_layers is not None and entity.layer not in only_layers: continue # check the class name of the entity - if entity.__class__.__name__ == 'Arc': + if entity.__class__.__name__ == "Arc": # export the exact version of the entity path_string = svg_arc(entity) else: @@ -496,16 +493,12 @@ def svg_discrete(entity): path_string = svg_discrete(entity) meta = deepcopy(entity.metadata) if name is not None: - meta['name'] = name + meta["name"] = name pairs.append((meta, path_string)) return pairs -def export_svg(drawing, - return_path=False, - only_layers=None, - digits=None, - **kwargs): +def export_svg(drawing, return_path=False, only_layers=None, digits=None, **kwargs): """ Export a Path2D object into an SVG file. 
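# --- usage sketch, not part of the patch ---
# assuming `drawing` is an already-loaded Path2D: `export_svg(drawing)`
# returns the full XML document, while `return_path=True` skips the XML
# wrapper and returns only the concatenated path strings.
xml_text = export_svg(drawing)
d_attr = export_svg(drawing, return_path=True)
with open("drawing.svg", "w") as f:
    f.write(xml_text)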
@@ -526,42 +519,46 @@ def export_svg(drawing, XML formatted SVG, or path string """ # collect custom attributes for the overall export - attribs = {'class': type(drawing).__name__} + attribs = {"class": type(drawing).__name__} - if util.is_instance_named(drawing, 'Scene'): + if util.is_instance_named(drawing, "Scene"): pairs = [] geom_meta = {} for name, geom in drawing.geometry.items(): - if not util.is_instance_named(geom, 'Path2D'): + if not util.is_instance_named(geom, "Path2D"): continue geom_meta[name] = geom.metadata # a pair of (metadata, path string) - pairs.extend(_entities_to_str( - entities=geom.entities, - vertices=geom.vertices, - name=name, - digits=digits, - only_layers=only_layers)) + pairs.extend( + _entities_to_str( + entities=geom.entities, + vertices=geom.vertices, + name=name, + digits=digits, + only_layers=only_layers, + ) + ) if len(geom_meta) > 0: # encode the whole metadata bundle here to avoid # polluting the file with a ton of loose attribs - attribs['metadata_geometry'] = _encode(geom_meta) - elif util.is_instance_named(drawing, 'Path2D'): + attribs["metadata_geometry"] = _encode(geom_meta) + elif util.is_instance_named(drawing, "Path2D"): pairs = _entities_to_str( entities=drawing.entities, vertices=drawing.vertices, digits=digits, - only_layers=only_layers) + only_layers=only_layers, + ) else: - raise ValueError('drawing must be Scene or Path2D object!') + raise ValueError("drawing must be Scene or Path2D object!") # return path string without XML wrapping if return_path: - return ' '.join(v[1] for v in pairs) + return " ".join(v[1] for v in pairs) # fetch the export template for the base SVG file - template_svg = resources.get('templates/base.svg') + template_svg = resources.get("templates/base.svg") elements = [] for meta, path_string in pairs: @@ -569,25 +566,27 @@ def export_svg(drawing, elements.append(f'') # format as XML - if 'stroke_width' in kwargs: - stroke_width = float(kwargs['stroke_width']) + if "stroke_width" in kwargs: + stroke_width = float(kwargs["stroke_width"]) else: # set stroke to something OK looking stroke_width = drawing.extents.max() / 800.0 try: # store metadata in XML as JSON -_- - attribs['metadata'] = _encode(drawing.metadata) + attribs["metadata"] = _encode(drawing.metadata) except BaseException: # log failed metadata encoding - log.debug('failed to encode', exc_info=True) - - subs = {'elements': '\n'.join(elements), - 'min_x': drawing.bounds[0][0], - 'min_y': drawing.bounds[0][1], - 'width': drawing.extents[0], - 'height': drawing.extents[1], - 'stroke_width': stroke_width, - 'attribs': _format_attrib(attribs)} + log.debug("failed to encode", exc_info=True) + + subs = { + "elements": "\n".join(elements), + "min_x": drawing.bounds[0][0], + "min_y": drawing.bounds[0][1], + "width": drawing.extents[0], + "height": drawing.extents[1], + "stroke_width": stroke_width, + "attribs": _format_attrib(attribs), + } return template_svg.format(**subs) @@ -601,10 +600,11 @@ def _format_attrib(attrib): Bag of keys and values. 
""" bag = {k: _encode(v) for k, v in attrib.items()} - return '\n'.join(f'{_ns_name}:{k}="{v}"' - for k, v in bag.items() - if len(k) > 0 and v is not None - and len(v) > 0) + return "\n".join( + f'{_ns_name}:{k}="{v}"' + for k, v in bag.items() + if len(k) > 0 and v is not None and len(v) > 0 + ) def _encode(stuff): @@ -623,10 +623,13 @@ def _encode(stuff): """ if util.is_string(stuff) and '"' not in stuff: return stuff - pack = base64.urlsafe_b64encode(jsonify( - {k: v for k, v in stuff.items() - if not k.startswith('_')}, separators=(',', ':')).encode('utf-8')) - result = 'base64,' + util.decode_text(pack) + pack = base64.urlsafe_b64encode( + jsonify( + {k: v for k, v in stuff.items() if not k.startswith("_")}, + separators=(",", ":"), + ).encode("utf-8") + ) + result = "base64," + util.decode_text(pack) if tol.strict: # make sure we haven't broken the things _deep_same(stuff, _decode(result)) @@ -706,7 +709,8 @@ def _decode(bag): if bag is None: return text = util.decode_text(bag) - if text.startswith('base64,'): - return json.loads(base64.urlsafe_b64decode( - text[7:].encode('utf-8')).decode('utf-8')) + if text.startswith("base64,"): + return json.loads( + base64.urlsafe_b64decode(text[7:].encode("utf-8")).decode("utf-8") + ) return text diff --git a/trimesh/path/intersections.py b/trimesh/path/intersections.py index fd70c7a02..d0f24259e 100644 --- a/trimesh/path/intersections.py +++ b/trimesh/path/intersections.py @@ -4,9 +4,7 @@ from ..constants import tol_path as tol -def line_line(origins, - directions, - plane_normal=None): +def line_line(origins, directions, plane_normal=None): """ Find the intersection between two lines. Uses terminology from: @@ -68,9 +66,8 @@ def line_line(origins, return False, None # value of parameter s where intersection occurs - s_I = (np.dot(-v_perp, w) / - np.dot(v_perp, u)) + s_I = np.dot(-v_perp, w) / np.dot(v_perp, u) # plug back into the equation of the line to find the point intersection = p_0 + s_I * u - return True, intersection[:(3 - is_2D)] + return True, intersection[: (3 - is_2D)] diff --git a/trimesh/path/packing.py b/trimesh/path/packing.py index fe9b194e2..ceb6c123b 100644 --- a/trimesh/path/packing.py +++ b/trimesh/path/packing.py @@ -4,6 +4,8 @@ Pack rectangular regions onto larger rectangular regions. """ +from typing import Optional + import numpy as np from ..constants import log, tol @@ -158,7 +160,7 @@ def _roll(a, count): return np.concatenate([a[-count:], a[:-count]]) -def rectangles_single(extents, size=None, shuffle=False, rotate=True): +def rectangles_single(extents, size=None, shuffle=False, rotate=True, random=None): """ Execute a single insertion order of smaller rectangles onto a larger rectangle using a binary space partition tree. @@ -198,8 +200,11 @@ def rectangles_single(extents, size=None, shuffle=False, rotate=True): order = np.argsort(extents.max(axis=1))[::-1] if shuffle: - # reorder with permutations - order = np.random.permutation(order) + if random is not None: + order = random.permutation(order) + else: + # reorder with permutations + order = np.random.permutation(order) if size is None: # if no bounds are passed start it with the size of a large @@ -407,10 +412,11 @@ def rectangles( extents, size=None, density_escape=0.99, - spacing=0.0, + spacing=None, iterations=50, rotate=True, quanta=None, + seed=None, ): """ Run multiple iterations of rectangle packing, this is the @@ -426,13 +432,15 @@ def rectangles( density_escape : float Exit early if rectangular density is above this threshold. 
spacing : float - Distance to allow between rect + Distance to allow between rectangles iterations : int Number of iterations to run rotate : bool Allow right angle rotations or not. quanta : None or float Discrete "snap" interval. + seed + If deterministic results are needed seed the RNG here. Returns --------- @@ -444,8 +452,10 @@ def rectangles( # copy extents and make sure they are floats extents = np.array(extents, dtype=np.float64) dim = extents.shape[1] - # add on any requested spacing - extents += spacing * 2.0 + + if spacing is not None: + # add on any requested spacing + extents += spacing * 2.0 # hyper-volume: area in 2D, volume in 3D, party in 4D area = np.prod(extents, axis=1) @@ -454,11 +464,16 @@ def rectangles( # how many rect were inserted best_count = 0 + if seed is None: + random = None + else: + random = np.random.default_rng(seed=seed) + for i in range(iterations): # run a single insertion order # don't shuffle the first run, shuffle subsequent runs bounds, insert = rectangles_single( - extents=extents, size=size, shuffle=(i != 0), rotate=rotate + extents=extents, size=size, shuffle=(i != 0), rotate=rotate, random=random ) count = insert.sum() @@ -482,16 +497,23 @@ def rectangles( if density > density_escape and insert.all(): break - if spacing > 1e-12: + if spacing is not None: # shrink the bounds by spacing result[0] += [[[spacing], [-spacing]]] - log.debug(f"packed with density {best_density:0.5f}") + log.debug(f"{iterations} iterations packed with density {best_density:0.3f}") return result -def images(images, power_resize: bool = False, deduplicate: bool = False): +def images( + images, + power_resize: bool = False, + deduplicate: bool = False, + iterations: Optional[int] = 50, + seed: Optional[int] = None, + spacing: Optional[float] = None, +): """ Pack a list of images and return result and offsets. @@ -504,6 +526,8 @@ def images(images, power_resize: bool = False, deduplicate: bool = False): power of two? Not every GPU supports materials that aren't a power of two size. deduplicate + Should images that have identical hashes be inserted + more than once? 
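A usage sketch for the expanded `images` signature, assuming Pillow is installed:

```python
from PIL import Image

from trimesh.path import packing

# differently sized images to pack into a single atlas
images = [Image.new("RGB", size) for size in ((256, 256), (128, 512), (64, 64))]

# `power_resize` rounds the atlas up to a power-of-two size and
# `seed` makes the resulting layout reproducible between runs
atlas, offsets = packing.images(images, power_resize=True, seed=42)
```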
      Returns
      -----------
@@ -520,20 +544,38 @@ def images(images, power_resize: bool = False, deduplicate: bool = False):
             [hash(i.tobytes()) for i in images], return_index=True, return_inverse=True
         )
         # use the number of pixels as the rectangle size
-        bounds, insert = rectangles(extents=[images[i].size for i in index], rotate=False)
+        bounds, insert = rectangles(
+            extents=[images[i].size for i in index],
+            rotate=False,
+            iterations=iterations,
+            seed=seed,
+            spacing=spacing,
+        )
         # really should have inserted all the rect
         assert insert.all()
-        # re-index back to original indexes
+        # re-index bounds back to original indexes
         bounds = bounds[inverse]
+        assert np.allclose(bounds.ptp(axis=1), [i.size for i in images])
     else:
         # use the number of pixels as the rectangle size
-        bounds, insert = rectangles(extents=[i.size for i in images], rotate=False)
+        bounds, insert = rectangles(
+            extents=[i.size for i in images],
+            rotate=False,
+            iterations=iterations,
+            seed=seed,
+            spacing=spacing,
+        )
         # really should have inserted all the rect
         assert insert.all()

+    if spacing is None:
+        spacing = 0
+    else:
+        spacing = int(spacing)
+
     # offsets should be an integer multiple of pixels
     offset = bounds[:, 0].round().astype(int)
-    extents = bounds.reshape((-1, 2)).ptp(axis=0)
+    extents = bounds.reshape((-1, 2)).ptp(axis=0) + (spacing * 2)
     size = extents.round().astype(int)
     if power_resize:
         # round up all dimensions to powers of 2
@@ -541,8 +583,14 @@ def images(images, power_resize: bool = False, deduplicate: bool = False):
     # create the image in the mode of the first image
     result = Image.new(images[0].mode, tuple(size))
+
+    done = set()
     # paste each image into the result
     for img, off in zip(images, offset):
+        if tuple(off) in done:
+            continue
+        else:
+            done.add(tuple(off))
         # box is upper left corner
         corner = (off[0], size[1] - img.size[1] - off[1])
         result.paste(img, box=corner)
diff --git a/trimesh/path/path.py b/trimesh/path/path.py
index 20c15b488..b10d8ceb0 100644
--- a/trimesh/path/path.py
+++ b/trimesh/path/path.py
@@ -65,13 +65,15 @@ class Path(parent.Geometry):
       simple vertex array is applied to the entity.
     """

-    def __init__(self,
-                 entities=None,
-                 vertices=None,
-                 metadata=None,
-                 process=True,
-                 colors=None,
-                 **kwargs):
+    def __init__(
+        self,
+        entities=None,
+        vertices=None,
+        metadata=None,
+        process=True,
+        colors=None,
+        **kwargs,
+    ):
         """
         Instantiate a path object.

@@ -94,12 +96,11 @@ def __init__(self,
         self.colors = colors
         # collect metadata into new dictionary
         self.metadata = {}
-        if metadata.__class__.__name__ == 'dict':
+        if metadata.__class__.__name__ == "dict":
             self.metadata.update(metadata)

         # cache will dump whenever self.crc changes
-        self._cache = caching.Cache(
-            id_function=self.__hash__)
+        self._cache = caching.Cache(id_function=self.__hash__)

         if process:
             # literally nothing will work if vertices
@@ -110,10 +111,9 @@ def __repr__(self):
         """
         Print a quick summary of the number of vertices and entities.
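For orientation before the `Path` constructor changes below, a minimal sketch of building a `Path2D` by hand with the same keyword arguments:

```python
import numpy as np

from trimesh.path import Path2D
from trimesh.path.entities import Line

# a unit square from four Line entities referencing shared vertices
vertices = np.array([[0, 0], [1, 0], [1, 1], [0, 1]], dtype=np.float64)
entities = [Line([0, 1]), Line([1, 2]), Line([2, 3]), Line([3, 0])]
square = Path2D(entities=entities, vertices=vertices)
assert square.is_closed
```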
""" - return ''.format( - type(self).__name__, - self.vertices.shape, - len(self.entities)) + return "".format( + type(self).__name__, self.vertices.shape, len(self.entities) + ) def process(self): """ @@ -141,7 +141,7 @@ def colors(self): colors = np.array([to_rgba(c) for c in raw]) # don't allow parts of the color array to be written - colors.flags['WRITEABLE'] = False + colors.flags["WRITEABLE"] = False return colors @colors.setter @@ -160,7 +160,7 @@ def colors(self, values): # make sure colors are RGBA colors = to_rgba(values) if len(colors) != len(self.entities): - raise ValueError('colors must be per-entity!') + raise ValueError("colors must be per-entity!") # otherwise assign each color to the entity for c, e in zip(colors, self.entities): e.color = c @@ -171,8 +171,7 @@ def vertices(self): @vertices.setter def vertices(self, values): - self._vertices = caching.tracked_array( - values, dtype=np.float64) + self._vertices = caching.tracked_array(values, dtype=np.float64) @property def entities(self): @@ -216,11 +215,11 @@ def __hash__(self): Appended hashes """ # get the hash of the trackedarray vertices - hashable = [hex(self.vertices.__hash__()).encode('utf-8')] + hashable = [hex(self.vertices.__hash__()).encode("utf-8")] # get the bytes for each entity hashable.extend(e._bytes() for e in self.entities) # hash the combined result - return caching.hash_fast(b''.join(hashable)) + return caching.hash_fast(b"".join(hashable)) @caching.cache_decorator def paths(self): @@ -232,8 +231,7 @@ def paths(self): paths : (n,) sequence of (*,) int Referencing self.entities """ - paths = traversal.closed_paths( - self.entities, self.vertices) + paths = traversal.closed_paths(self.entities, self.vertices) return paths @caching.cache_decorator @@ -250,8 +248,7 @@ def dangling(self): return np.arange(len(self.entities)) else: included = np.hstack(self.paths) - dangling = np.setdiff1d(np.arange(len(self.entities)), - included) + dangling = np.setdiff1d(np.arange(len(self.entities)), included) return dangling @caching.cache_decorator @@ -279,7 +276,7 @@ def scale(self): Approximate size of the world holding this path """ # use vertices peak-peak rather than exact extents - scale = float((self.vertices.ptp(axis=0) ** 2).sum() ** .5) + scale = float((self.vertices.ptp(axis=0) ** 2).sum() ** 0.5) return scale @caching.cache_decorator @@ -292,8 +289,7 @@ def length(self): length : float Summed length of every entity """ - length = float(sum(i.length(self.vertices) - for i in self.entities)) + length = float(sum(i.length(self.vertices) for i in self.entities)) return length @caching.cache_decorator @@ -310,16 +306,14 @@ def bounds(self): # some entities (aka 3- point Arc) have bounds that can't # be generated from just bound box of vertices - points = np.array([e.bounds(self.vertices) - for e in self.entities], - dtype=np.float64) + points = np.array( + [e.bounds(self.vertices) for e in self.entities], dtype=np.float64 + ) # flatten bound extrema into (n, dimension) array points = points.reshape((-1, self.vertices.shape[1])) # get the max and min of all bounds - return np.array([points.min(axis=0), - points.max(axis=0)], - dtype=np.float64) + return np.array([points.min(axis=0), points.max(axis=0)], dtype=np.float64) @caching.cache_decorator def centroid(self): @@ -357,14 +351,14 @@ def units(self): units : str Current unit system """ - if 'units' in self.metadata: - return self.metadata['units'] + if "units" in self.metadata: + return self.metadata["units"] else: return None @units.setter def units(self, 
units): - self.metadata['units'] = units + self.metadata["units"] = units def convert_units(self, desired, guess=False): """ @@ -377,9 +371,7 @@ def convert_units(self, desired, guess=False): guess : bool If True will attempt to guess units """ - units._convert_units(self, - desired=desired, - guess=guess) + units._convert_units(self, desired=desired, guess=guess) def explode(self): """ @@ -417,8 +409,7 @@ def is_closed(self): closed : bool Every entity is connected at its ends """ - closed = all(i == 2 for i in - dict(self.vertex_graph.degree()).values()) + closed = all(i == 2 for i in dict(self.vertex_graph.degree()).values()) return closed @@ -473,7 +464,7 @@ def apply_transform(self, transform): transform = np.asanyarray(transform, dtype=np.float64) if transform.shape != (dimension + 1, dimension + 1): - raise ValueError('transform is incorrect shape!') + raise ValueError("transform is incorrect shape!") elif np.abs(transform - np.eye(dimension + 1)).max() < 1e-8: # if we've been passed an identity matrix do nothing return self @@ -483,28 +474,29 @@ def apply_transform(self, transform): # new cache to transfer items cache = {} # apply transform to discretized paths - if 'discrete' in self._cache.cache: - cache['discrete'] = [tf.transform_points( - d, matrix=transform) for d in self.discrete] + if "discrete" in self._cache.cache: + cache["discrete"] = [ + tf.transform_points(d, matrix=transform) for d in self.discrete + ] # things we can just straight up copy # as they are topological not geometric - for key in ['root', - 'paths', - 'path_valid', - 'dangling', - 'vertex_graph', - 'enclosure', - 'enclosure_shell', - 'enclosure_directed']: + for key in [ + "root", + "paths", + "path_valid", + "dangling", + "vertex_graph", + "enclosure", + "enclosure_shell", + "enclosure_directed", + ]: # if they're in cache save them from the purge if key in self._cache.cache: cache[key] = self._cache.cache[key] # transform vertices in place - self.vertices = tf.transform_points( - self.vertices, - matrix=transform) + self.vertices = tf.transform_points(self.vertices, matrix=transform) # explicitly clear the cache self._cache.clear() self._cache.id_set() @@ -556,12 +548,9 @@ def merge_vertices(self, digits=None): if len(self.vertices) == 0: return if digits is None: - digits = util.decimal_to_digits( - tol.merge * self.scale, - min_digits=1) + digits = util.decimal_to_digits(tol.merge * self.scale, min_digits=1) - unique, inverse = grouping.unique_rows( - self.vertices, digits=digits) + unique, inverse = grouping.unique_rows(self.vertices, digits=digits) self.vertices = self.vertices[unique] entities_ok = np.ones(len(self.entities), dtype=bool) @@ -572,20 +561,20 @@ def merge_vertices(self, digits=None): # entities that don't need runs merged # don't screw up control- point- knot relationship - if kind in 'BSpline Bezier Text': + if kind in "BSpline Bezier Text": entity.points = inverse[entity.points] continue # if we merged duplicate vertices, the entity may # have multiple references to the same vertex points = grouping.merge_runs(inverse[entity.points]) # if there are three points and two are identical fix it - if kind == 'Line': + if kind == "Line": if len(points) == 3 and points[0] == points[-1]: points = points[:2] elif len(points) < 2: # lines need two or more vertices entities_ok[index] = False - elif kind == 'Arc' and len(points) != 3: + elif kind == "Arc" and len(points) != 3: # three point arcs need three points entities_ok[index] = False @@ -635,8 +624,7 @@ def remove_invalid(self): ---------- 
self.entities: shortened """ - valid = np.array([i.is_valid for i in self.entities], - dtype=bool) + valid = np.array([i.is_valid for i in self.entities], dtype=bool) self.entities = self.entities[valid] def remove_duplicate_entities(self): @@ -701,10 +689,9 @@ def discretize_path(self, path): discrete : (m, dimension) Linear segment path. """ - discrete = traversal.discretize_path(self.entities, - self.vertices, - path, - scale=self.scale) + discrete = traversal.discretize_path( + self.entities, self.vertices, path, scale=self.scale + ) return discrete @caching.cache_decorator @@ -718,14 +705,10 @@ def discrete(self): discrete : (len(self.paths),) A sequence of (m*, dimension) float """ - discrete = [self.discretize_path(i) - for i in self.paths] + discrete = [self.discretize_path(i) for i in self.paths] return discrete - def export(self, - file_obj=None, - file_type=None, - **kwargs): + def export(self, file_obj=None, file_type=None, **kwargs): """ Export the path to a file object or return data. @@ -741,13 +724,10 @@ def export(self, exported : bytes or str Exported as specified type """ - return export_path(self, - file_type=file_type, - file_obj=file_obj, - **kwargs) + return export_path(self, file_type=file_type, file_obj=file_obj, **kwargs) def to_dict(self): - export_dict = self.export(file_type='dict') + export_dict = self.export(file_type="dict") return export_dict def copy(self): @@ -768,13 +748,15 @@ def copy(self): metadata[key] = copy.deepcopy(self.metadata[key]) except RuntimeError: # multiple threads - log.warning(f'key {key} changed during copy') + log.warning(f"key {key} changed during copy") # copy the core data - copied = type(self)(entities=copy.deepcopy(self.entities), - vertices=copy.deepcopy(self.vertices), - metadata=metadata, - process=False) + copied = type(self)( + entities=copy.deepcopy(self.entities), + vertices=copy.deepcopy(self.vertices), + metadata=metadata, + process=False, + ) cache = {} # try to copy the cache over to the new object @@ -786,10 +768,10 @@ def copy(self): cache[k] = copy.deepcopy(self._cache.cache[k]) except RuntimeError: # if we have multiple threads this may error and is NBD - log.debug('unable to copy cache') + log.debug("unable to copy cache") except BaseException: # catch and log errors we weren't expecting - log.error('unable to copy cache', exc_info=True) + log.error("unable to copy cache", exc_info=True) copied._cache.cache = cache copied._cache.id_set() @@ -804,6 +786,7 @@ def scene(self): scene: trimesh.scene.Scene object containing current path """ from ..scene import Scene + scene = Scene(self) return scene @@ -830,14 +813,13 @@ class Path3D(Path): """ def _process_functions(self): - return [self.merge_vertices, - self.remove_duplicate_entities, - self.remove_unreferenced_vertices] + return [ + self.merge_vertices, + self.remove_duplicate_entities, + self.remove_unreferenced_vertices, + ] - def to_planar(self, - to_2D=None, - normal=None, - check=True): + def to_planar(self, to_2D=None, normal=None, check=True): """ Check to see if current vectors are all coplanar. 
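A round-trip sketch for `to_planar`, using only calls that appear in this patch (`load_path`, `to_3D`, `to_planar`) and shapely for the input:

```python
import trimesh
from shapely.geometry import Polygon

# lift a planar 2D path into 3D, then recover a 2D frame from it
path_3D = trimesh.load_path(Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])).to_3D()
path_2D, to_3D = path_3D.to_planar()

# `to_3D` is the (4, 4) homogeneous transform that undoes the projection
restored = path_2D.to_3D(transform=to_3D)
```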
@@ -882,20 +864,17 @@ def to_planar(self, N *= np.sign(np.dot(N, normal)) N = normal else: - log.debug( - f"passed normal not used: {normal.shape}") + log.debug(f"passed normal not used: {normal.shape}") # create a transform from fit plane to XY - to_2D = plane_transform(origin=C, - normal=N) + to_2D = plane_transform(origin=C, normal=N) # make sure we've extracted a transform to_2D = np.asanyarray(to_2D, dtype=np.float64) if to_2D.shape != (4, 4): - raise ValueError('unable to create transform!') + raise ValueError("unable to create transform!") # transform all vertices to 2D plane - flat = tf.transform_points(self.vertices, - to_2D) + flat = tf.transform_points(self.vertices, to_2D) # Z values of vertices which are referenced heights = flat[referenced][:, 2] @@ -904,7 +883,7 @@ def to_planar(self, # since Z is inconsistent set height to zero height = 0.0 if check: - raise ValueError('points are not flat!') + raise ValueError("points are not flat!") else: # if the points were planar store the height height = heights.mean() @@ -916,22 +895,23 @@ def to_planar(self, # exactly Z=0 adjust it so the returned transform does if np.abs(height) > tol.planar: # adjust to_3D transform by height - adjust = tf.translation_matrix( - [0, 0, height]) + adjust = tf.translation_matrix([0, 0, height]) # apply the height adjustment to_3D to_3D = np.dot(to_3D, adjust) # copy metadata to new object metadata = copy.deepcopy(self.metadata) # store transform we used to move it onto the plane - metadata['to_3D'] = to_3D + metadata["to_3D"] = to_3D # create the Path2D with the same entities # and XY values of vertices projected onto the plane - planar = Path2D(entities=copy.deepcopy(self.entities), - vertices=flat[:, :2], - metadata=metadata, - process=False) + planar = Path2D( + entities=copy.deepcopy(self.entities), + vertices=flat[:, :2], + metadata=metadata, + process=False, + ) return planar, to_3D @@ -961,9 +941,11 @@ def _process_functions(self): """ Return a list of functions to clean up a Path2D """ - return [self.merge_vertices, - self.remove_duplicate_entities, - self.remove_unreferenced_vertices] + return [ + self.merge_vertices, + self.remove_duplicate_entities, + self.remove_unreferenced_vertices, + ] def apply_obb(self): """ @@ -1003,17 +985,12 @@ def obb(self): obb : (3, 3) float Homogeneous transformation matrix """ - matrix = bounds.oriented_bounds_2D( - self.vertices[self.referenced_vertices])[0] + matrix = bounds.oriented_bounds_2D(self.vertices[self.referenced_vertices])[0] return matrix - def rasterize(self, - pitch=None, - origin=None, - resolution=None, - fill=True, - width=None, - **kwargs): + def rasterize( + self, pitch=None, origin=None, resolution=None, fill=True, width=None, **kwargs + ): """ Rasterize a Path2D object into a boolean image ("mode 1"). @@ -1035,12 +1012,14 @@ def rasterize(self, raster : PIL.Image object, mode 1 Rasterized version of closed regions. 
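A usage sketch for `rasterize`, assuming it fills in a default `origin` and `resolution` when only `pitch` is passed, as the `path/raster.py` hunk later in this section suggests:

```python
import trimesh
from shapely.geometry import box

# a 10 x 10 filled square rasterized at 10 pixels per unit
path = trimesh.load_path(box(0.0, 0.0, 10.0, 10.0))
image = path.rasterize(pitch=0.1, fill=True)
```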
""" - image = raster.rasterize(self, - pitch=pitch, - origin=origin, - resolution=resolution, - fill=fill, - width=width) + image = raster.rasterize( + self, + pitch=pitch, + origin=origin, + resolution=resolution, + fill=fill, + width=width, + ) return image def sample(self, count, **kwargs): @@ -1074,9 +1053,9 @@ def sample(self, count, **kwargs): elif len(poly) == 1: samples = polygons.sample(poly[0], count=count, **kwargs) else: - samples = util.vstack_empty([ - polygons.sample(i, count=count, **kwargs) - for i in poly]) + samples = util.vstack_empty( + [polygons.sample(i, count=count, **kwargs) for i in poly] + ) return samples @@ -1110,19 +1089,21 @@ def to_3D(self, transform=None): 3D version of current path """ # if there is a stored 'to_3D' transform in metadata use it - if transform is None and 'to_3D' in self.metadata: - transform = self.metadata['to_3D'] + if transform is None and "to_3D" in self.metadata: + transform = self.metadata["to_3D"] # copy vertices and stack with zeros from (n, 2) to (n, 3) - vertices = np.column_stack((copy.deepcopy(self.vertices), - np.zeros(len(self.vertices)))) + vertices = np.column_stack( + (copy.deepcopy(self.vertices), np.zeros(len(self.vertices))) + ) if transform is not None: - vertices = tf.transform_points(vertices, - transform) + vertices = tf.transform_points(vertices, transform) # make sure everything is deep copied - path_3D = Path3D(entities=copy.deepcopy(self.entities), - vertices=vertices, - metadata=copy.deepcopy(self.metadata)) + path_3D = Path3D( + entities=copy.deepcopy(self.entities), + vertices=vertices, + metadata=copy.deepcopy(self.metadata), + ) return path_3D @caching.cache_decorator @@ -1164,16 +1145,13 @@ def polygons_full(self): for i, root in enumerate(self.root): # a list of multiple Polygon objects that # are fully contained by the root curve - children = [closed[child] - for child in enclosure[root].keys()] + children = [closed[child] for child in enclosure[root].keys()] # all polygons_closed are CCW, so for interiors reverse them - holes = [np.array(p.exterior.coords)[::-1] - for p in children] + holes = [np.array(p.exterior.coords)[::-1] for p in children] # a single Polygon object shell = closed[root].exterior # create a polygon with interiors - full[i] = polygons.repair_invalid(Polygon(shell=shell, - holes=holes)) + full[i] = polygons.repair_invalid(Polygon(shell=shell, holes=holes)) # so we can use advanced indexing full = np.array(full) @@ -1217,8 +1195,10 @@ def extrude(self, height, **kwargs): mesh: trimesh object representing extruded polygon """ from ..primitives import Extrusion - result = [Extrusion(polygon=i, height=height, **kwargs) - for i in self.polygons_full] + + result = [ + Extrusion(polygon=i, height=height, **kwargs) for i in self.polygons_full + ] if len(result) == 1: return result[0] return result @@ -1278,11 +1258,11 @@ def medial_axis(self, resolution=None, clip=None): from .exchange.misc import edges_to_path # edges and vertices - edge_vert = [polygons.medial_axis(i, resolution, clip) - for i in self.polygons_full] + edge_vert = [ + polygons.medial_axis(i, resolution, clip) for i in self.polygons_full + ] # create a Path2D object for each region - medials = [Path2D(**edges_to_path( - edges=e, vertices=v)) for e, v in edge_vert] + medials = [Path2D(**edges_to_path(edges=e, vertices=v)) for e, v in edge_vert] # get a single Path2D of medial axis medial = concatenate(medials) @@ -1309,9 +1289,7 @@ def connected_paths(self, path_id, include_self=False): if len(self.root) == 1: path_ids = 
np.arange(len(self.polygons_closed)) else: - path_ids = list(nx.node_connected_component( - self.enclosure, - path_id)) + path_ids = list(nx.node_connected_component(self.enclosure, path_id)) if include_self: return np.array(path_ids) return np.setdiff1d(path_ids, [path_id]) @@ -1327,7 +1305,7 @@ def simplify(self, **kwargs): """ return simplify.simplify_basic(self, **kwargs) - def simplify_spline(self, smooth=.0002, verbose=False): + def simplify_spline(self, smooth=0.0002, verbose=False): """ Convert paths into b-splines. @@ -1343,9 +1321,7 @@ def simplify_spline(self, smooth=.0002, verbose=False): simplified : Path2D Discrete curves replaced with splines """ - return simplify.simplify_spline(self, - smooth=smooth, - verbose=verbose) + return simplify.simplify_spline(self, smooth=smooth, verbose=verbose) def split(self, **kwargs): """ @@ -1364,16 +1340,17 @@ def plot_discrete(self, show=False, annotations=True): Plot the closed curves of the path. """ import matplotlib.pyplot as plt + axis = plt.gca() - axis.set_aspect('equal', 'datalim') + axis.set_aspect("equal", "datalim") for i, points in enumerate(self.discrete): - color = ['g', 'k'][i in self.root] + color = ["g", "k"][i in self.root] axis.plot(*points.T, color=color) if annotations: for e in self.entities: - if not hasattr(e, 'plot'): + if not hasattr(e, "plot"): continue e.plot(self.vertices) @@ -1395,21 +1372,24 @@ def plot_entities(self, show=False, annotations=True, color=None): Override entity colors and make them all this color. """ import matplotlib.pyplot as plt + # keep plot axis scaled the same axis = plt.gca() - axis.set_aspect('equal', 'datalim') + axis.set_aspect("equal", "datalim") # hardcode a format for each entity type - eformat = {'Line0': {'color': 'g', 'linewidth': 1}, - 'Line1': {'color': 'y', 'linewidth': 1}, - 'Arc0': {'color': 'r', 'linewidth': 1}, - 'Arc1': {'color': 'b', 'linewidth': 1}, - 'Bezier0': {'color': 'k', 'linewidth': 1}, - 'Bezier1': {'color': 'k', 'linewidth': 1}, - 'BSpline0': {'color': 'm', 'linewidth': 1}, - 'BSpline1': {'color': 'm', 'linewidth': 1}} + eformat = { + "Line0": {"color": "g", "linewidth": 1}, + "Line1": {"color": "y", "linewidth": 1}, + "Arc0": {"color": "r", "linewidth": 1}, + "Arc1": {"color": "b", "linewidth": 1}, + "Bezier0": {"color": "k", "linewidth": 1}, + "Bezier1": {"color": "k", "linewidth": 1}, + "BSpline0": {"color": "m", "linewidth": 1}, + "BSpline1": {"color": "m", "linewidth": 1}, + } for entity in self.entities: # if the entity has it's own plot method use it - if annotations and hasattr(entity, 'plot'): + if annotations and hasattr(entity, "plot"): entity.plot(self.vertices) continue # otherwise plot the discrete curve @@ -1420,10 +1400,10 @@ def plot_entities(self, show=False, annotations=True, color=None): fmt = eformat[e_key].copy() if color is not None: # passed color will override other options - fmt['color'] = color - elif hasattr(entity, 'color'): + fmt["color"] = color + elif hasattr(entity, "color"): # if entity has specified color use it - fmt['color'] = entity.color + fmt["color"] = entity.color axis.plot(*discrete.T, **fmt) if show: plt.show() @@ -1458,15 +1438,17 @@ def identifier_hash(self): SHA256 hash of the identifier vector. 
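The property shown below quantizes the float identifier vector before digesting it, so small floating point noise does not change the hash; the same idea as a standalone sketch:

```python
from hashlib import sha256

import numpy as np

identifier = np.array([1.0000000001, 2.5, 1.0 / 3.0])

# snapping to a 1e-4 grid before hashing makes nearly identical
# float vectors produce the same digest
as_int = (identifier * 1e4).astype(np.int64)
digest = sha256(as_int.tobytes(order="C")).hexdigest()
```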
""" as_int = (self.identifier * 1e4).astype(np.int64) - return sha256(as_int.tobytes(order='C')).hexdigest() + return sha256(as_int.tobytes(order="C")).hexdigest() @property def identifier_md5(self): warnings.warn( - '`geom.identifier_md5` is deprecated and will ' + - 'be removed in October 2023: replace ' + - 'with `geom.identifier_hash`', - category=DeprecationWarning, stacklevel=2) + "`geom.identifier_md5` is deprecated and will " + + "be removed in October 2023: replace " + + "with `geom.identifier_hash`", + category=DeprecationWarning, + stacklevel=2, + ) return self.identifier_hash @property @@ -1478,9 +1460,7 @@ def path_valid(self): Indexes of self.paths self.polygons_closed which are valid polygons. """ - valid = np.array( - [i is not None for i in self.polygons_closed], - dtype=bool) + valid = np.array([i is not None for i in self.polygons_closed], dtype=bool) return valid @caching.cache_decorator @@ -1495,7 +1475,7 @@ def root(self): List of indexes """ populate = self.enclosure_directed # NOQA - return self._cache['root'] + return self._cache["root"] @caching.cache_decorator def enclosure(self): @@ -1523,7 +1503,7 @@ def enclosure_directed(self): contained by their parent node. """ root, enclosure = polygons.enclosure_tree(self.polygons_closed) - self._cache['root'] = root + self._cache["root"] = root return enclosure @caching.cache_decorator @@ -1537,8 +1517,7 @@ def enclosure_shell(self): corresponding : dict {index of self.paths of shell : [indexes of holes]} """ - pairs = [(r, self.connected_paths(r, include_self=False)) - for r in self.root] + pairs = [(r, self.connected_paths(r, include_self=False)) for r in self.root] # OrderedDict to maintain corresponding order corresponding = collections.OrderedDict(pairs) return corresponding diff --git a/trimesh/path/raster.py b/trimesh/path/raster.py index 5967d308e..18be312c0 100644 --- a/trimesh/path/raster.py +++ b/trimesh/path/raster.py @@ -11,6 +11,7 @@ from PIL import Image, ImageChops, ImageDraw except BaseException as E: from .. import exceptions + # re-raise the useful exception when called _handle = exceptions.ExceptionWrapper(E) Image = _handle @@ -18,12 +19,7 @@ ImageChops = _handle -def rasterize(path, - pitch=None, - origin=None, - resolution=None, - fill=True, - width=None): +def rasterize(path, pitch=None, origin=None, resolution=None, fill=True, width=None): """ Rasterize a Path2D object into a boolean image ("mode 1"). 
@@ -60,17 +56,14 @@ def rasterize(path, # if resolution is None make it larger than path if resolution is None: - span = np.vstack(( - path.bounds, origin)).ptp(axis=0) + span = np.vstack((path.bounds, origin)).ptp(axis=0) resolution = np.ceil(span / pitch) + 2 # get resolution as a (2,) int tuple - resolution = np.asanyarray(resolution, - dtype=np.int64) + resolution = np.asanyarray(resolution, dtype=np.int64) resolution = tuple(resolution.tolist()) # convert all discrete paths to pixel space - discrete = [((i - origin) / pitch).round().astype(np.int64) - for i in path.discrete] + discrete = [((i - origin) / pitch).round().astype(np.int64) for i in path.discrete] # the path indexes that are exteriors # needed to know what to fill/empty but expensive @@ -78,16 +71,14 @@ def rasterize(path, enclosure = path.enclosure_directed # draw the exteriors - result = Image.new(mode='1', size=resolution) + result = Image.new(mode="1", size=resolution) draw = ImageDraw.Draw(result) # if a width is specified draw the outline if width is not None: width = int(width) for coords in discrete: - draw.line(coords.flatten().tolist(), - fill=1, - width=width) + draw.line(coords.flatten().tolist(), fill=1, width=width) # if we are not filling the polygon exit if not fill: return result @@ -97,11 +88,9 @@ def rasterize(path, # and then go in as we progress for root in roots: # draw the exterior - draw.polygon(discrete[root].flatten().tolist(), - fill=1) + draw.polygon(discrete[root].flatten().tolist(), fill=1) # draw the interior children for child in enclosure[root]: - draw.polygon(discrete[child].flatten().tolist(), - fill=0) + draw.polygon(discrete[child].flatten().tolist(), fill=0) return result diff --git a/trimesh/path/repair.py b/trimesh/path/repair.py index 504a029fd..b558d22e9 100644 --- a/trimesh/path/repair.py +++ b/trimesh/path/repair.py @@ -11,7 +11,7 @@ from . 
import segments -def fill_gaps(path, distance=.025): +def fill_gaps(path, distance=0.025): """ For 3D line segments defined by two points, turn them in to an origin defined as the closest point along @@ -34,9 +34,7 @@ def fill_gaps(path, distance=.025): """ # find any vertex without degree 2 (connected to two things) - broken = np.array([ - k for k, d in dict(path.vertex_graph.degree()).items() - if d != 2]) + broken = np.array([k for k, d in dict(path.vertex_graph.degree()).items() if d != 2]) # if all vertices have correct connectivity, exit if len(broken) == 0: @@ -44,7 +42,7 @@ def fill_gaps(path, distance=.025): # first find broken vertices with distance tree = cKDTree(path.vertices[broken]) - pairs = tree.query_pairs(r=distance, output_type='ndarray') + pairs = tree.query_pairs(r=distance, output_type="ndarray") connect_seg = [] if len(pairs) > 0: @@ -62,8 +60,11 @@ def fill_gaps(path, distance=.025): broken_set = set(broken) # query end points set vs path.dangling to avoid having # to compute every single path and discrete curve - dangle = [i for i, e in enumerate(path.entities) if - len(broken_set.intersection(e.end_points)) > 0] + dangle = [ + i + for i, e in enumerate(path.entities) + if len(broken_set.intersection(e.end_points)) > 0 + ] segs = [] # mask for which entities to keep @@ -73,7 +74,7 @@ def fill_gaps(path, distance=.025): for entity_index in dangle: # only consider line entities - if path.entities[entity_index].__class__.__name__ != 'Line': + if path.entities[entity_index].__class__.__name__ != "Line": continue if line_class is None: @@ -89,8 +90,7 @@ def fill_gaps(path, distance=.025): keep[entity_index] = False # combine segments with connection segments - all_segs = util.vstack_empty((util.vstack_empty(segs), - connect_seg)) + all_segs = util.vstack_empty((util.vstack_empty(segs), connect_seg)) # go home early if len(all_segs) == 0: @@ -104,9 +104,7 @@ def fill_gaps(path, distance=.025): # add line segments in as line entities entities = [] for i in range(len(final_seg)): - entities.append( - line_class( - points=np.arange(2) + (i * 2) + len(path.vertices))) + entities.append(line_class(points=np.arange(2) + (i * 2) + len(path.vertices))) # replace entities with new entities path.entities = np.append(path.entities[keep], entities) diff --git a/trimesh/path/segments.py b/trimesh/path/segments.py index 7281ef60a..7ab3ebdee 100644 --- a/trimesh/path/segments.py +++ b/trimesh/path/segments.py @@ -34,8 +34,7 @@ def segments_to_parameters(segments): """ segments = np.asanyarray(segments, dtype=np.float64) if not util.is_shape(segments, (-1, 2, (2, 3))): - raise ValueError('incorrect segment shape!', - segments.shape) + raise ValueError("incorrect segment shape!", segments.shape) # make the initial origin one of the end points endpoint = segments[:, 0] @@ -51,8 +50,7 @@ def segments_to_parameters(segments): # parametric start and end of line segment parameters = np.column_stack((offset, offset + vectors_norm)) # make sure signs are consistent - vectors, signs = util.vector_hemisphere( - vectors, return_sign=True) + vectors, signs = util.vector_hemisphere(vectors, return_sign=True) parameters *= signs.reshape((-1, 1)) return origins, vectors, parameters @@ -83,16 +81,14 @@ def parameters_to_segments(origins, vectors, parameters): parameters = np.asanyarray(parameters, dtype=np.float64) # turn the segments into a reshapable 2D array - segments = np.hstack((origins + vectors * parameters[:, :1], - origins + vectors * parameters[:, 1:])) + segments = np.hstack( + (origins + 
vectors * parameters[:, :1], origins + vectors * parameters[:, 1:]) + ) return segments.reshape((-1, 2, origins.shape[1])) -def colinear_pairs(segments, - radius=.01, - angle=.01, - length=None): +def colinear_pairs(segments, radius=0.01, angle=0.01, length=None): """ Find pairs of segments which are colinear. @@ -126,15 +122,15 @@ def colinear_pairs(segments, tree = spatial.cKDTree(origins) # find origins closer than specified radius - pairs = tree.query_pairs(r=radius, output_type='ndarray') + pairs = tree.query_pairs(r=radius, output_type="ndarray") # calculate angles between pairs angles = geometry.vector_angle(vectors[pairs]) # angles can be within tolerance of 180 degrees or 0.0 degrees angle_ok = np.logical_or( - util.isclose(angles, np.pi, atol=angle), - util.isclose(angles, 0.0, atol=angle)) + util.isclose(angles, np.pi, atol=angle), util.isclose(angles, 0.0, atol=angle) + ) # apply angle threshold colinear = pairs[angle_ok] @@ -142,8 +138,7 @@ def colinear_pairs(segments, # if length is specified check endpoint proximity if length is not None: a, b = param[colinear.T] - distance = np.abs(np.column_stack( - [a[:, :1] - b, a[:, 1:] - b])).min(axis=1) + distance = np.abs(np.column_stack([a[:, :1] - b, a[:, 1:] - b])).min(axis=1) identical = distance < length # remove non- identical pairs colinear = colinear[identical] @@ -181,8 +176,7 @@ def split(segments, points, atol=1e-5): seg_flat = segments.reshape((-1, segments.shape[2])) # find the length of every segment - length = ((segments[:, 0, :] - - segments[:, 1, :]) ** 2).sum(axis=1) ** 0.5 + length = ((segments[:, 0, :] - segments[:, 1, :]) ** 2).sum(axis=1) ** 0.5 # a mask to remove segments we split at the end keep = np.ones(len(segments), dtype=bool) @@ -195,13 +189,13 @@ def split(segments, points, atol=1e-5): # by using scipy.spatial.distance.cdist here # find the distance from point to every segment endpoint - pair = ((seg_flat - p) ** 2).sum( - axis=1).reshape((-1, 2)) ** 0.5 + pair = ((seg_flat - p) ** 2).sum(axis=1).reshape((-1, 2)) ** 0.5 # point is on a segment if it is not on a vertex # and the sum length is equal to the actual segment length on_seg = np.logical_and( util.isclose(length, pair.sum(axis=1), atol=atol), - ~util.isclose(pair, 0.0, atol=atol).any(axis=1)) + ~util.isclose(pair, 0.0, atol=atol).any(axis=1), + ) # if we have any points on the segment split it in twain if on_seg.any(): @@ -238,8 +232,8 @@ def unique(segments, digits=5): # find segments as unique indexes so we can find duplicates inverse = grouping.unique_rows( - segments.reshape((-1, segments.shape[2])), - digits=digits)[1].reshape((-1, 2)) + segments.reshape((-1, segments.shape[2])), digits=digits + )[1].reshape((-1, 2)) # make sure rows are sorted inverse.sort(axis=1) # remove segments where both indexes are the same @@ -282,13 +276,11 @@ def overlap(origins, vectors, params): if tol.strict: # convert input to parameters before flipping # to make sure we didn't screw it up - truth = parameters_to_segments(origins, - vectors, - params) + truth = parameters_to_segments(origins, vectors, params) # this function only works on parallel lines dot = np.dot(*vectors) - assert np.isclose(np.abs(dot), 1.0, atol=.01) + assert np.isclose(np.abs(dot), 1.0, atol=0.01) # if two vectors are reversed if dot < 0.0: @@ -300,10 +292,7 @@ def overlap(origins, vectors, params): if tol.strict: # do a check to make sure our reversal didn't # inadvertently give us incorrect segments - assert np.allclose(truth, - parameters_to_segments(origins, - vectors, - params)) + 
assert np.allclose(truth, parameters_to_segments(origins, vectors, params)) # merge the parameter ranges ok, new_range = interval.intersection(*params) @@ -312,8 +301,9 @@ def overlap(origins, vectors, params): return 0.0, np.array([]) # create the overlapping segment pairs (2, 2, 3) - segments = np.array([o + v * new_range.reshape((-1, 1)) - for o, v in zip(origins, vectors)]) + segments = np.array( + [o + v * new_range.reshape((-1, 1)) for o, v in zip(origins, vectors)] + ) # get the length of the new range length = new_range.ptp() @@ -342,23 +332,19 @@ def extrude(segments, height, double_sided=False): """ segments = np.asanyarray(segments, dtype=np.float64) if not util.is_shape(segments, (-1, 2, 2)): - raise ValueError('segments shape incorrect') + raise ValueError("segments shape incorrect") # we are creating two vertices triangles for every 2D line segment # on the segments of the 2D triangulation vertices = np.tile(segments.reshape((-1, 2)), 2).reshape((-1, 2)) - vertices = np.column_stack((vertices, - np.tile([0, height, 0, height], - len(segments)))) - faces = np.tile([3, 1, 2, 2, 1, 0], - (len(segments), 1)) + vertices = np.column_stack((vertices, np.tile([0, height, 0, height], len(segments)))) + faces = np.tile([3, 1, 2, 2, 1, 0], (len(segments), 1)) faces += np.arange(len(segments)).reshape((-1, 1)) * 4 faces = faces.reshape((-1, 3)) if double_sided: # stack so they will render from the back - faces = np.vstack(( - faces, np.fliplr(faces))) + faces = np.vstack((faces, np.fliplr(faces))) return vertices, faces @@ -390,10 +376,7 @@ def length(segments, summed=True): return norms -def resample(segments, - maxlen, - return_index=False, - return_count=False): +def resample(segments, maxlen, return_index=False, return_count=False): """ Resample line segments until no segment is longer than maxlen. 
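A usage sketch for `resample`, checked with the `length` helper defined above; assumes the default return of just the resampled segments:

```python
import numpy as np

from trimesh.path import segments

# one 9-unit 3D segment split into pieces no longer than 1.0
seg = np.array([[[0.0, 0.0, 0.0], [9.0, 0.0, 0.0]]])
resampled = segments.resample(seg, maxlen=1.0)

# total length is preserved by the resample
assert np.isclose(segments.length(seg), segments.length(resampled))
```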
@@ -446,9 +429,9 @@ def resample(segments, # the vector for each incremental length increment = vec[mask] / split # stack the increment vector into the shape needed - v = (tile(increment, split + 1).reshape((-1, 3)) * - tile(np.arange(split + 1), - len(increment)).reshape((-1, 1))) + v = tile(increment, split + 1).reshape((-1, 3)) * tile( + np.arange(split + 1), len(increment) + ).reshape((-1, 1)) # stack the origin points correctly o = tile(pt1[mask], split + 1).reshape((-1, 3)) # now get each segment as an (split, 3) polyline @@ -462,9 +445,9 @@ def resample(segments, # get the original index from the mask index_original = np.nonzero(mask)[0].reshape((-1, 1)) # save one entry per split segment - index.append((np.ones((len(poly), split), - dtype=np.int64) * - index_original).ravel()) + index.append( + (np.ones((len(poly), split), dtype=np.int64) * index_original).ravel() + ) if tol.strict: # check to make sure every start and end point # from the reconstructed result corresponds @@ -472,18 +455,14 @@ def resample(segments, assert np.allclose(original[0], recon[0]) assert np.allclose(original[-1], recon[-1]) # make sure stack slicing was OK - assert np.allclose( - util.stack_lines(np.arange(split + 1)), - stacks[:split]) + assert np.allclose(util.stack_lines(np.arange(split + 1)), stacks[:split]) # stack into (n, 2, 3) segments result = [np.concatenate(result)] if tol.strict: # make sure resampled segments have the same length as input - assert np.isclose(length(segments), - length(result[0]), - atol=1e-3) + assert np.isclose(length(segments), length(result[0]), atol=1e-3) # stack additional return options if return_index: @@ -525,22 +504,21 @@ def to_svg(segments, digits=4, matrix=None, merge=True): """ segments = np.array(segments, copy=True) if not util.is_shape(segments, (-1, 2, 2)): - raise ValueError('only for (n, 2, 2) segments!') + raise ValueError("only for (n, 2, 2) segments!") # create the array to export # apply 2D transformation if passed if matrix is not None: segments = transformations.transform_points( - segments.reshape((-1, 2)), - matrix=matrix).reshape((-1, 2, 2)) + segments.reshape((-1, 2)), matrix=matrix + ).reshape((-1, 2, 2)) if merge: # remove duplicate and zero-length segments segments = unique(segments, digits=digits) # create the format string for a single line segment - base = 'M_ _L_ _'.replace( - '_', '{:0.' + str(int(digits)) + 'f}') + base = "M_ _L_ _".replace("_", "{:0." + str(int(digits)) + "f}") # create one large format string then apply points result = (base * len(segments)).format(*segments.ravel()) return result diff --git a/trimesh/path/simplify.py b/trimesh/path/simplify.py index 60b5f38ac..13ca517d7 100644 --- a/trimesh/path/simplify.py +++ b/trimesh/path/simplify.py @@ -10,11 +10,7 @@ from . 
import arc, entities -def fit_circle_check(points, - scale, - prior=None, - final=False, - verbose=False): +def fit_circle_check(points, scale, prior=None, final=False, verbose=False): """ Fit a circle, and reject the fit if: * the radius is larger than tol.radius_min*scale or tol.radius_max*scale @@ -54,14 +50,14 @@ def fit_circle_check(points, # check to make sure radius is between min and max allowed if not tol.radius_min < (R / scale) < tol.radius_max: if verbose: - log.debug('circle fit error: R %f', R / scale) + log.debug("circle fit error: R %f", R / scale) return None # check point radius error r_error = r_deviation / R if r_error > tol.radius_frac: if verbose: - log.debug('circle fit error: fit %s', str(r_error)) + log.debug("circle fit error: fit %s", str(r_error)) return None vectors = np.diff(points, axis=0) @@ -72,11 +68,11 @@ def fit_circle_check(points, angle = segment / R if (angle > tol.seg_angle).any(): if verbose: - log.debug('circle fit error: angle %s', str(angle)) + log.debug("circle fit error: angle %s", str(angle)) return None if final and (angle > tol.seg_angle_min).sum() < 3: - log.debug('final: angle %s', str(angle)) + log.debug("final: angle %s", str(angle)) return None # check segment length as a fraction of drawing scale @@ -84,12 +80,12 @@ def fit_circle_check(points, if (scaled > tol.seg_frac).any(): if verbose: - log.debug('circle fit error: segment %s', str(scaled)) + log.debug("circle fit error: segment %s", str(scaled)) return None # check to make sure the line segments on the ends are actually # tangent with the candidate circle fit - mid_pt = points[[0, -2]] + (vectors[[0, -1]] * .5) + mid_pt = points[[0, -2]] + (vectors[[0, -1]] * 0.5) radial = util.unitize(mid_pt - C) ends = util.unitize(vectors[[0, -1]]) tangent = np.abs(np.arccos(util.diagonal_dot(radial, ends))) @@ -97,12 +93,10 @@ def fit_circle_check(points, if tangent > tol.tangent: if verbose: - log.debug('circle fit error: tangent %f', - np.degrees(tangent)) + log.debug("circle fit error: tangent %f", np.degrees(tangent)) return None - result = {'center': C, - 'radius': R} + result = {"center": C, "radius": R} return result @@ -175,7 +169,7 @@ def merge_colinear(points, scale): scale = float(scale) if len(points.shape) != 2 or points.shape[1] != 2: - raise ValueError('only for 2D points!') + raise ValueError("only for 2D points!") # if there's less than 3 points nothing to merge if len(points) < 3: @@ -205,11 +199,11 @@ def merge_colinear(points, scale): # find the projection of each direction vector # onto the perpendicular vector - projection = np.abs(util.diagonal_dot(perp, - direction[:-1])) + projection = np.abs(util.diagonal_dot(perp, direction[:-1])) - projection_ratio = np.max((projection / direction_norm[1:], - projection / direction_norm[:-1]), axis=0) + projection_ratio = np.max( + (projection / direction_norm[1:], projection / direction_norm[:-1]), axis=0 + ) mask = np.ones(len(points), dtype=bool) # since we took diff, we need to offset by one @@ -219,7 +213,7 @@ def merge_colinear(points, scale): return merged -def resample_spline(points, smooth=.001, count=None, degree=3): +def resample_spline(points, smooth=0.001, count=None, degree=3): """ Resample a path in space, smoothing along a b-spline. 
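A usage sketch for `resample_spline`, assuming SciPy is available:

```python
import numpy as np

from trimesh.path.simplify import resample_spline

# a noisy circle of 2D points
theta = np.linspace(0.0, np.pi * 2, 100)
noisy = np.column_stack((np.cos(theta), np.sin(theta)))
noisy += np.random.default_rng(0).normal(0.0, 0.01, noisy.shape)

# fit a b-spline through the points and sample 256 smoothed points
smoothed = resample_spline(noisy, smooth=0.001, count=256)
```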
@@ -240,6 +234,7 @@ def resample_spline(points, smooth=.001, count=None, degree=3): Points in space """ from scipy.interpolate import splev, splprep + if count is None: count = len(points) points = np.asanyarray(points) @@ -279,6 +274,7 @@ def points_to_spline_entity(points, smooth=None, count=None): """ from scipy.interpolate import splprep + if count is None: count = len(points) if smooth is None: @@ -296,9 +292,7 @@ def points_to_spline_entity(points, smooth=None, count=None): control = control[:-1] index[-1] = index[0] - entity = entities.BSpline(points=index, - knots=knots, - closed=closed) + entity = entities.BSpline(points=index, knots=knots, closed=closed) return entity, control @@ -318,9 +312,8 @@ def simplify_basic(drawing, process=False, **kwargs): Original path but with some closed line-loops converted to circles """ - if any(entity.__class__.__name__ != 'Line' - for entity in drawing.entities): - log.debug('Skipping path containing entities other than `Line`') + if any(entity.__class__.__name__ != "Line" for entity in drawing.entities): + log.debug("Skipping path containing entities other than `Line`") return drawing # we are going to do a bookkeeping to avoid having @@ -337,14 +330,13 @@ def simplify_basic(drawing, process=False, **kwargs): # loop through (n, 2) closed paths for discrete in drawing.discrete: # check to see if the closed entity is a circle - circle = is_circle(discrete, - scale=scale) + circle = is_circle(discrete, scale=scale) if circle is not None: # the points are circular enough for our high standards # so replace them with a closed Arc entity - entities_new.append(entities.Arc(points=np.arange(3) + - len(vertices_new), - closed=True)) + entities_new.append( + entities.Arc(points=np.arange(3) + len(vertices_new), closed=True) + ) vertices_new.extend(circle) else: # not a circle, so clean up colinear segments @@ -363,18 +355,22 @@ def simplify_basic(drawing, process=False, **kwargs): entities=entities_new, vertices=vertices_new, metadata=copy.deepcopy(drawing.metadata), - process=process) + process=process, + ) # we have changed every path to a single closed entity # either a closed arc, or a closed line # so all closed paths are now represented by a single entity - cache.cache.update({ - 'paths': np.arange(len(entities_new)).reshape((-1, 1)), - 'path_valid': np.ones(len(entities_new), dtype=bool), - 'dangling': np.array([])}) + cache.cache.update( + { + "paths": np.arange(len(entities_new)).reshape((-1, 1)), + "path_valid": np.ones(len(entities_new), dtype=bool), + "dangling": np.array([]), + } + ) # force recompute of exact bounds - if 'bounds' in cache.cache: - cache.cache.pop('bounds') + if "bounds" in cache.cache: + cache.cache.pop("bounds") simplified._cache = cache # set the cache ID so it won't dump when a value is requested @@ -406,14 +402,13 @@ def simplify_spline(path, smooth=None, verbose=False): scale = path.scale for discrete in path.discrete: - circle = is_circle( - discrete, scale=scale, verbose=verbose) + circle = is_circle(discrete, scale=scale, verbose=verbose) if circle is not None: # the points are circular enough for our high standards # so replace them with a closed Arc entity - new_entities.append(entities.Arc( - points=np.arange(3) + len(new_vertices), - closed=True)) + new_entities.append( + entities.Arc(points=np.arange(3) + len(new_vertices), closed=True) + ) new_vertices.extend(circle) continue @@ -426,7 +421,6 @@ def simplify_spline(path, smooth=None, verbose=False): new_entities.append(entity) # create the Path2D object for 
the result - simplified = type(path)(entities=new_entities, - vertices=new_vertices) + simplified = type(path)(entities=new_entities, vertices=new_vertices) return simplified diff --git a/trimesh/path/traversal.py b/trimesh/path/traversal.py index 75f256e66..5446850e8 100644 --- a/trimesh/path/traversal.py +++ b/trimesh/path/traversal.py @@ -11,6 +11,7 @@ # create a dummy module which will raise the ImportError # or other exception only when someone tries to use networkx from ..exceptions import ExceptionWrapper + nx = ExceptionWrapper(E) @@ -37,15 +38,11 @@ def vertex_graph(entities): if entity.closed: closed.append(index) else: - graph.add_edges_from(entity.nodes, - entity_index=index) + graph.add_edges_from(entity.nodes, entity_index=index) return graph, np.array(closed) -def vertex_to_entity_path(vertex_path, - graph, - entities, - vertices=None): +def vertex_to_entity_path(vertex_path, graph, entities, vertices=None): """ Convert a path of vertex indices to a path of entity indices. @@ -65,6 +62,7 @@ def vertex_to_entity_path(vertex_path, entity_path : (q,) int Entity indices which make up vertex_path """ + def edge_direction(a, b): """ Given two edges, figure out if the first needs to be @@ -94,24 +92,28 @@ def edge_direction(a, b): elif a[1] == b[1]: return 1, -1 else: - constants.log.debug('\n'.join([ - 'edges not connected!', - 'vertex path %s', - 'entity path: %s', - 'entity[a]: %s,', - 'entity[b]: %s']), + constants.log.debug( + "\n".join( + [ + "edges not connected!", + "vertex path %s", + "entity path: %s", + "entity[a]: %s,", + "entity[b]: %s", + ] + ), vertex_path, entity_path, entities[ea].points, - entities[eb].points) + entities[eb].points, + ) return None, None if vertices is None or vertices.shape[1] != 2: ccw_direction = 1 else: - ccw_check = is_ccw(vertices[np.append(vertex_path, - vertex_path[0])]) + ccw_check = is_ccw(vertices[np.append(vertex_path, vertex_path[0])]) ccw_direction = (ccw_check * 2) - 1 # make sure vertex path is correct type @@ -123,7 +125,7 @@ def edge_direction(a, b): # get two wrapped vertex positions vertex_path_pos = np.mod(np.arange(2) + i, len(vertex_path)) vertex_index = vertex_path[vertex_path_pos] - entity_index = graph.get_edge_data(*vertex_index)['entity_index'] + entity_index = graph.get_edge_data(*vertex_index)["entity_index"] entity_path.append(entity_index) # remove duplicate entities and order CCW entity_path = grouping.unique_ordered(entity_path)[::ccw_direction] @@ -140,8 +142,7 @@ def edge_direction(a, b): round_trip = np.append(entity_path, entity_path[0]) round_trip = zip(round_trip[:-1], round_trip[1:]) for ea, eb in round_trip: - da, db = edge_direction(entities[ea].end_points, - entities[eb].end_points) + da, db = edge_direction(entities[ea].end_points, entities[eb].end_points) if da is not None: entities[ea].reverse(direction=da) entities[eb].reverse(direction=db) @@ -185,11 +186,7 @@ def closed_paths(entities, vertices): if len(vertex_path) < 2: continue # convert vertex indices to entity indices - entity_paths.append( - vertex_to_entity_path(vertex_path, - graph, - entities, - vertices)) + entity_paths.append(vertex_to_entity_path(vertex_path, graph, entities, vertices)) return entity_paths @@ -220,12 +217,10 @@ def discretize_path(entities, vertices, path, scale=1.0): vertices = np.asanyarray(vertices) path_len = len(path) if path_len == 0: - raise ValueError('Cannot discretize empty path!') + raise ValueError("Cannot discretize empty path!") if path_len == 1: # case where we only have one entity - discrete = 
np.asanyarray(entities[path[0]].discrete( - vertices, - scale=scale)) + discrete = np.asanyarray(entities[path[0]].discrete(vertices, scale=scale)) else: # run through path appending each entity discrete = [] @@ -251,7 +246,6 @@ def discretize_path(entities, vertices, path, scale=1.0): class PathSample: - def __init__(self, points): # make sure input array is numpy self._points = np.array(points) @@ -295,26 +289,22 @@ def truncate(self, distance): offset = distance - self._cum_norm[position - 1] if offset < constants.tol_path.merge: - truncated = self._points[:position + 1] + truncated = self._points[: position + 1] else: - vector = util.unitize(np.diff( - self._points[np.arange(2) + position], - axis=0).reshape(-1)) + vector = util.unitize( + np.diff(self._points[np.arange(2) + position], axis=0).reshape(-1) + ) vector *= offset endpoint = self._points[position] + vector - truncated = np.vstack((self._points[:position + 1], - endpoint)) - assert (util.row_norm(np.diff( - truncated, axis=0)).sum() - - distance) < constants.tol_path.merge + truncated = np.vstack((self._points[: position + 1], endpoint)) + assert ( + util.row_norm(np.diff(truncated, axis=0)).sum() - distance + ) < constants.tol_path.merge return truncated -def resample_path(points, - count=None, - step=None, - step_round=True): +def resample_path(points, count=None, step=None, step_round=True): """ Given a path along (n,d) points, resample them such that the distance traversed along the path is constant in between each @@ -345,9 +335,9 @@ def resample_path(points, points = np.array(points, dtype=np.float64) # generate samples along the perimeter from kwarg count or step if (count is not None) and (step is not None): - raise ValueError('Only step OR count can be specified') + raise ValueError("Only step OR count can be specified") if (count is None) and (step is None): - raise ValueError('Either step or count must be specified') + raise ValueError("Either step or count must be specified") sampler = PathSample(points) if step is not None and step_round: @@ -412,30 +402,35 @@ def split(path): for index in connected: nodes = paths[index] # add a path which is just sequential indexes - new_paths.append(np.arange(len(nodes)) + - len(new_entities)) + new_paths.append(np.arange(len(nodes)) + len(new_entities)) # save the entity indexes new_entities.extend(nodes) # store the root index from the original drawing metadata = copy.deepcopy(path.metadata) - metadata['split_2D'] = root_index + metadata["split_2D"] = root_index # we made the root path the last index of connected new_root = np.array([len(new_paths) - 1]) # prevents the copying from nuking our cache with path._cache: # create the Path2D - split.append(Path2D( - entities=copy.deepcopy(path.entities[new_entities]), - vertices=copy.deepcopy(path.vertices), - metadata=metadata)) + split.append( + Path2D( + entities=copy.deepcopy(path.entities[new_entities]), + vertices=copy.deepcopy(path.vertices), + metadata=metadata, + ) + ) # add back expensive things to the cache split[-1]._cache.update( - {'paths': new_paths, - 'polygons_closed': polygons_closed[connected], - 'discrete': [discrete[c] for c in connected], - 'root': new_root}) + { + "paths": new_paths, + "polygons_closed": polygons_closed[connected], + "discrete": [discrete[c] for c in connected], + "root": new_root, + } + ) # set the cache ID split[-1]._cache.id_set() diff --git a/trimesh/path/util.py b/trimesh/path/util.py index 683ff450d..e9862ae92 100644 --- a/trimesh/path/util.py +++ b/trimesh/path/util.py @@ -24,7 +24,7 
@@ def concatenate(paths): # upgrade to 3D if we have mixed 2D and 3D paths dimensions = {i.vertices.shape[1] for i in paths} if len(dimensions) > 1: - paths = [i.to_3D() if hasattr(i, 'to_3D') else i for i in paths] + paths = [i.to_3D() if hasattr(i, "to_3D") else i for i in paths] # length of vertex arrays vert_len = np.array([len(i.vertices) for i in paths]) @@ -51,7 +51,7 @@ def concatenate(paths): entities.append(copied) # generate the single new concatenated path # use input types so we don't have circular imports - concat = type(path)(metadata=metadata, - entities=entities, - vertices=np.vstack(vertices)) + concat = type(path)( + metadata=metadata, entities=entities, vertices=np.vstack(vertices) + ) return concat diff --git a/trimesh/util.py b/trimesh/util.py index f252b7ad6..4fa1be8b9 100644 --- a/trimesh/util.py +++ b/trimesh/util.py @@ -69,6 +69,24 @@ def has_module(name): return pkgutil.find_loader(name) is not None +try: + import rtree + + # some versions of rtree screw up indexes on stream loading + # do a test here so we know if we are free to use stream loading + assert ( + next( + rtree.index.Index( + [(1564, [0, 0, 0, 10, 10, 10], None)], + properties=rtree.index.Property(dimension=3), + ).intersection([1, 1, 1, 2, 2, 2]) + ) + == 1564 + ) +except BaseException as E: + rtree = E + + def unitize(vectors, check_valid=False, threshold=None): """ Unitize a vector or an array or row-vectors. @@ -1745,9 +1763,6 @@ def bounds_tree(bounds): tree : Rtree Tree containing bounds by index """ - # rtree is a soft dependency - import rtree - # make sure we've copied bounds bounds = np.array(bounds, dtype=np.float64, copy=True) if len(bounds.shape) == 3: @@ -1765,29 +1780,11 @@ def bounds_tree(bounds): raise ValueError("Bounds must be (n,dimension*2)!") dimension = int(dimension / 2) - # some versions of rtree screw up indexes on stream loading - # do a test here so we know if we are free to use stream loading - # or if we have to do a loop to insert things which is 5x slower - rtree_test = rtree.index.Index( - [(1564, [0, 0, 0, 10, 10, 10], None)], - properties=rtree.index.Property(dimension=3), - ) - rtree_stream_ok = next(rtree_test.intersection([1, 1, 1, 2, 2, 2])) == 1564 - properties = rtree.index.Property(dimension=dimension) - if rtree_stream_ok: - # stream load was verified working on import above - tree = rtree.index.Index( - zip(np.arange(len(bounds)), bounds, [None] * len(bounds)), - properties=properties, - ) - else: - # in some rtree versions stream loading goofs the index - log.warning("rtree stream loading broken! 
Try upgrading rtree!") - tree = rtree.index.Index(properties=properties) - for i, b in enumerate(bounds): - tree.insert(i, b) - return tree + # stream load was verified working on import above + return rtree.index.Index( + zip(np.arange(len(bounds)), bounds, [None] * len(bounds)), properties=properties + ) def wrap_as_stream(item): diff --git a/trimesh/visual/material.py b/trimesh/visual/material.py index c2c5ae70c..a454c862e 100644 --- a/trimesh/visual/material.py +++ b/trimesh/visual/material.py @@ -732,7 +732,7 @@ def pack( materials, uvs, deduplicate=True, - padding=1, + padding: int = 1, max_tex_size_individual=8192, max_tex_size_fused=8192, ): @@ -900,23 +900,6 @@ def get_occlusion_texture(mat): occlusion_texture = occlusion_texture.convert("L") return occlusion_texture - def pad_image(src, padding=1): - # uses replication padding on all 4 sides - - if isinstance(padding, int): - padding = (padding, padding) - x, y = np.meshgrid( - np.arange(src.shape[1] + 2 * padding[0]), - np.arange(src.shape[0] + 2 * padding[1]), - ) - x -= padding[0] - y -= padding[1] - x = np.clip(x, 0, src.shape[1] - 1) - y = np.clip(y, 0, src.shape[0] - 1) - - result = src[y, x] - return result - def resize_images(images, sizes): resized = [] for img, size in zip(images, sizes): @@ -927,11 +910,20 @@ def resize_images(images, sizes): resized.append(img) return resized - def pack_images(images, power_resize=True, random_seed=42): - # random seed needs to be identical to achieve same results - # TODO: we could alternatively reuse the offsets from the first packing call - np.random.seed(random_seed) - return packing.images(images, deduplicate=True, power_resize=power_resize) + def pack_images(images): + # run image packing with our material-specific settings + # which including deduplicating by hash, upsizing to the + # nearest power of two, returning deterministically by seeding + # and padding every side of the image by 1 pixel + # np.random.seed(42) + return packing.images( + images, + deduplicate=True, + power_resize=True, + seed=42, + iterations=10, + spacing=int(padding), + ) if deduplicate: # start by collecting a list of indexes for each material hash @@ -998,10 +990,6 @@ def pack_images(images, power_resize=True, random_seed=42): tex_size = np.round(tex_size / scale).astype(np.int64) unpadded_sizes.append(tex_size) - images = [ - Image.fromarray(pad_image(np.array(img), padding), img.mode) for img in images - ] - # pack the multiple images into a single large image final, offsets = pack_images(images) @@ -1016,10 +1004,6 @@ def pack_images(images, power_resize=True, random_seed=42): break if use_pbr: - metallic_roughness = [ - Image.fromarray(pad_image(np.array(img), padding), img.mode) - for img in metallic_roughness - ] # even if we only need the first two channels, store RGB, because # PIL 'LA' mode images are interpreted incorrectly in other 3D software final_metallic_roughness, _ = pack_images(metallic_roughness) @@ -1029,29 +1013,17 @@ def pack_images(images, power_resize=True, random_seed=42): emissive = None final_emissive = None else: - emissive = [ - Image.fromarray(pad_image(np.array(img), padding), mode=img.mode) - for img in emissive - ] final_emissive, _ = pack_images(emissive) if all(n is not None for n in normals): # only use normal texture if all materials use them # how else would you handle missing normals? 
- normals = [ - Image.fromarray(pad_image(np.array(img), padding), mode=img.mode) - for img in normals - ] final_normals, _ = pack_images(normals) else: final_normals = None if any(np.array(o).min() < 255 for o in occlusion): # only use occlusion texture if any material actually has an occlusion value - occlusion = [ - Image.fromarray(pad_image(np.array(img), padding), mode=img.mode) - for img in occlusion - ] final_occlusion, _ = pack_images(occlusion) else: final_occlusion = None @@ -1125,7 +1097,11 @@ def pack_images(images, power_resize=True, random_seed=42): # should be exactly identical # note this is only true for simple colors # interpolation on complicated stuff can break this - assert np.allclose(reference, compare) + if not np.allclose(reference, compare): + from IPython import embed + + embed() + # assert np.allclose(reference, compare) if use_pbr: return ( From 8b79d8fb99307f0070ad17fc20720d6a3131895f Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Sat, 23 Sep 2023 15:14:54 -0400 Subject: [PATCH 109/144] handle interleaved buffers --- trimesh/exchange/gltf.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/trimesh/exchange/gltf.py b/trimesh/exchange/gltf.py index e06318990..b9ece091d 100644 --- a/trimesh/exchange/gltf.py +++ b/trimesh/exchange/gltf.py @@ -351,10 +351,6 @@ def load_glb( kwargs : dict Kwargs to instantiate a trimesh.Scene """ - - # save the start position of the file for referencing - # against lengths - start = file_obj.tell() # read the first 20 bytes which contain section lengths head_data = file_obj.read(20) head = np.frombuffer(head_data, dtype=" Date: Sat, 23 Sep 2023 15:37:50 -0400 Subject: [PATCH 110/144] handle no buffer info --- trimesh/exchange/gltf.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/trimesh/exchange/gltf.py b/trimesh/exchange/gltf.py index b9ece091d..f89a2e47c 100644 --- a/trimesh/exchange/gltf.py +++ b/trimesh/exchange/gltf.py @@ -383,11 +383,21 @@ def load_glb( # read the binary data referred to by GLTF as 'buffers' buffers = [] - for buffer_info in header["buffers"]: - # if they have interleaved URI data with GLB data handle it here - if "uri" in buffer_info: - buffers.append(_uri_to_bytes(uri=buffer_info["uri"], resolver=resolver)) + start = file_obj.tell() + + # header can contain base64 encoded data in the URI field + info = header.get("buffers", []).copy() + + while (file_obj.tell() - start) < length: + # if we have buffer infos with URI check it here + try: + # if they have interleaved URI data with GLB data handle it here + uri = info.pop(0)["uri"] + buffers.append(_uri_to_bytes(uri=uri, resolver=resolver)) continue + except (IndexError, KeyError): + # if there was no buffer info or URI we still need to read + pass # the last read put us past the JSON chunk # we now read the chunk header, which is 8 bytes From b7558ac78c1a0594d8d7c063496a8da57d6ed4ab Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 25 Sep 2023 15:26:38 -0400 Subject: [PATCH 111/144] somewhat close --- trimesh/visual/material.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/trimesh/visual/material.py b/trimesh/visual/material.py index a454c862e..2c5d808bc 100644 --- a/trimesh/visual/material.py +++ b/trimesh/visual/material.py @@ -780,8 +780,11 @@ def multiply_factor(img, factor, mode): """ if factor is None: return img.convert(mode) - img = np.array(img.convert(mode)) - img = np.round(img.astype(np.float64) * 
factor).astype(np.uint8) + img = ( + (np.array(img.convert(mode), dtype=np.float64) * factor) + .round() + .astype(np.uint8) + ) return Image.fromarray(img, mode=mode) def get_base_color_texture(mat): @@ -1032,11 +1035,11 @@ def pack_images(images): final_size = np.array(final.size, dtype=np.float64) # collect scaled new UV coordinates by material index new_uv = {} - for group, img, off in zip(mat_idx, images, offsets): + for group, img, offset in zip(mat_idx, images, offsets): # how big was the original image - scale = (np.array(img.size) - 1 - 2 * padding) / (final_size - 1) + scale_uv = img.size / final_size # what is the offset in fractions of final image - xy_off = (off + padding) / (final_size - 1) + offset_uv = offset / final.size # scale and translate each of the new UV coordinates # also make sure they are in 0.0-1.0 using modulus (i.e. wrap) for g in group: @@ -1044,6 +1047,7 @@ def pack_images(images): # only wrap pixels that are outside of 0.0-1.0. # use a small leeway of half a pixel for floating point inaccuracies and # the case of uv==1.0 + """" half_pixel_width = 1.0 / (2 * img.size[0]) half_pixel_height = 1.0 / (2 * img.size[1]) wrap_mask_u = (g_uvs[:, 0] <= -half_pixel_width) | ( @@ -1056,6 +1060,10 @@ def pack_images(images): g_uvs[wrap_mask] = g_uvs[wrap_mask] % 1.0 new_uv[g] = (g_uvs * scale) + xy_off + """ + moved = (uvs[g] * scale_uv) + offset_uv + moved[np.logical_or(moved < -0.001, moved > 1.001)] %= 1.0 + new_uv[g] = moved # stack the new UV coordinates in the original order stacked = np.vstack([new_uv[i] for i in range(len(uvs))]) From ec8338b46da2e9e20a9cbf874fe97cffdafcbcfc Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 28 Sep 2023 16:36:15 -0400 Subject: [PATCH 112/144] run black on everything --- examples/convexify.py | 9 +- examples/docker/render/render.py | 9 +- examples/nricp.py | 110 ++-- examples/offscreen_render.py | 16 +- examples/outlined.py | 17 +- examples/ray.py | 24 +- examples/raytrace.py | 16 +- examples/scan_register.py | 28 +- examples/shortest.py | 16 +- examples/sinter.py | 27 +- examples/viewcallback.py | 2 +- examples/voxel.py | 103 ++-- examples/voxel_fillers.py | 17 +- examples/voxel_silhouette.py | 15 +- examples/widget.py | 41 +- tests/corpus.py | 87 +-- tests/generic.py | 1 + tests/helpers/dxfhelp.py | 44 +- tests/helpers/id_helper.py | 80 ++- tests/notebooks.py | 82 ++- tests/regression.py | 43 +- tests/test_3dxml.py | 17 +- tests/test_3mf.py | 28 +- tests/test_adjacency.py | 10 +- tests/test_align.py | 53 +- tests/test_arc.py | 90 +-- tests/test_assimp.py | 10 +- tests/test_base.py | 23 +- tests/test_binvox.py | 20 +- tests/test_boolean.py | 57 +- tests/test_bounds.py | 132 ++--- tests/test_camera.py | 47 +- tests/test_collision.py | 119 ++-- tests/test_color.py | 65 +-- tests/test_convex.py | 77 +-- tests/test_copy.py | 11 +- tests/test_crash.py | 20 +- tests/test_creation.py | 165 +++--- tests/test_curvature.py | 37 +- tests/test_dae.py | 38 +- tests/test_dxf.py | 98 ++-- tests/test_edges.py | 8 +- tests/test_encoding.py | 72 ++- tests/test_except.py | 7 +- tests/test_export.py | 198 ++++--- tests/test_extrude.py | 52 +- tests/test_facets.py | 5 +- tests/test_fill.py | 12 +- tests/test_geom.py | 4 +- tests/test_gltf.py | 8 +- tests/test_gmsh.py | 2 +- tests/test_grouping.py | 105 ++-- tests/test_html.py | 7 +- tests/test_identifier.py | 62 +- tests/test_import.py | 4 +- tests/test_inertia.py | 272 +++++---- tests/test_integralmeancurvature.py | 9 +- tests/test_interval.py | 40 +- tests/test_light.py | 13 +- 
tests/test_loaded.py | 24 +- tests/test_medial.py | 12 +- tests/test_merge.py | 6 +- tests/test_mesh.py | 52 +- tests/test_meta.py | 52 +- tests/test_minimal.py | 34 +- tests/test_mutate.py | 23 +- tests/test_normals.py | 51 +- tests/test_nsphere.py | 15 +- tests/test_off.py | 21 +- tests/test_operators.py | 18 +- tests/test_packing.py | 149 ++--- tests/test_path_creation.py | 21 +- tests/test_pathlib.py | 29 +- tests/test_paths.py | 108 ++-- tests/test_pbr.py | 20 +- tests/test_permutate.py | 56 +- tests/test_ply.py | 167 +++--- tests/test_points.py | 94 ++- tests/test_polygons.py | 100 ++-- tests/test_poses.py | 18 +- tests/test_primitives.py | 194 +++---- tests/test_proximity.py | 90 ++- tests/test_raster.py | 45 +- tests/test_ray.py | 134 ++--- tests/test_registration.py | 281 +++++---- tests/test_remesh.py | 100 ++-- tests/test_render.py | 17 +- tests/test_repr.py | 29 +- tests/test_resolvers.py | 50 +- tests/test_runlength.py | 110 ++-- tests/test_sample.py | 30 +- tests/test_scene.py | 190 +++--- tests/test_scenegraph.py | 98 ++-- tests/test_section.py | 217 +++---- tests/test_segments.py | 73 +-- tests/test_simplify.py | 50 +- tests/test_smooth.py | 5 +- tests/test_smoothing.py | 8 +- tests/test_splines.py | 16 +- tests/test_stl.py | 63 +- tests/test_svg.py | 90 ++- tests/test_texture.py | 83 ++- tests/test_thickness.py | 40 +- tests/test_trackball.py | 38 +- tests/test_triangles.py | 106 ++-- tests/test_typed.py | 1 + tests/test_units.py | 39 +- tests/test_unwrap.py | 13 +- tests/test_upstream.py | 2 +- tests/test_urdf.py | 4 +- tests/test_vector.py | 16 +- tests/test_vertices.py | 13 +- tests/test_viewer.py | 7 +- tests/test_visual.py | 11 +- tests/test_voxel.py | 183 +++--- trimesh/__init__.py | 41 +- trimesh/base.py | 33 +- trimesh/boolean.py | 18 +- trimesh/bounds.py | 175 +++--- trimesh/collision.py | 158 +++-- trimesh/comparison.py | 37 +- trimesh/convex.py | 53 +- trimesh/creation.py | 739 +++++++++++++----------- trimesh/curvature.py | 31 +- trimesh/exceptions.py | 6 +- trimesh/exchange/binvox.py | 241 ++++---- trimesh/exchange/dae.py | 238 ++++---- trimesh/exchange/export.py | 154 ++--- trimesh/exchange/load.py | 262 ++++----- trimesh/exchange/misc.py | 66 +-- trimesh/exchange/obj.py | 412 +++++++------ trimesh/exchange/off.py | 47 +- trimesh/exchange/openctm.py | 33 +- trimesh/exchange/ply.py | 463 +++++++-------- trimesh/exchange/stl.py | 110 ++-- trimesh/exchange/threedxml.py | 285 ++++----- trimesh/exchange/threemf.py | 184 +++--- trimesh/exchange/urdf.py | 124 ++-- trimesh/exchange/xaml.py | 66 +-- trimesh/exchange/xyz.py | 33 +- trimesh/geometry.py | 184 +++--- trimesh/grouping.py | 152 +++-- trimesh/inertia.py | 130 +++-- trimesh/intersections.py | 288 +++++---- trimesh/interval.py | 26 +- trimesh/nsphere.py | 25 +- trimesh/parent.py | 70 ++- trimesh/permutate.py | 37 +- trimesh/poses.py | 87 +-- trimesh/primitives.py | 414 +++++++------ trimesh/proximity.py | 158 +++-- trimesh/ray/__init__.py | 4 +- trimesh/ray/ray_pyembree.py | 115 ++-- trimesh/ray/ray_triangle.py | 161 +++--- trimesh/ray/ray_util.py | 47 +- trimesh/registration.py | 447 +++++++------- trimesh/remesh.py | 159 +++-- trimesh/rendering.py | 151 +++-- trimesh/repair.py | 77 ++- trimesh/resolvers.py | 133 ++--- trimesh/resources/__init__.py | 20 +- trimesh/resources/javascript/compile.py | 54 +- trimesh/sample.py | 20 +- trimesh/scene/__init__.py | 2 +- trimesh/scene/cameras.py | 57 +- trimesh/scene/lighting.py | 65 +-- trimesh/scene/scene.py | 377 ++++++------ trimesh/scene/transforms.py | 198 
+++---- trimesh/schemas.py | 4 +- trimesh/smoothing.py | 66 +-- trimesh/transformations.py | 4 +- trimesh/triangles.py | 211 +++---- trimesh/units.py | 30 +- trimesh/viewer/__init__.py | 14 +- trimesh/viewer/notebook.py | 37 +- trimesh/viewer/trackball.py | 22 +- trimesh/viewer/widget.py | 56 +- trimesh/viewer/windowed.py | 341 +++++------ trimesh/visual/material.py | 19 +- trimesh/voxel/encoding.py | 4 +- 180 files changed, 7066 insertions(+), 7709 deletions(-) diff --git a/examples/convexify.py b/examples/convexify.py index c1b675b40..b84beef0d 100644 --- a/examples/convexify.py +++ b/examples/convexify.py @@ -12,15 +12,14 @@ import trimesh -if __name__ == '__main__': - +if __name__ == "__main__": # attach to trimesh logs trimesh.util.attach_to_log() log = trimesh.util.log # load the mesh from filename # file objects are also supported - mesh = trimesh.load_mesh('../models/box.STL') + mesh = trimesh.load_mesh("../models/box.STL") # split the mesh into connected components of face adjacency # splitting sometimes produces non- watertight meshes @@ -36,9 +35,9 @@ # combine all components into one mesh convex_combined = np.sum(meshes_convex) - log.debug('Showing original mesh') + log.debug("Showing original mesh") mesh.show() # open a viewer window for convexified mesh - log.debug('Showing convexified mesh') + log.debug("Showing convexified mesh") convex_combined.show() diff --git a/examples/docker/render/render.py b/examples/docker/render/render.py index fed7a97ad..12f2df51b 100644 --- a/examples/docker/render/render.py +++ b/examples/docker/render/render.py @@ -2,7 +2,7 @@ import trimesh -if __name__ == '__main__': +if __name__ == "__main__": # print logged messages trimesh.util.attach_to_log() @@ -16,12 +16,11 @@ # set a GL config that fixes a depth buffer issue in xvfb window_conf = gl.Config(double_buffer=True, depth_size=24) # run the actual render call - png = scene.save_image(resolution=[1920, 1080], - window_conf=window_conf) + png = scene.save_image(resolution=[1920, 1080], window_conf=window_conf) # the PNG is just bytes data - trimesh.util.log.info('rendered bytes:', len(png)) + trimesh.util.log.info("rendered bytes:", len(png)) # write the render to a volume we should have docker mounted - with open('/output/output.png', 'wb') as f: + with open("/output/output.png", "wb") as f: f.write(png) diff --git a/examples/nricp.py b/examples/nricp.py index e95e608fd..ae6a85648 100644 --- a/examples/nricp.py +++ b/examples/nricp.py @@ -21,49 +21,45 @@ def slider_closure(records, pv_mesh): """ Return a function used for a PyVista slider widget. 
""" + def cb(value): t1 = min(int(value), len(records) - 1) t2 = min(t1 + 1, len(records) - 1) t = value - t1 pv_mesh.points = (1 - t) * records[t1] + t * records[t2] - for i, pos in enumerate( - pv_mesh.points[landmarks_vertex_indices[:, 0]]): - p.add_mesh( - pv.Sphere( - target.scale / 200, - pos), - name=str(i), - color='r') - pv_mesh['distance'] = ( - 1 - t) * distances[t1] + t * distances[t2] - return cb + for i, pos in enumerate(pv_mesh.points[landmarks_vertex_indices[:, 0]]): + p.add_mesh(pv.Sphere(target.scale / 200, pos), name=str(i), color="r") + pv_mesh["distance"] = (1 - t) * distances[t1] + t * distances[t2] + return cb -if __name__ == '__main__': +if __name__ == "__main__": # attach to trimesh logs trimesh.util.attach_to_log() # Get two meshes that have a comparable shape - source = trimesh.load_mesh('../models/reference.obj', process=False) - target = trimesh.load_mesh('../models/target.obj', process=False) + source = trimesh.load_mesh("../models/reference.obj", process=False) + target = trimesh.load_mesh("../models/target.obj", process=False) # Vertex indices of landmarks source / target - landmarks_vertex_indices = np.array([ - [177, 1633], - [181, 1561], - [614, 1556], - [610, 1629], - [114, 315], - [398, 413], - [812, 412], - [227, 99], - [241, 87], - [674, 86], - [660, 98], - [362, 574], - [779, 573], - ]) + landmarks_vertex_indices = np.array( + [ + [177, 1633], + [181, 1561], + [614, 1556], + [610, 1629], + [114, 315], + [398, 413], + [812, 412], + [227, 99], + [241, 87], + [674, 86], + [660, 98], + [362, 574], + [779, 573], + ] + ) source_markers_vertices = source.vertices[landmarks_vertex_indices[:, 0]] target_markers_vertices = target.vertices[landmarks_vertex_indices[:, 1]] @@ -76,9 +72,9 @@ def cb(value): if use_barycentric_coordinates: source_markers_vertices = source.vertices[landmarks_vertex_indices[:, 0]] source_markers_tids = closest_point(source, source_markers_vertices)[2] - source_markers_barys = \ - points_to_barycentric(source.triangles[source_markers_tids], - source_markers_vertices) + source_markers_barys = points_to_barycentric( + source.triangles[source_markers_tids], source_markers_vertices + ) source_landmarks = (source_markers_tids, source_markers_barys) else: source_landmarks = landmarks_vertex_indices[:, 0] @@ -114,45 +110,61 @@ def cb(value): # Amberg et al. 2007 records_amberg = nricp_amberg( - source, target, source_landmarks=source_landmarks, + source, + target, + source_landmarks=source_landmarks, distance_threshold=0.05, target_positions=target_markers_vertices, - steps=steps_amberg, return_records=True) + steps=steps_amberg, + return_records=True, + ) # Sumner and Popovic 2004 records_sumner = nricp_sumner( - source, target, + source, + target, source_landmarks=source_landmarks, distance_threshold=0.05, target_positions=target_markers_vertices, - steps=steps_sumner, return_records=True) + steps=steps_sumner, + return_records=True, + ) # Show the result try: import pyvista as pv - for records, name in [(records_amberg, 'Amberg et al. 2007'), - (records_sumner, 'Sumner and Popovic 2004')]: + + for records, name in [ + (records_amberg, "Amberg et al. 
2007"), + (records_sumner, "Sumner and Popovic 2004"), + ]: distances = [closest_point(target, r)[1] for r in records] p = pv.Plotter() - p.background_color = 'w' + p.background_color = "w" pv_mesh = pv.wrap(source) - pv_mesh['distance'] = distances[0] + pv_mesh["distance"] = distances[0] p.add_text(name, color=(0, 0, 0)) p.add_mesh( - pv_mesh, color=(0.6, 0.6, 0.9), cmap='rainbow', - clim=(0, target.scale / 100), scalars='distance', - scalar_bar_args={'color': (0, 0, 0)}) - p.add_mesh(pv.wrap(target), style='wireframe') + pv_mesh, + color=(0.6, 0.6, 0.9), + cmap="rainbow", + clim=(0, target.scale / 100), + scalars="distance", + scalar_bar_args={"color": (0, 0, 0)}, + ) + p.add_mesh(pv.wrap(target), style="wireframe") p.add_slider_widget( slider_closure(records=records, pv_mesh=pv_mesh), - rng=(0, len(records)), value=0, - color='black', - event_type='always', - title='step') + rng=(0, len(records)), + value=0, + color="black", + event_type="always", + title="step", + ) for pos in target_markers_vertices: - p.add_mesh(pv.Sphere(target.scale / 200, pos), color='g') + p.add_mesh(pv.Sphere(target.scale / 200, pos), color="g") p.show() diff --git a/examples/offscreen_render.py b/examples/offscreen_render.py index 1c855ea5b..797ce8b91 100644 --- a/examples/offscreen_render.py +++ b/examples/offscreen_render.py @@ -1,15 +1,14 @@ - import numpy as np import trimesh -if __name__ == '__main__': +if __name__ == "__main__": # print logged messages trimesh.util.attach_to_log() log = trimesh.util.log # load a mesh - mesh = trimesh.load('../models/featuretype.STL') + mesh = trimesh.load("../models/featuretype.STL") # get a scene object containing the mesh, this is equivalent to: # scene = trimesh.scene.Scene(mesh) @@ -18,12 +17,11 @@ # a 45 degree homogeneous rotation matrix around # the Y axis at the scene centroid rotate = trimesh.transformations.rotation_matrix( - angle=np.radians(10.0), - direction=[0, 1, 0], - point=scene.centroid) + angle=np.radians(10.0), direction=[0, 1, 0], point=scene.centroid + ) for i in range(4): - trimesh.constants.log.info('Saving image %d', i) + trimesh.constants.log.info("Saving image %d", i) # rotate the camera view transform camera_old, _geometry = scene.graph[scene.camera.name] @@ -36,10 +34,10 @@ # is passed don't save the image try: # increment the file name - file_name = 'render_' + str(i) + '.png' + file_name = "render_" + str(i) + ".png" # save a render of the object as a png png = scene.save_image(resolution=[1920, 1080], visible=True) - with open(file_name, 'wb') as f: + with open(file_name, "wb") as f: f.write(png) f.close() diff --git a/examples/outlined.py b/examples/outlined.py index 79401f5f5..33dad9244 100644 --- a/examples/outlined.py +++ b/examples/outlined.py @@ -9,16 +9,16 @@ import trimesh -if __name__ == '__main__': - mesh = trimesh.load('../models/featuretype.STL') +if __name__ == "__main__": + mesh = trimesh.load("../models/featuretype.STL") # get edges we want to highlight by finding edges # that have sharp angles between adjacent faces - edges = mesh.face_adjacency_edges[mesh.face_adjacency_angles > np.radians( - 30)] + edges = mesh.face_adjacency_edges[mesh.face_adjacency_angles > np.radians(30)] # get a Path3D object for the edges we want to highlight - path = trimesh.path.Path3D(**trimesh.path.exchange.misc.edges_to_path( - edges, mesh.vertices.copy())) + path = trimesh.path.Path3D( + **trimesh.path.exchange.misc.edges_to_path(edges, mesh.vertices.copy()) + ) # set the mesh face colors to white mesh.visual.face_colors = [255, 255, 255, 255] 
@@ -30,8 +30,9 @@ # set the camera transform to look at the mesh scene.camera_transform = scene.camera.look_at( points=mesh.vertices, - rotation=trimesh.transformations.euler_matrix(np.pi / 3, 0, np.pi / 5)) + rotation=trimesh.transformations.euler_matrix(np.pi / 3, 0, np.pi / 5), + ) # write a PNG of the render - with open('outlined.PNG', 'wb') as f: + with open("outlined.PNG", "wb") as f: f.write(scene.save_image()) diff --git a/examples/ray.py b/examples/ray.py index 372e456ab..277a79c8e 100644 --- a/examples/ray.py +++ b/examples/ray.py @@ -11,16 +11,13 @@ import trimesh -if __name__ == '__main__': - +if __name__ == "__main__": # test on a sphere mesh mesh = trimesh.primitives.Sphere() # create some rays - ray_origins = np.array([[0, 0, -5], - [2, 2, -10]]) - ray_directions = np.array([[0, 0, 1], - [0, 0, 1]]) + ray_origins = np.array([[0, 0, -5], [2, 2, -10]]) + ray_directions = np.array([[0, 0, 1], [0, 0, 1]]) """ Signature: mesh.ray.intersects_location(ray_origins, @@ -45,22 +42,19 @@ # run the mesh- ray test locations, index_ray, index_tri = mesh.ray.intersects_location( - ray_origins=ray_origins, - ray_directions=ray_directions) + ray_origins=ray_origins, ray_directions=ray_directions + ) # stack rays into line segments for visualization as Path3D - ray_visualize = trimesh.load_path(np.hstack(( - ray_origins, - ray_origins + ray_directions)).reshape(-1, 2, 3)) + ray_visualize = trimesh.load_path( + np.hstack((ray_origins, ray_origins + ray_directions)).reshape(-1, 2, 3) + ) # make mesh transparent- ish mesh.visual.face_colors = [100, 100, 100, 100] # create a visualization scene with rays, hits, and mesh - scene = trimesh.Scene([ - mesh, - ray_visualize, - trimesh.points.PointCloud(locations)]) + scene = trimesh.Scene([mesh, ray_visualize, trimesh.points.PointCloud(locations)]) # display the scene scene.show() diff --git a/examples/raytrace.py b/examples/raytrace.py index daf193568..582429561 100644 --- a/examples/raytrace.py +++ b/examples/raytrace.py @@ -13,10 +13,9 @@ import trimesh -if __name__ == '__main__': - +if __name__ == "__main__": # test on a simple mesh - mesh = trimesh.load('../models/featuretype.STL') + mesh = trimesh.load("../models/featuretype.STL") # scene will have automatically generated camera and lights scene = mesh.scene() @@ -26,19 +25,18 @@ scene.camera.resolution = [640, 480] # set field of view, in degrees # make it relative to resolution so pixels per degree is same - scene.camera.fov = 60 * (scene.camera.resolution / - scene.camera.resolution.max()) + scene.camera.fov = 60 * (scene.camera.resolution / scene.camera.resolution.max()) # convert the camera to rays with one ray per pixel origins, vectors, pixels = scene.camera_rays() # do the actual ray- mesh queries points, index_ray, index_tri = mesh.ray.intersects_location( - origins, vectors, multiple_hits=False) + origins, vectors, multiple_hits=False + ) # for each hit, find the distance along its vector - depth = trimesh.util.diagonal_dot(points - origins[0], - vectors[index_ray]) + depth = trimesh.util.diagonal_dot(points - origins[0], vectors[index_ray]) # find pixel locations of actual hits pixel_ray = pixels[index_ray] @@ -47,7 +45,7 @@ a = np.zeros(scene.camera.resolution, dtype=np.uint8) # scale depth against range (0.0 - 1.0) - depth_float = ((depth - depth.min()) / depth.ptp()) + depth_float = (depth - depth.min()) / depth.ptp() # convert depth into 0 - 255 uint8 depth_int = (depth_float * 255).round().astype(np.uint8) diff --git a/examples/scan_register.py b/examples/scan_register.py index 
613c7a222..7632e0ddd 100644 --- a/examples/scan_register.py +++ b/examples/scan_register.py @@ -43,14 +43,14 @@ def simulated_brick(face_count, extents, noise, max_iter=10): # randomly rotation with translation transform = trimesh.transformations.random_rotation_matrix() - transform[:3, 3] = (np.random.random(3) - .5) * 1000 + transform[:3, 3] = (np.random.random(3) - 0.5) * 1000 mesh.apply_transform(transform) return mesh -if __name__ == '__main__': +if __name__ == "__main__": # print log messages to terminal trimesh.util.attach_to_log() log = trimesh.util.log @@ -59,9 +59,7 @@ def simulated_brick(face_count, extents, noise, max_iter=10): extents = [6, 12, 2] # create a simulated brick with noise and random transform - scan = simulated_brick(face_count=5000, - extents=extents, - noise=.05) + scan = simulated_brick(face_count=5000, extents=extents, noise=0.05) # create a "true" mesh truth = trimesh.creation.box(extents=extents) @@ -71,14 +69,18 @@ def simulated_brick(face_count, extents, noise, max_iter=10): # seeded by the principal components of inertia truth_to_scan, cost = truth.register(scan) - log.debug("centroid distance pre-registration:", - np.linalg.norm(truth.centroid - scan.centroid)) + log.debug( + "centroid distance pre-registration:", + np.linalg.norm(truth.centroid - scan.centroid), + ) # apply the registration transform truth.apply_transform(truth_to_scan) - log.debug("centroid distance post-registration:", - np.linalg.norm(truth.centroid - scan.centroid)) + log.debug( + "centroid distance post-registration:", + np.linalg.norm(truth.centroid - scan.centroid), + ) # find the distance from the truth mesh to each scan vertex distance = truth.nearest.on_surface(scan.vertices)[1] @@ -89,12 +91,12 @@ def simulated_brick(face_count, extents, noise, max_iter=10): scan.visual.vertex_colors = trimesh.visual.interpolate(distance) # print some quick statistics about the mesh - log.debug('distance max:', distance.max()) - log.debug('distance mean:', distance.mean()) - log.debug('distance STD:', distance.std()) + log.debug("distance max:", distance.max()) + log.debug("distance mean:", distance.mean()) + log.debug("distance STD:", distance.std()) # export result with vertex colors for meshlab - scan.export('scan_new.ply') + scan.export("scan_new.ply") # show in a pyglet window scan.show() diff --git a/examples/shortest.py b/examples/shortest.py index 4b96b91ac..ec833e4e6 100644 --- a/examples/shortest.py +++ b/examples/shortest.py @@ -11,8 +11,7 @@ import trimesh -if __name__ == '__main__': - +if __name__ == "__main__": # test on a sphere mesh mesh = trimesh.primitives.Sphere() @@ -30,18 +29,14 @@ # alternative method for weighted graph creation # you can also create the graph with from_edgelist and # a list comprehension, which is like 1.5x faster - ga = nx.from_edgelist([(e[0], e[1], {'length': L}) - for e, L in zip(edges, length)]) + ga = nx.from_edgelist([(e[0], e[1], {"length": L}) for e, L in zip(edges, length)]) # arbitrary indices of mesh.vertices to test with start = 0 end = int(len(mesh.vertices) / 2.0) # run the shortest path query using length for edge weight - path = nx.shortest_path(g, - source=start, - target=end, - weight='length') + path = nx.shortest_path(g, source=start, target=end, weight="length") # VISUALIZE RESULT # make the sphere transparent-ish @@ -52,9 +47,6 @@ points_visual = trimesh.points.PointCloud(mesh.vertices[[start, end]]) # create a scene with the mesh, path, and points - scene = trimesh.Scene([ - points_visual, - path_visual, - mesh]) + scene = 
trimesh.Scene([points_visual, path_visual, mesh]) scene.show(smooth=False) diff --git a/examples/sinter.py b/examples/sinter.py index 006bd13ce..d8afa30aa 100644 --- a/examples/sinter.py +++ b/examples/sinter.py @@ -11,8 +11,9 @@ from trimesh.path import packing # path with our sample models -models = os.path.abspath(os.path.join( - os.path.expanduser(os.path.dirname(__file__)), '..', 'models')) +models = os.path.abspath( + os.path.join(os.path.expanduser(os.path.dirname(__file__)), "..", "models") +) def collect_meshes(count=None, max_size=20.0): @@ -33,14 +34,16 @@ def collect_meshes(count=None, max_size=20.0): meshes = [] for file_name in sorted(os.listdir(models)): try: - scene = trimesh.load(os.path.join(models, file_name), - force='scene') + scene = trimesh.load(os.path.join(models, file_name), force="scene") except BaseException: pass for ori in scene.geometry.values(): - if (not isinstance(ori, trimesh.Trimesh) or - not ori.is_watertight or ori.volume < 0.001 or - ori.extents.max() > max_size): + if ( + not isinstance(ori, trimesh.Trimesh) + or not ori.is_watertight + or ori.volume < 0.001 + or ori.extents.max() > max_size + ): continue # split into single body meshes @@ -55,7 +58,7 @@ def collect_meshes(count=None, max_size=20.0): return meshes -if __name__ == '__main__': +if __name__ == "__main__": trimesh.util.attach_to_log() log = trimesh.util.log @@ -63,12 +66,11 @@ def collect_meshes(count=None, max_size=20.0): # get some sample data meshes = collect_meshes(max_size=size) - log.debug(f'loaded {len(meshes)} meshes') + log.debug(f"loaded {len(meshes)} meshes") # place the meshes into the volume with Profiler() as P: - placed, transforms, consume = packing.meshes( - meshes, size=[size] * 3, spacing=0.1) + placed, transforms, consume = packing.meshes(meshes, size=[size] * 3, spacing=0.1) P.log.debug(show_all=True) # none of the placed meshes should have overlapping AABB @@ -84,4 +86,5 @@ def collect_meshes(count=None, max_size=20.0): sections = concat.section_multiplane( plane_origin=concat.bounds[0], plane_normal=[0, 0, 1], - heights=np.linspace(0.0, 10.0, 100)) + heights=np.linspace(0.0, 10.0, 100), + ) diff --git a/examples/viewcallback.py b/examples/viewcallback.py index 82ccff58a..9bd8ec4d6 100644 --- a/examples/viewcallback.py +++ b/examples/viewcallback.py @@ -37,7 +37,7 @@ def sinwave(scene): scene.graph.update(node, matrix=matrix) -if __name__ == '__main__': +if __name__ == "__main__": # create some spheres a = trimesh.primitives.Sphere() b = trimesh.primitives.Sphere() diff --git a/examples/voxel.py b/examples/voxel.py index e85ac7f08..d5a4f131e 100644 --- a/examples/voxel.py +++ b/examples/voxel.py @@ -1,4 +1,3 @@ - import inspect import os @@ -10,13 +9,9 @@ log = trimesh.util.log -dir_current = os.path.dirname( - os.path.abspath( - inspect.getfile( - inspect.currentframe()))) +dir_current = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) # the absolute path for our reference models -dir_models = os.path.abspath( - os.path.join(dir_current, '..', 'models')) +dir_models = os.path.abspath(os.path.join(dir_current, "..", "models")) def show(chair_mesh, chair_voxels, colors=(1, 1, 1, 0.3)): @@ -25,101 +20,93 @@ def show(chair_mesh, chair_voxels, colors=(1, 1, 1, 0.3)): scene.show() -if __name__ == '__main__': +if __name__ == "__main__": trimesh.util.attach_to_log() - base_name = 'chair_model' - chair_mesh = trimesh.load(os.path.join(dir_models, '%s.obj' % base_name)) + base_name = "chair_model" + chair_mesh = trimesh.load(os.path.join(dir_models, 
"%s.obj" % base_name)) if isinstance(chair_mesh, trimesh.scene.Scene): - chair_mesh = trimesh.util.concatenate([ - trimesh.Trimesh(mesh.vertices, mesh.faces) - for mesh in chair_mesh.geometry.values()]) - - binvox_path = os.path.join(dir_models, '%s.binvox' % base_name) + chair_mesh = trimesh.util.concatenate( + [ + trimesh.Trimesh(mesh.vertices, mesh.faces) + for mesh in chair_mesh.geometry.values() + ] + ) + + binvox_path = os.path.join(dir_models, "%s.binvox" % base_name) chair_voxels = trimesh.load(binvox_path) - chair_voxels = v.VoxelGrid( - chair_voxels.encoding.dense, - chair_voxels.transform) + chair_voxels = v.VoxelGrid(chair_voxels.encoding.dense, chair_voxels.transform) - log.debug('white: voxelized chair (binvox, exact)') - show( - chair_mesh, voxelize_mesh( - chair_mesh, exact=True), colors=( - 1, 1, 1, 0.3)) + log.debug("white: voxelized chair (binvox, exact)") + show(chair_mesh, voxelize_mesh(chair_mesh, exact=True), colors=(1, 1, 1, 0.3)) - log.debug('red: binvox-loaded chair') + log.debug("red: binvox-loaded chair") show(chair_mesh, chair_voxels, colors=(1, 0, 0, 0.3)) - voxelized_chair_mesh = chair_mesh.voxelized( - np.max(chair_mesh.extents) / 32) - log.debug('green: voxelized chair (default).') + voxelized_chair_mesh = chair_mesh.voxelized(np.max(chair_mesh.extents) / 32) + log.debug("green: voxelized chair (default).") show(chair_mesh, voxelized_chair_mesh, colors=(0, 1, 0, 0.3)) shape = (50, 17, 63) revox = chair_voxels.revoxelized(shape) - log.debug('cyan: revoxelized.') + log.debug("cyan: revoxelized.") show(chair_mesh, revox, colors=(0, 1, 1, 0.3)) values = chair_voxels.encoding.dense.copy() - values[:values.shape[0] // 2] = 0 + values[: values.shape[0] // 2] = 0 stripped = v.VoxelGrid(values, chair_voxels.transform.copy()).strip() - log.debug( - 'yellow: stripped halved voxel grid. Transform is updated appropriately') + log.debug("yellow: stripped halved voxel grid. Transform is updated appropriately") show(chair_mesh, stripped, colors=(1, 1, 0, 0.3)) transform = np.eye(4) transform[:3] += np.random.normal(size=(3, 4)) * 0.2 transformed_chair_mesh = chair_mesh.copy().apply_transform(transform) - log.debug('original transform volume: %s' - % str(chair_voxels.element_volume)) + log.debug("original transform volume: %s" % str(chair_voxels.element_volume)) chair_voxels.apply_transform(transform) - log.debug('warped transform volume: %s' % - str(chair_voxels.element_volume)) - log.debug('blue: transformed voxels') - log.debug('Transformation is lazy, and each voxel is no longer a cube.') + log.debug("warped transform volume: %s" % str(chair_voxels.element_volume)) + log.debug("blue: transformed voxels") + log.debug("Transformation is lazy, and each voxel is no longer a cube.") show(transformed_chair_mesh, chair_voxels, colors=(0, 0, 1, 0.3)) - voxelized = chair_mesh.voxelized(pitch=0.02, method='subdivide').fill() - log.debug('green: subdivided') + voxelized = chair_mesh.voxelized(pitch=0.02, method="subdivide").fill() + log.debug("green: subdivided") show(chair_mesh, voxelized, colors=(0, 1, 0, 0.3)) - voxelized = chair_mesh.voxelized(pitch=0.02, method='ray') - log.debug('red: ray. Poor performance on thin structures') + voxelized = chair_mesh.voxelized(pitch=0.02, method="ray") + log.debug("red: ray. Poor performance on thin structures") show(chair_mesh, voxelized, colors=(1, 0, 0, 0.3)) - voxelized = chair_mesh.voxelized(pitch=0.02, method='binvox') - log.debug('red: binvox (default). 
Poor performance on thin structures') + voxelized = chair_mesh.voxelized(pitch=0.02, method="binvox") + log.debug("red: binvox (default). Poor performance on thin structures") show(chair_mesh, voxelized, colors=(1, 0, 0, 0.3)) - voxelized = chair_mesh.voxelized( - pitch=0.02, method='binvox', wireframe=True) - log.debug( - 'green: binvox (wireframe). Still doesn\'t capture all thin structures') + voxelized = chair_mesh.voxelized(pitch=0.02, method="binvox", wireframe=True) + log.debug("green: binvox (wireframe). Still doesn't capture all thin structures") show(chair_mesh, voxelized, colors=(0, 1, 0, 0.3)) - voxelized = chair_mesh.voxelized(pitch=0.02, method='binvox', exact=True) - log.debug('blue: binvox (exact). Does a good job') + voxelized = chair_mesh.voxelized(pitch=0.02, method="binvox", exact=True) + log.debug("blue: binvox (exact). Does a good job") show(chair_mesh, voxelized, colors=(0, 0, 1, 0.3)) voxelized = chair_mesh.voxelized( pitch=0.02, - method='binvox', + method="binvox", exact=True, downsample_factor=2, - downsample_threshold=1) - log.debug('red: binvox (exact downsampled) surface') + downsample_threshold=1, + ) + log.debug("red: binvox (exact downsampled) surface") show(chair_mesh, voxelized, colors=(1, 0, 0, 0.3)) - chair_voxels = chair_mesh.voxelized( - pitch=0.02, method='binvox', exact=True) + chair_voxels = chair_mesh.voxelized(pitch=0.02, method="binvox", exact=True) - voxelized = chair_voxels.copy().fill(method='base') - log.debug('blue: binvox (exact) filled (base). Gets a bit overly excited') + voxelized = chair_voxels.copy().fill(method="base") + log.debug("blue: binvox (exact) filled (base). Gets a bit overly excited") show(chair_mesh, voxelized, colors=(0, 0, 1, 0.3)) - voxelized = chair_voxels.copy().fill(method='orthographic') - log.debug('green: binvox (exact) filled (orthographic).') + voxelized = chair_voxels.copy().fill(method="orthographic") + log.debug("green: binvox (exact) filled (orthographic).") log.debug("Doesn't do much as should be expected") show(chair_mesh, voxelized, colors=(0, 1, 0, 0.3)) diff --git a/examples/voxel_fillers.py b/examples/voxel_fillers.py index 768814a91..8a28e7e7f 100644 --- a/examples/voxel_fillers.py +++ b/examples/voxel_fillers.py @@ -6,12 +6,13 @@ def show(surface, filled): """ Display a colored example. """ - scene = trimesh.Scene([surface.as_boxes(colors=(1, 0, 0, 0.3)), - filled.as_boxes(colors=(0, 0, 1, 0.5))]) + scene = trimesh.Scene( + [surface.as_boxes(colors=(1, 0, 0, 0.3)), filled.as_boxes(colors=(0, 0, 1, 0.5))] + ) scene.show() -if __name__ == '__main__': +if __name__ == "__main__": trimesh.util.attach_to_log() mesh = trimesh.primitives.Sphere() @@ -19,16 +20,14 @@ def show(surface, filled): # remove_internal produced unexpected results when boundary pixels # are occupied not useful very often # but handy to demonstrate filling algorithms. 
- surface = mesh.voxelized( - pitch=0.2, method='binvox', remove_internal=True) + surface = mesh.voxelized(pitch=0.2, method="binvox", remove_internal=True) for impl in fillers: trimesh.util.log.debug(impl) show(surface, surface.copy().fill(method=impl)) - filled = mesh.voxelized( - pitch=0.05, method='binvox', exact=True).fill(method='holes') + filled = mesh.voxelized(pitch=0.05, method="binvox", exact=True).fill(method="holes") hollow = filled.copy().hollow() - trimesh.util.log.debug('filled volume, hollow_volume') + trimesh.util.log.debug("filled volume, hollow_volume") trimesh.util.log.debug(filled.volume, hollow.volume) - trimesh.util.log.debug('hollow voxel (zoom in to see hollowness)') + trimesh.util.log.debug("hollow voxel (zoom in to see hollowness)") hollow.show() diff --git a/examples/voxel_silhouette.py b/examples/voxel_silhouette.py index fd76a8476..142d95fc5 100644 --- a/examples/voxel_silhouette.py +++ b/examples/voxel_silhouette.py @@ -9,6 +9,7 @@ def vis(): # separate function to delay plt import import matplotlib.pyplot as plt + _, (ax0, ax1, ax2) = plt.subplots(1, 3) ax0.imshow(image) ax1.imshow(sil) @@ -18,14 +19,15 @@ def vis(): plt.show() -if __name__ == '__main__': +if __name__ == "__main__": trimesh.util.attach_to_log() log = trimesh.util.log resolution = 256 fov = 60.0 path = os.path.realpath( - os.path.join(os.path.dirname(__file__), '..', 'models', 'bunny.ply')) + os.path.join(os.path.dirname(__file__), "..", "models", "bunny.ply") + ) mesh = trimesh.load(path) scene = mesh.scene() @@ -44,9 +46,9 @@ def vis(): closest = np.min(dists) farthest = np.max(dists) z = np.linspace(closest, farthest, resolution) - log.debug(f'z range: {closest:f}, {farthest:f}') + log.debug(f"z range: {closest:f}, {farthest:f}") - vox = mesh.voxelized(1. 
/ resolution, method='binvox') + vox = mesh.voxelized(1.0 / resolution, method="binvox") coords = np.expand_dims(rays, axis=-2) * np.expand_dims(z, axis=-1) coords += origin @@ -54,8 +56,9 @@ def vis(): sil = np.any(frust_vox_dense, axis=-1) sil = sil.T # change to image ordering (y, x) - image = np.array(Image.open(trimesh.util.wrap_as_stream( - scene.save_image(resolution=None)))) + image = np.array( + Image.open(trimesh.util.wrap_as_stream(scene.save_image(resolution=None))) + ) image = image[..., :3] vis() diff --git a/examples/widget.py b/examples/widget.py index 08011e1d8..30a06c1dd 100644 --- a/examples/widget.py +++ b/examples/widget.py @@ -31,7 +31,7 @@ def create_scene(): # plane geom = trimesh.creation.box((0.5, 0.5, 0.01)) geom.apply_translation((0, 0, -0.005)) - geom.visual.face_colors = (.6, .6, .6) + geom.visual.face_colors = (0.6, 0.6, 0.6) scene.add_geometry(geom) # axis @@ -42,27 +42,24 @@ def create_scene(): # box1 geom = trimesh.creation.box((box_size,) * 3) - geom.visual.face_colors = np.random.uniform( - 0, 1, (len(geom.faces), 3)) + geom.visual.face_colors = np.random.uniform(0, 1, (len(geom.faces), 3)) transform = tf.translation_matrix([0.1, 0.1, box_size / 2]) scene.add_geometry(geom, transform=transform) # box2 geom = trimesh.creation.box((box_size,) * 3) - geom.visual.face_colors = np.random.uniform( - 0, 1, (len(geom.faces), 3)) + geom.visual.face_colors = np.random.uniform(0, 1, (len(geom.faces), 3)) transform = tf.translation_matrix([-0.1, 0.1, box_size / 2]) scene.add_geometry(geom, transform=transform) # fuze - geom = trimesh.load(str(here / '../models/fuze.obj')) + geom = trimesh.load(str(here / "../models/fuze.obj")) transform = tf.translation_matrix([-0.1, -0.1, 0]) scene.add_geometry(geom, transform=transform) # sphere geom = trimesh.creation.icosphere(radius=0.05) - geom.visual.face_colors = np.random.uniform( - 0, 1, (len(geom.faces), 3)) + geom.visual.face_colors = np.random.uniform(0, 1, (len(geom.faces), 3)) transform = tf.translation_matrix([0.1, -0.1, box_size / 2]) scene.add_geometry(geom, transform=transform) @@ -104,7 +101,7 @@ def __init__(self): gui.add(hbox) - pyglet.clock.schedule_interval(self.callback, 1. 
/ 20) + pyglet.clock.schedule_interval(self.callback, 1.0 / 20) pyglet.app.run() def callback(self, dt): @@ -121,28 +118,22 @@ def callback(self, dt): self.scene_widget2._draw() # change image - image = np.random.randint(0, - 255, - (self.height - 10, self.width // 3 - 10, 3), - dtype=np.uint8) + image = np.random.randint( + 0, 255, (self.height - 10, self.width // 3 - 10, 3), dtype=np.uint8 + ) with io.BytesIO() as f: - PIL.Image.fromarray(image).save(f, format='JPEG') + PIL.Image.fromarray(image).save(f, format="JPEG") self.image_widget.image = pyglet.image.load(filename=None, file=f) def _create_window(self, width, height): try: - config = pyglet.gl.Config(sample_buffers=1, - samples=4, - depth_size=24, - double_buffer=True) - window = pyglet.window.Window(config=config, - width=width, - height=height) + config = pyglet.gl.Config( + sample_buffers=1, samples=4, depth_size=24, double_buffer=True + ) + window = pyglet.window.Window(config=config, width=width, height=height) except pyglet.window.NoSuchConfigException: config = pyglet.gl.Config(double_buffer=True) - window = pyglet.window.Window(config=config, - width=width, - height=height) + window = pyglet.window.Window(config=config, width=width, height=height) @window.event def on_key_press(symbol, modifiers): @@ -153,6 +144,6 @@ def on_key_press(symbol, modifiers): return window -if __name__ == '__main__': +if __name__ == "__main__": np.random.seed(0) Application() diff --git a/tests/corpus.py b/tests/corpus.py index c4f1127ae..bac41b9e1 100644 --- a/tests/corpus.py +++ b/tests/corpus.py @@ -16,12 +16,15 @@ # remove loaders that are thin wrappers available.difference_update( - [k for k, v in - trimesh.exchange.load.mesh_loaders.items() - if v in (trimesh.exchange.misc.load_meshio,)]) + [ + k + for k, v in trimesh.exchange.load.mesh_loaders.items() + if v in (trimesh.exchange.misc.load_meshio,) + ] +) # remove loaders we don't care about -available.difference_update({'json', 'dae', 'zae'}) -available.update({'dxf', 'svg'}) +available.difference_update({"json", "dae", "zae"}) +available.update({"dxf", "svg"}) def on_repo(repo, commit): @@ -38,43 +41,47 @@ def on_repo(repo, commit): # get a resolver for the specific commit repo = trimesh.resolvers.GithubResolver( - repo=repo, commit=commit, - save='~/.trimesh-cache') + repo=repo, commit=commit, save="~/.trimesh-cache" + ) # list file names in the repo we can load - paths = [i for i in repo.keys() - if i.lower().split('.')[-1] in available] + paths = [i for i in repo.keys() if i.lower().split(".")[-1] in available] report = {} for _i, path in enumerate(paths): - namespace, name = path.rsplit('/', 1) + namespace, name = path.rsplit("/", 1) # get a subresolver that has a root at # the file we are trying to load resolver = repo.namespaced(namespace) check = path.lower() - broke = ('malformed empty outofmemory ' + - 'bad incorrect missing ' + - 'failures pond.0.ply').split() + broke = ( + "malformed empty outofmemory " + + "bad incorrect missing " + + "failures pond.0.ply" + ).split() should_raise = any(b in check for b in broke) raised = False # clip off the big old name from the archive - saveas = path[path.find(commit) + len(commit):] + saveas = path[path.find(commit) + len(commit) :] try: m = trimesh.load( file_obj=wrap_as_stream(resolver.get(name)), file_type=name, - resolver=resolver) + resolver=resolver, + ) report[saveas] = str(m) # if our source was a GLTF we should be able to roundtrip without # dropping - if name.lower().split('.')[-1] in ('gltf', - 'glb') and len(m.geometry) > 0: 
+ if name.lower().split(".")[-1] in ("gltf", "glb") and len(m.geometry) > 0: # try round-tripping the file - e = trimesh.load(file_obj=wrap_as_stream(m.export(file_type='glb')), - file_type='glb', process=False) + e = trimesh.load( + file_obj=wrap_as_stream(m.export(file_type="glb")), + file_type="glb", + process=False, + ) # geometry keys should have survived roundtrip assert set(m.geometry.keys()) == set(e.geometry.keys()) @@ -84,16 +91,17 @@ def on_repo(repo, commit): ori = m.geometry[key] # todo : why doesn't this pass # assert np.allclose(ori.vertices, geom.vertices) - if isinstance(getattr(geom, 'visual', None), - trimesh.visual.TextureVisuals): + if isinstance( + getattr(geom, "visual", None), trimesh.visual.TextureVisuals + ): a, b = geom.visual.material, ori.visual.material # try our fancy equal assert equal(a.baseColorFactor, b.baseColorFactor) try: - assert equal( - a.baseColorTexture, b.baseColorTexture) + assert equal(a.baseColorTexture, b.baseColorTexture) except BaseException: from IPython import embed + embed() except NotImplementedError as E: @@ -112,7 +120,7 @@ def on_repo(repo, commit): # if it worked when it didn't have to add a label if should_raise and not raised: # raise ValueError(name) - report[saveas] += ' SHOULD HAVE RAISED' + report[saveas] += " SHOULD HAVE RAISED" return report @@ -147,7 +155,7 @@ def equal(a, b): return np.allclose(a, b) # a PIL image of some variety - if hasattr(a, 'getpixel'): + if hasattr(a, "getpixel"): if a.size != b.size: return False # very crude: it's pretty hard to check if two images @@ -161,28 +169,33 @@ def equal(a, b): return a == b -if __name__ == '__main__': - +if __name__ == "__main__": trimesh.util.attach_to_log() with Profiler() as P: # check the assimp corpus, about 50mb report = on_repo( - repo='assimp/assimp', - commit='c2967cf79acdc4cd48ecb0729e2733bf45b38a6f') + repo="assimp/assimp", commit="c2967cf79acdc4cd48ecb0729e2733bf45b38a6f" + ) # check the gltf-sample-models, about 1gb - report.update(on_repo( - repo='KhronosGroup/glTF-Sample-Models', - commit='8e9a5a6ad1a2790e2333e3eb48a1ee39f9e0e31b')) + report.update( + on_repo( + repo="KhronosGroup/glTF-Sample-Models", + commit="8e9a5a6ad1a2790e2333e3eb48a1ee39f9e0e31b", + ) + ) # add back collada for this repo - available.update(['dae', 'zae']) - report.update(on_repo( - repo='ros-industrial/universal_robot', - commit='8f01aa1934079e5a2c859ccaa9dd6623d4cfa2fe')) + available.update(["dae", "zae"]) + report.update( + on_repo( + repo="ros-industrial/universal_robot", + commit="8f01aa1934079e5a2c859ccaa9dd6623d4cfa2fe", + ) + ) # show all profiler lines log.info(P.output_text(show_all=True)) # print a formatted report of what we loaded - log.debug('\n'.join(f'# {k}\n{v}\n' for k, v in report.items())) + log.debug("\n".join(f"# {k}\n{v}\n" for k, v in report.items())) diff --git a/tests/generic.py b/tests/generic.py index 53076a2d9..3c85b6c9d 100644 --- a/tests/generic.py +++ b/tests/generic.py @@ -39,6 +39,7 @@ tf = trimesh.transformations TemporaryDirectory = tempfile.TemporaryDirectory + # make a dummy profiler which does nothing class DummyProfiler(object): def __enter__(self, *args, **kwargs): diff --git a/tests/helpers/dxfhelp.py b/tests/helpers/dxfhelp.py index 088331791..2cb8b0dc6 100644 --- a/tests/helpers/dxfhelp.py +++ b/tests/helpers/dxfhelp.py @@ -12,7 +12,7 @@ import numpy as np -def get_json(file_name='../templates/dxf.json'): +def get_json(file_name="../templates/dxf.json"): """ Load the JSON blob into native objects """ @@ -21,26 +21,25 @@ def 
get_json(file_name='../templates/dxf.json'): return t -def write_json(template, file_name='../templates/dxf.json'): +def write_json(template, file_name="../templates/dxf.json"): """ Write a native object to a JSON blob """ - with open(file_name, 'w') as f: + with open(file_name, "w") as f: json.dump(template, f, indent=4) -def replace_whitespace(text, SAFE_SPACE='|<^>|', insert=True, reformat=False): +def replace_whitespace(text, SAFE_SPACE="|<^>|", insert=True, reformat=False): """ Replace non-strippable whitepace in a string with a safe space """ if insert: # replace whitespace with safe space chr - args = (' ', SAFE_SPACE) + args = (" ", SAFE_SPACE) else: # replace safe space chr with whitespace - args = (SAFE_SPACE, ' ') - lines = [line.strip().replace(*args) - for line in str.splitlines(text)] + args = (SAFE_SPACE, " ") + lines = [line.strip().replace(*args) for line in str.splitlines(text)] # remove any blank lines if any(len(L) == 0 for L in lines): shaped = np.reshape(lines, (-1, 2)) @@ -53,23 +52,23 @@ def replace_whitespace(text, SAFE_SPACE='|<^>|', insert=True, reformat=False): if reformat: for i in range(len(lines)): cur = lines[i].strip() - if cur.startswith('$$'): + if cur.startswith("$$"): lines[i] = cur[1:] - elif cur.startswith('${'): + elif cur.startswith("${"): lines[i] = cur[1:] - elif cur.startswith('$'): - lines[i] = '{' + cur[1:] + '}' + elif cur.startswith("$"): + lines[i] = "{" + cur[1:] + "}" - return '\n'.join(lines) + return "\n".join(lines) -def write_files(template, destination='./dxf'): +def write_files(template, destination="./dxf"): """ For a dict, write each value to destination/key """ os.makedirs(destination) for key, value in template.items(): - with open(os.path.join(destination, key), 'w') as f: + with open(os.path.join(destination, key), "w") as f: f.write(replace_whitespace(value, reformat=True, insert=False)) @@ -81,27 +80,28 @@ def read_files(path): template = {} for file_name in os.listdir(path): # skip emacs buffers - if '~' in file_name: + if "~" in file_name: continue with open(os.path.join(path, file_name)) as f: template[file_name] = replace_whitespace( - f.read(), reformat=False, insert=True) + f.read(), reformat=False, insert=True + ) return template -if __name__ == '__main__': - +if __name__ == "__main__": import sys import trimesh + trimesh.util.attach_to_log() # dump files to JSON template - if 'dump' in sys.argv: - t = read_files('dxf') + if "dump" in sys.argv: + t = read_files("dxf") write_json(t) - elif 'read' in sys.argv: + elif "read" in sys.argv: # dump JSON to files for editing t = get_json() write_files(t) diff --git a/tests/helpers/id_helper.py b/tests/helpers/id_helper.py index b33dbb5c4..000d02e1d 100644 --- a/tests/helpers/id_helper.py +++ b/tests/helpers/id_helper.py @@ -24,12 +24,14 @@ TOL_ZERO = 1e-12 -def permutations(mesh, - function=lambda x: x.identifier, - displacement_max=1e-8, - count=1000, - subdivisions=2, - cutoff=3600): +def permutations( + mesh, + function=lambda x: x.identifier, + displacement_max=1e-8, + count=1000, + subdivisions=2, + cutoff=3600, +): """ Permutate a mesh, record the maximum it deviates from the original mesh and the resulting value of an identifier function. 
@@ -55,9 +57,9 @@ def permutations(mesh, for _j in range(subdivisions - 1): divided.append(divided[-1].copy().subdivide()) - for i, _displacement in enumerate(np.linspace(0.0, - displacement_max / mesh.scale, - count)): + for i, _displacement in enumerate( + np.linspace(0.0, displacement_max / mesh.scale, count) + ): # get one of the subdivided meshes current = np.random.choice(divided).copy() @@ -73,13 +75,13 @@ def permutations(mesh, identifiers.append(identifier) if (time.time() - start) > cutoff: - log.debug(f'bailing for time:{time.time() - start} count:{i}') + log.debug(f"bailing for time:{time.time() - start} count:{i}") return np.array(identifiers) return np.array(identifiers) -def get_meshes(path='../../../models', cutoff=None): +def get_meshes(path="../../../models", cutoff=None): """ Get a list of single- body meshes to test identifiers on. @@ -111,23 +113,22 @@ def get_meshes(path='../../../models', cutoff=None): cylinder = trimesh.creation.cylinder( radius=np.random.random() * 100, height=np.random.random() * 1000, - sections=int(np.clip(np.random.random() * 720, - 20, - 720))) + sections=int(np.clip(np.random.random() * 720, 20, 720)), + ) capsule = trimesh.creation.capsule( radius=np.random.random() * 100, height=np.random.random() * 1000, - count=np.clip(np.random.random(2) * 720, - 20, - 720).astype(int)) + count=np.clip(np.random.random(2) * 720, 20, 720).astype(int), + ) bodies.append(cylinder) bodies.append(capsule) for _i in range(10): - bodies.append(trimesh.creation.random_soup( - int(np.clip(np.random.random() * 1000, - 20, - 1000)))) + bodies.append( + trimesh.creation.random_soup( + int(np.clip(np.random.random() * 1000, 20, 1000)) + ) + ) bodies.append(trimesh.creation.icosphere()) bodies.append(trimesh.creation.uv_sphere()) bodies.append(trimesh.creation.icosahedron()) @@ -146,12 +147,12 @@ def data_stats(data): return mean, percent -if __name__ == '__main__': +if __name__ == "__main__": trimesh.util.attach_to_log(level=logging.INFO) meshes = get_meshes() - log.debug('loaded meshes!') + log.debug("loaded meshes!") # we want the whole thing to last less than hours = 5 @@ -161,36 +162,29 @@ def data_stats(data): running = [] for i, m in enumerate(meshes): - # calculate permutations - identifier = permutations(m, - count=1000, - cutoff=cutoff) + identifier = permutations(m, count=1000, cutoff=cutoff) # get data mean, percent = data_stats(identifier) - nz = np.logical_and(np.abs(mean) > TOL_ZERO, - np.abs(percent) > TOL_ZERO) + nz = np.logical_and(np.abs(mean) > TOL_ZERO, np.abs(percent) > TOL_ZERO) r = np.ones_like(mean) * 10 r[nz] = np.round(np.log10(np.abs(mean[nz] / percent[nz]))) - 1 running.append(r) - result.append({'mean': mean.tolist(), - 'percent': percent.tolist()}) + result.append({"mean": mean.tolist(), "percent": percent.tolist()}) - log.debug(f'\n\n{i}/{len(meshes) - 1}') - log.debug('mean', mean) - log.debug('percent', percent) - log.debug('oom', mean / percent) - log.debug('curun', running[-1]) - log.debug('minrun', np.min(running, axis=0)) - log.debug('meanrun', np.mean(running, axis=0)) + log.debug(f"\n\n{i}/{len(meshes) - 1}") + log.debug("mean", mean) + log.debug("percent", percent) + log.debug("oom", mean / percent) + log.debug("curun", running[-1]) + log.debug("minrun", np.min(running, axis=0)) + log.debug("meanrun", np.mean(running, axis=0)) # every loop dump everything # thrash- ey for sure but intermediate results are great - name_out = 'res.json' - with open(name_out, 'w') as file_obj: - json.dump(result, - file_obj, - indent=4) + name_out 
= "res.json" + with open(name_out, "w") as file_obj: + json.dump(result, file_obj, indent=4) diff --git a/tests/notebooks.py b/tests/notebooks.py index fdf0a8717..32f3ed30b 100644 --- a/tests/notebooks.py +++ b/tests/notebooks.py @@ -8,10 +8,9 @@ import numpy as np # current working directory -cwd = os.path.dirname(os.path.abspath( - inspect.getfile(inspect.currentframe()))) +cwd = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) -log = logging.getLogger('notebook') +log = logging.getLogger("notebook") def load_notebook(file_obj): @@ -32,15 +31,12 @@ def load_notebook(file_obj): Cleaned script which can be passed to exec """ raw = json.load(file_obj) - lines = np.hstack([i['source'] - for i in raw['cells'] if 'source' in i]) + lines = np.hstack([i["source"] for i in raw["cells"] if "source" in i]) script = exclude_calls(lines) return script -def exclude_calls( - lines, - exclude=None): +def exclude_calls(lines, exclude=None): """ Exclude certain calls based on substrings, replacing them with pass statements. @@ -58,21 +54,16 @@ def exclude_calls( Lines combined with newline """ if exclude is None: - exclude = ['%matplotlib', - '%pylab', - 'show', - 'plt', - 'save_image', - '?'] + exclude = ["%matplotlib", "%pylab", "show", "plt", "save_image", "?"] result = [] for line in lines: # skip lines that only have whitespace or comments strip = line.strip() - if len(strip) == 0 or strip.startswith('#'): + if len(strip) == 0 or strip.startswith("#"): continue # if the line has a blacklisted phrase switch it with a pass statement # we don't want to exclude function definitions however - if not strip.startswith('def ') and any(i in line for i in exclude): + if not strip.startswith("def ") and any(i in line for i in exclude): # switch statement with pass line_modified = to_pass(line) else: @@ -84,7 +75,7 @@ def exclude_calls( # append the modified line to the result result.append(line_modified) # recombine into string and add trailing newline - result = '\n'.join(result) + '\n' + result = "\n".join(result) + "\n" return result @@ -103,61 +94,57 @@ def to_pass(line): but code replaced with pass statement """ # the number of leading spaces on the line - spaces = len(line) - len(line.lstrip(' ')) + spaces = len(line) - len(line.lstrip(" ")) # replace statement with pass and correct leading spaces - passed = (' ' * spaces) + 'pass' + passed = (" " * spaces) + "pass" return passed -def render_notebook(file_name, out_name, nbconvert='jupyter'): +def render_notebook(file_name, out_name, nbconvert="jupyter"): """ Render an IPython notebook to an HTML file. """ out_name = os.path.abspath(out_name) file_name = os.path.abspath(file_name) - command = [nbconvert, - 'nbconvert', - '--execute', - '--to', - 'html', - file_name, - '--output', - out_name] + command = [ + nbconvert, + "nbconvert", + "--execute", + "--to", + "html", + file_name, + "--output", + out_name, + ] subprocess.check_call(command) -def render_examples(out_dir, in_dir=None, ext='ipynb'): +def render_examples(out_dir, in_dir=None, ext="ipynb"): """ Render all IPython notebooks in a directory to HTML. 
""" # replace with relative path if in_dir is None: - in_dir = os.path.abspath( - os.path.join(cwd, '../examples')) + in_dir = os.path.abspath(os.path.join(cwd, "../examples")) for file_name in os.listdir(in_dir): # check extension - split = file_name.split('.') + split = file_name.split(".") if split[-1] != ext: continue # full path of file nb_path = os.path.join(in_dir, file_name) - html_path = os.path.join(out_dir, - '.'.join(split[:-1]) + '.html') + html_path = os.path.join(out_dir, ".".join(split[:-1]) + ".html") render_notebook(nb_path, html_path) def main(): - # examples which we're not going to run in CI # widget.py opens a window and does a bunch of openGL stuff - ci_blacklist = ['widget.py', - 'voxel.py', - 'voxel_fillers.py', - 'voxel_silhouette.py'] + ci_blacklist = ["widget.py", "voxel.py", "voxel_fillers.py", "voxel_silhouette.py"] if "examples" in sys.argv: out_path = sys.argv[sys.argv.index("examples") + 1] @@ -166,36 +153,35 @@ def main(): # exec the script passed file_name = sys.argv[sys.argv.index("exec") + 1].strip() # we want to skip some of these examples in CI - if 'ci' in sys.argv and os.path.basename(file_name) in ci_blacklist: - log.debug(f'{file_name} in CI blacklist: skipping!') + if "ci" in sys.argv and os.path.basename(file_name) in ci_blacklist: + log.debug(f"{file_name} in CI blacklist: skipping!") return # skip files that don't exist if not os.path.exists(file_name): return - if file_name.lower().endswith('.ipynb'): + if file_name.lower().endswith(".ipynb"): # ipython notebooks with open(file_name) as file_obj: script = load_notebook(file_obj) - elif file_name.lower().endswith('.py'): + elif file_name.lower().endswith(".py"): # regular python files with open(file_name) as file_obj: - script = exclude_calls(file_obj.read().split('\n')) + script = exclude_calls(file_obj.read().split("\n")) else: # skip other types of files return - log.debug(f'running {file_name}') + log.debug(f"running {file_name}") try: exec(script, globals()) except BaseException as E: - log.debug( - f'failed {file_name}!\n\nscript was:\n{script}\n\n') + log.debug(f"failed {file_name}!\n\nscript was:\n{script}\n\n") raise E -if __name__ == '__main__': +if __name__ == "__main__": """ Load and run a notebook if a file name is passed. 
""" diff --git a/tests/regression.py b/tests/regression.py index 39dd57222..e554dec99 100644 --- a/tests/regression.py +++ b/tests/regression.py @@ -12,7 +12,7 @@ def typical_application(): meshes = g.get_meshes(raise_error=True) for mesh in meshes: - g.log.info('Testing %s', mesh.metadata['file_name']) + g.log.info("Testing %s", mesh.metadata["file_name"]) assert len(mesh.faces) > 0 assert len(mesh.vertices) > 0 @@ -35,8 +35,7 @@ def typical_application(): assert mesh.volume > 0.0 - section = mesh.section(plane_normal=[0, 0, 1], # NOQA - plane_origin=mesh.centroid) + section = mesh.section(plane_normal=[0, 0, 1], plane_origin=mesh.centroid) # NOQA sample = mesh.sample(1000) assert sample.shape == (1000, 3) @@ -68,29 +67,27 @@ def establish_baseline(counts=None): """ if counts is None: counts = [390, 3820, 1710] - setup = 'import numpy as np' + setup = "import numpy as np" # test a dot product with itself - dot = '\n'.join(('a = np.arange(3*10**3,dtype=np.float64).reshape((-1,3))', - 'b = np.dot(a, a.T)')) + dot = "\n".join( + ("a = np.arange(3*10**3,dtype=np.float64).reshape((-1,3))", "b = np.dot(a, a.T)") + ) # test a cross product - cross = '\n'.join( - ('a = np.arange(3*10**4,dtype=np.float64).reshape((-1,3))', - 'b = np.cross(a, a[::-1])')) + cross = "\n".join( + ( + "a = np.arange(3*10**4,dtype=np.float64).reshape((-1,3))", + "b = np.cross(a, a[::-1])", + ) + ) # try a list comprehension with some stuff in it - loop = '[i * 3.14 for i in np.arange(10**3) if i % 7 == 0]' + loop = "[i * 3.14 for i in np.arange(10**3) if i % 7 == 0]" times = {} - times['dot'] = min(timeit.repeat(dot, - setup, - number=counts[0])) - times['cross'] = min(timeit.repeat(cross, - setup, - number=counts[1])) - times['loop'] = min(timeit.repeat(loop, - setup, - number=counts[2])) + times["dot"] = min(timeit.repeat(dot, setup, number=counts[0])) + times["cross"] = min(timeit.repeat(cross, setup, number=counts[1])) + times["loop"] = min(timeit.repeat(loop, setup, number=counts[2])) return times @@ -105,16 +102,16 @@ def machine_info(): Contains information about machine """ import psutil + info = {} - info['cpu_count'] = psutil.cpu_count() + info["cpu_count"] = psutil.cpu_count() return info -if __name__ == '__main__': - +if __name__ == "__main__": info = machine_info() - info['baseline'] = establish_baseline() + info["baseline"] = establish_baseline() import pyinstrument diff --git a/tests/test_3dxml.py b/tests/test_3dxml.py index 495861c7f..9440ac102 100644 --- a/tests/test_3dxml.py +++ b/tests/test_3dxml.py @@ -7,27 +7,26 @@ class DXMLTest(g.unittest.TestCase): - def test_abaqus_texture(self): # an assembly with instancing - s = g.get_mesh('cube1.3dxml') + s = g.get_mesh("cube1.3dxml") # should be 1 unique meshes assert len(s.geometry) == 1 - v = s.geometry['Abaqus_Geometry'].visual - assert v.kind == 'texture' + v = s.geometry["Abaqus_Geometry"].visual + assert v.kind == "texture" assert len(np.unique(v.to_color().vertex_colors, axis=0)) == 4 def test_abaqus_blocks(self): # an assembly with two Faces elements of different color - s = g.get_mesh('blocks.3dxml') - assert g.np.isclose(s.volume, 18000, atol = 1.0) - v = s.geometry['Abaqus_Geometry'].visual - assert v.kind == 'face' + s = g.get_mesh("blocks.3dxml") + assert g.np.isclose(s.volume, 18000, atol=1.0) + v = s.geometry["Abaqus_Geometry"].visual + assert v.kind == "face" assert len(np.unique(v.face_colors, axis=0)) == 2 -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_3mf.py 
b/tests/test_3mf.py
index eaa07b970..1604b23dc 100644
--- a/tests/test_3mf.py
+++ b/tests/test_3mf.py
@@ -5,10 +5,9 @@


 class MFTest(g.unittest.TestCase):
-
     def test_3MF(self):
         # an assembly with instancing
-        s = g.get_mesh('counterXP.3MF')
+        s = g.get_mesh("counterXP.3MF")

         # should be 2 unique meshes
         assert len(s.geometry) == 2
@@ -17,7 +16,7 @@ def test_3MF(self):
         assert all(m.is_volume for m in s.geometry.values())

         # a single body 3MF assembly
-        s = g.get_mesh('featuretype.3MF')
+        s = g.get_mesh("featuretype.3MF")
         # should be 1 unique mesh
         assert len(s.geometry) == 1
         # should be 6 instances around the scene
@@ -27,46 +26,45 @@ def test_units(self):
         # test our unit conversion function
         converter = g.trimesh.units.unit_conversion
         # these are the units listed in the 3MF spec as valid
-        units = ['micron', 'millimeter',
-                 'centimeter', 'inch', 'foot', 'meter']
+        units = ["micron", "millimeter", "centimeter", "inch", "foot", "meter"]
         # check conversion factor for all valid 3MF units
-        assert all(converter(u, 'inches') > 1e-12 for u in units)
+        assert all(converter(u, "inches") > 1e-12 for u in units)

     def test_kwargs(self):
         # check if kwargs are properly passed to geometries
-        s = g.get_mesh('P_XPM_0331_01.3mf')
+        s = g.get_mesh("P_XPM_0331_01.3mf")
         assert all(len(v.vertices) == 4 for v in s.geometry.values())

-        s = g.get_mesh('P_XPM_0331_01.3mf', process=False)
+        s = g.get_mesh("P_XPM_0331_01.3mf", process=False)
         assert all(len(v.vertices) == 5 for v in s.geometry.values())

     def test_names(self):
         # check if two different objects with the same name are correctly
         # processed
-        s = g.get_mesh('cube_and_sphere_same_name.3mf')
+        s = g.get_mesh("cube_and_sphere_same_name.3mf")
         assert len(s.geometry) == 2

     def test_roundtrip(self):
         if g.sys.version_info < (3, 6):
-            g.log.warning('relies on > Python 3.5')
+            g.log.warning("relies on > Python 3.5")
             return

         # test a scene round-tripped through the
         # 3MF exporter and importer
-        s = g.get_mesh('cycloidal.3DXML')
+        s = g.get_mesh("cycloidal.3DXML")
         assert len(s.geometry) == 13
         # export and reload
         r = g.trimesh.load(
-            file_obj=g.trimesh.util.wrap_as_stream(
-                s.export(file_type='3mf')),
-            file_type='3mf')
+            file_obj=g.trimesh.util.wrap_as_stream(s.export(file_type="3mf")),
+            file_type="3mf",
+        )
         assert set(s.geometry.keys()) == set(r.geometry.keys())
         assert g.np.allclose(s.bounds, r.bounds)
         assert g.np.isclose(s.area, r.area, rtol=1e-3)


-if __name__ == '__main__':
+if __name__ == "__main__":
     g.trimesh.util.attach_to_log()
     g.unittest.main()
diff --git a/tests/test_adjacency.py b/tests/test_adjacency.py
index a9f370ab6..0ddd306c9 100644
--- a/tests/test_adjacency.py
+++ b/tests/test_adjacency.py
@@ -5,17 +5,13 @@


 class AdjacencyTest(g.unittest.TestCase):
-
     def test_radius(self):
-
         for radius in [0.1, 1.0, 3.1459, 29.20]:
-            m = g.trimesh.creation.cylinder(
-                radius=radius, height=radius * 10)
+            m = g.trimesh.creation.cylinder(radius=radius, height=radius * 10)

             # remove the cylinder cap
             signs = (g.np.sign(m.vertices[:, 2]) < 0)[m.faces]
-            not_cap = ~g.np.logical_or(
-                signs.all(axis=1), ~signs.any(axis=1))
+            not_cap = ~g.np.logical_or(signs.all(axis=1), ~signs.any(axis=1))
             m.update_faces(not_cap)

             # compare the calculated radius
@@ -25,6 +21,6 @@ def test_radius(self):
             assert g.np.allclose(radii, radius, atol=radius / 100)


-if __name__ == '__main__':
+if __name__ == "__main__":
     g.trimesh.util.attach_to_log()
     g.unittest.main()
diff --git a/tests/test_align.py b/tests/test_align.py
index 8fa42fb82..ade85172d 100644
--- a/tests/test_align.py
+++ b/tests/test_align.py
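The test_roundtrip hunk above exercises trimesh's in-memory 3MF round trip; outside the test harness the same pattern looks roughly like this. A minimal sketch assuming only the public trimesh API already shown in this patch; the model path is a placeholder for any assembly in the trimesh test corpus:

import numpy as np
import trimesh

# load a multi-geometry scene; path is a placeholder
scene = trimesh.load("models/cycloidal.3DXML")

# export to 3MF bytes, then re-import from an in-memory stream
data = scene.export(file_type="3mf")
reloaded = trimesh.load(
    file_obj=trimesh.util.wrap_as_stream(data), file_type="3mf"
)

# geometry names and overall bounds should survive the round trip
assert set(scene.geometry.keys()) == set(reloaded.geometry.keys())
assert np.allclose(scene.bounds, reloaded.bounds)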
@@ -7,7 +7,6 @@


 class AlignTests(g.unittest.TestCase):
-
     def test_align(self):
         """
         Test aligning two 3D vectors
@@ -19,14 +18,19 @@ def test_align(self):
         # start with some edge cases and make sure the transform works
         target = g.np.array([0, 0, -1], dtype=g.np.float64)
-        vectors = g.np.vstack((
-            g.trimesh.unitize(g.random((1000, 3)) - .5),
-            g.random((1000, 3)) - .5,
-            [-target, target],
-            g.trimesh.util.generate_basis(target),
-            [[7.12106798e-07, -7.43194705e-08, 1.00000000e+00],
-             [0, 0, -1],
-             [1e-4, 1e-4, -1]]))
+        vectors = g.np.vstack(
+            (
+                g.trimesh.unitize(g.random((1000, 3)) - 0.5),
+                g.random((1000, 3)) - 0.5,
+                [-target, target],
+                g.trimesh.util.generate_basis(target),
+                [
+                    [7.12106798e-07, -7.43194705e-08, 1.00000000e00],
+                    [0, 0, -1],
+                    [1e-4, 1e-4, -1],
+                ],
+            )
+        )

         # collect errors
         norms = []
@@ -45,15 +49,18 @@ def test_align(self):
         norms = g.np.array(norms)
         g.log.debug(
-            'vector error after transform:\n' +
-            'err.ptp: {}\nerr.std: {}\nerr.mean: {}\nerr.median: {}'.format(
-                norms.ptp(), norms.std(), norms.mean(), g.np.median(norms)))
+            "vector error after transform:\n"
+            + "err.ptp: {}\nerr.std: {}\nerr.mean: {}\nerr.median: {}".format(
+                norms.ptp(), norms.std(), norms.mean(), g.np.median(norms)
+            )
+        )

         # these vectors should be perpendicular and zero
-        angles = [align(i, target, return_angle=True)[1]
-                  for i in g.trimesh.util.generate_basis(target)]
-        assert g.np.allclose(
-            angles, [g.np.pi / 2, g.np.pi / 2, 0.0])
+        angles = [
+            align(i, target, return_angle=True)[1]
+            for i in g.trimesh.util.generate_basis(target)
+        ]
+        assert g.np.allclose(angles, [g.np.pi / 2, g.np.pi / 2, 0.0])

     def test_range(self):
         # function we're testing
@@ -63,13 +70,12 @@ def test_range(self):
         # generate tiny angles from 0 to pi / 1e7 radians
         angles = g.np.linspace(0.0, g.np.pi / 1e7, 10000)
         # generate on-plane vectors
-        vectors = g.np.column_stack((g.np.cos(angles),
-                                     g.np.sin(angles),
-                                     g.np.zeros(len(angles))))
+        vectors = g.np.column_stack(
+            (g.np.cos(angles), g.np.sin(angles), g.np.zeros(len(angles)))
+        )

         # rotate them arbitrarily off the plane just for funsies
-        vectors = g.trimesh.transform_points(
-            vectors, g.transforms[20])
+        vectors = g.trimesh.transform_points(vectors, g.transforms[20])

         for angle, vector in zip(angles, vectors):
             g.trimesh.util.generate_basis(vector)
@@ -96,13 +102,12 @@ def test_rigid(self):
         T = align([0, 0, -1], [-1e-4, 1e-4, 1])
         assert g.np.isclose(g.np.linalg.det(T), 1.0)

-        vector_1 = g.np.array(
-            [7.12106798e-07, -7.43194705e-08, 1.00000000e+00])
+        vector_1 = g.np.array([7.12106798e-07, -7.43194705e-08, 1.00000000e00])
         vector_2 = g.np.array([0, 0, -1])
         T, angle = align(vector_1, vector_2, return_angle=True)
         assert g.np.isclose(g.np.linalg.det(T), 1.0)


-if __name__ == '__main__':
+if __name__ == "__main__":
     g.trimesh.util.attach_to_log()
     g.unittest.main()
diff --git a/tests/test_arc.py b/tests/test_arc.py
index 7e5420364..2eac5922f 100644
--- a/tests/test_arc.py
+++ b/tests/test_arc.py
@@ -5,90 +5,92 @@


 class ArcTests(g.unittest.TestCase):
-
     def test_center(self):
-
         from trimesh.path.arc import arc_center
+
         test_points = [[[0, 0], [1.0, 1], [2, 0]]]
         test_results = [[[1, 0], 1.0]]

         points = test_points[0]
         res_center, res_radius = test_results[0]
         center_info = arc_center(points)
-        C, R, N, angle = (center_info['center'],  # NOQA
-                          center_info['radius'],
-                          center_info['normal'],
-                          center_info['span'])
+        C, R, N, angle = (
+            center_info["center"],
+            center_info["radius"],
+            center_info["normal"],
+            center_info["span"],
+        )

         assert abs(R - res_radius) < g.tol_path.zero
assert g.trimesh.util.euclidean(C, res_center) < g.tol_path.zero # large magnitude arc failed some coplanar tests c = g.trimesh.path.arc.arc_center( - [[30156.18, 1673.64, -2914.56], - [30152.91, 1780.09, -2885.51], - [30148.3, 1875.81, -2857.79]]) + [ + [30156.18, 1673.64, -2914.56], + [30152.91, 1780.09, -2885.51], + [30148.3, 1875.81, -2857.79], + ] + ) assert len(c.center) == 3 def test_center_random(self): from trimesh.path.arc import arc_center + # Test that arc centers work on well formed random points in 2D and 3D min_angle = g.np.radians(2) count = 1000 - center_3D = (g.random((count, 3)) - .5) * 50 + center_3D = (g.random((count, 3)) - 0.5) * 50 center_2D = center_3D[:, 0:2] radii = g.np.clip(g.random(count) * 100, min_angle, g.np.inf) - angles = g.random((count, 2)) * \ - (g.np.pi - min_angle) + min_angle - angles = g.np.column_stack((g.np.zeros(count), - g.np.cumsum(angles, axis=1))) - - points_2D = g.np.column_stack((g.np.cos(angles[:, 0]), - g.np.sin(angles[:, 0]), - g.np.cos(angles[:, 1]), - g.np.sin(angles[:, 1]), - g.np.cos(angles[:, 2]), - g.np.sin(angles[:, 2]))).reshape((-1, 6)) + angles = g.random((count, 2)) * (g.np.pi - min_angle) + min_angle + angles = g.np.column_stack((g.np.zeros(count), g.np.cumsum(angles, axis=1))) + + points_2D = g.np.column_stack( + ( + g.np.cos(angles[:, 0]), + g.np.sin(angles[:, 0]), + g.np.cos(angles[:, 1]), + g.np.sin(angles[:, 1]), + g.np.cos(angles[:, 2]), + g.np.sin(angles[:, 2]), + ) + ).reshape((-1, 6)) points_2D *= radii.reshape((-1, 1)) points_2D += g.np.tile(center_2D, (1, 3)) points_2D = points_2D.reshape((-1, 3, 2)) - points_3D = g.np.column_stack(( - points_2D.reshape((-1, 2)), - g.np.tile(center_3D[:, 2].reshape((-1, 1)), - (1, 3)).reshape(-1))).reshape((-1, 3, 3)) - for center, radius, three in zip(center_2D, - radii, - points_2D): + points_3D = g.np.column_stack( + ( + points_2D.reshape((-1, 2)), + g.np.tile(center_3D[:, 2].reshape((-1, 1)), (1, 3)).reshape(-1), + ) + ).reshape((-1, 3, 3)) + for center, radius, three in zip(center_2D, radii, points_2D): info = arc_center(three) - assert g.np.allclose(center, info['center']) - assert g.np.allclose(radius, info['radius']) + assert g.np.allclose(center, info["center"]) + assert g.np.allclose(radius, info["radius"]) - for center, radius, three in zip(center_3D, - radii, - points_3D): + for center, radius, three in zip(center_3D, radii, points_3D): transform = g.trimesh.transformations.random_rotation_matrix() - center = g.trimesh.transformations.transform_points([center], transform)[ - 0] - three = g.trimesh.transformations.transform_points( - three, transform) + center = g.trimesh.transformations.transform_points([center], transform)[0] + three = g.trimesh.transformations.transform_points(three, transform) info = arc_center(three) - assert g.np.allclose(center, info['center']) - assert g.np.allclose(radius, info['radius']) + assert g.np.allclose(center, info["center"]) + assert g.np.allclose(radius, info["radius"]) def test_multiroot(self): """ Test a Path2D object containing polygons nested in the interiors of other polygons. 
""" - inner = g.trimesh.creation.annulus(r_min=.5, r_max=0.6, height=1.0) - outer = g.trimesh.creation.annulus(r_min=.9, r_max=1.0, height=1.0) + inner = g.trimesh.creation.annulus(r_min=0.5, r_max=0.6, height=1.0) + outer = g.trimesh.creation.annulus(r_min=0.9, r_max=1.0, height=1.0) m = inner + outer - s = m.section(plane_normal=[0, 0, 1], - plane_origin=[0, 0, 0]) + s = m.section(plane_normal=[0, 0, 1], plane_origin=[0, 0, 0]) p = s.to_planar()[0] assert len(p.polygons_closed) == 4 @@ -97,6 +99,6 @@ def test_multiroot(self): g.check_path2D(p) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_assimp.py b/tests/test_assimp.py index 50175d3bb..e8cbab63e 100644 --- a/tests/test_assimp.py +++ b/tests/test_assimp.py @@ -5,7 +5,6 @@ class AssimpTest(g.unittest.TestCase): - def test_duck(self): # load the duck using pyassimp try: @@ -13,10 +12,9 @@ def test_duck(self): except BaseException: return - file_path = g.os.path.join(g.dir_models, 'duck.dae') - with open(file_path, 'rb') as f: - kwargs = g.trimesh.exchange.assimp.load_pyassimp( - f, file_type='dae') + file_path = g.os.path.join(g.dir_models, "duck.dae") + with open(file_path, "rb") as f: + kwargs = g.trimesh.exchange.assimp.load_pyassimp(f, file_type="dae") scene = g.trimesh.exchange.load.load_kwargs(kwargs) @@ -24,6 +22,6 @@ def test_duck(self): assert len(scene.graph.nodes_geometry) == 1 -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_base.py b/tests/test_base.py index 11ced68ce..15aef53ca 100644 --- a/tests/test_base.py +++ b/tests/test_base.py @@ -8,7 +8,6 @@ class MeshTests(g.unittest.TestCase): - def test_vertex_neighbors(self): m = g.trimesh.primitives.Box() neighbors = m.vertex_neighbors @@ -17,13 +16,13 @@ def test_vertex_neighbors(self): for v_i, neighs in enumerate(neighbors): for n in neighs: - assert ([v_i, n] in elist or [n, v_i] in elist) + assert [v_i, n] in elist or [n, v_i] in elist def test_validate(self): """ Make sure meshes with validation work """ - m = g.get_mesh('featuretype.STL', validate=True) + m = g.get_mesh("featuretype.STL", validate=True) assert m.is_volume @@ -64,22 +63,22 @@ def test_none(self): """ # a radially symmetric mesh with units # should have no properties that are None - mesh = g.get_mesh('tube.obj') - mesh.units = 'in' + mesh = g.get_mesh("tube.obj") + mesh.units = "in" # loop through string property names for method in dir(mesh): # ignore private- ish methods - if method.startswith('_'): + if method.startswith("_"): continue # a string expression to evaluate - expr = f'mesh.{method}' + expr = f"mesh.{method}" try: # get the value of that expression res = eval(expr) except ImportError: - g.log.warning('unable to import!', exc_info=True) + g.log.warning("unable to import!", exc_info=True) continue # shouldn't be None! @@ -88,13 +87,13 @@ def test_none(self): # check methods in scene objects scene = mesh.scene() # camera will be None unless set - blacklist = ['camera'] + blacklist = ["camera"] for method in dir(scene): # ignore private- ish methods - if method.startswith('_') or method in blacklist: + if method.startswith("_") or method in blacklist: continue # a string expression to evaluate - expr = f'scene.{method}' + expr = f"scene.{method}" # get the value of that expression res = eval(expr) # shouldn't be None! 
@@ -102,6 +101,6 @@ def test_none(self): raise ValueError(f'"{expr}" is None!!') -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_binvox.py b/tests/test_binvox.py index dc19b3392..c2bbae568 100644 --- a/tests/test_binvox.py +++ b/tests/test_binvox.py @@ -19,16 +19,15 @@ def test_load_save_invariance(self): shape = dense.shape rl_data = rl.dense_to_rle(dense.flatten(), dtype=np.uint8) translate = np.array([2, 5, 10], dtype=np.float32) - scale = 5. + scale = 5.0 base = binvox.voxel_from_binvox( - rl_data, shape, translate, scale, axis_order='xzy') + rl_data, shape, translate, scale, axis_order="xzy" + ) s = scale / (n - 1) - np.testing.assert_equal(base.transform, np.array([ - [s, 0, 0, 2], - [0, s, 0, 5], - [0, 0, s, 10], - [0, 0, 0, 1] - ])) + np.testing.assert_equal( + base.transform, + np.array([[s, 0, 0, 2], [0, s, 0, 5], [0, 0, s, 10], [0, 0, 0, 1]]), + ) dense = dense.transpose((0, 2, 1)) bound_min = translate - 0.5 * s bound_max = translate + scale + 0.5 * s @@ -36,8 +35,7 @@ def test_load_save_invariance(self): np.testing.assert_equal(base.encoding.dense, dense) if binvox.binvox_encoder is None: - g.log.warning( - 'No binvox encoder found, skipping binvox export tests') + g.log.warning("No binvox encoder found, skipping binvox export tests") return file_obj = BytesIO(binvox.export_binvox(base)) @@ -50,6 +48,6 @@ def test_load_save_invariance(self): np.testing.assert_equal(base.shape, loaded.shape) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_boolean.py b/tests/test_boolean.py index d20dfb1d3..ea77391c0 100644 --- a/tests/test_boolean.py +++ b/tests/test_boolean.py @@ -5,83 +5,78 @@ class BooleanTest(g.unittest.TestCase): - def setUp(self): - self.a = g.get_mesh('ballA.off') - self.b = g.get_mesh('ballB.off') - self.truth = g.data['boolean'] + self.a = g.get_mesh("ballA.off") + self.b = g.get_mesh("ballB.off") + self.truth = g.data["boolean"] def is_zero(self, value): - return abs(value) < .001 + return abs(value) < 0.001 def test_boolean(self): a, b = self.a, self.b - engines = [('blender', g.trimesh.interfaces.blender.exists), - ('scad', g.trimesh.interfaces.scad.exists)] + engines = [ + ("blender", g.trimesh.interfaces.blender.exists), + ("scad", g.trimesh.interfaces.scad.exists), + ] for engine, exists in engines: # if we have all_dep set it means we should fail if # engine is not installed so don't continue if not exists: - g.log.warning('skipping boolean engine %s', engine) + g.log.warning("skipping boolean engine %s", engine) continue - g.log.info('Testing boolean ops with engine %s', engine) + g.log.info("Testing boolean ops with engine %s", engine) ab = a.difference(b, engine=engine) assert ab.is_volume - assert self.is_zero( - ab.volume - self.truth['difference']) + assert self.is_zero(ab.volume - self.truth["difference"]) - assert g.np.allclose( - ab.bounds[0], - a.bounds[0]) + assert g.np.allclose(ab.bounds[0], a.bounds[0]) ba = b.difference(a, engine=engine) assert ba.is_volume - assert self.is_zero( - ba.volume - self.truth['difference']) + assert self.is_zero(ba.volume - self.truth["difference"]) - assert g.np.allclose( - ba.bounds[1], - b.bounds[1]) + assert g.np.allclose(ba.bounds[1], b.bounds[1]) i = a.intersection(b, engine=engine) assert i.is_volume - assert self.is_zero( - i.volume - self.truth['intersection']) + assert self.is_zero(i.volume - self.truth["intersection"]) u = a.union(b, engine=engine) 
assert u.is_volume - assert self.is_zero(u.volume - self.truth['union']) + assert self.is_zero(u.volume - self.truth["union"]) - g.log.info('booleans succeeded with %s', engine) + g.log.info("booleans succeeded with %s", engine) def test_multiple(self): """ Make sure boolean operations work on multiple meshes. """ engines = [ - ('blender', g.trimesh.interfaces.blender.exists), - ('scad', g.trimesh.interfaces.scad.exists)] + ("blender", g.trimesh.interfaces.blender.exists), + ("scad", g.trimesh.interfaces.scad.exists), + ] for _engine, exists in engines: if not exists: continue a = g.trimesh.primitives.Sphere(center=[0, 0, 0]) - b = g.trimesh.primitives.Sphere(center=[0, 0, .75]) + b = g.trimesh.primitives.Sphere(center=[0, 0, 0.75]) c = g.trimesh.primitives.Sphere(center=[0, 0, 1.5]) r = g.trimesh.boolean.union([a, b, c]) assert r.is_volume assert r.body_count == 1 - assert g.np.isclose(r.volume, - 8.617306056726884) + assert g.np.isclose(r.volume, 8.617306056726884) def test_empty(self): engines = [ - ('blender', g.trimesh.interfaces.blender.exists), - ('scad', g.trimesh.interfaces.scad.exists)] + ("blender", g.trimesh.interfaces.blender.exists), + ("scad", g.trimesh.interfaces.scad.exists), + ] for engine, exists in engines: if not exists: continue @@ -94,6 +89,6 @@ def test_empty(self): assert i.is_empty -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_bounds.py b/tests/test_bounds.py index 6e9b1b6c4..cf78d76b2 100644 --- a/tests/test_bounds.py +++ b/tests/test_bounds.py @@ -5,10 +5,8 @@ class BoundsTest(g.unittest.TestCase): - def setUp(self): - meshes = [g.get_mesh(i) for i in ['large_block.STL', - 'featuretype.STL']] + meshes = [g.get_mesh(i) for i in ["large_block.STL", "featuretype.STL"]] self.meshes = g.np.append(meshes, list(g.get_meshes(5))) def test_obb_mesh(self): @@ -16,7 +14,7 @@ def test_obb_mesh(self): Test the OBB functionality in attributes of Trimesh objects """ for m in self.meshes: - g.log.info('Testing OBB of %s', m.metadata['file_name']) + g.log.info("Testing OBB of %s", m.metadata["file_name"]) for i in range(6): # on the first run through don't transform the points to see # if we succeed in the meshes original orientation @@ -37,8 +35,7 @@ def test_obb_mesh(self): test = m.bounds / (box_ext / 2.0) test_ok = g.np.allclose(test, [[-1, -1, -1], [1, 1, 1]]) if not test_ok: - g.log.error('bounds test failed %s', - str(test)) + g.log.error("bounds test failed %s", str(test)) assert test_ok m.apply_transform(matrix) @@ -46,18 +43,23 @@ def test_obb_mesh(self): # after applying the obb, the extents of the AABB # should be the same as the OBB - close = g.np.allclose(m.bounding_box.extents, - m.bounding_box_oriented.extents, - rtol=1e-3, - atol=1e-3) + close = g.np.allclose( + m.bounding_box.extents, + m.bounding_box_oriented.extents, + rtol=1e-3, + atol=1e-3, + ) if not close: # m.visual.face_colors = [200, 0, 0, 100] # (m + m.bounding_box_oriented).show() # from IPython import embed # embed() - raise ValueError('OBB extents incorrect:\n{}\n{}'.format( - str(m.bounding_box.extents), - str(m.bounding_box_oriented.extents))) + raise ValueError( + "OBB extents incorrect:\n{}\n{}".format( + str(m.bounding_box.extents), + str(m.bounding_box_oriented.extents), + ) + ) c = m.bounding_cylinder # NOQA s = m.bounding_sphere # NOQA @@ -72,22 +74,19 @@ def test_obb_points(self): points = g.random((10, dimension)) to_origin, extents = g.trimesh.bounds.oriented_bounds(points) - assert 
g.trimesh.util.is_shape(to_origin, - (dimension + 1, dimension + 1)) + assert g.trimesh.util.is_shape(to_origin, (dimension + 1, dimension + 1)) assert g.trimesh.util.is_shape(extents, (dimension,)) transformed = g.trimesh.transform_points(points, to_origin) - transformed_bounds = [transformed.min(axis=0), - transformed.max(axis=0)] + transformed_bounds = [transformed.min(axis=0), transformed.max(axis=0)] for j in transformed_bounds: # assert that the points once our obb to_origin transform is applied # has a bounding box centered on the origin assert g.np.allclose(g.np.abs(j), extents / 2.0) - extents_tf = g.np.diff( - transformed_bounds, axis=0).reshape(dimension) + extents_tf = g.np.diff(transformed_bounds, axis=0).reshape(dimension) assert g.np.allclose(extents_tf, extents) def test_obb_coplanar_points(self): @@ -106,8 +105,7 @@ def test_obb_coplanar_points(self): transformed = g.trimesh.transform_points(points, to_origin) - transformed_bounds = [transformed.min(axis=0), - transformed.max(axis=0)] + transformed_bounds = [transformed.min(axis=0), transformed.max(axis=0)] for j in transformed_bounds: # assert that the points once our obb to_origin transform is applied @@ -152,34 +150,27 @@ def test_cylinder(self): Check bounding cylinders on basically a cuboid """ # not rotationally symmetric - mesh = g.get_mesh('featuretype.STL') + mesh = g.get_mesh("featuretype.STL") height = 10.0 radius = 1.0 # spherical coordinates to loop through - sphere = g.trimesh.util.grid_linspace( - [[0, 0], [g.np.pi * 2, g.np.pi * 2]], 5) + sphere = g.trimesh.util.grid_linspace([[0, 0], [g.np.pi * 2, g.np.pi * 2]], 5) for s in sphere: T = g.trimesh.transformations.spherical_matrix(*s) - p = g.trimesh.creation.cylinder(radius=radius, - height=height, - transform=T) - assert g.np.isclose(radius, - p.bounding_cylinder.primitive.radius, - rtol=.01) - assert g.np.isclose(height, - p.bounding_cylinder.primitive.height, - rtol=.01) + p = g.trimesh.creation.cylinder(radius=radius, height=height, transform=T) + assert g.np.isclose(radius, p.bounding_cylinder.primitive.radius, rtol=0.01) + assert g.np.isclose(height, p.bounding_cylinder.primitive.height, rtol=0.01) # regular mesh should have the same bounding cylinder # regardless of transform copied = mesh.copy() copied.apply_transform(T) - assert g.np.isclose(mesh.bounding_cylinder.volume, - copied.bounding_cylinder.volume, - rtol=.05) + assert g.np.isclose( + mesh.bounding_cylinder.volume, copied.bounding_cylinder.volume, rtol=0.05 + ) def test_random_cylinder(self): """ @@ -187,19 +178,15 @@ def test_random_cylinder(self): """ for _i in range(20): # create a random cylinder - c = g.trimesh.creation.cylinder( - radius=1.0, height=10).permutate.transform() + c = g.trimesh.creation.cylinder(radius=1.0, height=10).permutate.transform() # bounding primitive should have same height and radius - assert g.np.isclose( - c.bounding_cylinder.primitive.height, 10, rtol=1e-6) - assert g.np.isclose( - c.bounding_cylinder.primitive.radius, 1, rtol=1e-6) + assert g.np.isclose(c.bounding_cylinder.primitive.height, 10, rtol=1e-6) + assert g.np.isclose(c.bounding_cylinder.primitive.radius, 1, rtol=1e-6) # mesh is a cylinder, so center mass of bounding cylinder # should be exactly the same as the mesh center mass assert g.np.allclose( - c.center_mass, - c.bounding_cylinder.center_mass, - rtol=1e-6) + c.center_mass, c.bounding_cylinder.center_mass, rtol=1e-6 + ) def test_bounding_egg(self): # create a distorted sphere mesh @@ -210,15 +197,13 @@ def test_bounding_egg(self): # get a copy 
with a random transform p = i.permutate.transform() - assert p.symmetry == 'radial' + assert p.symmetry == "radial" # find the bounding cylinder with this random transform r = p.bounding_cylinder # transformed height should match source mesh - assert g.np.isclose(i.vertices[:, 2].ptp(), - r.primitive.height, - rtol=1e-6) + assert g.np.isclose(i.vertices[:, 2].ptp(), r.primitive.height, rtol=1e-6) # slightly inflated cylinder should contain all # vertices of the source mesh assert r.buffer(0.01).contains(p.vertices).all() @@ -234,37 +219,32 @@ def test_obb_order(self): # transform box randomly in rotation and translation mat = g.trimesh.transformations.random_rotation_matrix() # translate in box -100 : +100 - mat[:3, 3] = (g.random(3) - .5) * 200 + mat[:3, 3] = (g.random(3) - 0.5) * 200 # source mesh to check - b = g.trimesh.creation.box(extents=extents, - transform=mat) + b = g.trimesh.creation.box(extents=extents, transform=mat) # calculated OBB primitive obb = b.bounding_box_oriented # make sure extents returned were ordered - assert g.np.allclose(obb.primitive.extents, - extents_ordered) + assert g.np.allclose(obb.primitive.extents, extents_ordered) # make sure mesh isn't reversing windings - assert g.np.isclose(obb.to_mesh().volume, - g.np.prod(extents)) + assert g.np.isclose(obb.to_mesh().volume, g.np.prod(extents)) # make sure OBB has the same bounds as the source mesh # since it is a box the AABB of the OBB should be # the same as the AABB of the source mesh (lol) - assert g.np.allclose(obb.bounds, - b.bounds) + assert g.np.allclose(obb.bounds, b.bounds) # unordered extents and transforms - transform, extents = g.trimesh.bounds.oriented_bounds( - b, ordered=False) - assert g.np.allclose(g.np.sort(extents), - extents_ordered) + transform, extents = g.trimesh.bounds.oriented_bounds(b, ordered=False) + assert g.np.allclose(g.np.sort(extents), extents_ordered) # create a box from the unordered OBB information box = g.trimesh.creation.box( - extents=extents, transform=g.np.linalg.inv(transform)) + extents=extents, transform=g.np.linalg.inv(transform) + ) # make sure it is a real OBB too assert g.np.allclose(box.bounds, b.bounds) @@ -272,26 +252,27 @@ def test_bounds_tree(self): # test r-tree intersections for dimension in (2, 3): # create some (n, 2, 3) bounds - bounds = g.np.array([[i.min(axis=0), i.max(axis=0)] - for i in - [g.random((4, dimension)) - for i in range(10)]]) + bounds = g.np.array( + [ + [i.min(axis=0), i.max(axis=0)] + for i in [g.random((4, dimension)) for i in range(10)] + ] + ) tree = g.trimesh.util.bounds_tree(bounds) for i, b in enumerate(bounds): assert i in set(tree.intersection(b.ravel())) # construct tree with per-row bounds - tree = g.trimesh.util.bounds_tree( - bounds.reshape((-1, dimension * 2))) + tree = g.trimesh.util.bounds_tree(bounds.reshape((-1, dimension * 2))) for i, b in enumerate(bounds): assert i in set(tree.intersection(b.ravel())) def test_obb_corpus(self): # get some sample watertight meshes with nonzero volume min_volume = 0.1 - meshes = list(g.get_meshes(split=True, - min_volume=min_volume, - only_watertight=True)) - g.log.debug(f'loaded {len(meshes)} meshes') + meshes = list( + g.get_meshes(split=True, min_volume=min_volume, only_watertight=True) + ) + g.log.debug(f"loaded {len(meshes)} meshes") if g.PY3: # our models corpus should have 200+ models @@ -309,8 +290,7 @@ def test_obb_corpus(self): # now loop through mesh-obb pairs and validate for m, o in zip(meshes, obb): # move the mesh into the OBB frame - check = m.copy().apply_transform( - 
g.np.linalg.inv(o.primitive.transform)) + check = m.copy().apply_transform(g.np.linalg.inv(o.primitive.transform)) # check the mesh bounds against the claimed OBB bounds half = o.primitive.extents / 2.0 check_extents = g.np.array([-half, half]) @@ -318,6 +298,6 @@ def test_obb_corpus(self): assert g.np.allclose(check.bounds, check_extents, rtol=1e-4) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_camera.py b/tests/test_camera.py index 41a50125b..bafec8738 100644 --- a/tests/test_camera.py +++ b/tests/test_camera.py @@ -7,19 +7,15 @@ class CameraTests(g.unittest.TestCase): - def test_K(self): resolution = (320, 240) fov = (60, 40) - camera = g.trimesh.scene.Camera( - resolution=resolution, - fov=fov) + camera = g.trimesh.scene.Camera(resolution=resolution, fov=fov) # ground truth matrix - K_expected = np.array([[277.128, 0, 160], - [0, 329.697, 120], - [0, 0, 1]], - dtype=np.float64) + K_expected = np.array( + [[277.128, 0, 160], [0, 329.697, 120], [0, 0, 1]], dtype=np.float64 + ) assert np.allclose(camera.K, K_expected, rtol=1e-3) @@ -33,15 +29,11 @@ def test_consistency(self): resolution = (320, 240) focal = None fov = (60, 40) - camera = g.trimesh.scene.Camera( - resolution=resolution, - focal=focal, - fov=fov) + camera = g.trimesh.scene.Camera(resolution=resolution, focal=focal, fov=fov) assert np.allclose(camera.fov, fov) camera = g.trimesh.scene.Camera( - resolution=resolution, - focal=camera.focal, - fov=None) + resolution=resolution, focal=camera.focal, fov=None + ) assert np.allclose(camera.fov, fov) def test_focal_updates_on_resolution_change(self): @@ -53,21 +45,16 @@ def test_focal_updates_on_resolution_change(self): fov = (60, 40) # start with initial data - base_cam = g.trimesh.scene.Camera( - resolution=base_res, - fov=fov) + base_cam = g.trimesh.scene.Camera(resolution=base_res, fov=fov) # update both focal length and resolution base_focal = base_cam.focal base_cam.resolution = updated_res - assert not g.np.allclose(base_cam.focal, - base_focal) + assert not g.np.allclose(base_cam.focal, base_focal) # camera created with same arguments should # have the same values - new_cam = g.trimesh.scene.Camera( - resolution=updated_res, - fov=fov) + new_cam = g.trimesh.scene.Camera(resolution=updated_res, fov=fov) assert g.np.allclose(base_cam.focal, new_cam.focal) def test_fov_updates_on_resolution_change(self): @@ -77,9 +64,7 @@ def test_fov_updates_on_resolution_change(self): base_res = (320, 240) updated_res = (640, 480) focal = (100, 100) - base_cam = g.trimesh.scene.Camera( - resolution=base_res, - focal=focal) + base_cam = g.trimesh.scene.Camera(resolution=base_res, focal=focal) base_fov = base_cam.fov base_cam.resolution = updated_res assert base_cam.fov is not base_fov @@ -94,10 +79,7 @@ def test_lookat(self): Test the "look at points" function """ # original points - ori = np.array([[-1, -1], - [1, -1], - [1, 1], - [-1, 1]]) + ori = np.array([[-1, -1], [1, -1], [1, 1], [-1, 1]]) for _i in range(10): # set the extents to be random but positive @@ -111,8 +93,7 @@ def test_lookat(self): T = g.trimesh.scene.cameras.look_at(points + offset, fov) # check using trig - check = (points.ptp(axis=0)[:2] / 2.0) / \ - g.np.tan(np.radians(fov / 2)) + check = (points.ptp(axis=0)[:2] / 2.0) / g.np.tan(np.radians(fov / 2)) check += points[:, 2].mean() # Z should be the same as maximum trig option @@ -136,6 +117,6 @@ def test_ray_index(self): assert all(rid.max(axis=0) == current - 1) -if __name__ == '__main__': 
+if __name__ == "__main__":
     g.trimesh.util.attach_to_log()
     g.unittest.main()
diff --git a/tests/test_collision.py b/tests/test_collision.py
index 171392340..935002183 100644
--- a/tests/test_collision.py
+++ b/tests/test_collision.py
@@ -10,14 +10,13 @@


 class CollisionTest(g.unittest.TestCase):
-
     def test_collision(self):
         # Ensure that FCL is importable
         if fcl is None:
-            g.log.warning('skipping FCL tests: not installed')
+            g.log.warning("skipping FCL tests: not installed")
             return

-        cube = g.get_mesh('unit_cube.STL')
+        cube = g.get_mesh("unit_cube.STL")

         tf1 = g.np.eye(4)
         tf1[:3, 3] = g.np.array([5, 0, 0])
@@ -27,17 +26,15 @@ def test_collision(self):

         # Test one-to-many collision checking
         m = g.trimesh.collision.CollisionManager()
-        m.add_object('cube0', cube)
-        m.add_object('cube1', cube, tf1)
+        m.add_object("cube0", cube)
+        m.add_object("cube1", cube, tf1)

         ret = m.in_collision_single(cube)
         assert ret is True

         ret, names, data = m.in_collision_single(
-            cube,
-            tf1,
-            return_names=True,
-            return_data=True)
+            cube, tf1, return_names=True, return_data=True
+        )
         assert ret is True

         for c in data:
@@ -45,14 +42,13 @@ def test_collision(self):
             assert g.np.isclose(c.depth, 1.0)
             assert g.np.allclose(c.normal, g.np.array([-1.0, 0.0, 0.0]))

-        if 'cube1' not in names:
-            g.log.debug('\n\n', m._objs.keys(), names)
-        assert 'cube1' in names
+        if "cube1" not in names:
+            g.log.debug("\n\n%s %s", m._objs.keys(), names)
+        assert "cube1" in names

-        ret, names, data = m.in_collision_single(cube,
-                                                 tf2,
-                                                 return_names=True,
-                                                 return_data=True)
+        ret, names, data = m.in_collision_single(
+            cube, tf2, return_names=True, return_data=True
+        )
         assert ret is False
         assert len(names) == 0
         assert all(len(i.point) == 3 for i in data)
@@ -62,69 +58,68 @@ def test_collision(self):
         ret = m.in_collision_internal()
         assert ret is False

-        m.add_object('cube2', cube, tf1)
+        m.add_object("cube2", cube, tf1)
         ret, names = m.in_collision_internal(return_names=True)
         assert ret is True
-        assert ('cube1', 'cube2') in names
-        assert ('cube0', 'cube1') not in names
-        assert ('cube2', 'cube1') not in names
+        assert ("cube1", "cube2") in names
+        assert ("cube0", "cube1") not in names
+        assert ("cube2", "cube1") not in names

-        m.set_transform('cube2', tf2)
+        m.set_transform("cube2", tf2)
         ret = m.in_collision_internal()
         assert ret is False

-        m.set_transform('cube2', tf1)
+        m.set_transform("cube2", tf1)
         ret = m.in_collision_internal()
         assert ret is True

-        m.remove_object('cube2')
+        m.remove_object("cube2")
         ret = m.in_collision_internal()
         assert ret is False

         # Test manager-to-manager collision checking
         m = g.trimesh.collision.CollisionManager()
-        m.add_object('cube0', cube)
-        m.add_object('cube1', cube, tf1)
+        m.add_object("cube0", cube)
+        m.add_object("cube1", cube, tf1)

         n = g.trimesh.collision.CollisionManager()
-        n.add_object('cube0', cube, tf2)
+        n.add_object("cube0", cube, tf2)

         ret = m.in_collision_other(n)
         assert ret is False

-        n.add_object('cube3', cube, tf1)
+        n.add_object("cube3", cube, tf1)

         ret = m.in_collision_other(n)
         assert ret is True

         ret, names = m.in_collision_other(n, return_names=True)
         assert ret is True
-        assert ('cube1', 'cube3') in names
-        assert ('cube3', 'cube1') not in names
+        assert ("cube1", "cube3") in names
+        assert ("cube3", "cube1") not in names

     def test_random_spheres(self):
         if fcl is None:
-            g.log.warning('skipping FCL tests: not installed')
+            g.log.warning("skipping FCL tests: not installed")
             return
         # check to see that a scene with a bunch of
         # random spheres can be collision-checked
-        spheres = [g.trimesh.creation.icosphere(
radius=i[0]).apply_translation( - i[1:] * 100) for i in - g.random((1000, 4))] + spheres = [ + g.trimesh.creation.icosphere(radius=i[0]).apply_translation(i[1:] * 100) + for i in g.random((1000, 4)) + ] scene = g.trimesh.Scene(spheres) - manager, _ = g.trimesh.collision.scene_to_collision( - scene) + manager, _ = g.trimesh.collision.scene_to_collision(scene) collides = manager.in_collision_internal() assert isinstance(collides, bool) def test_distance(self): if fcl is None: - g.log.warning('skipping FCL tests: not installed') + g.log.warning("skipping FCL tests: not installed") return - cube = g.get_mesh('unit_cube.STL') + cube = g.get_mesh("unit_cube.STL") tf1 = g.np.eye(4) tf1[:3, 3] = g.np.array([5, 0, 0]) @@ -143,7 +138,7 @@ def test_distance(self): # Test one-to-many distance checking m = g.trimesh.collision.CollisionManager() - m.add_object('cube1', cube, tf1) + m.add_object("cube1", cube, tf1) dist = m.min_distance_single(cube) assert g.np.isclose(dist, 4.0) @@ -153,17 +148,17 @@ def test_distance(self): dist, name = m.min_distance_single(cube, return_name=True) assert g.np.isclose(dist, 4.0) - assert name == 'cube1' + assert name == "cube1" - m.add_object('cube2', cube, tf2) + m.add_object("cube2", cube, tf2) dist, name = m.min_distance_single(cube, tf3, return_name=True) assert g.np.isclose(dist, 2.0) - assert name == 'cube1' + assert name == "cube1" dist, name = m.min_distance_single(cube, tf4, return_name=True) assert g.np.isclose(dist, 2.0) - assert name == 'cube2' + assert name == "cube2" # Test internal distance checking and object # addition/removal/modification @@ -172,58 +167,54 @@ def test_distance(self): dist, names = m.min_distance_internal(return_names=True) assert g.np.isclose(dist, 9.0) - assert names == ('cube1', 'cube2') + assert names == ("cube1", "cube2") - m.add_object('cube3', cube, tf3) + m.add_object("cube3", cube, tf3) dist, names = m.min_distance_internal(return_names=True) assert g.np.isclose(dist, 2.0) - assert names == ('cube1', 'cube3') + assert names == ("cube1", "cube3") - m.set_transform('cube3', tf4) + m.set_transform("cube3", tf4) dist, names = m.min_distance_internal(return_names=True) assert g.np.isclose(dist, 2.0) - assert names == ('cube2', 'cube3') + assert names == ("cube2", "cube3") # Test manager-to-manager distance checking m = g.trimesh.collision.CollisionManager() - m.add_object('cube0', cube) - m.add_object('cube1', cube, tf1) + m.add_object("cube0", cube) + m.add_object("cube1", cube, tf1) n = g.trimesh.collision.CollisionManager() - n.add_object('cube4', cube, tf2) + n.add_object("cube4", cube, tf2) - dist, names, data = m.min_distance_other( - n, return_names=True, return_data=True) + dist, names, data = m.min_distance_other(n, return_names=True, return_data=True) assert g.np.isclose(dist, 4.0) - assert names == ('cube0', 'cube4') + assert names == ("cube0", "cube4") assert g.np.isclose( - g.np.linalg.norm(data.point(names[0]) - data.point(names[1])), - dist + g.np.linalg.norm(data.point(names[0]) - data.point(names[1])), dist ) - n.add_object('cube5', cube, tf4) + n.add_object("cube5", cube, tf4) - dist, names, data = m.min_distance_other( - n, return_names=True, return_data=True) + dist, names, data = m.min_distance_other(n, return_names=True, return_data=True) assert g.np.isclose(dist, 1.0) - assert names == ('cube0', 'cube5') + assert names == ("cube0", "cube5") assert g.np.isclose( - g.np.linalg.norm(data.point(names[0]) - data.point(names[1])), - dist + g.np.linalg.norm(data.point(names[0]) - data.point(names[1])), dist ) def 
test_scene(self): if fcl is None: return - scene = g.get_mesh('cycloidal.3DXML') + scene = g.get_mesh("cycloidal.3DXML") manager, objects = g.trimesh.collision.scene_to_collision(scene) assert manager.in_collision_internal() assert objects is not None -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_color.py b/tests/test_color.py index 3743d9caf..88f29387e 100644 --- a/tests/test_color.py +++ b/tests/test_color.py @@ -5,9 +5,8 @@ class VisualTest(g.unittest.TestCase): - def test_visual(self): - mesh = g.get_mesh('featuretype.STL') + mesh = g.get_mesh("featuretype.STL") # stl shouldn't have any visual properties defined assert not mesh.visual.defined @@ -22,8 +21,8 @@ def test_visual(self): assert mesh.visual.transparency def test_concatenate(self): - a = g.get_mesh('ballA.off') - b = g.get_mesh('ballB.off') + a = g.get_mesh("ballA.off") + b = g.get_mesh("ballB.off") a.visual.face_colors = [255, 0, 0] r = a + b @@ -34,7 +33,7 @@ def test_data_model(self): Test the probably too- magical color caching and storage system. """ - m = g.get_mesh('featuretype.STL') + m = g.get_mesh("featuretype.STL") test_color = [255, 0, 0, 255] test_color_2 = [0, 255, 0, 255] test_color_transparent = [25, 33, 0, 146] @@ -67,7 +66,7 @@ def test_data_model(self): # the rest of the colors should be unchanged assert (m.visual.face_colors[1] != test_color).any() assert len(m.visual._data) >= 1 - assert m.visual.kind == 'face' + assert m.visual.kind == "face" assert m.visual.defined assert not m.visual.transparency @@ -77,7 +76,7 @@ def test_data_model(self): # assert len(m.visual._cache) == 0 # should be just material and face information assert len(m.visual._data.data) >= 1 - assert m.visual.kind == 'face' + assert m.visual.kind == "face" assert bool((m.visual.vertex_colors == test_color).all()) assert m.visual.defined assert not m.visual.transparency @@ -86,7 +85,7 @@ def test_data_model(self): m.visual.vertex_colors[0] = test_color_2 assert (m.visual.vertex_colors[0] == test_color_2).all() assert (m.visual.vertex_colors[1] != test_color_2).any() - assert m.visual.kind == 'vertex' + assert m.visual.kind == "vertex" assert m.visual.defined assert not m.visual.transparency @@ -96,34 +95,32 @@ def test_data_model(self): test = (g.random((len(m.faces), 4)) * 255).astype(g.np.uint8) m.visual.face_colors = test assert bool((m.visual.face_colors == test).all()) - assert m.visual.kind == 'face' + assert m.visual.kind == "face" - test = (g.random((len(m.vertices), 4)) - * 255).astype(g.np.uint8) + test = (g.random((len(m.vertices), 4)) * 255).astype(g.np.uint8) m.visual.vertex_colors = test assert bool((m.visual.vertex_colors == test).all()) - assert m.visual.kind == 'vertex' + assert m.visual.kind == "vertex" test = (g.random(4) * 255).astype(g.np.uint8) m.visual.face_colors = test assert bool((m.visual.vertex_colors == test).all()) - assert m.visual.kind == 'face' + assert m.visual.kind == "face" m.visual.vertex_colors[0] = [0, 0, 0, 0] - assert m.visual.kind == 'vertex' + assert m.visual.kind == "vertex" test = (g.random(4) * 255).astype(g.np.uint8) m.visual.vertex_colors = test assert bool((m.visual.face_colors == test).all()) - assert m.visual.kind == 'vertex' - m.visual.face_colors[:2] = ( - g.random((2, 4)) * 255).astype(g.np.uint8) - assert m.visual.kind == 'face' + assert m.visual.kind == "vertex" + m.visual.face_colors[:2] = (g.random((2, 4)) * 255).astype(g.np.uint8) + assert m.visual.kind == "face" def test_smooth(self): """ Make 
sure cached smooth model is dumped if colors are changed """ - m = g.get_mesh('featuretype.STL') + m = g.get_mesh("featuretype.STL") # will put smoothed mesh into visuals cache s = m.smoothed() @@ -138,7 +135,7 @@ def test_smooth(self): assert s1.visual.face_colors.ptp(axis=0).max() != 0 # do the same check on vertex color - m = g.get_mesh('featuretype.STL') + m = g.get_mesh("featuretype.STL") s = m.smoothed() # every color should be default color assert s.visual.vertex_colors.ptp(axis=0).max() == 0 @@ -147,16 +144,15 @@ def test_smooth(self): assert s1.visual.face_colors.ptp(axis=0).max() != 0 def test_vertex(self): - - m = g.get_mesh('torus.STL') + m = g.get_mesh("torus.STL") m.visual.vertex_colors = [100, 100, 100, 255] assert len(m.visual.vertex_colors) == len(m.vertices) def test_conversion(self): - m = g.get_mesh('machinist.XAML') - assert m.visual.kind == 'face' + m = g.get_mesh("machinist.XAML") + assert m.visual.kind == "face" # unmerge vertices so we don't get average colors m.unmerge_vertices() @@ -166,7 +162,7 @@ def test_conversion(self): # assign averaged vertex colors as default m.visual.vertex_colors = m.visual.vertex_colors - assert m.visual.kind == 'vertex' + assert m.visual.kind == "vertex" m.visual._cache.clear() assert g.np.allclose(initial, m.visual.face_colors) @@ -195,8 +191,7 @@ def test_interpolate(self): # try interpolating with matplotlib color maps try: - colors = g.trimesh.visual.interpolate(values, - 'viridis') + colors = g.trimesh.visual.interpolate(values, "viridis") except ImportError: # if matplotlib isn't installed return @@ -238,8 +233,7 @@ def test_uv_to_interpolated_color(self): colors = g.trimesh.visual.uv_to_interpolated_color(uv, img) # exact interpolated values before being converted to uint8 - colors_expected = [[7.75, 24.8, 128 + 7.75, 255], - [12.4, 15.5, 128 + 12.4, 255]] + colors_expected = [[7.75, 24.8, 128 + 7.75, 255], [12.4, 15.5, 128 + 12.4, 255]] assert g.np.allclose(colors, colors_expected, rtol=0, atol=1) @@ -258,18 +252,17 @@ def test_iterset(self): def test_copy(self): s = g.trimesh.creation.uv_sphere().scene() - s.geometry['geometry_0'].visual.face_colors[:, :3] = [0, 255, 0] + s.geometry["geometry_0"].visual.face_colors[:, :3] = [0, 255, 0] - a = s.geometry['geometry_0'] - assert id(a) == id(s.geometry['geometry_0']) + a = s.geometry["geometry_0"] + assert id(a) == id(s.geometry["geometry_0"]) - b = s.geometry['geometry_0'].copy() + b = s.geometry["geometry_0"].copy() assert id(a) != id(b) - assert g.np.allclose(a.visual.face_colors, - b.visual.face_colors) + assert g.np.allclose(a.visual.face_colors, b.visual.face_colors) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_convex.py b/tests/test_convex.py index 33968233f..3433ec0a7 100644 --- a/tests/test_convex.py +++ b/tests/test_convex.py @@ -5,22 +5,26 @@ class ConvexTest(g.unittest.TestCase): - def test_convex(self): - # store (true is_convex, mesh) tuples - meshes = [(False, g.get_mesh('featuretype.STL')), - (False, g.get_mesh('quadknot.obj')), - (True, g.get_mesh('unit_cube.STL')), - (False, g.get_mesh('1002_tray_bottom.STL')), - (True, g.trimesh.creation.icosphere()), - (True, g.trimesh.creation.uv_sphere()), - (True, g.trimesh.creation.box()), - (True, g.trimesh.creation.cylinder( - radius=1, height=10)), - (True, g.trimesh.creation.capsule()), - (False, (g.trimesh.creation.box(extents=(1, 1, 1)) + - g.trimesh.creation.box(bounds=[[10, 10, 10], [12, 12, 12]])))] + meshes = [ + (False, 
g.get_mesh("featuretype.STL")), + (False, g.get_mesh("quadknot.obj")), + (True, g.get_mesh("unit_cube.STL")), + (False, g.get_mesh("1002_tray_bottom.STL")), + (True, g.trimesh.creation.icosphere()), + (True, g.trimesh.creation.uv_sphere()), + (True, g.trimesh.creation.box()), + (True, g.trimesh.creation.cylinder(radius=1, height=10)), + (True, g.trimesh.creation.capsule()), + ( + False, + ( + g.trimesh.creation.box(extents=(1, 1, 1)) + + g.trimesh.creation.box(bounds=[[10, 10, 10], [12, 12, 12]]) + ), + ), + ] for is_convex, mesh in meshes: assert mesh.is_watertight @@ -40,11 +44,11 @@ def test_convex(self): volume.append(hulls[-1].volume) # which of the volumes are close to the median volume - close = g.np.isclose(volume, - g.np.median(volume), - atol=mesh.bounding_box.volume / 1000) + close = g.np.isclose( + volume, g.np.median(volume), atol=mesh.bounding_box.volume / 1000 + ) - if g.platform.system() == 'Linux': + if g.platform.system() == "Linux": # on linux the convex hulls are pretty robust close_ok = close.all() else: @@ -53,48 +57,51 @@ def test_convex(self): # for success: here of 90% of values are close to the median # then declare everything hunky dory ratio = close.sum() / float(len(close)) - close_ok = ratio > .9 + close_ok = ratio > 0.9 if not close_ok: - g.log.error(f'volume inconsistent: {volume}') - raise ValueError('volume is inconsistent on {}'.format( - mesh.metadata['file_name'])) + g.log.error(f"volume inconsistent: {volume}") + raise ValueError( + "volume is inconsistent on {}".format(mesh.metadata["file_name"]) + ) assert min(volume) > 0.0 if not all(i.is_winding_consistent for i in hulls): raise ValueError( - 'mesh %s reported bad winding on convex hull!', - mesh.metadata['file_name']) + "mesh %s reported bad winding on convex hull!", + mesh.metadata["file_name"], + ) if not all(i.is_convex for i in hulls): - raise ValueError('mesh %s reported non-convex convex hull!', - mesh.metadata['file_name']) + raise ValueError( + "mesh %s reported non-convex convex hull!", mesh.metadata["file_name"] + ) def test_primitives(self): - for prim in [g.trimesh.primitives.Sphere(), - g.trimesh.primitives.Cylinder(), - g.trimesh.primitives.Box()]: + for prim in [ + g.trimesh.primitives.Sphere(), + g.trimesh.primitives.Cylinder(), + g.trimesh.primitives.Box(), + ]: assert prim.is_convex # convex things should have hulls of the same volume # convert to mesh to get tessellated volume rather than # analytic primitive volume tess = prim.to_mesh() - assert g.np.isclose(tess.convex_hull.volume, - tess.volume) + assert g.np.isclose(tess.convex_hull.volume, tess.volume) def test_projections(self): # check the vertex projection onto adjacent face plane # this is used to calculate convexity for m in g.get_meshes(4): - assert (len(m.face_adjacency_projections) == - (len(m.face_adjacency))) + assert len(m.face_adjacency_projections) == (len(m.face_adjacency)) def test_truth(self): # check a non-watertight mesh - m = g.get_mesh('not_convex.obj') + m = g.get_mesh("not_convex.obj") assert not m.is_convex -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_copy.py b/tests/test_copy.py index 9e6bde033..cc612d03d 100644 --- a/tests/test_copy.py +++ b/tests/test_copy.py @@ -8,7 +8,6 @@ class CopyTests(g.unittest.TestCase): - def test_copy(self): for mesh in g.get_meshes(raise_error=True): if not isinstance(mesh, g.trimesh.Trimesh) or len(mesh.faces) == 0: @@ -20,7 +19,7 @@ def test_copy(self): _ = mesh.triangles_tree _ = 
mesh.face_adjacency_angles _ = mesh.facets - assert 'triangles_tree' in mesh._cache + assert "triangles_tree" in mesh._cache assert len(mesh._cache) > 0 # if you cache c-objects then deepcopy the mesh @@ -40,20 +39,18 @@ def test_copy(self): copied = g.copy.copy(mesh) assert len(copied._cache) == len(mesh._cache) # the triangles_tree should be the SAME OBJECT - assert id(copied.triangles_tree) == id( - mesh.triangles_tree) + assert id(copied.triangles_tree) == id(mesh.triangles_tree) # cache should be same data in different object assert id(copied._cache.cache) != id(mesh._cache.cache) assert id(copied._cache) != id(mesh._cache) # identifier shouldn't change - assert g.np.allclose(copied.identifier, - mesh.identifier) + assert g.np.allclose(copied.identifier, mesh.identifier) # ...still shouldn't have changed anything assert start == hash(mesh) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_crash.py b/tests/test_crash.py index a47613fbf..e4eb656bd 100644 --- a/tests/test_crash.py +++ b/tests/test_crash.py @@ -9,15 +9,12 @@ def not_open(file_name, proc): Assert that a file name is not open """ # expand input path - file_name = g.os.path.abspath( - g.os.path.expanduser(file_name)) + file_name = g.os.path.abspath(g.os.path.expanduser(file_name)) # assert none of the open files are the one specified - assert all(i.path != file_name for i in - proc.open_files()) + assert all(i.path != file_name for i in proc.open_files()) class FileTests(g.unittest.TestCase): - def test_close(self): """ Even when loaders crash, we should close files @@ -25,15 +22,14 @@ def test_close(self): try: import psutil except ImportError: - g.log.warning('no psutil, exiting') + g.log.warning("no psutil, exiting") return # a reference to current process proc = psutil.Process() # create a blank empty PLY file - f = g.tempfile.NamedTemporaryFile(suffix='.ply', - delete=False) + f = g.tempfile.NamedTemporaryFile(suffix=".ply", delete=False) # close file f.close() @@ -62,8 +58,7 @@ def test_close(self): g.os.remove(f.name) # create a blank empty unsupported file - f = g.tempfile.NamedTemporaryFile(suffix='.blorb', - delete=False) + f = g.tempfile.NamedTemporaryFile(suffix=".blorb", delete=False) # close file f.close() # file shouldn't be open @@ -91,8 +86,7 @@ def test_close(self): g.os.remove(f.name) # create a blank empty DXF file - f = g.tempfile.NamedTemporaryFile(suffix='.dxf', - delete=False) + f = g.tempfile.NamedTemporaryFile(suffix=".dxf", delete=False) # close file f.close() # file shouldn't be open @@ -120,6 +114,6 @@ def test_close(self): g.os.remove(f.name) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_creation.py b/tests/test_creation.py index fa60f9293..fabb82c8e 100644 --- a/tests/test_creation.py +++ b/tests/test_creation.py @@ -5,13 +5,12 @@ class CreationTest(g.unittest.TestCase): - def setUp(self): engines = [] - if g.trimesh.util.has_module('triangle'): - engines.append('triangle') - if g.trimesh.util.has_module('mapbox_earcut'): - engines.append('earcut') + if g.trimesh.util.has_module("triangle"): + engines.append("triangle") + if g.trimesh.util.has_module("mapbox_earcut"): + engines.append("earcut") self.engines = engines def test_box(self): @@ -19,9 +18,7 @@ def test_box(self): # should create a unit cube with origin centroid m = box() - assert g.np.allclose(m.bounds, - [[-0.5] * 3, - [0.5] * 3]) + assert g.np.allclose(m.bounds, [[-0.5] * 
3, [0.5] * 3]) # check creation by passing extents extents = g.np.array([1.2, 1.9, 10.3]) @@ -38,7 +35,7 @@ def test_cone(self): assert c.is_volume assert c.body_count == 1 assert g.np.allclose(c.extents, 1.0, atol=0.03) - assert c.metadata['shape'] == 'cone' + assert c.metadata["shape"] == "cone" def test_cylinder(self): # tolerance for cylinders @@ -48,36 +45,30 @@ def test_cylinder(self): assert c.is_volume assert c.body_count == 1 assert g.np.allclose(c.extents, 1.0, atol=atol) - assert c.metadata['shape'] == 'cylinder' + assert c.metadata["shape"] == "cylinder" # check the "use a segment" feature # passed height should be overridden radius = 0.75 offset = 10.0 # true bounds - bounds = [[0, -radius, offset - radius], - [1, radius, offset + radius]], + bounds = ([[0, -radius, offset - radius], [1, radius, offset + radius]],) # create with a height that gets overridden c = g.trimesh.creation.cylinder( - radius=radius, - height=200, - segment=[[0, 0, offset], - [1, 0, offset]]) + radius=radius, height=200, segment=[[0, 0, offset], [1, 0, offset]] + ) assert c.is_volume assert c.body_count == 1 # make sure segment has been applied correctly - assert g.np.allclose( - c.bounds, bounds, atol=atol) + assert g.np.allclose(c.bounds, bounds, atol=atol) # try again with no height passed c = g.trimesh.creation.cylinder( - radius=radius, - segment=[[0, 0, offset], - [1, 0, offset]]) + radius=radius, segment=[[0, 0, offset], [1, 0, offset]] + ) assert c.is_volume assert c.body_count == 1 # make sure segment has been applied correctly - assert g.np.allclose( - c.bounds, bounds, atol=atol) + assert g.np.allclose(c.bounds, bounds, atol=atol) def test_soup(self): count = 100 @@ -95,23 +86,21 @@ def test_capsule(self): def test_spheres(self): # test generation of UV spheres and icospheres - for sphere in [g.trimesh.creation.uv_sphere(), - g.trimesh.creation.icosphere()]: + for sphere in [g.trimesh.creation.uv_sphere(), g.trimesh.creation.icosphere()]: assert sphere.is_volume assert sphere.is_convex assert sphere.is_watertight assert sphere.is_winding_consistent assert sphere.body_count == 1 - assert sphere.metadata['shape'] == 'sphere' + assert sphere.metadata["shape"] == "sphere" # all vertices should have radius of exactly 1.0 - radii = g.np.linalg.norm( - sphere.vertices - sphere.center_mass, axis=1) + radii = g.np.linalg.norm(sphere.vertices - sphere.center_mass, axis=1) assert g.np.allclose(radii, 1.0) # test additional arguments - red_sphere = g.trimesh.creation.icosphere(face_colors=(1., 0, 0)) + red_sphere = g.trimesh.creation.icosphere(face_colors=(1.0, 0, 0)) expected = g.np.full((len(red_sphere.faces), 4), (255, 0, 0, 255)) g.np.testing.assert_allclose(red_sphere.visual.face_colors, expected) @@ -121,8 +110,7 @@ def test_camera_marker(self): """ # camera transform (pose) is identity camera = g.trimesh.scene.Camera(resolution=(320, 240), fov=(60, 45)) - meshes = g.trimesh.creation.camera_marker( - camera=camera, marker_height=0.04) + meshes = g.trimesh.creation.camera_marker(camera=camera, marker_height=0.04) assert isinstance(meshes, list) # all meshes should be viewable type for mesh in meshes: @@ -135,13 +123,12 @@ def test_axis(self): axis_length = 0.4 # construct a visual axis - axis = g.trimesh.creation.axis(origin_size=origin_size, - axis_length=axis_length) + axis = g.trimesh.creation.axis(origin_size=origin_size, axis_length=axis_length) # AABB should be origin radius + cylinder length - assert g.np.allclose(origin_size + axis_length, - axis.bounding_box.primitive.extents, - rtol=.01) + 
assert g.np.allclose( + origin_size + axis_length, axis.bounding_box.primitive.extents, rtol=0.01 + ) def test_path_sweep(self): if len(self.engines) == 0: @@ -151,9 +138,9 @@ def test_path_sweep(self): vec = g.np.array([0, 1]) * 0.2 n_comps = 100 angle = g.np.pi * 2.0 / n_comps - rotmat = g.np.array([ - [g.np.cos(angle), -g.np.sin(angle)], - [g.np.sin(angle), g.np.cos(angle)]]) + rotmat = g.np.array( + [[g.np.cos(angle), -g.np.sin(angle)], [g.np.sin(angle), g.np.cos(angle)]] + ) perim = [] for _i in range(n_comps): perim.append(vec) @@ -169,8 +156,7 @@ def test_path_sweep(self): # Extrude for engine in self.engines: - mesh = g.trimesh.creation.sweep_polygon( - poly, path, engine=engine) + mesh = g.trimesh.creation.sweep_polygon(poly, path, engine=engine) assert mesh.is_volume def test_annulus(self): @@ -182,15 +168,12 @@ def test_annulus(self): transforms = [None] transforms.extend(g.transforms) for T in transforms: - a = g.trimesh.creation.annulus(r_min=1.0, - r_max=2.0, - height=1.0, - transform=T) + a = g.trimesh.creation.annulus(r_min=1.0, r_max=2.0, height=1.0, transform=T) # mesh should be well constructed assert a.is_volume assert a.is_watertight assert a.is_winding_consistent - assert a.metadata['shape'] == 'annulus' + assert a.metadata["shape"] == "annulus" # should be centered at origin assert g.np.allclose(a.center_mass, 0.0) @@ -201,39 +184,34 @@ def test_annulus(self): axis = g.trimesh.transform_points(axis, T) # should be along rotated Z - assert (g.np.allclose(a.symmetry_axis, axis[2]) or - g.np.allclose(a.symmetry_axis, -axis[2])) + assert g.np.allclose(a.symmetry_axis, axis[2]) or g.np.allclose( + a.symmetry_axis, -axis[2] + ) radii = [g.np.dot(a.vertices, i) for i in axis[:2]] radii = g.np.linalg.norm(radii, axis=0) # vertices should all be at r_min or r_max - assert g.np.logical_or(g.np.isclose(radii, 1.0), - g.np.isclose(radii, 2.0)).all() + assert g.np.logical_or( + g.np.isclose(radii, 1.0), g.np.isclose(radii, 2.0) + ).all() # all heights should be at +/- height/2.0 - assert g.np.allclose(g.np.abs(g.np.dot(a.vertices, - axis[2])), 0.5) + assert g.np.allclose(g.np.abs(g.np.dot(a.vertices, axis[2])), 0.5) # do some cylinder comparison checks - a = g.trimesh.creation.annulus(r_min=0.0, - r_max=1.0, - height=1.0) + a = g.trimesh.creation.annulus(r_min=0.0, r_max=1.0, height=1.0) cylinder = g.trimesh.creation.cylinder(radius=1, height=1) # should survive a zero-inner-radius assert g.np.isclose(a.volume, cylinder.volume) assert g.np.isclose(a.area, cylinder.area) # bounds should be the same as a cylinder - a = g.trimesh.creation.annulus(r_min=.25, - r_max=1.0, - height=1.0) + a = g.trimesh.creation.annulus(r_min=0.25, r_max=1.0, height=1.0) c = g.trimesh.creation.cylinder(radius=1, height=1) assert g.np.allclose(a.bounds, c.bounds) # segment should work the same for both seg = [[1, 2, 3], [4, 5, 6]] - a = g.trimesh.creation.annulus(r_min=.25, - r_max=1.0, - segment=seg) + a = g.trimesh.creation.annulus(r_min=0.25, r_max=1.0, segment=seg) c = g.trimesh.creation.cylinder(radius=1, segment=seg) assert g.np.allclose(a.bounds, c.bounds) @@ -243,7 +221,7 @@ def test_triangulate(self): """ # circles bigger = g.Point([10, 0]).buffer(1.0) - smaller = g.Point([10, 0]).buffer(.25) + smaller = g.Point([10, 0]).buffer(0.25) # circle with hole in center donut = bigger.difference(smaller) @@ -251,45 +229,45 @@ def test_triangulate(self): # make sure we have nonzero data assert bigger.area > 1.0 # make sure difference did what we think it should - assert g.np.isclose(donut.area, - 
bigger.area - smaller.area)
+        assert g.np.isclose(donut.area, bigger.area - smaller.area)
 
-        times = {'earcut': 0.0, 'triangle': 0.0}
+        times = {"earcut": 0.0, "triangle": 0.0}
         iterations = 50
 
         # get a polygon to benchmark times with including interiors
         bench = [bigger, smaller, donut]
-        bench.extend(g.get_mesh(
-            '2D/ChuteHolderPrint.DXF').polygons_full)
-        bench.extend(g.get_mesh(
-            '2D/wrench.dxf').polygons_full)
+        bench.extend(g.get_mesh("2D/ChuteHolderPrint.DXF").polygons_full)
+        bench.extend(g.get_mesh("2D/wrench.dxf").polygons_full)
 
         # check triangulation of both meshpy and triangle engine
         # including an example that has interiors
         for engine in self.engines:
             # make sure all our polygons triangulate reasonably
             for poly in bench:
-                v, f = g.trimesh.creation.triangulate_polygon(
-                    poly, engine=engine)
+                v, f = g.trimesh.creation.triangulate_polygon(poly, engine=engine)
                 # run asserts
                 check_triangulation(v, f, poly.area)
 
             try:
                 # do a quick benchmark per engine
                 # in general triangle appears to be 2x
                 # faster than
-                times[engine] += min(
-                    g.timeit.repeat(
-                        't(p, engine=e)',
-                        repeat=3,
-                        number=iterations,
-                        globals={
-                            't': g.trimesh.creation.triangulate_polygon,
-                            'p': poly,
-                            'e': engine})) / iterations
+                times[engine] += (
+                    min(
+                        g.timeit.repeat(
+                            "t(p, engine=e)",
+                            repeat=3,
+                            number=iterations,
+                            globals={
+                                "t": g.trimesh.creation.triangulate_polygon,
+                                "p": poly,
+                                "e": engine,
+                            },
+                        )
+                    )
+                    / iterations
+                )
             except BaseException:
-                g.log.error(
-                    'failed to benchmark triangle', exc_info=True)
-            g.log.info(
-                f'benchmarked triangulation on {len(bench)} polygons: {str(times)}')
+                g.log.error("failed to benchmark triangle", exc_info=True)
+        g.log.info(f"benchmarked triangulation on {len(bench)} polygons: {str(times)}")
 
     def test_triangulate_plumbing(self):
         """
@@ -297,7 +275,7 @@ def test_triangulate_plumbing(self):
         """
         if len(self.engines) == 0:
             return
-        p = g.get_mesh('2D/ChuteHolderPrint.DXF')
+        p = g.get_mesh("2D/ChuteHolderPrint.DXF")
         for engine in self.engines:
             v, f = p.triangulate(engine=engine)
             check_triangulation(v, f, p.area)
@@ -315,9 +293,9 @@ def test_truncated(self, count=10):
 
 def check_triangulation(v, f, true_area):
     assert g.trimesh.util.is_shape(v, (-1, 2))
-    assert v.dtype.kind == 'f'
+    assert v.dtype.kind == "f"
     assert g.trimesh.util.is_shape(f, (-1, 3))
-    assert f.dtype.kind == 'i'
+    assert f.dtype.kind == "i"
 
     tri = g.trimesh.util.stack_3D(v)[f]
     area = g.trimesh.triangles.area(tri).sum()
@@ -331,12 +309,17 @@ def test_torus():
     minor_radius = 0.2
 
     m = torus(major_radius=major_radius, minor_radius=minor_radius)
-    extents = g.np.array([2 * major_radius + 2 * minor_radius,
-                          2 * major_radius + 2 * minor_radius,
-                          2 * minor_radius])
+    extents = g.np.array(
+        [
+            2 * major_radius + 2 * minor_radius,
+            2 * major_radius + 2 * minor_radius,
+            2 * minor_radius,
+        ]
+    )
     assert g.np.allclose(m.extents, extents)
     assert g.np.allclose(m.bounds, [-extents / 2.0, extents / 2.0])
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     g.trimesh.util.attach_to_log()
     g.unittest.main()
diff --git a/tests/test_curvature.py b/tests/test_curvature.py
index 0bd876e2d..e8b871e4f 100644
--- a/tests/test_curvature.py
+++ b/tests/test_curvature.py
@@ -5,47 +5,44 @@
 
 
 class CurvatureTest(g.unittest.TestCase):
-
     def test_gaussian_curvature(self):
-
-        for radius in g.np.linspace(.25, 2.0, 10):
+        for radius in g.np.linspace(0.25, 2.0, 10):
             m = g.trimesh.creation.icosphere(radius=radius)
             gauss = g.trimesh.curvature.discrete_gaussian_curvature_measure(
-                mesh=m,
-                points=m.vertices,
-                radius=radius * 
2.0) / (4 * g.np.pi) - assert g.np.allclose(gauss, 1.0, atol=.01) + mesh=m, points=m.vertices, radius=radius * 2.0 + ) / (4 * g.np.pi) + assert g.np.allclose(gauss, 1.0, atol=0.01) # a torus should have approximately half its vertices with positive # curvature, and half with negative - t = g.get_mesh('torus.STL') + t = g.get_mesh("torus.STL") gauss = g.trimesh.curvature.discrete_gaussian_curvature_measure( - mesh=t, - points=t.vertices, - radius=1.0) + mesh=t, points=t.vertices, radius=1.0 + ) ratio = float((gauss < 0.0).sum()) / float(len(gauss)) - assert g.np.isclose(ratio, 0.5, atol=.2) + assert g.np.isclose(ratio, 0.5, atol=0.2) def test_mean_curvature(self): m = g.trimesh.creation.icosphere() - mean = g.trimesh.curvature.discrete_mean_curvature_measure( - m, - m.vertices, - 2.0) / (4 * g.np.pi) + mean = g.trimesh.curvature.discrete_mean_curvature_measure(m, m.vertices, 2.0) / ( + 4 * g.np.pi + ) # Check mean curvature for unit sphere is 1.0 - assert g.np.allclose(mean, 1.0, atol=.01) + assert g.np.allclose(mean, 1.0, atol=0.01) def test_vertex_defect(self): # a subdivided box will only have corners and planar regions # so all vertex defects should be 0 or 90 degrees m = g.trimesh.primitives.Box().subdivide() - assert g.np.logical_or(g.np.isclose(m.vertex_defects, 0.0), - g.np.isclose(m.vertex_defects, g.np.pi / 2.0)).all() + assert g.np.logical_or( + g.np.isclose(m.vertex_defects, 0.0), + g.np.isclose(m.vertex_defects, g.np.pi / 2.0), + ).all() assert len(m.vertex_defects) == len(m.vertices) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_dae.py b/tests/test_dae.py index b33cd33eb..f6c17e354 100644 --- a/tests/test_dae.py +++ b/tests/test_dae.py @@ -10,7 +10,7 @@ except BaseException: # TODO : REMOVE WHEN UPSTREAM RELEASE FIXED # https://github.com/pycollada/pycollada/pull/92 - g.log.error('DAE fix not pushed yet!') + g.log.error("DAE fix not pushed yet!") collada = None @@ -20,67 +20,65 @@ def test_duck(self): Load a collada scene with pycollada. 
""" if collada is None: - g.log.error('no pycollada to test!') + g.log.error("no pycollada to test!") return - scene = g.get_mesh('duck.dae') + scene = g.get_mesh("duck.dae") assert len(scene.geometry) == 1 assert len(scene.graph.nodes_geometry) == 1 def test_shoulder(self): if collada is None: - g.log.error('no pycollada to test!') + g.log.error("no pycollada to test!") return - scene = g.get_mesh('shoulder.zae') + scene = g.get_mesh("shoulder.zae") assert len(scene.geometry) == 3 assert len(scene.graph.nodes_geometry) == 3 def test_export(self): if collada is None: - g.log.error('no pycollada to test!') + g.log.error("no pycollada to test!") return - a = g.get_mesh('ballA.off') - r = a.export(file_type='dae') + a = g.get_mesh("ballA.off") + r = a.export(file_type="dae") assert len(r) > 0 def test_obj_roundtrip(self): # get a zipped-DAE scene - s = g.get_mesh('duck.zae', force='mesh') + s = g.get_mesh("duck.zae", force="mesh") with g.TemporaryDirectory() as root: # export using a file path so it can auto-create # a FilePathResolver to write the stupid assets - path = g.os.path.join(root, 'duck.obj') + path = g.os.path.join(root, "duck.obj") s.export(path) # bring it back from outer space - rec = g.trimesh.load(path, force='mesh') + rec = g.trimesh.load(path, force="mesh") assert rec.visual.uv.ptp(axis=0).ptp() > 1e-5 - assert (s.visual.material.baseColorTexture.size == - rec.visual.material.image.size) + assert s.visual.material.baseColorTexture.size == rec.visual.material.image.size def test_material_round(self): """ Test to make sure materials survive a roundtrip with an actually identical result """ - s = g.get_mesh('blue_cube.dae') + s = g.get_mesh("blue_cube.dae") assert len(s.geometry) == 1 m = next(iter(s.geometry.values())) rs = g.trimesh.load( - file_obj=g.trimesh.util.wrap_as_stream( - m.export(file_type='dae')), - file_type='dae') + file_obj=g.trimesh.util.wrap_as_stream(m.export(file_type="dae")), + file_type="dae", + ) assert len(rs.geometry) == 1 r = next(iter(rs.geometry.values())) # this will compare everything in `material._data` - assert (hash(m.visual.material) == - hash(r.visual.material)) + assert hash(m.visual.material) == hash(r.visual.material) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_dxf.py b/tests/test_dxf.py index dd10e1cbb..a9866380d 100644 --- a/tests/test_dxf.py +++ b/tests/test_dxf.py @@ -10,20 +10,16 @@ class DXFTest(g.unittest.TestCase): - def test_dxf(self): - # get a path we can write - temp_name = g.tempfile.NamedTemporaryFile( - suffix='.dxf', delete=False).name + temp_name = g.tempfile.NamedTemporaryFile(suffix=".dxf", delete=False).name loaded = g.get_2D() # split drawings into single body parts splits = [] for d in loaded: s = d.split() # check area of split result vs source - assert g.np.isclose(sum(i.area for i in s), - d.area) + assert g.np.isclose(sum(i.area for i in s), d.area) splits.append(s) # export the drawing to the file @@ -36,7 +32,7 @@ def test_dxf(self): ezdxf.read(f) # export to a string - text = d.export(file_type='dxf') + text = d.export(file_type="dxf") # DXF files are always pairs of lines lines = str.splitlines(str(text)) @@ -44,9 +40,10 @@ def test_dxf(self): assert all(len(L.strip()) > 0 for L in lines) # reload the file by name and by stream - rc = [g.trimesh.load(temp_name), - g.trimesh.load(g.io_wrap(text), - file_type='dxf')] + rc = [ + g.trimesh.load(temp_name), + g.trimesh.load(g.io_wrap(text), file_type="dxf"), + ] # compare reloaded 
with original for r in rc: @@ -65,32 +62,35 @@ def test_dxf(self): r = g.trimesh.load(temp_name) ratio = abs(p.length - r.length) / p.length - if ratio > .01: - g.log.error('perimeter ratio on export %s wrong! %f %f %f', - p.metadata['file_name'], - p.length, - r.length, - ratio) - - raise ValueError('perimeter ratio too large ({}) on {}'.format( + if ratio > 0.01: + g.log.error( + "perimeter ratio on export %s wrong! %f %f %f", + p.metadata["file_name"], + p.length, + r.length, ratio, - p.metadata['file_name'])) + ) - def test_spline(self): + raise ValueError( + "perimeter ratio too large ({}) on {}".format( + ratio, p.metadata["file_name"] + ) + ) - d = g.get_mesh('2D/cycloidal.dxf') + def test_spline(self): + d = g.get_mesh("2D/cycloidal.dxf") assert len(d.entities) == 1 - assert type(d.entities[0]).__name__ == 'BSpline' + assert type(d.entities[0]).__name__ == "BSpline" # export to dxf and wrap as a file object - e = g.trimesh.util.wrap_as_stream(d.export(file_type='dxf')) + e = g.trimesh.util.wrap_as_stream(d.export(file_type="dxf")) # reconstitute drawing - r = g.trimesh.load(e, file_type='dxf') + r = g.trimesh.load(e, file_type="dxf") # make sure reconstituted drawing is the same as the source assert len(r.entities) == 1 - assert type(r.entities[0]).__name__ == 'BSpline' + assert type(r.entities[0]).__name__ == "BSpline" assert g.np.isclose(r.area, d.area) assert len(d.entities[0].points) == len(r.entities[0].points) @@ -107,7 +107,7 @@ def test_versions(self): uc.2007b.dxf: unit square, R2007 binary DXF """ # directory where multiple versions of DXF are - dir_versions = g.os.path.join(g.dir_2D, 'versions') + dir_versions = g.os.path.join(g.dir_2D, "versions") # load the different versions paths = {} @@ -119,9 +119,9 @@ def test_versions(self): except ValueError as E: # something like 'r14a' for ascii # and 'r14b' for binary - version = f.split('.')[-2] + version = f.split(".")[-2] # we should only get ValueErrors on binary DXF - assert version[-1] == 'b' + assert version[-1] == "b" g.log.debug(E, f) # group drawings which have the same geometry @@ -129,7 +129,7 @@ def test_versions(self): groups = g.collections.defaultdict(list) for k in paths.keys(): # the first string before a period is the drawing name - groups[k.split('.')[0]].append(k) + groups[k.split(".")[0]].append(k) # loop through each group of the same drawing for group in groups.values(): @@ -138,13 +138,11 @@ def test_versions(self): L = g.np.array(L, dtype=g.np.float64) # make sure all versions have consistent length - assert g.np.allclose(L, L.mean(), rtol=.01) + assert g.np.allclose(L, L.mean(), rtol=0.01) # count the number of entities in the path # this should be the same for every version - E = g.np.array( - [len(paths[i].entities) for i in group], - dtype=g.np.int64) + E = g.np.array([len(paths[i].entities) for i in group], dtype=g.np.int64) assert E.ptp() == 0 def test_bulge(self): @@ -153,13 +151,14 @@ def test_bulge(self): implicit arcs. 
""" # get a drawing with bulged polylines - p = g.get_mesh('2D/LM2.dxf') + p = g.get_mesh("2D/LM2.dxf") # count the number of unclosed arc entities # this drawing only has polylines with bulge - spans = [e.center(p.vertices)['span'] - for e in p.entities if - type(e).__name__ == 'Arc' and - not e.closed] + spans = [ + e.center(p.vertices)["span"] + for e in p.entities + if type(e).__name__ == "Arc" and not e.closed + ] # should have only one outer loop assert len(p.root) == 1 # should have 6 partial arcs from bulge @@ -169,12 +168,12 @@ def test_bulge(self): def test_text(self): # load file with a single text entity - original = g.get_mesh('2D/text.dxf') + original = g.get_mesh("2D/text.dxf") # export then reload roundtrip = g.trimesh.load( - file_obj=g.io_wrap(original.export(file_type='dxf')), - file_type='dxf') + file_obj=g.io_wrap(original.export(file_type="dxf")), file_type="dxf" + ) for d in [original, roundtrip]: # should contain a single Text entity @@ -205,20 +204,21 @@ def test_unicode(self): the encoding flags in DXF headers. """ # get a base 2D model - m = g.get_mesh('2D/wrench.dxf') + m = g.get_mesh("2D/wrench.dxf") # make one of the entity layers a unicode string # store it as B64 so python2 doesn't get mad - layer = g.base64.b64decode( - 'VFJBw4dBRE9IT1JJWk9OVEFMX1RSQcOHQURPNA==').decode('utf-8') + layer = g.base64.b64decode("VFJBw4dBRE9IT1JJWk9OVEFMX1RSQcOHQURPNA==").decode( + "utf-8" + ) m.entities[0].layer = layer # export to a string - export = m.export(file_type='dxf') + export = m.export(file_type="dxf") # if any unicode survived the export this will fail - export.encode('ascii') + export.encode("ascii") def test_insert_block(self): - a = g.get_mesh('2D/insert.dxf') - b = g.get_mesh('2D/insert_r14.dxf') + a = g.get_mesh("2D/insert.dxf") + b = g.get_mesh("2D/insert_r14.dxf") assert len(a.polygons_full) == 2 assert len(b.polygons_full) == 2 @@ -227,6 +227,6 @@ def test_insert_block(self): assert g.np.isclose(b.area, 54075.0, atol=1) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_edges.py b/tests/test_edges.py index 18eba63db..ffa372681 100644 --- a/tests/test_edges.py +++ b/tests/test_edges.py @@ -5,9 +5,8 @@ class EdgeTest(g.unittest.TestCase): - def test_face_unique(self): - m = g.get_mesh('featuretype.STL') + m = g.get_mesh("featuretype.STL") # our basic edges should have the same # unique values as our faces @@ -25,10 +24,9 @@ def test_face_unique(self): assert (e[:, 0] == e[:, 1]).all() # should be the same values as the original faces - assert (e[:, 0].reshape((-1, 3)) == - g.np.sort(m.faces, axis=1)).all() + assert (e[:, 0].reshape((-1, 3)) == g.np.sort(m.faces, axis=1)).all() -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_encoding.py b/tests/test_encoding.py index a17f28cf9..e282a329f 100644 --- a/tests/test_encoding.py +++ b/tests/test_encoding.py @@ -8,10 +8,10 @@ shape = (10, 10, 10) dense_data = np.random.uniform(size=shape) < 0.2 -rle = enc.RunLengthEncoding.from_dense( - dense_data.reshape((-1,)), dtype=bool).reshape(shape) -brle = enc.BinaryRunLengthEncoding.from_dense( - dense_data.reshape((-1,))).reshape(shape) +rle = enc.RunLengthEncoding.from_dense(dense_data.reshape((-1,)), dtype=bool).reshape( + shape +) +brle = enc.BinaryRunLengthEncoding.from_dense(dense_data.reshape((-1,))).reshape(shape) dense = enc.DenseEncoding(dense_data) indices = np.column_stack(np.where(dense_data)) values = 
np.ones(shape=(indices.shape[0],), dtype=bool) @@ -26,18 +26,14 @@ class EncodingTest(g.unittest.TestCase): - def _test_dense(self, encoding, data): np.testing.assert_equal(encoding.dense, data) def _test_rle(self, encoding, data): - np.testing.assert_equal( - encoding.run_length_data(), rl.dense_to_rle(data)) + np.testing.assert_equal(encoding.run_length_data(), rl.dense_to_rle(data)) def _test_brle(self, encoding, data): - np.testing.assert_equal( - encoding.binary_run_length_data(), - rl.dense_to_brle(data)) + np.testing.assert_equal(encoding.binary_run_length_data(), rl.dense_to_brle(data)) def _test_transpose(self, encoding, data, perm): encoding = encoding.transpose(perm) @@ -53,7 +49,7 @@ def _test_flat(self, encoding, data): self._test_dense(encoding.flat, data.reshape((-1,))) def _test_flipped(self, encoding, data, axes): - if hasattr(axes, '__iter__'): + if hasattr(axes, "__iter__"): for a in axes: data = np.flip(data, a) else: @@ -61,10 +57,17 @@ def _test_flipped(self, encoding, data, axes): self._test_dense(encoding.flip(axes), data) def _test_composite( - self, encoding, data, transpose=(0, 2, 1), reshape=(5, 2, -1), - flatten=True, flip=(0, 2)): + self, + encoding, + data, + transpose=(0, 2, 1), + reshape=(5, 2, -1), + flatten=True, + flip=(0, 2), + ): def check(): self._test_dense(encoding, data) + if transpose is not None: encoding = encoding.transpose(transpose) data = data.transpose(transpose) @@ -75,7 +78,7 @@ def check(): check() if flip: encoding = encoding.flip(flip) - if hasattr(flip, '__iter__'): + if hasattr(flip, "__iter__"): for a in flip: data = np.flip(data, a) else: @@ -103,7 +106,9 @@ def test_brle(self): def test_flipped(self): axes = ( - 0, 1, 2, + 0, + 1, + 2, (0,), (0, 1), (0, 2), @@ -126,24 +131,17 @@ def test_transpose(self): self._test_transpose(encoding, dense_data, perm) perm = (0, 2, 1) if not isinstance(encoding, enc.DenseEncoding): - self.assertTrue( - encoding.transpose(perm).transpose(perm) is encoding) + self.assertTrue(encoding.transpose(perm).transpose(perm) is encoding) def test_flat(self): for encoding in encodings: self._test_dense(encoding.flat, dense_data.reshape((-1,))) def test_reshape(self): - shapes = ( - (10, 10, 10), - (5, 20, 10), - (50, 4, 5), - (-1, 4, 5) - ) + shapes = ((10, 10, 10), (5, 20, 10), (50, 4, 5), (-1, 4, 5)) for encoding in encodings: for shape in shapes: - self._test_dense( - encoding.reshape(shape), dense_data.reshape(shape)) + self._test_dense(encoding.reshape(shape), dense_data.reshape(shape)) def test_composite(self): for encoding in encodings: @@ -153,7 +151,7 @@ def test_dense_stripped(self): base_shape = (5, 5, 5) dense = np.ones(base_shape, dtype=bool) padding = [[2, 2], [2, 2], [2, 2]] - dense = np.pad(dense, padding, mode='constant') + dense = np.pad(dense, padding, mode="constant") encoding = enc.DenseEncoding(dense) stripped, calculated_padding = encoding.stripped np.testing.assert_equal(calculated_padding, padding) @@ -163,24 +161,21 @@ def test_dense_stripped(self): def test_sparse_stripped(self): box = g.trimesh.primitives.Box() box.apply_translation([0.5, 0.5, 0.5]) # center at origin - box.apply_scale(5) # 0 -> 5 + box.apply_scale(5) # 0 -> 5 expected_sparse_indices = np.array(box.vertices) - box.apply_translation([2, 2, 2]) # 2 -> 7 + box.apply_translation([2, 2, 2]) # 2 -> 7 sparse = np.array(box.vertices, dtype=int) encoding = enc.SparseBinaryEncoding(sparse, shape=(9, 9, 9)) stripped, calculated_padding = encoding.stripped - np.testing.assert_equal( - stripped.sparse_indices, 
expected_sparse_indices) - np.testing.assert_equal( - calculated_padding, 2 * np.ones((3, 2), dtype=int)) + np.testing.assert_equal(stripped.sparse_indices, expected_sparse_indices) + np.testing.assert_equal(calculated_padding, 2 * np.ones((3, 2), dtype=int)) def test_empty_stripped(self): res = 10 encoding = enc.DenseEncoding(np.zeros((res,) * 3, dtype=bool)) stripped, calculated_padding = encoding.stripped self.assertEqual(stripped.size, 0) - np.testing.assert_equal( - calculated_padding, [[0, res], [0, res], [0, res]]) + np.testing.assert_equal(calculated_padding, [[0, res], [0, res], [0, res]]) def test_is_empty(self): res = 10 @@ -191,13 +186,14 @@ def test_is_empty(self): self.assertFalse(enc.DenseEncoding(not_empty).is_empty) for cls in ( - enc.SparseEncoding, - enc.RunLengthEncoding, - enc.BinaryRunLengthEncoding): + enc.SparseEncoding, + enc.RunLengthEncoding, + enc.BinaryRunLengthEncoding, + ): self.assertTrue(cls.from_dense(empty).is_empty) self.assertFalse(cls.from_dense(not_empty).is_empty) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_except.py b/tests/test_except.py index 1a0c8cf1e..399835a36 100644 --- a/tests/test_except.py +++ b/tests/test_except.py @@ -5,11 +5,10 @@ class ExceptionsTest(g.unittest.TestCase): - def test_module(self): # create an ExceptionWrapper try: - raise ValueError('nah') + raise ValueError("nah") except BaseException as E: em = g.trimesh.exceptions.ExceptionWrapper(E) @@ -21,12 +20,12 @@ def test_module(self): # should re-raise `ValueError('nah')` em.hi() # if we're here raise an error we don't catch - raise NameError('should not have worked!!') + raise NameError("should not have worked!!") except ValueError: # should have re-raised ValueError pass -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_export.py b/tests/test_export.py index 3ffd72f44..22fc44953 100644 --- a/tests/test_export.py +++ b/tests/test_export.py @@ -7,25 +7,25 @@ class ExportTest(g.unittest.TestCase): - def test_export(self): - from trimesh.exceptions import ExceptionWrapper - export_types = {k for k, v in - g.trimesh.exchange.export._mesh_exporters.items() - if not isinstance(v, ExceptionWrapper)} + export_types = { + k + for k, v in g.trimesh.exchange.export._mesh_exporters.items() + if not isinstance(v, ExceptionWrapper) + } meshes = list(g.get_meshes(8)) # make sure we've got something with texture - meshes.append(g.get_mesh('fuze.obj')) + meshes.append(g.get_mesh("fuze.obj")) for mesh in meshes: # disregard texture mesh.merge_vertices(merge_tex=True, merge_norm=True) for file_type in export_types: # skip pointcloud format - if file_type in ['xyz', 'gltf']: + if file_type in ["xyz", "gltf"]: # a pointcloud format continue # run the export @@ -33,21 +33,19 @@ def test_export(self): # if nothing returned log the message if export is None or len(export) == 0: raise ValueError( - 'No data exported %s to %s', - mesh.metadata['file_name'], - file_type) + "No data exported %s to %s", mesh.metadata["file_name"], file_type + ) if file_type in [ - 'dae', # collada, no native importers - 'collada', # collada, no native importers - 'msgpack', # kind of flaky, but usually works - 'drc']: # DRC is not a lossless format - g.log.warning( - 'no native loaders implemented for collada!') + "dae", # collada, no native importers + "collada", # collada, no native importers + "msgpack", # kind of flaky, but usually works + "drc", + ]: # 
DRC is not a lossless format + g.log.warning("no native loaders implemented for collada!") continue - g.log.info('Export/import testing on %s', - mesh.metadata['file_name']) + g.log.info("Export/import testing on %s", mesh.metadata["file_name"]) # if export is string or bytes wrap as pseudo file object if isinstance(export, str) or isinstance(export, bytes): @@ -55,48 +53,64 @@ def test_export(self): else: file_obj = export - loaded = g.trimesh.load(file_obj=file_obj, - file_type=file_type, - process=True, - merge_norm=True, - merge_tex=True) + loaded = g.trimesh.load( + file_obj=file_obj, + file_type=file_type, + process=True, + merge_norm=True, + merge_tex=True, + ) # if we exported as GLTF/dae it will come back as a Scene if isinstance(loaded, g.trimesh.Scene) and isinstance( - mesh, g.trimesh.Trimesh): + mesh, g.trimesh.Trimesh + ): assert len(loaded.geometry) == 1 loaded = next(iter(loaded.geometry.values())) - if (not g.trimesh.util.is_shape(loaded._data['faces'], (-1, 3)) or - not g.trimesh.util.is_shape(loaded._data['vertices'], (-1, 3)) or - loaded.faces.shape != mesh.faces.shape): - g.log.error('Export -> import for %s on %s wrong shape!', - file_type, - mesh.metadata['file_name']) + if ( + not g.trimesh.util.is_shape(loaded._data["faces"], (-1, 3)) + or not g.trimesh.util.is_shape(loaded._data["vertices"], (-1, 3)) + or loaded.faces.shape != mesh.faces.shape + ): + g.log.error( + "Export -> import for %s on %s wrong shape!", + file_type, + mesh.metadata["file_name"], + ) if loaded.vertices is None: - g.log.error('Export -> import for %s on %s gave None for vertices!', - file_type, - mesh.metadata['file_name']) + g.log.error( + "Export -> import for %s on %s gave None for vertices!", + file_type, + mesh.metadata["file_name"], + ) if loaded.faces.shape != mesh.faces.shape: - raise ValueError('export cycle {} on {} gave faces {}->{}!'.format( - file_type, - mesh.metadata['file_name'], - str(mesh.faces.shape), - str(loaded.faces.shape))) + raise ValueError( + "export cycle {} on {} gave faces {}->{}!".format( + file_type, + mesh.metadata["file_name"], + str(mesh.faces.shape), + str(loaded.faces.shape), + ) + ) if loaded.vertices.shape != mesh.vertices.shape: - raise ValueError('export cycle {} on {} gave vertices {}->{}!'.format( - file_type, - mesh.metadata['file_name'], - mesh.vertices.shape, - loaded.vertices.shape)) + raise ValueError( + "export cycle {} on {} gave vertices {}->{}!".format( + file_type, + mesh.metadata["file_name"], + mesh.vertices.shape, + loaded.vertices.shape, + ) + ) # try exporting/importing certain file types by name - if file_type in ['obj', 'stl', 'ply', 'off']: - temp = g.tempfile.NamedTemporaryFile(suffix='.' + file_type, - delete=False) + if file_type in ["obj", "stl", "ply", "off"]: + temp = g.tempfile.NamedTemporaryFile( + suffix="." 
+ file_type, delete=False + ) # windows throws permissions errors if you keep it open temp.close() @@ -113,21 +127,19 @@ def test_export(self): continue # formats exportable by trimesh and importable by meshlab # make sure things we export can be loaded by meshlab - both = set(g.meshlab_formats).intersection( - set(export_types)) + both = set(g.meshlab_formats).intersection(set(export_types)) # additional options to pass to exporters to try to ferret # out combinations which lead to invalid output - kwargs = {'ply': [{'vertex_normal': True, - 'encoding': 'ascii'}, - {'vertex_normal': True, - 'encoding': 'binary'}, - {'vertex_normal': False, - 'encoding': 'ascii'}, - {'vertex_normal': False, - 'encoding': 'binary'}], - 'stl': [{'file_type': 'stl'}, - {'file_type': 'stl_ascii'}]} + kwargs = { + "ply": [ + {"vertex_normal": True, "encoding": "ascii"}, + {"vertex_normal": True, "encoding": "binary"}, + {"vertex_normal": False, "encoding": "ascii"}, + {"vertex_normal": False, "encoding": "binary"}, + ], + "stl": [{"file_type": "stl"}, {"file_type": "stl_ascii"}], + } # make sure input mesh has garbage removed mesh._validate = True @@ -146,16 +158,14 @@ def test_export(self): # try each combination of options for option in options: temp = g.tempfile.NamedTemporaryFile( - suffix='.' + file_type, - delete=False) - temp_off = g.tempfile.NamedTemporaryFile( - suffix='.off', - delete=False) + suffix="." + file_type, delete=False + ) + temp_off = g.tempfile.NamedTemporaryFile(suffix=".off", delete=False) # windows throws permissions errors if you keep it open temp.close() temp_off.close() # write over the tempfile - option['file_obj'] = temp.name + option["file_obj"] = temp.name mesh.export(**option) # -_- @@ -175,9 +185,9 @@ def test_export(self): g.os.remove(temp_off.name) def test_obj(self): - m = g.get_mesh('textured_tetrahedron.obj', process=False) - export = m.export(file_type='obj') - reconstructed = g.wrapload(export, file_type='obj', process=False) + m = g.get_mesh("textured_tetrahedron.obj", process=False) + export = m.export(file_type="obj") + reconstructed = g.wrapload(export, file_type="obj", process=False) # test that we get at least the same number of normals and texcoords out; # the loader may reorder vertices, so we shouldn't check direct # equality @@ -189,9 +199,7 @@ def test_obj_order(self): reorder vertices. 
""" # get a writeable temp file location - temp = g.tempfile.NamedTemporaryFile( - suffix='.obj', - delete=False) + temp = g.tempfile.NamedTemporaryFile(suffix=".obj", delete=False) temp.close() # simple solid @@ -205,39 +213,38 @@ def test_obj_order(self): assert g.np.allclose(x.faces, y.faces) def test_dict(self): - mesh = g.get_mesh('machinist.XAML') - assert mesh.visual.kind == 'face' + mesh = g.get_mesh("machinist.XAML") + assert mesh.visual.kind == "face" mesh.visual.vertex_colors = mesh.visual.vertex_colors - assert mesh.visual.kind == 'vertex' + assert mesh.visual.kind == "vertex" as_dict = mesh.to_dict() back = g.trimesh.Trimesh(**as_dict) # NOQA def test_scene(self): # get a multi- mesh scene with a transform tree - source = g.get_mesh('cycloidal.3DXML') + source = g.get_mesh("cycloidal.3DXML") # add a transform to zero scene before exporting source.rezero() # export the file as a binary GLTF file, GLB - export = source.export(file_type='glb') + export = source.export(file_type="glb") # re- load the file as a trimesh.Scene object again - loaded = g.wrapload(export, file_type='glb') + loaded = g.wrapload(export, file_type="glb") # the scene should be identical after export-> import cycle - assert g.np.allclose(loaded.extents / source.extents, - 1.0) + assert g.np.allclose(loaded.extents / source.extents, 1.0) def test_gltf_path(self): """ Check to make sure GLTF exports of Path2D and Path3D objects don't immediately crash. """ - path2D = g.get_mesh('2D/wrench.dxf') + path2D = g.get_mesh("2D/wrench.dxf") path3D = path2D.to_3D() - a = g.trimesh.Scene(path2D).export(file_type='glb') - b = g.trimesh.Scene(path3D).export(file_type='glb') + a = g.trimesh.Scene(path2D).export(file_type="glb") + b = g.trimesh.Scene(path3D).export(file_type="glb") assert len(a) > 0 assert len(b) > 0 @@ -252,55 +259,54 @@ def test_parse_file_args(self): RET_COUNT = 5 # a path that doesn't exist - nonexists = f'/banana{g.random()}' + nonexists = f"/banana{g.random()}" assert not g.os.path.exists(nonexists) # loadable OBJ model - exists = g.os.path.join(g.dir_models, 'tube.obj') + exists = g.os.path.join(g.dir_models, "tube.obj") assert g.os.path.exists(exists) # should be able to extract type from passed filename args = f(file_obj=exists, file_type=None) assert len(args) == RET_COUNT - assert args[1] == 'obj' + assert args[1] == "obj" # should be able to extract correct type from longer name - args = f(file_obj=exists, file_type='YOYOMA.oBj') + args = f(file_obj=exists, file_type="YOYOMA.oBj") assert len(args) == RET_COUNT - assert args[1] == 'obj' + assert args[1] == "obj" # with a nonexistent file and no extension it should raise try: args = f(file_obj=nonexists, file_type=None) except ValueError as E: - assert 'not a file' in str(E) + assert "not a file" in str(E) else: - raise ValueError('should have raised exception!') + raise ValueError("should have raised exception!") # nonexistent file with extension passed should return # file name anyway, maybe something else can handle it - args = f(file_obj=nonexists, file_type='.ObJ') + args = f(file_obj=nonexists, file_type=".ObJ") assert len(args) == RET_COUNT # should have cleaned up case - assert args[1] == 'obj' + assert args[1] == "obj" # make sure overriding type works for string filenames - args = f(file_obj=exists, file_type='STL') + args = f(file_obj=exists, file_type="STL") assert len(args) == RET_COUNT # should have used manually passed type over .obj - assert args[1] == 'stl' + assert args[1] == "stl" def test_buffered_random(self): - """Test 
writing to non-standard file - """ + """Test writing to non-standard file""" mesh = list(g.get_meshes(1))[0] with io.BufferedRandom(io.BytesIO()) as rw: - mesh.export(rw, 'STL') + mesh.export(rw, "STL") rw.seek(0) binary_stl = rw.read() self.assertLess(0, len(binary_stl)) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_extrude.py b/tests/test_extrude.py index d2c65dc10..bd8dd792d 100644 --- a/tests/test_extrude.py +++ b/tests/test_extrude.py @@ -5,60 +5,58 @@ try: import triangle # NOQA + has_triangle = True except ImportError: - g.log.warning('No triangle! Not testing extrude primitives!') + g.log.warning("No triangle! Not testing extrude primitives!") has_triangle = False class ExtrudeTest(g.unittest.TestCase): - def test_extrusion(self): if not has_triangle: return transform = g.trimesh.transformations.random_rotation_matrix() - polygon = g.Point([0, 0]).buffer(.5) - e = g.trimesh.primitives.Extrusion( - polygon=polygon, - transform=transform) + polygon = g.Point([0, 0]).buffer(0.5) + e = g.trimesh.primitives.Extrusion(polygon=polygon, transform=transform) # will create an inflated version of the extrusion - b = e.buffer(.1) + b = e.buffer(0.1) assert b.to_mesh().volume > e.to_mesh().volume assert b.contains(e.vertices).all() # try making it smaller - b = e.buffer(-.1) + b = e.buffer(-0.1) assert b.to_mesh().volume < e.to_mesh().volume assert e.contains(b.vertices).all() # try with negative height e = g.trimesh.primitives.Extrusion( - polygon=polygon, - height=-1.0, - transform=transform) + polygon=polygon, height=-1.0, transform=transform + ) assert e.to_mesh().volume > 0.0 # will create an inflated version of the extrusion - b = e.buffer(.1) + b = e.buffer(0.1) assert b.to_mesh().volume > e.to_mesh().volume assert b.contains(e.vertices).all() # try making it smaller - b = e.buffer(-.1) + b = e.buffer(-0.1) assert b.to_mesh().volume < e.to_mesh().volume assert e.contains(b.vertices).all() # try with negative height and transform - transform = [[1., 0., 0., -0.], - [0., 1., 0., 0.], - [-0., -0., -1., -0.], - [0., 0., 0., 1.]] + transform = [ + [1.0, 0.0, 0.0, -0.0], + [0.0, 1.0, 0.0, 0.0], + [-0.0, -0.0, -1.0, -0.0], + [0.0, 0.0, 0.0, 1.0], + ] e = g.trimesh.primitives.Extrusion( - polygon=polygon, - height=-1.0, - transform=transform) + polygon=polygon, height=-1.0, transform=transform + ) assert e.to_mesh().volume > 0.0 for T in g.transforms: @@ -67,11 +65,12 @@ def test_extrusion(self): obb = current.bounding_box_oriented # check to make sure shortcutted OBB is the right size assert g.np.isclose( - obb.volume, - current.to_mesh().bounding_box_oriented.volume) + obb.volume, current.to_mesh().bounding_box_oriented.volume + ) # use OBB transform to project vertices of extrusion to plane points = g.trimesh.transform_points( - current.vertices, g.np.linalg.inv(obb.primitive.transform)) + current.vertices, g.np.linalg.inv(obb.primitive.transform) + ) # half extents of calculated oriented bounding box half = (g.np.abs(obb.primitive.extents) / 2.0) + 1e-3 # every vertex should be inside OBB @@ -87,12 +86,11 @@ def test_extrude_degen(self): # on creation of the mesh coords = g.np.array([[0, 0], [0, 0], [0, 1], [1, 0]]) mesh = g.trimesh.creation.extrude_triangulation( - vertices=coords, - faces=[[0, 1, 2], - [0, 2, 3]], height=1, validate=True) + vertices=coords, faces=[[0, 1, 2], [0, 2, 3]], height=1, validate=True + ) assert mesh.is_watertight -if __name__ == '__main__': +if __name__ == "__main__": 
g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_facets.py b/tests/test_facets.py index 79b2b3af5..1b57464e0 100644 --- a/tests/test_facets.py +++ b/tests/test_facets.py @@ -5,9 +5,8 @@ class FacetTest(g.unittest.TestCase): - def test_facet(self): - m = g.get_mesh('featuretype.STL') + m = g.get_mesh("featuretype.STL") assert len(m.facets) > 0 assert len(m.facets) == len(m.facets_boundary) @@ -31,6 +30,6 @@ def test_empty(self): assert len(m.facets) == len(m.facets_on_hull) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_fill.py b/tests/test_fill.py index a94a86bef..f4d6a9975 100644 --- a/tests/test_fill.py +++ b/tests/test_fill.py @@ -5,11 +5,9 @@ class FillTest(g.unittest.TestCase): - def test_fill(self): - # a path closed with a bowtie, so the topology is wrong - a = g.get_mesh('2D/broken_loop.dxf') + a = g.get_mesh("2D/broken_loop.dxf") assert len(a.paths) == 0 # bowtie shouldn't require any connection distance a.fill_gaps(0.0) @@ -19,18 +17,18 @@ def test_fill(self): assert g.np.isclose(a.area, g.np.prod(a.extents)) # a path with a bowtie and a .05 gap - b = g.get_mesh('2D/broken_pair.dxf') + b = g.get_mesh("2D/broken_pair.dxf") assert len(b.paths) == 0 # should be too small to fill gap - b.fill_gaps(.01) + b.fill_gaps(0.01) assert len(b.paths) == 0 # should be large enough to fill gap - b.fill_gaps(.06) + b.fill_gaps(0.06) assert len(b.paths) == 1 # it is a rectangle assert g.np.isclose(b.area, g.np.prod(b.extents)) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_geom.py b/tests/test_geom.py index a82d291ea..22c587750 100644 --- a/tests/test_geom.py +++ b/tests/test_geom.py @@ -8,9 +8,9 @@ class GeomTests(g.unittest.TestCase): - def test_triangulate(self): from trimesh.geometry import triangulate_quads as tq + # create some triangles and quads tri = (g.random((100, 3)) * 100).astype(g.np.int64) quad = (g.random((100, 4)) * 100).astype(g.np.int64) @@ -33,6 +33,6 @@ def test_triangulate(self): assert len(tq([])) == 0 -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_gltf.py b/tests/test_gltf.py index 76bdbda91..77a00e03d 100644 --- a/tests/test_gltf.py +++ b/tests/test_gltf.py @@ -308,10 +308,12 @@ def test_spec_gloss_factors_only(self): # test that we can load a GLTF with specular/glossiness material without textures s = g.get_mesh("pbr_cubes_emissive_spec_gloss.zip") - assert all(isinstance(m.visual.material, g.trimesh.visual.material.PBRMaterial) - for m in s.geometry.values()) + assert all( + isinstance(m.visual.material, g.trimesh.visual.material.PBRMaterial) + for m in s.geometry.values() + ) - spec_gloss_mat = s.geometry['Cube.005'].visual.material + spec_gloss_mat = s.geometry["Cube.005"].visual.material # this is a special case, because color is only coming from specular. 
# the diffuse value is black assert g.np.allclose(spec_gloss_mat.baseColorFactor, [254, 194, 85, 255], atol=1) diff --git a/tests/test_gmsh.py b/tests/test_gmsh.py index 5cf9c98c2..939823cea 100644 --- a/tests/test_gmsh.py +++ b/tests/test_gmsh.py @@ -20,6 +20,6 @@ def test_generate(self): assert len(result) > 0 -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_grouping.py b/tests/test_grouping.py index 711d391a6..583d4bde0 100644 --- a/tests/test_grouping.py +++ b/tests/test_grouping.py @@ -5,14 +5,12 @@ class GroupTests(g.unittest.TestCase): - def test_unique_rows(self): count = 10000 subset = int(count / 10) # check unique_rows on float data - data = g.np.arange(count * 3).reshape((-1, 3)).astype( - g.np.float64) + data = g.np.arange(count * 3).reshape((-1, 3)).astype(g.np.float64) data[:subset] = data[0] unique, inverse = g.trimesh.grouping.unique_rows(data) assert (inverse[:subset] == 0).all() @@ -89,10 +87,7 @@ def test_block_wrap(self): # case: both ends are in a block data = g.np.array([1, 1, 0, 0, 1, 1, 1, 1]) - kwargs = {'data': data, - 'min_len': 2, - 'wrap': True, - 'only_nonzero': True} + kwargs = {"data": data, "min_len": 2, "wrap": True, "only_nonzero": True} r = blocks(**kwargs) # should be one group assert len(r) == 1 @@ -102,10 +97,7 @@ def test_block_wrap(self): assert g.np.allclose(data[r[0]], 1) check_roll_wrap(**kwargs) - kwargs = {'data': data, - 'min_len': 1, - 'wrap': True, - 'only_nonzero': False} + kwargs = {"data": data, "min_len": 1, "wrap": True, "only_nonzero": False} r = blocks(**kwargs) # should be one group assert len(r) == 2 @@ -116,11 +108,7 @@ def test_block_wrap(self): assert check == set(range(len(data))) check_roll_wrap(**kwargs) - r = blocks( - data, - min_len=1, - wrap=False, - only_nonzero=False) + r = blocks(data, min_len=1, wrap=False, only_nonzero=False) assert len(r) == 3 check = set() for i in r: @@ -129,10 +117,7 @@ def test_block_wrap(self): # CASE: blocks not at the end data = g.np.array([1, 0, 0, 0, 1, 1, 1, 0]) - kwargs = {'data': data, - 'min_len': 1, - 'wrap': True, - 'only_nonzero': True} + kwargs = {"data": data, "min_len": 1, "wrap": True, "only_nonzero": True} r = blocks(**kwargs) assert len(r) == 2 assert len(r[0]) == 1 @@ -141,48 +126,40 @@ def test_block_wrap(self): # one block and one eligible but non-block point data = g.np.array([1, 0, 0, 0, 1, 1, 1, 1]) - r = blocks( - data, - min_len=2, wrap=True, only_nonzero=True) + r = blocks(data, min_len=2, wrap=True, only_nonzero=True) assert len(r) == 1 assert g.np.allclose(data[r[0]], 1) # CASE: neither are in a block but together they are eligible data = g.np.array([1, 0, 0, 0, 1]) - kwargs = {'data': data, - 'min_len': 3, - 'wrap': True, - 'only_nonzero': True} + kwargs = {"data": data, "min_len": 3, "wrap": True, "only_nonzero": True} r = blocks(**kwargs) assert len(r) == 0 check_roll_wrap(**kwargs) - kwargs['only_nonzero'] = False + kwargs["only_nonzero"] = False r = blocks(**kwargs) assert len(r) == 1 assert g.np.allclose(data[r[0]], 0) check_roll_wrap(**kwargs) - kwargs['data'] = g.np.abs(data - 1) + kwargs["data"] = g.np.abs(data - 1) # should be the same even inverted rn = blocks(**kwargs) assert len(r) == 1 assert g.np.allclose(r[0], rn[0]) check_roll_wrap(**kwargs) - kwargs = {'data': data, - 'min_len': 2, - 'wrap': True, - 'only_nonzero': True} + kwargs = {"data": data, "min_len": 2, "wrap": True, "only_nonzero": True} r = blocks(**kwargs) assert len(r) == 1 assert set(r[0]) == {0, 4} 
check_roll_wrap(**kwargs) def test_runs(self): - a = g.np.array([-1, -1, -1, 0, 0, 1, 1, 2, - 0, 3, 3, 4, 4, 5, 5, 6, - 6, 7, 7, 8, 8, 9, 9, 9], - dtype=g.np.int64) + a = g.np.array( + [-1, -1, -1, 0, 0, 1, 1, 2, 0, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 9], + dtype=g.np.int64, + ) r = g.trimesh.grouping.merge_runs(a) u = g.trimesh.grouping.unique_ordered(a) @@ -194,16 +171,15 @@ def test_runs(self): def test_cluster(self): # create some random points stacked with some zeros to cluster - points = g.np.vstack((( - g.random((10000, 3)) * 5).astype(g.np.int64), - g.np.zeros((100, 3)))) + points = g.np.vstack( + ((g.random((10000, 3)) * 5).astype(g.np.int64), g.np.zeros((100, 3))) + ) # should be at least one cluster - assert len(g.trimesh.grouping.clusters(points, .01)) > 0 + assert len(g.trimesh.grouping.clusters(points, 0.01)) > 0 # should be at least one group - assert len(g.trimesh.grouping.group_distance(points, .01)) > 0 + assert len(g.trimesh.grouping.group_distance(points, 0.01)) > 0 def test_unique_float(self): - a = g.np.arange(100) / 2.0 t = g.np.tile(a, 2).flatten() @@ -211,7 +187,8 @@ def test_unique_float(self): assert g.np.allclose(unique, a) unique, index, inverse = g.trimesh.grouping.unique_float( - t, return_index=True, return_inverse=True) + t, return_index=True, return_inverse=True + ) assert g.np.allclose(unique[inverse], t) assert g.np.allclose(unique, t[index]) @@ -241,9 +218,7 @@ def test_group_rows(self): def test_group_vector(self): x = g.np.linspace(-100, 100, 100) - vec = g.np.column_stack((x, - g.np.ones(len(x)), - g.np.zeros(len(x)))) + vec = g.np.column_stack((x, g.np.ones(len(x)), g.np.zeros(len(x)))) vec = g.trimesh.unitize(vec) uv, ui = g.trimesh.grouping.group_vectors(vec) @@ -256,8 +231,7 @@ def test_group_vector(self): assert g.np.allclose(uv, vec) assert len(ui) == len(vec) - uv, ui = g.trimesh.grouping.group_vectors( - vec, include_negative=True) + uv, ui = g.trimesh.grouping.group_vectors(vec, include_negative=True) # since we included negative vectors, there should # be half the number of unique vectors and 2 indexes per vector assert g.np.shape(ui) == (100, 2) @@ -271,14 +245,11 @@ def test_boolean_rows(self): b = b.astype(g.np.int32) # should have one overlapping row - intersection = g.trimesh.grouping.boolean_rows( - a, b, g.np.intersect1d) + intersection = g.trimesh.grouping.boolean_rows(a, b, g.np.intersect1d) assert g.np.allclose(intersection.ravel(), [8, 9]) - diff = g.trimesh.grouping.boolean_rows( - a, b, g.np.setdiff1d) - assert g.np.allclose(g.np.unique(diff), - g.np.arange(8)) + diff = g.trimesh.grouping.boolean_rows(a, b, g.np.setdiff1d) + assert g.np.allclose(g.np.unique(diff), g.np.arange(8)) def test_broken(self): # create a broken mesh referencing @@ -289,13 +260,14 @@ def test_broken(self): mesh.merge_vertices() def test_unique_ordered(self): - a = g.np.array([9, 9, 9, 8, 8, 7, 7, 6, - 6, 5, 5, 4, 4, 3, 3, 0, - 2, 1, 1, 0, 0, -1, -1, -1], - dtype=g.np.int64) + a = g.np.array( + [9, 9, 9, 8, 8, 7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 0, 2, 1, 1, 0, 0, -1, -1, -1], + dtype=g.np.int64, + ) u, ind, inv = g.trimesh.grouping.unique_ordered( - a, return_index=True, return_inverse=True) + a, return_index=True, return_inverse=True + ) # indices are increasing, because we kept original order assert (g.np.diff(ind) > 0).all() @@ -308,8 +280,7 @@ def test_unique_ordered_rows(self): v = g.np.vstack((v, v, v, v)) # index, inverse - i, iv = g.trimesh.grouping.unique_rows( - v, keep_order=True) + i, iv = g.trimesh.grouping.unique_rows(v, keep_order=True) # 
get the unique values from the index u = v[i] @@ -332,20 +303,18 @@ def check_roll_wrap(**kwargs): """ current = None # remove data from passed kwargs - data = kwargs.pop('data') + data = kwargs.pop("data") for i in range(len(data)): - block = g.trimesh.grouping.blocks( - g.np.roll(data, -i), **kwargs) + block = g.trimesh.grouping.blocks(g.np.roll(data, -i), **kwargs) # get result as a set of tuples with the rolling index # removed through a modulus, so we can compare equality - check = {tuple(((j + i) % len(data)).tolist()) - for j in block} + check = {tuple(((j + i) % len(data)).tolist()) for j in block} if current is None: current = check # all values should be the same assert current == check -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_html.py b/tests/test_html.py index 4d95be2b3..135ef9b3e 100644 --- a/tests/test_html.py +++ b/tests/test_html.py @@ -5,12 +5,10 @@ class ViewTest(g.unittest.TestCase): - def test_JSHTML(self): - import trimesh.viewer.notebook as js - m = g.get_mesh('featuretype.STL') + m = g.get_mesh("featuretype.STL") s = m.scene() # a viewable scene @@ -19,6 +17,7 @@ def test_JSHTML(self): # check it out as an LXML document from lxml.html import fromstring + h = fromstring(html) # should have some children @@ -33,6 +32,6 @@ def test_inNB(self): assert not js.in_notebook() -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_identifier.py b/tests/test_identifier.py index 16ef93483..5e28c4032 100644 --- a/tests/test_identifier.py +++ b/tests/test_identifier.py @@ -5,18 +5,17 @@ class IdentifierTest(g.unittest.TestCase): - def test_identifier(self, count=25): - meshes = g.np.append(list(g.get_meshes( - only_watertight=True, split=True, min_volume=0.001)), - g.get_mesh('fixed_top.ply')) + meshes = g.np.append( + list(g.get_meshes(only_watertight=True, split=True, min_volume=0.001)), + g.get_mesh("fixed_top.ply"), + ) for mesh in meshes: if not mesh.is_volume or mesh.body_count != 1: - g.log.warning('Mesh %s is not watertight!', - mesh.metadata['file_name']) + g.log.warning("Mesh %s is not watertight!", mesh.metadata["file_name"]) continue - g.log.info('Trying hash at %d random transforms', count) + g.log.info("Trying hash at %d random transforms", count) hashed = [] identifier = [] for transform in g.random_transforms(count): @@ -29,17 +28,20 @@ def test_identifier(self, count=25): if not ok: ptp = g.np.ptp(identifier, axis=0) - g.log.error('Hashes on %s differ after transform:\n %s\n', - mesh.metadata['file_name'], str(ptp)) - raise ValueError('values differ after transform!') + g.log.error( + "Hashes on %s differ after transform:\n %s\n", + mesh.metadata["file_name"], + str(ptp), + ) + raise ValueError("values differ after transform!") # stretch the mesh by a small amount - stretched = mesh.copy().apply_scale( - [0.99974507, 0.9995662, 1.0029832]) + stretched = mesh.copy().apply_scale([0.99974507, 0.9995662, 1.0029832]) if hashed[-1] == stretched.identifier_hash: raise ValueError( - 'Hashes on %s didn\'t change after stretching', - mesh.metadata['file_name']) + "Hashes on %s didn't change after stretching", + mesh.metadata["file_name"], + ) def test_scene_id(self): """ @@ -47,7 +49,7 @@ def test_scene_id(self): make sure transforming meshes around it doesn't change the nuts of their identifier hash. 
""" - scenes = [g.get_mesh('cycloidal.3DXML')] + scenes = [g.get_mesh("cycloidal.3DXML")] for s in scenes: for geom_name, mesh in s.geometry.items(): @@ -63,18 +65,20 @@ def test_scene_id(self): m = mesh.copy() m.apply_transform(T) meshes.append(m) - if not all(meshes[0].identifier_hash == i.identifier_hash - for i in meshes): - raise ValueError( - f'{geom_name} differs after transform!') + if not all( + meshes[0].identifier_hash == i.identifier_hash for i in meshes + ): + raise ValueError(f"{geom_name} differs after transform!") # check an example for a mirrored part - assert (scenes[0].geometry['disc_cam_B'].identifier_hash != - scenes[0].geometry['disc_cam_A'].identifier_hash) + assert ( + scenes[0].geometry["disc_cam_B"].identifier_hash + != scenes[0].geometry["disc_cam_A"].identifier_hash + ) def test_reflection(self): # identifier should detect mirroring - a = g.get_mesh('featuretype.STL') + a = g.get_mesh("featuretype.STL") b = a.copy() b.vertices[:, 2] *= -1.0 b.invert() @@ -83,7 +87,7 @@ def test_reflection(self): assert a.identifier_hash != b.identifier_hash # a mesh which is quite sensitive to mirroring - a = g.get_mesh('mirror.ply') + a = g.get_mesh("mirror.ply") b = a.copy() b.vertices[:, 2] *= -1.0 b.invert() @@ -93,10 +97,10 @@ def test_reflection(self): def test_duplicates(self): def clean_name(name): - return name.split('_', 1)[0].split('#', 1)[0] + return name.split("_", 1)[0].split("#", 1)[0] # a scene with instances - s = g.get_mesh('cycloidal.3DXML') + s = g.get_mesh("cycloidal.3DXML") # a flat scene dump d = g.trimesh.Scene(s.dump()) @@ -110,10 +114,8 @@ def clean_name(name): # should be the same in both forms assert len(a) == len(b) - a_set = {tuple(sorted([clean_name(i) for i in group])) - for group in a} - b_set = {tuple(sorted([clean_name(i) for i in group])) - for group in b} + a_set = {tuple(sorted([clean_name(i) for i in group])) for group in a} + b_set = {tuple(sorted([clean_name(i) for i in group])) for group in b} assert a_set == b_set ptp = [] @@ -126,6 +128,6 @@ def clean_name(name): assert (ptp[-1] < 0.01).all() -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_import.py b/tests/test_import.py index 74e2f5930..97a486118 100644 --- a/tests/test_import.py +++ b/tests/test_import.py @@ -8,9 +8,9 @@ class ImportTests(unittest.TestCase): - def test_path(self): import os + # make sure trimesh imports without any environment variables # this was failing on `PATH` at some point keys = list(os.environ.keys()) @@ -27,5 +27,5 @@ def test_path(self): assert trimesh.creation.icosphere().is_volume -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_inertia.py b/tests/test_inertia.py index a5302db57..95770fa83 100644 --- a/tests/test_inertia.py +++ b/tests/test_inertia.py @@ -20,30 +20,35 @@ def tetToTris(tet): class InertiaTest(g.unittest.TestCase): - def test_inertia(self): - t0 = g.np.array([[-0.419575686853, -0.898655215203, -0.127965023308, 0.], - [0.712589964872, -0.413418145015, 0.566834172697, 0.], - [-0.562291548012, 0.146643245877, 0.813832890385, 0.], - [0., 0., 0., 1.]]) - t1 = g.np.array([[0.343159553585, 0.624765521319, -0.701362648103, 0.], - [0.509982849005, -0.750986657709, -0.419447891476, 0.], - [-0.788770571525, -0.213745370274, -0.57632794673, 0.], - [0., 0., 0., 1.]]) + t0 = g.np.array( + [ + [-0.419575686853, -0.898655215203, -0.127965023308, 0.0], + [0.712589964872, -0.413418145015, 0.566834172697, 0.0], + [-0.562291548012, 
0.146643245877, 0.813832890385, 0.0],
+                [0.0, 0.0, 0.0, 1.0],
+            ]
+        )
+        t1 = g.np.array(
+            [
+                [0.343159553585, 0.624765521319, -0.701362648103, 0.0],
+                [0.509982849005, -0.750986657709, -0.419447891476, 0.0],
+                [-0.788770571525, -0.213745370274, -0.57632794673, 0.0],
+                [0.0, 0.0, 0.0, 1.0],
+            ]
+        )
 
         # make sure our transformations are actually still transformations
         assert g.np.abs(g.np.dot(t0, t0.T) - g.np.eye(4)).max() < 1e-10
         assert g.np.abs(g.np.dot(t1, t1.T) - g.np.eye(4)).max() < 1e-10
 
-        c = g.trimesh.primitives.Cylinder(height=10,
-                                          radius=1,
-                                          sections=720,  # number of slices
-                                          transform=t0)
+        c = g.trimesh.primitives.Cylinder(
+            height=10, radius=1, sections=720, transform=t0  # sections = slice count
+        )
         c0m = c.moment_inertia.copy()
-        c0 = g.trimesh.inertia.cylinder_inertia(c.volume,
-                                                c.primitive.radius,
-                                                c.primitive.height,
-                                                c.primitive.transform)
+        c0 = g.trimesh.inertia.cylinder_inertia(
+            c.volume, c.primitive.radius, c.primitive.height, c.primitive.transform
+        )
 
         ct = g.np.abs((c0m / c0) - 1)
 
@@ -56,14 +61,12 @@ def test_inertia(self):
         # the direction (long axis) of the cylinder should correspond to
         # the smallest principal component of inertia, AKA rotation along
         # the axis, rather than the other two which are perpendicular
-        components, vectors = g.trimesh.inertia.principal_axis(
-            c.moment_inertia)
+        components, vectors = g.trimesh.inertia.principal_axis(c.moment_inertia)
         # inferred cylinder axis
         inferred = vectors[components.argmin()]
 
         # inferred cylinder axis should be close to actual cylinder axis
-        axis_test = g.np.allclose(g.np.abs(inferred),
-                                  g.np.abs(c.direction))
+        axis_test = g.np.allclose(g.np.abs(inferred), g.np.abs(c.direction))
         assert axis_test
 
         # make sure Trimesh attribute is plumbed correctly
@@ -73,11 +76,10 @@ def test_inertia(self):
         # the other two axis of the cylinder should be identical
         assert g.np.abs(g.np.diff(g.np.sort(components)[-2:])).max() < 1e-8
 
-        m = g.get_mesh('featuretype.STL')
+        m = g.get_mesh("featuretype.STL")
         i0 = m.moment_inertia.copy()
         # rotate the moment of inertia
-        i1 = g.trimesh.inertia.transform_inertia(
-            transform=t0, inertia_tensor=i0)
+        i1 = g.trimesh.inertia.transform_inertia(transform=t0, inertia_tensor=i0)
 
         # rotate the mesh
         m.apply_transform(t0)
@@ -87,31 +89,31 @@ def test_inertia(self):
         assert tf_test.max() < 1e-6
 
         # do it again with another transform
-        i2 = g.trimesh.inertia.transform_inertia(
-            transform=t1, inertia_tensor=i1)
+        i2 = g.trimesh.inertia.transform_inertia(transform=t1, inertia_tensor=i1)
         m.apply_transform(t1)
 
         tf_test = g.np.abs((m.moment_inertia / i2) - 1)
         assert tf_test.max() < 1e-6
 
     def test_primitives(self):
-        primitives = [g.trimesh.primitives.Cylinder(height=5),
-                      g.trimesh.primitives.Box(),
-                      g.trimesh.primitives.Sphere(radius=1.23)]
+        primitives = [
+            g.trimesh.primitives.Cylinder(height=5),
+            g.trimesh.primitives.Box(),
+            g.trimesh.primitives.Sphere(radius=1.23),
+        ]
         for p in primitives:
             for _i in range(100):
                 # check to make sure the analytic inertia tensors are relatively
                 # close to the meshed inertia tensor (order of magnitude and
                 # sign)
                 b = p.to_mesh()
-                comparison = g.np.abs(
-                    p.moment_inertia - b.moment_inertia)
+                comparison = g.np.abs(p.moment_inertia - b.moment_inertia)
                 c_max = comparison.max() / g.np.abs(p.moment_inertia).max()
-                assert c_max < .1
+                assert c_max < 0.1
 
-                if hasattr(p.primitive, 'transform'):
+                if hasattr(p.primitive, "transform"):
                     matrix = g.trimesh.transformations.random_rotation_matrix()
                     p.primitive.transform = matrix
-                elif hasattr(p.primitive, 'center'):
+                elif 
hasattr(p.primitive, "center"): p.primitive.center = g.random(3) def test_tetrahedron(self): @@ -122,10 +124,14 @@ def test_tetrahedron(self): # http://thescipub.com/pdf/jmssp.2005.8.11.pdf # set up given vertices - vertices = g.np.float32([[8.3322, -11.86875, 0.93355], - [0.75523, 5., 16.37072], - [52.61236, 5., -5.3858], - [2., 5., 3.]]) + vertices = g.np.float32( + [ + [8.3322, -11.86875, 0.93355], + [0.75523, 5.0, 16.37072], + [52.61236, 5.0, -5.3858], + [2.0, 5.0, 3.0], + ] + ) # set up a simple trimesh tetrahedron tris = tetToTris(g.np.int32([0, 1, 2, 3])) @@ -147,9 +153,9 @@ def test_tetrahedron(self): # c' (Eq. 9f) computes from x and y values # therefore the given matrix E_Q (Eq. 2) is not correct # b' and c' need to be swapped like this: - MI_gt = g.np.float32([[a_mi, -c_pi, -b_pi], - [-c_pi, b_mi, -a_pi], - [-b_pi, -a_pi, c_mi]]) + MI_gt = g.np.float32( + [[a_mi, -c_pi, -b_pi], [-c_pi, b_mi, -a_pi], [-b_pi, -a_pi, c_mi]] + ) # check center of mass assert g.np.allclose(CM_gt, tm_tet.center_mass) @@ -158,7 +164,6 @@ def test_tetrahedron(self): assert g.np.allclose(MI_gt, tm_tet.moment_inertia) def test_cube_with_tetras(self): - # set up a unit cube, vertices in this order: # 1-----2 # /| /| @@ -167,37 +172,50 @@ def test_cube_with_tetras(self): # |/ |/ # 4-----7 - vertices = g.np.float32([[-1, -1, 1], - [-1, 1, 1], - [1, 1, 1], - [1, -1, 1], - [-1, -1, -1], - [-1, 1, -1], - [1, 1, -1], - [1, -1, -1]]) * 0.5 + vertices = ( + g.np.float32( + [ + [-1, -1, 1], + [-1, 1, 1], + [1, 1, 1], + [1, -1, 1], + [-1, -1, -1], + [-1, 1, -1], + [1, 1, -1], + [1, -1, -1], + ] + ) + * 0.5 + ) # 6 quad faces for the cube - quads = g.np.int32([[3, 2, 1, 0], - [0, 1, 5, 4], - [1, 2, 6, 5], - [2, 3, 7, 6], - [3, 0, 4, 7], - [4, 5, 6, 7]]) + quads = g.np.int32( + [ + [3, 2, 1, 0], + [0, 1, 5, 4], + [1, 2, 6, 5], + [2, 3, 7, 6], + [3, 0, 4, 7], + [4, 5, 6, 7], + ] + ) # set up two different tetraherdalizations of a cube # using 5 tets (1 in the middle and 4 around) - tetsA = g.np.int32([[0, 1, 3, 4], - [1, 2, 3, 6], - [1, 4, 5, 6], - [3, 4, 6, 7], - [1, 3, 4, 6]]) + tetsA = g.np.int32( + [[0, 1, 3, 4], [1, 2, 3, 6], [1, 4, 5, 6], [3, 4, 6, 7], [1, 3, 4, 6]] + ) # using 6 sets (around the cube diagonal) - tetsB = g.np.int32([[0, 1, 2, 6], - [0, 1, 6, 5], - [0, 4, 5, 6], - [0, 4, 6, 7], - [0, 3, 7, 6], - [0, 2, 3, 6]]) + tetsB = g.np.int32( + [ + [0, 1, 2, 6], + [0, 1, 6, 5], + [0, 4, 5, 6], + [0, 4, 6, 7], + [0, 3, 7, 6], + [0, 2, 3, 6], + ] + ) # create a trimesh cube from vertices and faces tm_cube = g.trimesh.Trimesh(vertices, quads) @@ -262,22 +280,30 @@ def test_frame_inertia_box(self): b = 2 d = 1 # definition in frame 0 - vertices = g.np.float32([[0, 0, 0], - [d, 0, 0], - [d, b, 0], - [0, b, 0], - [0, 0, h], - [d, 0, h], - [d, b, h], - [0, b, h]]) + vertices = g.np.float32( + [ + [0, 0, 0], + [d, 0, 0], + [d, b, 0], + [0, b, 0], + [0, 0, h], + [d, 0, h], + [d, b, h], + [0, b, h], + ] + ) # 6 quad faces for the cube - quads = g.np.int32([[3, 2, 1, 0], - [0, 1, 5, 4], - [1, 2, 6, 5], - [2, 3, 7, 6], - [3, 0, 4, 7], - [4, 5, 6, 7]]) + quads = g.np.int32( + [ + [3, 2, 1, 0], + [0, 1, 5, 4], + [1, 2, 6, 5], + [2, 3, 7, 6], + [3, 0, 4, 7], + [4, 5, 6, 7], + ] + ) # create a trimesh cube from vertices and faces tm_cube = g.trimesh.Trimesh(vertices, quads) @@ -307,19 +333,23 @@ def parallel_axis_theorem(inertia, mass, a1, a2, a3): """ # copy from wikipedia return inertia + mass * g.np.array( - [[a2**2 + a3**2, -a1 * a2, -a1 * a3], - [-a1 * a2, a1**2 + - a3**2, -a2 * a3], - [-a1 * a3, -a2 * a3, a1**2 + 
a2**2]]) + [ + [a2**2 + a3**2, -a1 * a2, -a1 * a3], + [-a1 * a2, a1**2 + a3**2, -a2 * a3], + [-a1 * a3, -a2 * a3, a1**2 + a2**2], + ] + ) # CHECK FRAME 0 # analytical calculations of inertia tensor by hand - inertia0 = 0.083333333333 * mass * g.np.diag([h**2 + b**2, - h**2 + d**2, - b**2 + d**2]) - a1 = - 0.5 * d - a2 = - 0.5 * b - a3 = - 0.5 * h + inertia0 = ( + 0.083333333333 + * mass + * g.np.diag([h**2 + b**2, h**2 + d**2, b**2 + d**2]) + ) + a1 = -0.5 * d + a2 = -0.5 * b + a3 = -0.5 * h inertia0 = parallel_axis_theorem(inertia0, mass, a1, a2, a3) # transformation from mesh base frame to frame 0 t0 = g.np.eye(4) @@ -327,32 +357,38 @@ def parallel_axis_theorem(inertia, mass, a1, a2, a3): # CHECK FRAME 1 # analytical calculations of inertia tensor by hand - inertia1 = 0.083333333333 * mass * g.np.diag([h**2 + d**2, - h**2 + b**2, - b**2 + d**2]) - a1 = - 0.5 * b + inertia1 = ( + 0.083333333333 + * mass + * g.np.diag([h**2 + d**2, h**2 + b**2, b**2 + d**2]) + ) + a1 = -0.5 * b a2 = 0.5 * d - a3 = - 0.5 * h + a3 = -0.5 * h inertia1 = parallel_axis_theorem(inertia1, mass, a1, a2, a3) # transformation from mesh base frame to frame 1 # rotation of 90 deg around z-Axis t1 = g.trimesh.transformations.rotation_matrix( - g.np.pi * 0.5, [0, 0, 1], [0, 0, 0]) + g.np.pi * 0.5, [0, 0, 1], [0, 0, 0] + ) assert g.np.allclose(tm_cube.moment_inertia_frame(t1), inertia1) # CHECK FRAME 2 # analytical calculations of inertia tensor by hand - inertia2 = 0.083333333333 * mass * g.np.diag([h**2 + b**2, - b**2 + d**2, - h**2 + d**2]) - a1 = - 0.5 * d + inertia2 = ( + 0.083333333333 + * mass + * g.np.diag([h**2 + b**2, b**2 + d**2, h**2 + d**2]) + ) + a1 = -0.5 * d a2 = 0.5 * h - a3 = - 0.5 * b + a3 = -0.5 * b inertia2 = parallel_axis_theorem(inertia2, mass, a1, a2, a3) # transformation from mesh base frame to frame 2 # rotation of -90 deg around x-Axis t2 = g.trimesh.transformations.rotation_matrix( - -g.np.pi * 0.5, [1, 0, 0], [0, 0, 0]) + -g.np.pi * 0.5, [1, 0, 0], [0, 0, 0] + ) assert g.np.allclose(tm_cube.moment_inertia_frame(t2), inertia2) def test_scene(self): @@ -365,7 +401,7 @@ def test_scene(self): # contained in `trimesh.inertia.scene_inertia` # scene with instancing - s = g.get_mesh('cycloidal.3DXML') + s = g.get_mesh("cycloidal.3DXML") s._cache.clear() with g.Profiler() as P: @@ -388,8 +424,7 @@ def test_scene(self): s._cache.clear() with g.Profiler() as P: - total_scene = g.trimesh.inertia.scene_inertia( - s, transform=transform) + total_scene = g.trimesh.inertia.scene_inertia(s, transform=transform) g.log.debug(P.output_text()) # compare the two calculation methods by percent @@ -401,35 +436,32 @@ def test_scene(self): class MassTests(g.unittest.TestCase): - def setUp(self): # inertia numbers pulled from solidworks - self.truth = g.data['mass_properties'] + self.truth = g.data["mass_properties"] self.meshes = {} for data in self.truth: - filename = data['filename'] + filename = data["filename"] self.meshes[filename] = g.get_mesh(filename) def test_mass(self): - for truth in self.truth: - mesh = self.meshes[truth['filename']] + mesh = self.meshes[truth["filename"]] calc = g.trimesh.triangles.mass_properties( - triangles=mesh.triangles, - density=truth['density'], - skip_inertia=False) + triangles=mesh.triangles, density=truth["density"], skip_inertia=False + ) for key, _value in calc.items(): if key not in truth: continue if not g.np.allclose(calc[key], truth[key], atol=1e-2): - raise ValueError('{}({}):\n{}\n!=\n{}'.format( - truth['filename'], - key, - calc[key], - g.np.array(truth[key]))) + 
raise ValueError( + "{}({}):\n{}\n!=\n{}".format( + truth["filename"], key, calc[key], g.np.array(truth[key]) + ) + ) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_integralmeancurvature.py b/tests/test_integralmeancurvature.py index 58937fffc..4375cde5e 100644 --- a/tests/test_integralmeancurvature.py +++ b/tests/test_integralmeancurvature.py @@ -5,13 +5,11 @@ class IntegralMeanCurvatureTest(g.unittest.TestCase): - def test_IMCsphere(self): # how close do we need to be - relative tolerance tol = 1e-3 for radius in [0.1, 1.0, 3.1459, 29.20]: - m = g.trimesh.creation.icosphere( - subdivisions=4, radius=radius) + m = g.trimesh.creation.icosphere(subdivisions=4, radius=radius) IMC = m.integral_mean_curvature ref = 4 * g.np.pi * radius assert g.np.isclose(IMC, ref, rtol=tol) @@ -22,8 +20,7 @@ def test_IMCcapsule(self): radius = 1.2 for aspect_ratio in [0.0, 0.5, 1.0, 4.0, 100]: L = aspect_ratio * radius - m = g.trimesh.creation.capsule( - height=L, radius=radius, count=[64, 64]) + m = g.trimesh.creation.capsule(height=L, radius=radius, count=[64, 64]) IMC = m.integral_mean_curvature ref = g.np.pi * (L + 4 * radius) assert g.np.isclose(IMC, ref, rtol=tol) @@ -42,6 +39,6 @@ def test_IMCbox(self): assert g.np.isclose(IMC, ref, rtol=tol) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_interval.py b/tests/test_interval.py index 662e4e48c..245c13919 100644 --- a/tests/test_interval.py +++ b/tests/test_interval.py @@ -5,30 +5,22 @@ class IntervalTest(g.unittest.TestCase): - def test_intersection(self): - - pairs = g.np.array([[[0, 1], [1, 2]], - [[1, 0], [1, 2]], - [[0, 0], [0, 0]], - [[10, 20], [9, 21]], - [[5, 15], [7, 10]], - [[5, 10], [10, 9]], - [[0, 1], [0.9, 10]]]) - tru_hit = [False, - False, - False, - True, - True, - True, - True] - tru_int = g.np.array([[0.0, 0.0], - [0.0, 0.0], - [0.0, 0.0], - [10, 20], - [7, 10], - [9, 10], - [0.9, 1.0]]) + pairs = g.np.array( + [ + [[0, 1], [1, 2]], + [[1, 0], [1, 2]], + [[0, 0], [0, 0]], + [[10, 20], [9, 21]], + [[5, 15], [7, 10]], + [[5, 10], [10, 9]], + [[0, 1], [0.9, 10]], + ] + ) + tru_hit = [False, False, False, True, True, True, True] + tru_int = g.np.array( + [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [10, 20], [7, 10], [9, 10], [0.9, 1.0]] + ) func = g.trimesh.interval.intersection @@ -44,6 +36,6 @@ def test_intersection(self): assert g.np.allclose(r_i, tru_int) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_light.py b/tests/test_light.py index 531b96adc..2e81e6074 100644 --- a/tests/test_light.py +++ b/tests/test_light.py @@ -5,22 +5,23 @@ class LightTests(g.unittest.TestCase): - def test_basic(self): - for light_class in [g.trimesh.scene.lighting.DirectionalLight, - g.trimesh.scene.lighting.PointLight, - g.trimesh.scene.lighting.SpotLight]: + for light_class in [ + g.trimesh.scene.lighting.DirectionalLight, + g.trimesh.scene.lighting.PointLight, + g.trimesh.scene.lighting.SpotLight, + ]: light = light_class() assert isinstance(light.intensity, float) assert light.color.shape == (4,) assert light.color.dtype == g.np.uint8 def test_scene(self): - s = g.get_mesh('duck.dae') + s = g.get_mesh("duck.dae") assert len(s.lights) > 0 assert isinstance(s.camera, g.trimesh.scene.cameras.Camera) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git 
a/tests/test_loaded.py b/tests/test_loaded.py index 222ee421b..693141e7e 100644 --- a/tests/test_loaded.py +++ b/tests/test_loaded.py @@ -5,21 +5,19 @@ class LoaderTest(g.unittest.TestCase): - def test_remote(self): """ Try loading a remote mesh using requests """ # get a unit cube from localhost with g.serve_meshes() as address: - mesh = g.trimesh.exchange.load.load_remote( - url=address + '/unit_cube.STL') + mesh = g.trimesh.exchange.load.load_remote(url=address + "/unit_cube.STL") assert g.np.isclose(mesh.volume, 1.0) assert isinstance(mesh, g.trimesh.Trimesh) def test_stl(self): - model = g.get_mesh('empty.stl') + model = g.get_mesh("empty.stl") assert model.is_empty def test_meshio(self): @@ -28,19 +26,16 @@ def test_meshio(self): except BaseException: return # if meshio is importable we should be able to load this - m = g.get_mesh('insulated.msh') + m = g.get_mesh("insulated.msh") assert len(m.faces) > 0 assert m.area > 1e-5 def test_fileobj(self): # make sure we don't close file objects that were passed # check load_mesh - file_obj = open( - g.os.path.join(g.dir_models, 'featuretype.STL'), 'rb') + file_obj = open(g.os.path.join(g.dir_models, "featuretype.STL"), "rb") assert not file_obj.closed - mesh = g.trimesh.load( - file_obj=file_obj, - file_type='stl') + mesh = g.trimesh.load(file_obj=file_obj, file_type="stl") # should have actually loaded the mesh assert len(mesh.faces) == 3476 # should not close the file object @@ -49,12 +44,9 @@ def test_fileobj(self): file_obj.close() # check load_path - file_obj = open( - g.os.path.join(g.dir_models, '2D', 'wrench.dxf'), 'rb') + file_obj = open(g.os.path.join(g.dir_models, "2D", "wrench.dxf"), "rb") assert not file_obj.closed - path = g.trimesh.load( - file_obj=file_obj, - file_type='dxf') + path = g.trimesh.load(file_obj=file_obj, file_type="dxf") assert g.np.isclose(path.area, 1.667, atol=1e-2) # should have actually loaded the path # should not close the file object @@ -63,6 +55,6 @@ def test_fileobj(self): file_obj.close() -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_medial.py b/tests/test_medial.py index 70bafcfae..6aa957ff8 100644 --- a/tests/test_medial.py +++ b/tests/test_medial.py @@ -5,24 +5,24 @@ class MedialTests(g.unittest.TestCase): - def test_medial(self): - p = g.get_mesh('2D/wrench.dxf') + p = g.get_mesh("2D/wrench.dxf") assert p.is_closed assert len(p.polygons_full) == 1 poly = p.polygons_full[0] medial = p.medial_axis() points = medial.vertices.view(g.np.ndarray) - assert all(poly.contains(g.Point(v)) - for v in points) + assert all(poly.contains(g.Point(v)) for v in points) # circles are a special case for medial axis poly = g.Point([0, 0]).buffer(1.0) # construct a Path2D from the polygon medial axis med = g.trimesh.path.Path2D( **g.trimesh.path.exchange.misc.edges_to_path( - *g.trimesh.path.polygons.medial_axis(poly))) + *g.trimesh.path.polygons.medial_axis(poly) + ) + ) # should have returned a single tiny line # with midpoint at origin assert len(med.vertices) == 2 @@ -30,6 +30,6 @@ def test_medial(self): assert float(med.vertices.mean(axis=0).ptp()) < 1e-8 -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_merge.py b/tests/test_merge.py index 4166bc177..c5a2489cd 100644 --- a/tests/test_merge.py +++ b/tests/test_merge.py @@ -5,7 +5,6 @@ class MargeTest(g.unittest.TestCase): - def test_cube(self): """ Test PointCloud object @@ -18,8 +17,7 @@ def 
test_cube(self): assert m.euler_number == 2 # stack a bunch of unreferenced vertices - m.vertices = g.np.vstack(( - m.vertices, g.random((10000, 3)))) + m.vertices = g.np.vstack((m.vertices, g.random((10000, 3)))) assert m.euler_number == 2 assert m.vertices.shape == (10008, 3) assert m.referenced_vertices.sum() == 8 @@ -50,6 +48,6 @@ def test_cube(self): assert copied.referenced_vertices.sum() == 8 -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_mesh.py b/tests/test_mesh.py index 1ebb2790f..e4077dd47 100644 --- a/tests/test_mesh.py +++ b/tests/test_mesh.py @@ -8,24 +8,22 @@ class MeshTests(g.unittest.TestCase): - def test_meshes(self): # make sure we can load everything we think we can formats = g.trimesh.available_formats() assert all(isinstance(i, str) for i in formats) assert all(len(i) > 0 for i in formats) - assert all(i in formats - for i in ['stl', 'ply', 'off', 'obj']) + assert all(i in formats for i in ["stl", "ply", "off", "obj"]) for mesh in g.get_meshes(raise_error=True): # log file name for debugging - file_name = mesh.metadata['file_name'] + file_name = mesh.metadata["file_name"] # ply files can return PointCloud objects - if file_name.startswith('points_'): + if file_name.startswith("points_"): continue - g.log.info('Testing %s', file_name) + g.log.info("Testing %s", file_name) start = {mesh.__hash__(), mesh.__hash__()} assert len(mesh.faces) > 0 assert len(mesh.vertices) > 0 @@ -46,8 +44,8 @@ def test_meshes(self): # check edges_unique assert len(mesh.edges) == len(mesh.edges_unique_inverse) assert g.np.allclose( - mesh.edges_sorted, - mesh.edges_unique[mesh.edges_unique_inverse]) + mesh.edges_sorted, mesh.edges_unique[mesh.edges_unique_inverse] + ) assert len(mesh.edges_unique) == len(mesh.edges_unique_length) # euler number should be an integer @@ -66,8 +64,7 @@ def test_meshes(self): # still shouldn't have changed anything assert start == {mesh.__hash__(), mesh.__hash__()} - if not (mesh.is_watertight and - mesh.is_winding_consistent): + if not (mesh.is_watertight and mesh.is_winding_consistent): continue assert len(mesh.facets) == len(mesh.facets_area) @@ -85,22 +82,23 @@ def test_meshes(self): assert abs(mesh.volume) > 0.0 - section = mesh.section(plane_normal=[0, 0, 1], # NOQA - plane_origin=mesh.centroid) + mesh.section( + plane_normal=[0, 0, 1], plane_origin=mesh.centroid + ) sample = mesh.sample(1000) even_sample = g.trimesh.sample.sample_surface_even(mesh, 100) # NOQA assert sample.shape == (1000, 3) - g.log.info('finished testing meshes') + g.log.info("finished testing meshes") # make sure vertex kdtree and triangles rtree exist t = mesh.kdtree - assert hasattr(t, 'query') - g.log.info('Creating triangles tree') + assert hasattr(t, "query") + g.log.info("Creating triangles tree") r = mesh.triangles_tree - assert hasattr(r, 'intersection') - g.log.info('Triangles tree ok') + assert hasattr(r, "intersection") + g.log.info("Triangles tree ok") # face angles should have same assert mesh.face_angles.shape == mesh.faces.shape @@ -118,26 +116,24 @@ def test_meshes(self): continue # nothing in the cache should be writeable - if cached.flags['WRITEABLE']: - raise ValueError(f'{name} is writeable!') + if cached.flags["WRITEABLE"]: + raise ValueError(f"{name} is writeable!") # only check int, float, and bool - if cached.dtype.kind not in 'ibf': + if cached.dtype.kind not in "ibf": continue # there should never be NaN values if g.np.isnan(cached).any(): - raise ValueError('NaN values in %s/%s', 
-                                     file_name, name)
+                    raise ValueError(f"NaN values in {file_name}/{name}")
 
                 # fields allowed to have infinite values
-                if name in ['face_adjacency_radius']:
+                if name in ["face_adjacency_radius"]:
                     continue
 
                 # make sure everything is finite
                 if not g.np.isfinite(cached).all():
-                    raise ValueError('inf values in %s/%s',
-                                     file_name, name)
+                    raise ValueError(f"inf values in {file_name}/{name}")
 
             # ...still shouldn't have changed anything
             assert start == {mesh.__hash__(), mesh.__hash__()}
@@ -146,10 +142,10 @@ def test_meshes(self):
             if len(writeable) > 0:
                 # TODO : all cached values should be read-only
                 g.log.error(
-                    'cached properties writeable: {}'.format(
-                        ', '.join(writeable)))
+                    "cached properties writeable: {}".format(", ".join(writeable))
+                )
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     g.trimesh.util.attach_to_log()
     g.unittest.main()
diff --git a/tests/test_meta.py b/tests/test_meta.py
index c774fc497..d52a83523 100644
--- a/tests/test_meta.py
+++ b/tests/test_meta.py
@@ -5,31 +5,35 @@
 
 
 class MetaTest(g.unittest.TestCase):
-
     def test_glb(self):
         # check to see if both `Scene.metadata` and
         # `Scene.geometry._.metadata` is preserved
 
         # create an example scene
-        s = g.trimesh.Scene([
-            g.trimesh.creation.box().permutate.transform(),
-            g.trimesh.creation.box().permutate.transform(),
-            g.trimesh.creation.box().permutate.transform()])
+        s = g.trimesh.Scene(
+            [
+                g.trimesh.creation.box().permutate.transform(),
+                g.trimesh.creation.box().permutate.transform(),
+                g.trimesh.creation.box().permutate.transform(),
+            ]
+        )
 
         # add some basic metadata
-        s.metadata['hi'] = True
-        s.metadata['10'] = "it's true!"
+        s.metadata["hi"] = True
+        s.metadata["10"] = "it's true!"
 
         for m in s.geometry.values():
             # create some random metadata for each mesh
             # note that JSON doesn't support integers as keys
             # so convert integers to strings for comparison
-            m.metadata.update(g.np.random.randint(
-                0, 1000, 10).reshape((-1, 2)).astype(str))
+            m.metadata.update(
+                g.np.random.randint(0, 1000, 10).reshape((-1, 2)).astype(str)
+            )
 
         # reload the exported scene
-        r = g.trimesh.load(file_obj=g.trimesh.util.wrap_as_stream(
-            s.export(file_type='glb')),
-            file_type='glb')
+        r = g.trimesh.load(
+            file_obj=g.trimesh.util.wrap_as_stream(s.export(file_type="glb")),
+            file_type="glb",
+        )
 
         # all scene metadata should have survived export-import cycle
         assert r.metadata == s.metadata
@@ -43,33 +47,31 @@ def test_glb(self):
 
             # the original metadata should have all survived
             # the exporter is allowed to add additional keys
-            assert set(b.metadata.keys()).issuperset(
-                a.metadata.keys())
+            assert set(b.metadata.keys()).issuperset(a.metadata.keys())
 
             # every original value must match exactly
-            assert all(b.metadata[k] == v for k, v
-                       in a.metadata.items())
+            assert all(b.metadata[k] == v for k, v in a.metadata.items())
 
     def test_svg(self):
-        p = g.get_mesh('2D/1002_tray_bottom.DXF')
+        p = g.get_mesh("2D/1002_tray_bottom.DXF")
         assert len(p.layers) == len(p.entities)
-        assert all(e.layer == '0' for e in p.entities)
-        assert all(L == '0' for L in p.layers)
+        assert all(e.layer == "0" for e in p.entities)
+        assert all(L == "0" for L in p.layers)
 
         r = g.trimesh.load(
-            file_obj=g.trimesh.util.wrap_as_stream(
-                p.export(file_type='svg')),
-            file_type='svg')
+            file_obj=g.trimesh.util.wrap_as_stream(p.export(file_type="svg")),
+            file_type="svg",
+        )
 
         # make sure we didn't stomp on original
-        assert all(e.layer == '0' for e in p.entities)
-        assert all(e.layer == '0' for e in r.entities)
+        assert all(e.layer == "0" for e in p.entities)
+        assert 
all(e.layer == "0" for e in r.entities) assert all(i == j for i, j in zip(p.layers, r.layers)) assert p.metadata == r.metadata assert len(p.metadata) > 0 -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_minimal.py b/tests/test_minimal.py index 4cd4941df..2de6ae316 100644 --- a/tests/test_minimal.py +++ b/tests/test_minimal.py @@ -14,41 +14,36 @@ import trimesh # the path of the current directory -_pwd = os.path.dirname( - os.path.abspath(os.path.expanduser(__file__))) +_pwd = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) # the absolute path for our reference models -_mwd = os.path.abspath( - os.path.join(_pwd, '..', 'models')) +_mwd = os.path.abspath(os.path.join(_pwd, "..", "models")) def get_mesh(file_name, **kwargs): """ Load a mesh from our models directory. """ - return trimesh.load(os.path.join(_mwd, file_name), - **kwargs) + return trimesh.load(os.path.join(_mwd, file_name), **kwargs) class MinimalTest(unittest.TestCase): - def test_path_exc(self): # this should require *no deps* from trimesh.path import packing - bounds, inserted = packing.rectangles_single( - [[1, 1], [2, 2]], - size=[2, 4]) + + bounds, inserted = packing.rectangles_single([[1, 1], [2, 2]], size=[2, 4]) assert inserted.all() extents = bounds.reshape((-1, 2)).ptp(axis=0) assert np.allclose(extents, [2, 3]) assert bounds.shape == (2, 2, 2) density = 5.0 / np.prod(extents) - assert density > .833 + assert density > 0.833 def test_load(self): # kinds of files we should be able to # load even with a minimal install - kinds = 'stl ply obj off gltf glb'.split() + kinds = "stl ply obj off gltf glb".split() for file_name in os.listdir(_mwd): ext = os.path.splitext(file_name)[-1].lower()[1:] @@ -75,7 +70,7 @@ def test_load_path(self): assert isinstance(path, trimesh.path.Path3D) scene = trimesh.Scene(path) assert len(scene.geometry) == 1 - glb = scene.export(file_type='glb') + glb = scene.export(file_type="glb") assert len(glb) > 0 # now create a Path2D @@ -83,15 +78,15 @@ def test_load_path(self): assert isinstance(path, trimesh.path.Path2D) # export to an SVG - svg = path.export(file_type='svg') + svg = path.export(file_type="svg") assert len(svg) > 0 - dxf = path.export(file_type='dxf') + dxf = path.export(file_type="dxf") assert len(dxf) > 0 scene = trimesh.Scene(path) assert len(scene.geometry) == 1 - glb = scene.export(file_type='glb') + glb = scene.export(file_type="glb") assert len(glb) > 0 def test_load_wrap(self): @@ -99,6 +94,7 @@ def test_load_wrap(self): # when we *do not* have `lxml` installed try: import lxml # noqa + return except BaseException: pass @@ -106,7 +102,7 @@ def test_load_wrap(self): # we have no 3DXML exc = None try: - get_mesh('cycloidal.3DXML') + get_mesh("cycloidal.3DXML") except BaseException as E: exc = str(E).lower() @@ -115,10 +111,10 @@ def test_load_wrap(self): # error message should have been useful # containing which module the user was missing - if not any(m in exc for m in ('lxml', 'networkx')): + if not any(m in exc for m in ("lxml", "networkx")): raise ValueError(exc) -if __name__ == '__main__': +if __name__ == "__main__": trimesh.util.attach_to_log() unittest.main() diff --git a/tests/test_mutate.py b/tests/test_mutate.py index 139b3a8fb..78567e628 100644 --- a/tests/test_mutate.py +++ b/tests/test_mutate.py @@ -38,30 +38,29 @@ def get_readonly(model_name): verts = g.np.ndarray(verts.shape, verts.dtype, bytes(verts.tobytes())) faces = g.np.ndarray(faces.shape, faces.dtype, 
bytes(faces.tobytes())) # everything should be read only now - assert not verts.flags['WRITEABLE'] - assert not faces.flags['WRITEABLE'] + assert not verts.flags["WRITEABLE"] + assert not faces.flags["WRITEABLE"] mesh = g.trimesh.Trimesh(verts, faces, process=False, validate=False) - assert not mesh.vertices.flags['WRITEABLE'] - assert not mesh.faces.flags['WRITEABLE'] + assert not mesh.vertices.flags["WRITEABLE"] + assert not mesh.faces.flags["WRITEABLE"] # return the mesh, and read-only vertices and faces return mesh, verts, faces class MutateTests(g.unittest.TestCase): - def test_not_mutated_cube(self): - self._test_not_mutated(*get_readonly('cube.OBJ')) + self._test_not_mutated(*get_readonly("cube.OBJ")) def test_not_mutated_torus(self): - self._test_not_mutated(*get_readonly('torus.STL')) + self._test_not_mutated(*get_readonly("torus.STL")) def test_not_mutated_bunny(self): - self._test_not_mutated(*get_readonly('bunny.ply')) + self._test_not_mutated(*get_readonly("bunny.ply")) def test_not_mutated_teapot(self): - self._test_not_mutated(*get_readonly('teapot.stl')) + self._test_not_mutated(*get_readonly("teapot.stl")) def _test_not_mutated(self, mesh, verts, faces): verts = g.np.copy(verts) @@ -171,12 +170,12 @@ def _test_not_mutated(self, mesh, verts, faces): mesh.section_multiplane(o, n, heights) mesh.section_multiplane(mesh.center_mass, n, heights) - assert not mesh.vertices.flags['WRITEABLE'] - assert not mesh.faces.flags['WRITEABLE'] + assert not mesh.vertices.flags["WRITEABLE"] + assert not mesh.faces.flags["WRITEABLE"] assert g.np.all(g.np.isclose(verts, mesh.vertices)) assert g.np.all(g.np.isclose(faces, mesh.faces)) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_normals.py b/tests/test_normals.py index 621178406..da1a6d1a9 100644 --- a/tests/test_normals.py +++ b/tests/test_normals.py @@ -5,7 +5,6 @@ class NormalsTest(g.unittest.TestCase): - def test_vertex_normal(self): mesh = g.trimesh.creation.icosahedron() # the icosahedron is centered at zero, so the true vertex @@ -15,16 +14,14 @@ def test_vertex_normal(self): # force fallback to loop normal summing by passing None # as the sparse matrix normals = g.trimesh.geometry.mean_vertex_normals( - len(mesh.vertices), - mesh.faces, - mesh.face_normals) + len(mesh.vertices), mesh.faces, mesh.face_normals + ) assert g.np.allclose(normals - truth, 0.0) # make sure the automatic sparse matrix generation works normals = g.trimesh.geometry.mean_vertex_normals( - len(mesh.vertices), - mesh.faces, - mesh.face_normals) + len(mesh.vertices), mesh.faces, mesh.face_normals + ) assert g.np.allclose(normals - truth, 0.0) # make sure the Trimesh normals- related attributes @@ -34,26 +31,21 @@ def test_vertex_normal(self): assert g.np.allclose(mesh.vertex_normals - truth, 0.0) def test_weighted_vertex_normals(self): - - def compare_trimesh_to_groundtruth( - mesh, - truth, - atol=g.trimesh.tol.merge): + def compare_trimesh_to_groundtruth(mesh, truth, atol=g.trimesh.tol.merge): # force fallback to loop normal summing by passing None # as the sparse matrix normals = g.trimesh.geometry.weighted_vertex_normals( vertex_count=len(mesh.vertices), faces=mesh.faces, face_normals=mesh.face_normals, - face_angles=mesh.face_angles) + face_angles=mesh.face_angles, + ) assert g.np.allclose(normals, truth, atol=atol) # make sure the automatic sparse matrix generation works normals = g.trimesh.geometry.weighted_vertex_normals( - len(mesh.vertices), - mesh.faces, - 
mesh.face_normals, - mesh.face_angles) + len(mesh.vertices), mesh.faces, mesh.face_normals, mesh.face_angles + ) assert g.np.allclose(normals - truth, 0.0, atol=atol) # make sure the Trimesh normals- related attributes @@ -80,7 +72,7 @@ def compare_trimesh_to_groundtruth( # smooth curved surfaces, corners and sharp creases # ground truth vertex normals were computed in MeshLab and # are included in the file - fandisk_mesh = g.get_mesh('fandisk.obj') + fandisk_mesh = g.get_mesh("fandisk.obj") fandisk_truth = fandisk_mesh.vertex_normals # due to the limited precision in the MeshLab export, # we have to tweak the tolerance for the comparison a little @@ -97,7 +89,7 @@ def compare_trimesh_to_groundtruth( mask = g.np.zeros(len(m.vertices), dtype=bool) mask[m.faces[0]] = False # it's a box so normals should all be unit vectors [1,1,1] - assert g.np.allclose(g.np.abs(norm[mask]), (1.0 / 3.0) ** .5) + assert g.np.allclose(g.np.abs(norm[mask]), (1.0 / 3.0) ** 0.5) # try with a deliberately broken sparse matrix to test looping path norm = g.trimesh.geometry.weighted_vertex_normals( @@ -105,12 +97,13 @@ def compare_trimesh_to_groundtruth( faces=m.faces, face_normals=m.face_normals, face_angles=m.face_angles, - use_loop=True) + use_loop=True, + ) assert g.np.isfinite(norm).all() assert len(norm) == len(m.vertices) # every intact vertex should be away from box corner - assert g.np.allclose(g.np.abs(norm[mask]), (1.0 / 3.0) ** .5) + assert g.np.allclose(g.np.abs(norm[mask]), (1.0 / 3.0) ** 0.5) def test_face_normals(self): """ @@ -131,30 +124,24 @@ def test_face_normals(self): # setting normals to None should force recompute mesh.face_normals = None assert mesh.face_normals is not None - assert not g.np.allclose(mesh.face_normals, - [0.0, 0.0, 1.0]) + assert not g.np.allclose(mesh.face_normals, [0.0, 0.0, 1.0]) # setting face normals to zeros shouldn't work mesh.face_normals = g.np.zeros_like(mesh.faces) - assert g.np.allclose( - g.np.linalg.norm(mesh.face_normals, axis=1), 1.0) + assert g.np.allclose(g.np.linalg.norm(mesh.face_normals, axis=1), 1.0) def test_merge(self): """ Check merging with vertex normals """ # no vertex merging - m = g.get_mesh( - 'cube_compressed.obj', - process=False) + m = g.get_mesh("cube_compressed.obj", process=False) assert m.vertices.shape == (24, 3) assert m.faces.shape == (12, 3) assert g.np.isclose(m.volume, 8.0, atol=1e-4) # with normal-aware vertex merging - m = g.get_mesh( - 'cube_compressed.obj', - process=True) + m = g.get_mesh("cube_compressed.obj", process=True) assert m.vertices.shape == (24, 3) assert m.faces.shape == (12, 3) assert g.np.isclose(m.volume, 8.0, atol=1e-4) @@ -166,6 +153,6 @@ def test_merge(self): assert g.np.isclose(m.volume, 8.0, atol=1e-4) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_nsphere.py b/tests/test_nsphere.py index 3be0c301b..25b1c8523 100644 --- a/tests/test_nsphere.py +++ b/tests/test_nsphere.py @@ -5,19 +5,15 @@ class NSphereTest(g.unittest.TestCase): - def test_minball(self): # how close do we need to be tol_fit = 1e-2 # get some assorted mesh geometries to test performance # and a perfect sphere mesh to test the degenerate case - for m in g.np.append(list(g.get_meshes(5)), - g.trimesh.primitives.Sphere()): - + for m in g.np.append(list(g.get_meshes(5)), g.trimesh.primitives.Sphere()): s = m.bounding_sphere - R_check = ((m.vertices - s.primitive.center) - ** 2).sum(axis=1).max() ** .5 + R_check = ((m.vertices - s.primitive.center) ** 
2).sum(axis=1).max() ** 0.5 assert len(s.primitive.center) == 3 assert s.primitive.radius > 0.0 @@ -29,7 +25,7 @@ def test_minball(self): for _i in range(5): points = g.random((100, d)) C, R = g.trimesh.nsphere.minimum_nsphere(points) - R_check = ((points - C)**2).sum(axis=1).max() ** .5 + R_check = ((points - C) ** 2).sum(axis=1).max() ** 0.5 assert len(C) == d assert R > 0.0 assert abs(R - R_check) < g.tol.merge @@ -39,12 +35,11 @@ def test_isnsphere(self): m = g.trimesh.creation.uv_sphere() # move the mesh around for funsies m.apply_translation(g.random(3)) - m.apply_transform( - g.trimesh.transformations.random_rotation_matrix()) + m.apply_transform(g.trimesh.transformations.random_rotation_matrix()) # all vertices should be on nsphere assert g.trimesh.nsphere.is_nsphere(m.vertices) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_off.py b/tests/test_off.py index c96bf7957..1f9746643 100644 --- a/tests/test_off.py +++ b/tests/test_off.py @@ -8,40 +8,35 @@ class OFFTests(g.unittest.TestCase): - def test_comment(self): # see if we survive comments - file_name = 'comments.off' + file_name = "comments.off" m = g.get_mesh(file_name, process=False) assert m.is_volume assert m.vertices.shape == (8, 3) assert m.faces.shape == (12, 3) with open(g.os.path.join(g.dir_models, file_name)) as f: - lines = [line.split('#', 1)[0].strip() - for line in str.splitlines(f.read())] - lines = [line.split() for line in lines if - 'OFF' not in line and len(line) > 0] + lines = [line.split("#", 1)[0].strip() for line in str.splitlines(f.read())] + lines = [line.split() for line in lines if "OFF" not in line and len(line) > 0] vertices = g.np.array(lines[1:9], dtype=g.np.float64) assert g.np.allclose(vertices, m.vertices) def test_whitespace(self): - file_name = 'whitespace.off' + file_name = "whitespace.off" m = g.get_mesh(file_name, process=False) assert m.is_volume assert m.vertices.shape == (8, 3) assert m.faces.shape == (12, 3) with open(g.os.path.join(g.dir_models, file_name)) as f: - lines = [line.split('#', 1)[0].strip() - for line in str.splitlines(f.read())] - lines = [line.split() for line in lines if - 'OFF' not in line and len(line) > 0] + lines = [line.split("#", 1)[0].strip() for line in str.splitlines(f.read())] + lines = [line.split() for line in lines if "OFF" not in line and len(line) > 0] def test_corpus(self): - g.get_mesh('off.zip') + g.get_mesh("off.zip") -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_operators.py b/tests/test_operators.py index 196852354..7e607e4de 100644 --- a/tests/test_operators.py +++ b/tests/test_operators.py @@ -5,31 +5,25 @@ class OpTest(g.unittest.TestCase): - def test_add(self): - # make sure different concatenation results return the same - m = [g.trimesh.creation.box().apply_translation(v) - for v in 2.0 * g.np.eye(3)] + m = [g.trimesh.creation.box().apply_translation(v) for v in 2.0 * g.np.eye(3)] assert g.np.isclose(sum(m).volume, 3.0) assert g.np.isclose(g.np.sum(m).volume, 3.0) assert g.np.isclose((m[0] + m[1] + m[2]).volume, 3.0) - assert g.np.isclose( - g.trimesh.util.concatenate(m).volume, 3.0) + assert g.np.isclose(g.trimesh.util.concatenate(m).volume, 3.0) - p = g.get_mesh('2D/wrench.dxf') - m = [p.copy().apply_translation(v) - for v in p.extents.max() * 2.0 * g.np.eye(2)] + p = g.get_mesh("2D/wrench.dxf") + m = [p.copy().apply_translation(v) for v in p.extents.max() * 2.0 * g.np.eye(2)] 
m.append(p) area = 3.0 * p.area assert g.np.isclose(sum(m).area, area) assert g.np.isclose(g.np.sum(m).area, area) assert g.np.isclose((m[0] + m[1] + m[2]).area, area) - assert g.np.isclose( - g.trimesh.path.util.concatenate(m).area, area) + assert g.np.isclose(g.trimesh.path.util.concatenate(m).area, area) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_packing.py b/tests/test_packing.py index e45c8daf4..c56437232 100644 --- a/tests/test_packing.py +++ b/tests/test_packing.py @@ -54,31 +54,34 @@ def _solid_image(color, size): Image with requested color and size. """ from PIL import Image + # convert to RGB uint8 color = g.np.array(color, dtype=g.np.uint8)[:3] # create a one pixel RGB image image = Image.fromarray( - g.np.tile(color, (g.np.prod(size), 1)).reshape( - (size[0], size[1], 3))) + g.np.tile(color, (g.np.prod(size), 1)).reshape((size[0], size[1], 3)) + ) assert image.size == tuple(size[::-1]) return image class PackingTest(g.unittest.TestCase): - def test_obb(self): from trimesh.path import packing - nestable = [g.Polygon(i) for i in g.data['nestable']] + + nestable = [g.Polygon(i) for i in g.data["nestable"]] inserted, transforms = packing.polygons(nestable) def test_image(self): from trimesh.path import packing - images = [_solid_image([255, 0, 0, 255], [10, 10]), - _solid_image([0, 255, 0, 255], [120, 12]), - _solid_image([0, 0, 255, 255], [144, 500])] + images = [ + _solid_image([255, 0, 0, 255], [10, 10]), + _solid_image([0, 255, 0, 255], [120, 12]), + _solid_image([0, 0, 255, 255], [144, 500]), + ] p, offset = packing.images(images, power_resize=False) # result should not be a power-of-two size @@ -93,15 +96,14 @@ def test_paths(self): from trimesh.path import packing from trimesh.path.polygons import polygon_bounds - polygons = g.np.array( - [g.Polygon(i) for i in g.data['nestable']]) + polygons = g.np.array([g.Polygon(i) for i in g.data["nestable"]]) # calculate a packing of the polygons matrix, consume = packing.polygons(polygons) check_bound = g.np.array( - [polygon_bounds(p, matrix=m) - for p, m in zip(polygons[consume], matrix)]) + [polygon_bounds(p, matrix=m) for p, m in zip(polygons[consume], matrix)] + ) assert not packing.bounds_overlap(check_bound) paths = [g.trimesh.load_path(i) for i in polygons] @@ -137,17 +139,22 @@ def test_paths(self): def test_3D(self): from trimesh.path import packing - e = g.np.array([[14., 14., 0.125], - [13.84376457, 13.84376457, 0.25], - [14., 14., 0.125], - [12.00000057, 12.00000057, 0.25], - [14., 14., 0.125], - [12.83700787, 12.83700787, 0.375], - [12.83700787, 12.83700787, 0.125], - [14., 14., 0.625], - [1.9999977, 1.9999509, 0.25], - [0.87481696, 0.87463294, 0.05], - [0.99955503, 0.99911677, 0.1875]]) + + e = g.np.array( + [ + [14.0, 14.0, 0.125], + [13.84376457, 13.84376457, 0.25], + [14.0, 14.0, 0.125], + [12.00000057, 12.00000057, 0.25], + [14.0, 14.0, 0.125], + [12.83700787, 12.83700787, 0.375], + [12.83700787, 12.83700787, 0.125], + [14.0, 14.0, 0.625], + [1.9999977, 1.9999509, 0.25], + [0.87481696, 0.87463294, 0.05], + [0.99955503, 0.99911677, 0.1875], + ] + ) # try packing these 3D boxes bounds, consume = packing.rectangles_single(e) @@ -159,18 +166,23 @@ def test_3D(self): def test_transform(self): from trimesh.path import packing + # try in 3D with random OBB and orientation - ori = g.np.array([[14., 14., 0.125], - [13.84376457, 13.84376457, 0.25], - [14., 14., 0.125], - [12.00000057, 12.00000057, 0.25], - [14., 14., 0.125], - [12.83700787, 
12.83700787, 0.375], - [12.83700787, 12.83700787, 0.125], - [14., 14., 0.625], - [1.9999977, 1.9999509, 0.25], - [0.87481696, 0.87463294, 0.05], - [0.99955503, 0.99911677, 0.1875]]) + ori = g.np.array( + [ + [14.0, 14.0, 0.125], + [13.84376457, 13.84376457, 0.25], + [14.0, 14.0, 0.125], + [12.00000057, 12.00000057, 0.25], + [14.0, 14.0, 0.125], + [12.83700787, 12.83700787, 0.375], + [12.83700787, 12.83700787, 0.125], + [14.0, 14.0, 0.625], + [1.9999977, 1.9999509, 0.25], + [0.87481696, 0.87463294, 0.05], + [0.99955503, 0.99911677, 0.1875], + ] + ) density = [] with g.Profiler() as P: @@ -178,9 +190,7 @@ def test_transform(self): # roll the extents by a random amount and offset extents = [] for i in ori: - extents.append( - g.np.roll(i, int(g.random() * 10)) + - g.random(3)) + extents.append(g.np.roll(i, int(g.random() * 10)) + g.random(3)) extents = g.np.array(extents) bounds, consume = packing.rectangles(extents) @@ -190,65 +200,64 @@ def test_transform(self): assert len(bounds) == consume.sum() # generate the transforms for the packing - transforms = packing.roll_transform( - bounds=bounds, extents=extents) + transforms = packing.roll_transform(bounds=bounds, extents=extents) - assert transforms_match(bounds=bounds, - extents=extents[consume], - transforms=transforms) + assert transforms_match( + bounds=bounds, extents=extents[consume], transforms=transforms + ) viz = packing.visualize(bounds=bounds, extents=extents) density.append(viz.volume / viz.bounding_box.volume) - bounds, consume = packing.rectangles( - extents, size=[16, 16, 10]) + bounds, consume = packing.rectangles(extents, size=[16, 16, 10]) # generate the transforms for the packing transforms = packing.roll_transform( - bounds=bounds, extents=extents[consume]) - assert transforms_match(bounds=bounds, - extents=extents[consume], - transforms=transforms) - viz = packing.visualize( - bounds=bounds, extents=extents[consume]) + bounds=bounds, extents=extents[consume] + ) + assert transforms_match( + bounds=bounds, extents=extents[consume], transforms=transforms + ) + viz = packing.visualize(bounds=bounds, extents=extents[consume]) density.append(viz.volume / viz.bounding_box.volume) bounds, consume = packing.rectangles( - extents, size=[16, 16, 10], rotate=False) + extents, size=[16, 16, 10], rotate=False + ) # generate the transforms for the packing transforms = packing.roll_transform( - bounds=bounds, extents=extents[consume]) - assert transforms_match(bounds=bounds, - extents=extents[consume], - transforms=transforms) - viz = packing.visualize( - bounds=bounds, extents=extents[consume]) + bounds=bounds, extents=extents[consume] + ) + assert transforms_match( + bounds=bounds, extents=extents[consume], transforms=transforms + ) + viz = packing.visualize(bounds=bounds, extents=extents[consume]) density.append(viz.volume / viz.bounding_box.volume) bounds, consume = packing.rectangles(extents, rotate=False) # generate the transforms for the packing transforms = packing.roll_transform( - bounds=bounds, extents=extents[consume]) - assert transforms_match(bounds=bounds, - extents=extents[consume], - transforms=transforms) - viz = packing.visualize( - bounds=bounds, extents=extents[consume]) + bounds=bounds, extents=extents[consume] + ) + assert transforms_match( + bounds=bounds, extents=extents[consume], transforms=transforms + ) + viz = packing.visualize(bounds=bounds, extents=extents[consume]) density.append(viz.volume / viz.bounding_box.volume) g.log.debug(P.output_text()) def test_meshes(self, count=20): from trimesh.path import 
packing + # create some random rotation boxes - meshes = [g.trimesh.creation.box( - extents=extents, - transform=transform) + meshes = [ + g.trimesh.creation.box(extents=extents, transform=transform) for transform, extents in zip( - g.random_transforms(count), - (g.random((count, 3)) + 1) * 10)] - packed, transforms, consume = packing.meshes( - meshes, spacing=0.01) + g.random_transforms(count), (g.random((count, 3)) + 1) * 10 + ) + ] + packed, transforms, consume = packing.meshes(meshes, spacing=0.01) scene = g.trimesh.Scene(packed) assert len(consume) == len(meshes) @@ -259,6 +268,6 @@ def test_meshes(self, count=20): assert density > 0.5 -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_path_creation.py b/tests/test_path_creation.py index 090b35ee2..7c0efd959 100644 --- a/tests/test_path_creation.py +++ b/tests/test_path_creation.py @@ -5,12 +5,10 @@ class CreationTests(g.unittest.TestCase): - def test_circle_pattern(self): from trimesh.path import creation - pattern = creation.circle_pattern(pattern_radius=1.0, - circle_radius=0.1, - count=4) + + pattern = creation.circle_pattern(pattern_radius=1.0, circle_radius=0.1, count=4) assert len(pattern.entities) == 4 assert len(pattern.polygons_closed) == 4 assert len(pattern.polygons_full) == 4 @@ -20,14 +18,15 @@ def test_circle_pattern(self): def test_circle(self): from trimesh.path import creation + circle = creation.circle(radius=1.0, center=(1.0, 1.0)) # it's a discrete circle assert g.np.isclose(circle.area, g.np.pi, rtol=0.01) # should be centered at 0 assert g.np.allclose( - circle.polygons_full[0].centroid.coords, [ - 1.0, 1.0], atol=1e-3) + circle.polygons_full[0].centroid.coords, [1.0, 1.0], atol=1e-3 + ) assert len(circle.entities) == 1 assert len(circle.polygons_closed) == 1 assert len(circle.polygons_full) == 1 @@ -48,8 +47,7 @@ def test_rect(self): g.check_path2D(pattern) # make 10 untouching rectangles - pattern = creation.rectangle( - g.np.arange(40).reshape((-1, 2, 2))) + pattern = creation.rectangle(g.np.arange(40).reshape((-1, 2, 2))) assert len(pattern.entities) == 10 assert len(pattern.polygons_closed) == 10 assert len(pattern.polygons_full) == 10 @@ -61,14 +59,13 @@ def test_grid(self): assert g.np.allclose(grid.extents, [10, 10, 0]) # check grid along a plane grid = g.trimesh.path.creation.grid( - side=10.0, - plane_origin=[5.0, 0, 0], - plane_normal=[1, 0, 0]) + side=10.0, plane_origin=[5.0, 0, 0], plane_normal=[1, 0, 0] + ) # make sure plane is applied correctly assert g.np.allclose(grid.extents, [0, 20, 20]) assert g.np.allclose(grid.bounds, [[5, -10, -10], [5, 10, 10]]) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_pathlib.py b/tests/test_pathlib.py index 735860679..16b90cb36 100644 --- a/tests/test_pathlib.py +++ b/tests/test_pathlib.py @@ -8,7 +8,6 @@ class PathTest(g.unittest.TestCase): - def test_pathlib(self): """ Test loading with paths passed as pathlib @@ -17,18 +16,18 @@ def test_pathlib(self): try: import pathlib except ImportError: - g.log.warning('no pathlib') + g.log.warning("no pathlib") return # create a pathlib object for a model that exists - path = pathlib.Path(g.dir_models) / 'featuretype.STL' + path = pathlib.Path(g.dir_models) / "featuretype.STL" # load the mesh m = g.trimesh.load(path) # should be a mesh assert isinstance(m, g.trimesh.Trimesh) # will generate writeable file namey - with g.tempfile.NamedTemporaryFile(suffix='.ply') as 
f:
+        with g.tempfile.NamedTemporaryFile(suffix=".ply") as f:
             name = pathlib.Path(f.name)
 
             # should export to file from pathlib object
@@ -44,31 +43,27 @@ def test_full_filetype(self):
         Test loading with file types specified
         as the full filename, not just extension.
         """
-        file_name = g.get_path('unit_cube.STL')
-        with open(file_name, 'rb') as f:
+        file_name = g.get_path("unit_cube.STL")
+        with open(file_name, "rb") as f:
             # check `load_mesh`
-            mesh = g.trimesh.load_mesh(file_obj=file_name,
-                                       file_type=file_name)
+            mesh = g.trimesh.load_mesh(file_obj=file_name, file_type=file_name)
             assert g.np.isclose(mesh.volume, 1.0)
 
             f.seek(0)
-            mesh = g.trimesh.load(file_obj=file_name,
-                                  file_type=file_name)
+            mesh = g.trimesh.load(file_obj=file_name, file_type=file_name)
             assert g.np.isclose(mesh.volume, 1.0)
 
-        file_name = g.get_path('2D/1002_tray_bottom.DXF')
-        with open(file_name, 'rb') as f:
+        file_name = g.get_path("2D/1002_tray_bottom.DXF")
+        with open(file_name, "rb") as f:
             # check load_path
-            path = g.trimesh.load_path(file_obj=file_name,
-                                       file_type=file_name)
+            path = g.trimesh.load_path(file_obj=file_name, file_type=file_name)
             assert len(path.entities) == 46
 
             f.seek(0)
             # check `load`
-            path = g.trimesh.load(file_obj=file_name,
-                                  file_type=file_name)
+            path = g.trimesh.load(file_obj=file_name, file_type=file_name)
             assert len(path.entities) == 46
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     g.trimesh.util.attach_to_log()
     g.unittest.main()
diff --git a/tests/test_paths.py b/tests/test_paths.py
index 0c511f382..c544dde13 100644
--- a/tests/test_paths.py
+++ b/tests/test_paths.py
@@ -5,7 +5,6 @@
 
 
 class VectorTests(g.unittest.TestCase):
-
     def test_discrete(self):
         for d in g.get_2D():
             # store hash before requesting passive functions
@@ -17,19 +16,14 @@ def test_discrete(self):
             # make sure various methods return
             # basically the same bounds
             atol = d.scale / 1000
-            for dis, pa, pl in zip(d.discrete,
-                                   d.paths,
-                                   d.polygons_closed):
+            for dis, pa, pl in zip(d.discrete, d.paths, d.polygons_closed):
                 # bounds of discrete version of path
-                bd = g.np.array([g.np.min(dis, axis=0),
-                                 g.np.max(dis, axis=0)])
+                bd = g.np.array([g.np.min(dis, axis=0), g.np.max(dis, axis=0)])
                 # bounds of polygon version of path
                 bl = g.np.reshape(pl.bounds, (2, 2))
                 # try bounds of included entities from path
-                pad = g.np.vstack([d.entities[i].discrete(d.vertices)
-                                   for i in pa])
-                bp = g.np.array([g.np.min(pad, axis=0),
-                                 g.np.max(pad, axis=0)])
+                pad = g.np.vstack([d.entities[i].discrete(d.vertices) for i in pa])
+                bp = g.np.array([g.np.min(pad, axis=0), g.np.max(pad, axis=0)])
 
                 assert g.np.allclose(bd, bl, atol=atol)
                 assert g.np.allclose(bl, bp, atol=atol)
@@ -48,45 +42,49 @@ def test_discrete(self):
 
             # file_name should be populated, and if we have a DXF file
             # the layer field should be populated with layer names
-            if d.metadata['file_name'][-3:] == 'dxf':
+            if d.metadata["file_name"][-3:] == "dxf":
                 assert len(d.layers) == len(d.entities)
 
             for path in d.paths:
                 verts = d.discretize_path(path)
-                dists = g.np.sum((g.np.diff(verts, axis=0))**2, axis=1)**.5
+                dists = g.np.sum((g.np.diff(verts, axis=0)) ** 2, axis=1) ** 0.5
 
                 if not g.np.all(dists > g.tol_path.zero):
-                    raise ValueError('{} had zero distance in discrete!',
-                                     d.metadata['file_name'])
+                    raise ValueError(
+                        f"{d.metadata['file_name']} had zero distance in discrete!"
+                    )
 
                 circuit_dist = g.trimesh.util.euclidean(verts[0], verts[-1])
                 circuit_test = circuit_dist < g.tol_path.merge
                 if not circuit_test:
-                    g.log.error('On file %s First and last vertex distance %f',
-                                
d.metadata['file_name'], - circuit_dist) + g.log.error( + "On file %s First and last vertex distance %f", + d.metadata["file_name"], + circuit_dist, + ) assert circuit_test is_ccw = g.trimesh.path.util.is_ccw(verts) if not is_ccw: - g.log.error('discrete %s not ccw!', - d.metadata['file_name']) + g.log.error("discrete %s not ccw!", d.metadata["file_name"]) for i in range(len(d.paths)): assert d.polygons_closed[i].is_valid assert d.polygons_closed[i].area > g.tol_path.zero - export_dict = d.export(file_type='dict') + export_dict = d.export(file_type="dict") to_dict = d.to_dict() assert isinstance(to_dict, dict) assert isinstance(export_dict, dict) assert len(to_dict) == len(export_dict) - export_svg = d.export(file_type='svg') # NOQA + export_svg = d.export(file_type="svg") # NOQA simple = d.simplify() # NOQA split = d.split() - g.log.info('Split %s into %d bodies, checking identifiers', - d.metadata['file_name'], - len(split)) + g.log.info( + "Split %s into %d bodies, checking identifiers", + d.metadata["file_name"], + len(split), + ) for body in split: _ = body.identifier @@ -103,8 +101,7 @@ def test_discrete(self): assert g.np.allclose(d.bounds[:, 1], ori[:, 1]) if len(d.polygons_full) > 0 and len(d.vertices) < 150: - g.log.info('Checking medial axis on %s', - d.metadata['file_name']) + g.log.info("Checking medial axis on %s", d.metadata["file_name"]) m = d.medial_axis() assert len(m.entities) > 0 @@ -117,12 +114,15 @@ def test_discrete(self): d.process() def test_poly(self): - p = g.get_mesh('2D/LM2.dxf') + p = g.get_mesh("2D/LM2.dxf") assert p.is_closed # one of the lines should be a polyline - assert any(len(e.points) > 2 for e in p.entities if - isinstance(e, g.trimesh.path.entities.Line)) + assert any( + len(e.points) > 2 + for e in p.entities + if isinstance(e, g.trimesh.path.entities.Line) + ) # layers should match entity count assert len(p.layers) == len(p.entities) @@ -136,8 +136,11 @@ def test_poly(self): # explode should have added some new layers assert len(p.entities) == len(p.layers) # all line segments should have two points now - assert all(len(i.points) == 2 for i in p.entities if - isinstance(i, g.trimesh.path.entities.Line)) + assert all( + len(i.points) == 2 + for i in p.entities + if isinstance(i, g.trimesh.path.entities.Line) + ) # should still be closed assert p.is_closed # chop off the last entity @@ -161,20 +164,19 @@ def test_text(self): """ Do some checks on Text entities """ - p = g.get_mesh('2D/LM2.dxf') + p = g.get_mesh("2D/LM2.dxf") p.explode() # get some text entities - text = [e for e in p.entities if - isinstance(e, g.trimesh.path.entities.Text)] + text = [e for e in p.entities if isinstance(e, g.trimesh.path.entities.Text)] assert len(text) > 1 # loop through each of them for t in text: # a spurious error we were seeing in CI - if g.trimesh.util.is_instance_named(t, 'Line'): + if g.trimesh.util.is_instance_named(t, "Line"): raise ValueError( - 'type bases:', - [i.__name__ for i in g.trimesh.util.type_bases(t)]) + "type bases:", [i.__name__ for i in g.trimesh.util.type_bases(t)] + ) # make sure this doesn't crash with text entities g.trimesh.rendering.convert_to_vertexlist(p) @@ -195,7 +197,7 @@ def test_empty(self): assert not b.is_empty def test_color(self): - p = g.get_mesh('2D/wrench.dxf') + p = g.get_mesh("2D/wrench.dxf") # make sure we have entities assert len(p.entities) > 0 # make sure shape of colors is correct @@ -211,14 +213,14 @@ def test_color(self): class SplitTest(g.unittest.TestCase): - def test_split(self): - - for fn in 
['2D/ChuteHolderPrint.DXF', - '2D/tray-easy1.dxf', - '2D/sliding-base.dxf', - '2D/wrench.dxf', - '2D/spline_1.dxf']: + for fn in [ + "2D/ChuteHolderPrint.DXF", + "2D/tray-easy1.dxf", + "2D/sliding-base.dxf", + "2D/wrench.dxf", + "2D/spline_1.dxf", + ]: p = g.get_mesh(fn) # make sure something was loaded @@ -242,15 +244,15 @@ def test_split(self): class SectionTest(g.unittest.TestCase): - def test_section(self): - mesh = g.get_mesh('tube.obj') + mesh = g.get_mesh("tube.obj") # check the CCW correctness with a normal in both directions for sign in [1.0, -1.0]: # get a cross section of the tube - section = mesh.section(plane_origin=mesh.center_mass, - plane_normal=[0.0, sign, 0.0]) + section = mesh.section( + plane_origin=mesh.center_mass, plane_normal=[0.0, sign, 0.0] + ) # Path3D -> Path2D planar, T = section.to_planar() @@ -262,16 +264,14 @@ def test_section(self): assert len(polygon.interiors) == 1 # the exterior SHOULD be counterclockwise - assert g.trimesh.path.util.is_ccw( - polygon.exterior.coords) + assert g.trimesh.path.util.is_ccw(polygon.exterior.coords) # the interior should NOT be counterclockwise - assert not g.trimesh.path.util.is_ccw( - polygon.interiors[0].coords) + assert not g.trimesh.path.util.is_ccw(polygon.interiors[0].coords) # should be a valid Path2D g.check_path2D(planar) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_pbr.py b/tests/test_pbr.py index 721599420..582f666ac 100644 --- a/tests/test_pbr.py +++ b/tests/test_pbr.py @@ -6,9 +6,7 @@ class PBRTest(unittest.TestCase): - def test_storage(self): - p = trimesh.visual.material.PBRMaterial() assert isinstance(p, trimesh.visual.material.PBRMaterial) a = [hash(p)] @@ -17,7 +15,7 @@ def test_storage(self): # should raise a ValueError as a float # is non-convertable to RGBA colors p.baseColorFactor = 1.0 - raise BaseException('should have disallowed!') + raise BaseException("should have disallowed!") except ValueError: pass @@ -38,12 +36,12 @@ def test_storage(self): assert p.alphaMode is None try: # only allowed to be 3 things not including a sandwich - p.alphaMode = 'sandwich' + p.alphaMode = "sandwich" raise BaseException("shouldn't have passed") except ValueError: pass - p.alphaMode = 'OPAQUE' - assert p.alphaMode == 'OPAQUE' + p.alphaMode = "OPAQUE" + assert p.alphaMode == "OPAQUE" a.append(hash(p)) # hash should have changed when we edited alphaMode @@ -52,7 +50,7 @@ def test_storage(self): assert p.emissiveFactor is None try: # only allowed to be 3 things not including a sandwich - p.emissiveFactor = 'sandwitch' + p.emissiveFactor = "sandwitch" raise BaseException("shouldn't have passed") except ValueError: pass @@ -71,11 +69,11 @@ def test_storage(self): p.emissiveFactor = [1, 1, 0.5] a.append(hash(p)) assert a[-1] != a[-2] - assert np.allclose(p.emissiveFactor, [1, 1, .5]) + assert np.allclose(p.emissiveFactor, [1, 1, 0.5]) try: # only allowed to be float - p.roughnessFactor = 'hi' + p.roughnessFactor = "hi" raise BaseException("shouldn't have passed") except ValueError: pass @@ -91,7 +89,7 @@ def test_storage(self): try: # only allowed to be float - p.metallicFactor = 'hi' + p.metallicFactor = "hi" raise BaseException("shouldn't have passed") except ValueError: pass @@ -109,6 +107,6 @@ def test_storage(self): assert (np.abs(np.diff(a)) > 0).all() -if __name__ == '__main__': +if __name__ == "__main__": trimesh.util.attach_to_log() unittest.main() diff --git a/tests/test_permutate.py b/tests/test_permutate.py index 
7c701599b..47debca74 100644
--- a/tests/test_permutate.py
+++ b/tests/test_permutate.py
@@ -9,7 +9,6 @@
 
 
 class PermutateTest(g.unittest.TestCase):
-
     def test_permutate(self):
         def close(a, b):
             if len(a) == len(b) == 0:
@@ -19,28 +18,30 @@ def close(a, b):
             return g.np.allclose(a, b)
 
         def make_assertions(mesh, test, rigid=False):
-            if (close(test.face_adjacency,
-                      mesh.face_adjacency) and
-                    len(mesh.faces) > MIN_FACES):
-                g.log.error(
-                    f'face_adjacency unchanged: {str(test.face_adjacency)}')
+            if (
+                close(test.face_adjacency, mesh.face_adjacency)
+                and len(mesh.faces) > MIN_FACES
+            ):
+                g.log.error(f"face_adjacency unchanged: {str(test.face_adjacency)}")
                 raise ValueError(
-                    'face adjacency of %s the same after permutation!',
-                    mesh.metadata['file_name'])
-
-            if (close(test.face_adjacency_edges,
-                      mesh.face_adjacency_edges) and
-                    len(mesh.faces) > MIN_FACES):
+                    f"face adjacency of {mesh.metadata['file_name']} "
+                    "the same after permutation!"
+                )
+
+            if (
+                close(test.face_adjacency_edges, mesh.face_adjacency_edges)
+                and len(mesh.faces) > MIN_FACES
+            ):
                 g.log.error(
-                    f'face_adjacency_edges unchanged: {str(test.face_adjacency_edges)}')
+                    f"face_adjacency_edges unchanged: {str(test.face_adjacency_edges)}"
+                )
                 raise ValueError(
-                    'face adjacency edges of %s the same after permutation!',
-                    mesh.metadata['file_name'])
+                    f"face adjacency edges of {mesh.metadata['file_name']} "
+                    "the same after permutation!"
+                )
 
-            assert not close(test.faces,
-                             mesh.faces)
-            assert not close(test.vertices,
-                             mesh.vertices)
+            assert not close(test.faces, mesh.faces)
+            assert not close(test.vertices, mesh.vertices)
             assert not test.__hash__() == mesh.__hash__()
 
             # rigid transforms don't change area or volume
@@ -48,11 +49,13 @@ def make_assertions(mesh, test, rigid=False):
                 assert g.np.allclose(mesh.area, test.area)
 
                 # volume is very dependent on meshes being watertight and sane
-                if (mesh.is_watertight and
-                        test.is_watertight and
-                        mesh.is_winding_consistent and
-                        test.is_winding_consistent):
-                    assert g.np.allclose(mesh.volume, test.volume, rtol=.05)
+                if (
+                    mesh.is_watertight
+                    and test.is_watertight
+                    and mesh.is_winding_consistent
+                    and test.is_winding_consistent
+                ):
+                    assert g.np.allclose(mesh.volume, test.volume, rtol=0.05)
 
         for mesh in g.get_meshes(5):
             if len(mesh.faces) < MIN_FACES:
@@ -63,8 +66,7 @@ def make_assertions(mesh, test, rigid=False):
 
             for _i in range(5):
                 mesh = original.copy()
-                noise = g.trimesh.permutate.noise(mesh,
-                                                  magnitude=mesh.scale / 50.0)
+                noise = g.trimesh.permutate.noise(mesh, magnitude=mesh.scale / 50.0)
                 # make sure that if we permutate vertices with no magnitude
                 # area and volume remain the same
                 no_noise = g.trimesh.permutate.noise(mesh, magnitude=0.0)
@@ -101,6 +103,6 @@ def test_base(self):
             transform = mesh.permutate.transform()  # NOQA
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     g.trimesh.util.attach_to_log()
     g.unittest.main()
diff --git a/tests/test_ply.py b/tests/test_ply.py
index dec5fa89c..527d70c1a 100644
--- a/tests/test_ply.py
+++ b/tests/test_ply.py
@@ -5,7 +5,6 @@
 
 
 class PlyTest(g.unittest.TestCase):
-
    def test_ply_dtype(self):
         # make sure all ply dtype strings are valid dtypes
         dtypes = g.trimesh.exchange.ply._dtypes
@@ -14,43 +13,41 @@ def test_ply_dtype(self):
             g.np.dtype(d)
 
     def test_ply(self):
-        m = g.get_mesh('machinist.XAML')
+        m = g.get_mesh("machinist.XAML")
 
-        assert m.visual.kind == 'face'
+        assert m.visual.kind == "face"
         assert m.visual.face_colors.ptp(axis=0).max() > 0
 
-        export = m.export(file_type='ply')
-        reconstructed = g.wrapload(export,
-                                   
file_type='ply') + export = m.export(file_type="ply") + reconstructed = g.wrapload(export, file_type="ply") - assert reconstructed.visual.kind == 'face' + assert reconstructed.visual.kind == "face" - assert g.np.allclose(reconstructed.visual.face_colors, - m.visual.face_colors) + assert g.np.allclose(reconstructed.visual.face_colors, m.visual.face_colors) - m = g.get_mesh('reference.ply') + m = g.get_mesh("reference.ply") - assert m.visual.kind == 'vertex' + assert m.visual.kind == "vertex" assert m.visual.vertex_colors.ptp(axis=0).max() > 0 - export = m.export(file_type='ply') - reconstructed = g.wrapload(export, file_type='ply') - assert reconstructed.visual.kind == 'vertex' + export = m.export(file_type="ply") + reconstructed = g.wrapload(export, file_type="ply") + assert reconstructed.visual.kind == "vertex" - assert g.np.allclose(reconstructed.visual.vertex_colors, - m.visual.vertex_colors) + assert g.np.allclose(reconstructed.visual.vertex_colors, m.visual.vertex_colors) def test_points(self): # Test reading point clouds from PLY files - m = g.get_mesh('points_ascii.ply') + m = g.get_mesh("points_ascii.ply") assert isinstance(m, g.trimesh.PointCloud) assert m.vertices.shape == (5, 3) - m = g.get_mesh('points_bin.ply') + m = g.get_mesh("points_bin.ply") assert m.vertices.shape == (5, 3) assert isinstance(m, g.trimesh.PointCloud) - m = g.get_mesh('points_emptyface.ply') + m = g.get_mesh("points_emptyface.ply") assert m.vertices.shape == (1024, 3) assert isinstance(m, g.trimesh.PointCloud) @@ -61,19 +58,24 @@ def test_list_properties(self): - multiple list properties - single-element properties that come after list properties """ - m = g.get_mesh('points_ascii_with_lists.ply') + m = g.get_mesh("points_ascii_with_lists.ply") - point_list = m.metadata['_ply_raw']['point_list']['data'] + point_list = m.metadata["_ply_raw"]["point_list"]["data"] assert g.np.array_equal( - point_list['point_indices1'][0], g.np.array([10, 11, 12], dtype=g.np.uint32)) + point_list["point_indices1"][0], g.np.array([10, 11, 12], dtype=g.np.uint32) + ) assert g.np.array_equal( - point_list['point_indices1'][1], g.np.array([10, 11], dtype=g.np.uint32)) + point_list["point_indices1"][1], g.np.array([10, 11], dtype=g.np.uint32) + ) assert g.np.array_equal( - point_list['point_indices2'][0], g.np.array([13, 14], dtype=g.np.uint32)) + point_list["point_indices2"][0], g.np.array([13, 14], dtype=g.np.uint32) + ) assert g.np.array_equal( - point_list['point_indices2'][1], g.np.array([12, 13, 14], dtype=g.np.uint32)) + point_list["point_indices2"][1], g.np.array([12, 13, 14], dtype=g.np.uint32) + ) assert g.np.array_equal( - point_list['some_float'], g.np.array([1.1, 2.2], dtype=g.np.float32)) + point_list["some_float"], g.np.array([1.1, 2.2], dtype=g.np.float32) + ) def test_vertex_attributes(self): """ @@ -81,19 +83,18 @@ def test_vertex_attributes(self): written attributes array matches """ - m = g.get_mesh('box.STL') + m = g.get_mesh("box.STL") test_1d_attribute = g.np.copy(m.vertices[:, 0]) test_nd_attribute = g.np.copy(m.vertices) - m.vertex_attributes['test_1d_attribute'] = test_1d_attribute - m.vertex_attributes['test_nd_attribute'] = test_nd_attribute + m.vertex_attributes["test_1d_attribute"] = test_1d_attribute + m.vertex_attributes["test_nd_attribute"] = test_nd_attribute - export = m.export(file_type='ply') - reconstructed = g.wrapload(export, - file_type='ply') + export = m.export(file_type="ply") + reconstructed = g.wrapload(export, file_type="ply") - vertex_attributes = 
reconstructed.metadata['_ply_raw']['vertex']['data'] - result_1d = vertex_attributes['test_1d_attribute'] - result_nd = vertex_attributes['test_nd_attribute']['f1'] + vertex_attributes = reconstructed.metadata["_ply_raw"]["vertex"]["data"] + result_1d = vertex_attributes["test_1d_attribute"] + result_nd = vertex_attributes["test_nd_attribute"]["f1"] g.np.testing.assert_almost_equal(result_1d, test_1d_attribute) g.np.testing.assert_almost_equal(result_nd, test_nd_attribute) @@ -102,82 +103,82 @@ def test_face_attributes(self): # Test writing face attributes to a ply, by reading # them back and asserting the written attributes array matches - m = g.get_mesh('box.STL') + m = g.get_mesh("box.STL") test_1d_attribute = g.np.copy(m.face_angles[:, 0]) test_nd_attribute = g.np.copy(m.face_angles) - m.face_attributes['test_1d_attribute'] = test_1d_attribute - m.face_attributes['test_nd_attribute'] = test_nd_attribute + m.face_attributes["test_1d_attribute"] = test_1d_attribute + m.face_attributes["test_nd_attribute"] = test_nd_attribute - export = m.export(file_type='ply') - reconstructed = g.wrapload(export, file_type='ply') + export = m.export(file_type="ply") + reconstructed = g.wrapload(export, file_type="ply") - face_attributes = reconstructed.metadata['_ply_raw']['face']['data'] - result_1d = face_attributes['test_1d_attribute'] - result_nd = face_attributes['test_nd_attribute']['f1'] + face_attributes = reconstructed.metadata["_ply_raw"]["face"]["data"] + result_1d = face_attributes["test_1d_attribute"] + result_nd = face_attributes["test_nd_attribute"]["f1"] g.np.testing.assert_almost_equal(result_1d, test_1d_attribute) g.np.testing.assert_almost_equal(result_nd, test_nd_attribute) - no_attr = m.export(file_type='ply', include_attributes=False) + no_attr = m.export(file_type="ply", include_attributes=False) assert len(no_attr) < len(export) def test_cases(self): - a = g.get_mesh('featuretype.STL') - b = g.get_mesh('featuretype.ply') + a = g.get_mesh("featuretype.STL") + b = g.get_mesh("featuretype.ply") assert a.faces.shape == b.faces.shape # has mixed quads and triangles - m = g.get_mesh('suzanne.ply') + m = g.get_mesh("suzanne.ply") assert len(m.faces) > 0 def test_ascii_color(self): mesh = g.trimesh.creation.box() - en = g.wrapload(mesh.export(file_type='ply', encoding="ascii"), - file_type='ply') + en = g.wrapload(mesh.export(file_type="ply", encoding="ascii"), file_type="ply") assert en.visual.kind is None color = [255, 0, 0, 255] mesh.visual.vertex_colors = color # try exporting and reloading raw - eb = g.wrapload(mesh.export(file_type='ply'), file_type='ply') + eb = g.wrapload(mesh.export(file_type="ply"), file_type="ply") assert g.np.allclose(eb.visual.vertex_colors[0], color) - assert eb.visual.kind == 'vertex' + assert eb.visual.kind == "vertex" - ea = g.wrapload(mesh.export(file_type='ply', encoding='ascii'), - file_type='ply') + ea = g.wrapload(mesh.export(file_type="ply", encoding="ascii"), file_type="ply") assert g.np.allclose(ea.visual.vertex_colors, color) - assert ea.visual.kind == 'vertex' + assert ea.visual.kind == "vertex" def test_empty_or_pointcloud(self): # demo files to check - empty_files = ['ply_empty_ascii.ply', - 'ply_empty_bin.ply', - 'ply_empty_header.ply', - 'ply_points_ascii.ply', - 'ply_points_bin.ply'] + empty_files = [ + "ply_empty_ascii.ply", + "ply_empty_bin.ply", + "ply_empty_header.ply", + "ply_points_ascii.ply", + "ply_points_bin.ply", + ] for empty_file in empty_files: - e = g.get_mesh('emptyIO/' + empty_file) - if 'empty' in empty_file: + e = 
g.get_mesh("emptyIO/" + empty_file) + if "empty" in empty_file: # result should be an empty scene try: - e.export(file_type='ply') + e.export(file_type="ply") except BaseException: continue - raise ValueError('should not export empty') - elif 'points' in empty_file: + raise ValueError("should not export empty") + elif "points" in empty_file: # create export - export = e.export(file_type='ply') - reconstructed = g.wrapload(export, file_type='ply') + export = e.export(file_type="ply") + reconstructed = g.wrapload(export, file_type="ply") # result should be a point cloud instance assert isinstance(e, g.trimesh.PointCloud) - assert hasattr(e, 'vertices') + assert hasattr(e, "vertices") # point cloud export should contain vertices assert isinstance(reconstructed, g.trimesh.PointCloud) - assert hasattr(reconstructed, 'vertices') + assert hasattr(reconstructed, "vertices") def test_blender_uv(self): # test texture coordinate loading for Blender exported ply files @@ -185,66 +186,68 @@ def test_blender_uv(self): # test texture coordinate loading for simple triangulated # Blender-export - mesh_names.append('cube_blender_uv.ply') + mesh_names.append("cube_blender_uv.ply") # same mesh but re-exported from meshlab as binary ply (and with # changed header) - mesh_names.append('cube_blender_uv_meshlab.ply') + mesh_names.append("cube_blender_uv_meshlab.ply") # test texture coordinate loading for mesh with mixed quads and # triangles - mesh_names.append('suzanne.ply') + mesh_names.append("suzanne.ply") for mesh_name in mesh_names: m = g.get_mesh(mesh_name) - assert hasattr(m, 'visual') and hasattr(m.visual, 'uv') + assert hasattr(m, "visual") and hasattr(m.visual, "uv") assert m.visual.uv.shape[0] == m.vertices.shape[0] def test_uv_export(self): m = g.get_mesh("fuze.ply") - assert hasattr(m, 'visual') and hasattr(m.visual, 'uv') + assert hasattr(m, "visual") and hasattr(m.visual, "uv") assert m.visual.uv.shape[0] == m.vertices.shape[0] # create empty file to export to with g.TemporaryDirectory() as D: - name = g.os.path.join(D, 'file.ply') + name = g.os.path.join(D, "file.ply") # export should contain the uv data m.export(name) m2 = g.trimesh.load(name) - assert hasattr(m2, 'visual') and hasattr(m2.visual, 'uv') + assert hasattr(m2, "visual") and hasattr(m2.visual, "uv") assert g.np.allclose(m.visual.uv, m2.visual.uv) def test_fix_texture(self): # test loading of face indices when uv-coordinates are also contained - m1 = g.get_mesh('plane.ply') - m2 = g.get_mesh('plane_tri.ply') + m1 = g.get_mesh("plane.ply") + m2 = g.get_mesh("plane_tri.ply") assert m1.faces.shape == (2, 3) assert m2.faces.shape == (2, 3) def test_texturefile(self): # try loading a PLY with texture - m = g.get_mesh('fuze.ply') + m = g.get_mesh("fuze.ply") # run the checks to make sure fuze has the # correct number of vertices and has texture loaded g.check_fuze(m) def test_metadata(self): - mesh = g.get_mesh('metadata.ply') + mesh = g.get_mesh("metadata.ply") - assert (g.np.array([[12], [90]]) == mesh.metadata[ - '_ply_raw']['face']['data']['face_type']).all() + assert ( + g.np.array([[12], [90]]) + == mesh.metadata["_ply_raw"]["face"]["data"]["face_type"] + ).all() def test_point_uv(self): # points with UV coordinates # TODO shouldn't they be saved as a vertex attribute or something - s = g.get_mesh('point_uv.ply.zip') + s = g.get_mesh("point_uv.ply.zip") p = next(iter(s.geometry.values())) assert p.vertices.shape == (1000, 3) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff 
--git a/tests/test_points.py b/tests/test_points.py
index 14b40dc76..a5aacdc9b 100644
--- a/tests/test_points.py
+++ b/tests/test_points.py
@@ -5,7 +5,6 @@
 
 
 class PointsTest(g.unittest.TestCase):
-
     def test_pointcloud(self):
         """
         Test PointCloud object
@@ -48,8 +47,7 @@ def test_pointcloud(self):
         assert hash(cloud) != initial_hash
 
         # AABB volume should be same as points
-        assert g.np.isclose(cloud.bounding_box.volume,
-                            g.np.prod(points.ptp(axis=0)))
+        assert g.np.isclose(cloud.bounding_box.volume, g.np.prod(points.ptp(axis=0)))
 
         # will populate all bounding primitives
         assert cloud.bounding_primitive.volume > 0.0
@@ -63,8 +61,7 @@ def test_pointcloud(self):
         assert not g.np.allclose(points[0], [10, 10, 10])
 
         # check to see if copy works
-        assert g.np.allclose(cloud.vertices,
-                             cloud.copy().vertices)
+        assert g.np.allclose(cloud.vertices, cloud.copy().vertices)
 
     def test_empty(self):
         p = g.trimesh.PointCloud(None)
@@ -102,9 +99,7 @@ def test_plane(self):
             p = g.trimesh.transform_points(p, matrix)
             # we made the Z values zero before transforming
             # so the true normal should be Z then rotated
-            truth = g.trimesh.transform_points([[0, 0, 1]],
-                                               matrix,
-                                               translate=False)[0]
+            truth = g.trimesh.transform_points([[0, 0, 1]], matrix, translate=False)[0]
             # run the plane fit
             C, N = g.trimesh.points.plane_fit(p)
             # sign of normal is arbitrary on fit so check both
@@ -113,8 +108,10 @@ def test_plane(self):
         nb_points_sets = 20
         for _i in range(10):
             # create a random rotation
-            matrices = [g.trimesh.transformations.random_rotation_matrix()
-                        for _ in range(nb_points_sets)]
+            matrices = [
+                g.trimesh.transformations.random_rotation_matrix()
+                for _ in range(nb_points_sets)
+            ]
             # create some random points in space
             p = g.random((nb_points_sets, 1000, 3))
             # make them all lie on the XY plane so we know
@@ -129,19 +126,16 @@ def test_plane(self):
             truths = g.np.zeros((len(p), 3))
             for j, matrix in enumerate(matrices):
                 truths[j, :] = g.trimesh.transform_points(
-                    [[0, 0, 1]],
-                    matrix,
-                    translate=False)[0]
+                    [[0, 0, 1]], matrix, translate=False
+                )[0]
 
             # run the plane fit
             C, N = g.trimesh.points.plane_fit(p)
             # sign of normal is arbitrary on fit so check both
-            cosines = g.np.einsum('ij,ij->i', N, truths)
+            cosines = g.np.einsum("ij,ij->i", N, truths)
             assert g.np.allclose(g.np.abs(cosines), g.np.ones_like(cosines))
 
-    def test_kmeans(self,
-                    cluster_count=5,
-                    points_per_cluster=100):
+    def test_kmeans(self, cluster_count=5, points_per_cluster=100):
         """
         Test K-means clustering
         """
@@ -155,13 +149,10 @@ def test_kmeans(self,
         clustered = g.np.vstack(clustered)
 
         # run k-means clustering on our nicely separated data
-        centroids, klabel = g.trimesh.points.k_means(
-            points=clustered, k=cluster_count)
+        centroids, klabel = g.trimesh.points.k_means(points=clustered, k=cluster_count)
 
         # reshape to make sure all groups have the same index
-        variance = klabel.reshape(
-            (cluster_count, points_per_cluster)).ptp(
-            axis=1)
+        variance = klabel.reshape((cluster_count, points_per_cluster)).ptp(axis=1)
 
         assert len(centroids) == cluster_count
         assert (variance == 0).all()
@@ -187,8 +178,7 @@ def test_tsp(self):
         assert (idx >= 0).all()
 
         # make sure distances returned are correct
-        dist_check = g.np.linalg.norm(
-            g.np.diff(points[idx], axis=0), axis=1)
+        dist_check = g.np.linalg.norm(g.np.diff(points[idx], axis=0), axis=1)
         assert g.np.allclose(dist_check, dist)
 
     def test_xyz(self):
@@ -196,65 +186,60 @@ def test_xyz(self):
         """
         Test XYZ file loading
         """
         # test a small file from cloudcompare
-        p = g.get_mesh('points_cloudcompare.xyz')
+        p = 
g.get_mesh("points_cloudcompare.xyz") assert p.vertices.shape == (101, 3) assert p.colors.shape == (101, 4) # test a small file from agisoft - p = g.get_mesh('points_agisoft.xyz') + p = g.get_mesh("points_agisoft.xyz") assert p.vertices.shape == (100, 3) assert p.colors.shape == (100, 4) # test exports - e = p.export(file_type='xyz') - p = g.trimesh.load(g.trimesh.util.wrap_as_stream(e), - file_type='xyz') + e = p.export(file_type="xyz") + p = g.trimesh.load(g.trimesh.util.wrap_as_stream(e), file_type="xyz") assert p.vertices.shape == (100, 3) assert p.colors.shape == (100, 4) def test_obb(self): - p = g.get_mesh('points_agisoft.xyz') + p = g.get_mesh("points_agisoft.xyz") original = p.bounds.copy() matrix = p.apply_obb() assert matrix.shape == (4, 4) assert not g.np.allclose(p.bounds, original) def test_ply(self): - p = g.get_mesh('points_agisoft.xyz') + p = g.get_mesh("points_agisoft.xyz") assert isinstance(p, g.trimesh.PointCloud) assert len(p.vertices) > 0 # initial color CRC initial = hash(p.visual) # set to random colors - p.colors = g.random( - (len(p.vertices), 4)) + p.colors = g.random((len(p.vertices), 4)) # visual CRC should have changed assert hash(p.visual) != initial # test exporting a pointcloud to a PLY file - r = g.wrapload(p.export(file_type='ply'), file_type='ply') + r = g.wrapload(p.export(file_type="ply"), file_type="ply") assert r.vertices.shape == p.vertices.shape # make sure colors survived the round trip assert g.np.allclose(r.colors, p.colors) def test_glb(self): - p = g.get_mesh('points_agisoft.xyz') + p = g.get_mesh("points_agisoft.xyz") assert isinstance(p, g.trimesh.PointCloud) assert len(p.vertices) > 0 # test exporting a pointcloud to a GLTF # TODO : WE SHOULD IMPLEMENT THE IMPORTER TOO - r = p.export(file_type='gltf') - assert len( - g.json.loads( - r['model.gltf'].decode('utf-8'))['meshes']) == 1 + r = p.export(file_type="gltf") + assert len(g.json.loads(r["model.gltf"].decode("utf-8"))["meshes"]) == 1 def test_remove_close(self): # create 100 unique points p = g.np.arange(300).reshape((100, 3)) # should return the original 100 points - culled, mask = g.trimesh.points.remove_close( - g.np.vstack((p, p)), radius=0.1) + culled, mask = g.trimesh.points.remove_close(g.np.vstack((p, p)), radius=0.1) assert culled.shape == (100, 3) assert mask.shape == (200,) @@ -270,47 +255,42 @@ def test_add_operator(self): cloud_sum = cloud_1 + cloud_2 assert g.np.allclose( - cloud_sum.colors, g.np.vstack( - (cloud_1.colors, cloud_2.colors))) + cloud_sum.colors, g.np.vstack((cloud_1.colors, cloud_2.colors)) + ) # Next test: Only second cloud has colors cloud_1 = g.trimesh.points.PointCloud(points_1) cloud_2 = g.trimesh.points.PointCloud(points_2, colors=colors_2) cloud_sum = cloud_1 + cloud_2 - assert g.np.allclose( - cloud_sum.colors[len(cloud_1.vertices):], cloud_2.colors) + assert g.np.allclose(cloud_sum.colors[len(cloud_1.vertices) :], cloud_2.colors) # Next test: Only first cloud has colors cloud_1 = g.trimesh.points.PointCloud(points_1, colors=colors_1) cloud_2 = g.trimesh.points.PointCloud(points_2) cloud_sum = cloud_1 + cloud_2 - assert g.np.allclose( - cloud_sum.colors[:len(cloud_1.vertices)], cloud_1.colors) + assert g.np.allclose(cloud_sum.colors[: len(cloud_1.vertices)], cloud_1.colors) def test_radial_sort(self): theta = g.np.linspace(0.0, g.np.pi * 2.0, 1000) - points = g.np.column_stack(( - g.np.cos(theta), - g.np.sin(theta), - g.np.zeros(len(theta)))) + points = g.np.column_stack( + (g.np.cos(theta), g.np.sin(theta), g.np.zeros(len(theta))) + ) points *= 
g.random(len(theta)).reshape((-1, 1)) # apply a random order to the points - order = g.np.random.permutation( - g.np.arange(len(points))) + order = g.np.random.permutation(g.np.arange(len(points))) # get the sorted version of these points # when we pass them the randomly ordered points sort = g.trimesh.points.radial_sort( - points[order], - origin=[0, 0, 0], - normal=[0, 0, 1]) + points[order], origin=[0, 0, 0], normal=[0, 0, 1] + ) # should have re-established original order assert g.np.allclose(points, sort) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_polygons.py b/tests/test_polygons.py index 2e93102f0..65d54d867 100644 --- a/tests/test_polygons.py +++ b/tests/test_polygons.py @@ -5,40 +5,38 @@ class PolygonTests(g.unittest.TestCase): - def test_edges(self): """ Test edges_to_polygon """ - m = g.get_mesh('featuretype.STL') + m = g.get_mesh("featuretype.STL") # get a polygon for the second largest facet index = m.facets_area.argsort()[-2] normal = m.facets_normal[index] - origin = m._cache['facets_origin'][index] + origin = m._cache["facets_origin"][index] T = g.trimesh.geometry.plane_transform(origin, normal) vertices = g.trimesh.transform_points(m.vertices, T)[:, :2] # find boundary edges for the facet - edges = m.edges_sorted.reshape( - (-1, 6))[m.facets[index]].reshape((-1, 2)) + edges = m.edges_sorted.reshape((-1, 6))[m.facets[index]].reshape((-1, 2)) group = g.trimesh.grouping.group_rows(edges, require_count=1) # run the polygon conversion polygon = g.trimesh.path.polygons.edges_to_polygons( - edges=edges[group], - vertices=vertices) + edges=edges[group], vertices=vertices + ) assert len(polygon) == 1 - assert g.np.isclose(polygon[0].area, - m.facets_area[index]) + assert g.np.isclose(polygon[0].area, m.facets_area[index]) # try transforming the polygon around M = g.np.eye(3) M[0][2] = 10.0 P2 = g.trimesh.path.polygons.transform_polygon(polygon[0], M) - distance = g.np.array(P2.centroid.coords)[ - 0] - g.np.array(polygon[0].centroid.coords)[0] + distance = ( + g.np.array(P2.centroid.coords)[0] - g.np.array(polygon[0].centroid.coords)[0] + ) assert g.np.allclose(distance, [10.0, 0]) def test_random_polygon(self): @@ -61,7 +59,7 @@ def test_sample(self): assert len(s) <= count assert s.shape[1] == 2 - radius = (s ** 2).sum(axis=1).max() + radius = (s**2).sum(axis=1).max() assert radius < (1.0 + 1e-8) # test Path2D sample wiring @@ -69,7 +67,7 @@ def test_sample(self): s = path.sample(count=count) assert len(s) <= count assert s.shape[1] == 2 - radius = (s ** 2).sum(axis=1).max() + radius = (s**2).sum(axis=1).max() assert radius < (1.0 + 1e-8) # try getting OBB of samples @@ -81,27 +79,24 @@ def test_sample(self): # test sampling with multiple bodies for i in range(3): assert g.np.isclose(path.area, p.area * (i + 1)) - path = path + g.trimesh.load_path( - g.Point([(i + 2) * 2, 0]).buffer(1.0)) + path = path + g.trimesh.load_path(g.Point([(i + 2) * 2, 0]).buffer(1.0)) s = path.sample(count=count) assert s.shape[1] == 2 def test_project(self): m = g.trimesh.creation.icosphere(subdivisions=4) - p = [g.trimesh.path.polygons.projected(m, normal=n) - for n in g.random((100, 3))] + p = [g.trimesh.path.polygons.projected(m, normal=n) for n in g.random((100, 3))] # sphere projection should never have interiors assert all(len(i.interiors) == 0 for i in p) # sphere projected area should always be close to pi - assert g.np.allclose( - [i.area for i in p], g.np.pi, atol=0.05) + assert g.np.allclose([i.area for i 
in p], g.np.pi, atol=0.05)
 
     def test_project_backface(self):
         m = g.trimesh.Trimesh(
-            vertices=[[0, 0, 0], [0, 1, 0], [1, 0, 0]],
-            faces=[[0, 1, 2]])
+            vertices=[[0, 0, 0], [0, 1, 0], [1, 0, 0]], faces=[[0, 1, 2]]
+        )
 
         # check ignore_sign argument
         front = m.projected(m.face_normals[0], ignore_sign=False)
@@ -115,8 +110,9 @@ def test_project_backface(self):
         assert len(back.entities) == 1
 
     def test_project_multi(self):
-        mesh = (g.trimesh.creation.box() +
-                g.trimesh.creation.box().apply_translation([3, 0, 0]))
+        mesh = g.trimesh.creation.box() + g.trimesh.creation.box().apply_translation(
+            [3, 0, 0]
+        )
 
         proj = mesh.projected(normal=[0, 0, 1])
         assert mesh.body_count == 2
@@ -129,10 +125,7 @@ def rectangle(extents):
             # rectangle as a numpy array
             a = g.np.abs(g.np.array(extents) / 2.0)
             lower, upper = -a, a
-            return g.np.array([lower,
-                               [upper[0], lower[1]],
-                               upper,
-                               [lower[0], upper[1]]])
+            return g.np.array([lower, [upper[0], lower[1]], upper, [lower[0], upper[1]]])
 
         def poly(bh, bhi=None):
             # return a rectangle centered at the origin
@@ -158,12 +151,10 @@ def poly_doublecorner(bh):
             # This puts the centroid in the origin with Ixy != 0
             shell_1 = rectangle(bh)
             shell_1 += shell_1.min(axis=0)
-            shell_2 = - shell_1
-            shell = g.np.concatenate((shell_1[2:, :],
-                                      shell_1[:2, :],
-                                      shell_2[2:, :],
-                                      shell_2[:2, :]
-                                      ), axis=0)
+            shell_2 = -shell_1
+            shell = g.np.concatenate(
+                (shell_1[2:, :], shell_1[:2, :], shell_2[2:, :], shell_2[:2, :]), axis=0
+            )
             return Polygon(shell=shell)
 
         def truth(bh, bhi=None):
@@ -175,31 +166,34 @@ def truth(bh, bhi=None):
                 bhi = g.np.zeros(2)
             b, h = bh
             bi, hi = bhi
-            return g.np.array([b * h**3 - bi * hi**3,
-                               h * b**3 - hi * bi**3,
-                               0.0], dtype=g.np.float64) / 12
+            return (
+                g.np.array(
+                    [b * h**3 - bi * hi**3, h * b**3 - hi * bi**3, 0.0],
+                    dtype=g.np.float64,
+                )
+                / 12
+            )
 
         def truth_corner(bh):
             # check a rectangle with one corner
             # at the origin and the rest in positive space
             b, h = bh
-            return g.np.array([b * h**3 / 3.0,
-                               h * b**3 / 3.0,
-                               0.5 * b**2 * 0.5 * h**2], dtype=g.np.float64)
+            return g.np.array(
+                [b * h**3 / 3.0, h * b**3 / 3.0, 0.5 * b**2 * 0.5 * h**2],
+                dtype=g.np.float64,
+            )
 
         from shapely.geometry import Polygon
 
         from trimesh.path.polygons import second_moments, transform_polygon
 
-        heights = g.np.array([[0.01, 0.01],
-                              [1, 1],
-                              [10, 2],
-                              [3, 21]])
+        heights = g.np.array([[0.01, 0.01], [1, 1], [10, 2], [3, 21]])
 
         for bh in heights:
             # check the second moment of a rectangle
             # as polygon is already centered, centered doesn't have any effect
             O_moments, O_principal_moments, O_alpha, O_transform = second_moments(
-                poly(bh), return_centered=True)
+                poly(bh), return_centered=True
+            )
             # check against wikipedia
             t = truth(bh)
             # for a centered rectangle, the principal axes are already aligned
@@ -215,7 +209,8 @@ def truth_corner(bh):
             # First we test with centering. The results should be the same as
             # with the initially centered rectangles
             C_moments, C_principal_moments, C_alpha, C_transform = second_moments(
-                poly_corner(bh), return_centered=True)
+                poly_corner(bh), return_centered=True
+            )
             assert g.np.allclose(O_moments, C_moments)
             assert g.np.allclose(O_principal_moments, C_principal_moments)
             assert g.np.isclose(O_alpha, C_alpha)
@@ -229,12 +224,14 @@ def truth_corner(bh):
 
             # Now we will get the transform for a double rectangle. Then we will apply
             # the transform and test if Ixy == 0, alpha == 0 etc.
             
C_moments, C_principal_moments, C_alpha, C_transform = second_moments( - poly_doublecorner(bh), return_centered=True) + poly_doublecorner(bh), return_centered=True + ) # apply the outputted transform to the polygon T_polygon = transform_polygon(poly_doublecorner(bh), C_transform) # call the function on the transformed polygon T_moments, T_principal_moments, T_alpha, T_transform = second_moments( - T_polygon, return_centered=True) + T_polygon, return_centered=True + ) assert g.np.any(g.np.isclose(T_moments, C_principal_moments[0])) assert g.np.allclose(C_principal_moments, T_principal_moments) assert g.np.isclose(T_alpha, 0, atol=1e-7) @@ -257,18 +254,17 @@ def test_native_centroid(self): # checks of counter-clockwise 2D coordinate loops # get some polygons without interiors - polygons = [g.Polygon(i) for i in g.data['nestable']] + polygons = [g.Polygon(i) for i in g.data["nestable"]] # same data as (n, 2) float arrays - coords = [g.np.array(i) for i in g.data['nestable']] + coords = [g.np.array(i) for i in g.data["nestable"]] for p, c in zip(polygons, coords): # area will be signed with respect to counter-clockwise ccw, area, centroid = g.trimesh.util.is_ccw(c, return_all=True) - assert g.np.allclose( - centroid, g.np.array(p.centroid.coords)[0]) + assert g.np.allclose(centroid, g.np.array(p.centroid.coords)[0]) assert g.np.isclose(abs(area), p.area) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_poses.py b/tests/test_poses.py index 093fb6b9c..4b183c8c2 100644 --- a/tests/test_poses.py +++ b/tests/test_poses.py @@ -5,7 +5,6 @@ class PosesTest(g.unittest.TestCase): - def test_nonsampling_poses(self): mesh = g.trimesh.creation.icosahedron() @@ -18,16 +17,13 @@ def test_nonsampling_poses(self): self.assertTrue(len(probs) == 20) def test_multiple(self): - for mesh in [g.trimesh.creation.icosahedron(), - g.get_mesh('unit_cube.STL')]: - + for mesh in [g.trimesh.creation.icosahedron(), g.get_mesh("unit_cube.STL")]: vectors = g.trimesh.util.grid_linspace([[0.0, 0], [1, 1.0]], 5)[1:] - vectors = g.trimesh.unitize(g.np.column_stack( - (vectors, g.np.ones(len(vectors))))) - for vector, angle in zip( - vectors, g.np.linspace(0.0, g.np.pi, len(vectors))): - matrix = g.trimesh.transformations.rotation_matrix( - angle, vector) + vectors = g.trimesh.unitize( + g.np.column_stack((vectors, g.np.ones(len(vectors)))) + ) + for vector, angle in zip(vectors, g.np.linspace(0.0, g.np.pi, len(vectors))): + matrix = g.trimesh.transformations.rotation_matrix(angle, vector) copied = mesh.copy() copied.apply_transform(matrix) @@ -51,6 +47,6 @@ def test_round(self): transforms, probabilities = mesh.compute_stable_poses(n_samples=10) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_primitives.py b/tests/test_primitives.py index c6b4cc060..ea76a0bc1 100644 --- a/tests/test_primitives.py +++ b/tests/test_primitives.py @@ -5,14 +5,14 @@ try: import triangle # NOQA + has_triangle = True except ImportError: - g.log.warning('Not testing extrude primitives!') + g.log.warning("Not testing extrude primitives!") has_triangle = False class PrimitiveTest(g.unittest.TestCase): - def setUp(self): self.primitives = [] @@ -25,49 +25,55 @@ def setUp(self): self.primitives.append( g.trimesh.primitives.Extrusion( - polygon=g.trimesh.path.polygons.random_polygon(), - height=293292.322)) + polygon=g.trimesh.path.polygons.random_polygon(), height=293292.322 + ) + ) - 
self.primitives.append(e.buffer(.25)) + self.primitives.append(e.buffer(0.25)) self.primitives.append( g.trimesh.primitives.Extrusion( - polygon=g.Point([0, 0]).buffer(.5), - height=1.0)) + polygon=g.Point([0, 0]).buffer(0.5), height=1.0 + ) + ) self.primitives.append( g.trimesh.primitives.Extrusion( - polygon=g.Point([0, 0]).buffer(.5), - height=-1.0)) + polygon=g.Point([0, 0]).buffer(0.5), height=-1.0 + ) + ) self.primitives.append(g.trimesh.primitives.Sphere()) - self.primitives.append(g.trimesh.primitives.Sphere(center=[0, 0, 100], - radius=10.0, - subdivisions=5)) + self.primitives.append( + g.trimesh.primitives.Sphere(center=[0, 0, 100], radius=10.0, subdivisions=5) + ) self.primitives.append(g.trimesh.primitives.Box()) try: self.primitives.append( g.trimesh.primitives.Box( - center=[102.20, 0, 102.0], - extents=[29, 100, 1000])) - raise ValueError('Box shouldnt have accepted `center`!') + center=[102.20, 0, 102.0], extents=[29, 100, 1000] + ) + ) + raise ValueError("Box shouldnt have accepted `center`!") except TypeError: # this should have raised a TypeError as `center` is not a kwarg pass - self.primitives.append(g.trimesh.primitives.Box( - extents=[10, 20, 30], - transform=g.trimesh.transformations.random_rotation_matrix())) + self.primitives.append( + g.trimesh.primitives.Box( + extents=[10, 20, 30], + transform=g.trimesh.transformations.random_rotation_matrix(), + ) + ) self.primitives.append(g.trimesh.primitives.Cylinder()) - self.primitives.append(g.trimesh.primitives.Cylinder(radius=10, - height=1, - sections=40)) + self.primitives.append( + g.trimesh.primitives.Cylinder(radius=10, height=1, sections=40) + ) self.primitives.append(g.trimesh.primitives.Capsule()) - self.primitives.append(g.trimesh.primitives.Capsule(radius=1.5, - height=10)) + self.primitives.append(g.trimesh.primitives.Capsule(radius=1.5, height=10)) def test_scaling(self): # try a simple scaling test @@ -93,38 +99,32 @@ def test_scaling(self): except BaseException: raised = True if not raised: - raise ValueError('primitives should raise on non-uniform scaling') + raise ValueError("primitives should raise on non-uniform scaling") # now try with more complicated generated data - prims = [g.trimesh.primitives.Sphere(radius=1.0), - g.trimesh.primitives.Sphere(radius=112.007), - g.trimesh.primitives.Cylinder(radius=1.0, - height=10.0), - g.trimesh.primitives.Box(), - g.trimesh.primitives.Box(extents=[12, 32, 31]), - g.trimesh.primitives.Cylinder(radius=1.1212, - height=0.001), - g.trimesh.primitives.Capsule(radius=1.0, - height=7.0)] + prims = [ + g.trimesh.primitives.Sphere(radius=1.0), + g.trimesh.primitives.Sphere(radius=112.007), + g.trimesh.primitives.Cylinder(radius=1.0, height=10.0), + g.trimesh.primitives.Box(), + g.trimesh.primitives.Box(extents=[12, 32, 31]), + g.trimesh.primitives.Cylinder(radius=1.1212, height=0.001), + g.trimesh.primitives.Capsule(radius=1.0, height=7.0), + ] for original in prims: - perm = [original, - original.copy(), - original.copy(), - original.copy()] + perm = [original, original.copy(), original.copy(), original.copy()] # try with a simple translation perm[1].primitive.transform = g.tf.translation_matrix([0, 0, 7]) - perm[2].apply_transform( - g.tf.rotation_matrix(g.np.pi / 4, [0, 0, 1])) + perm[2].apply_transform(g.tf.rotation_matrix(g.np.pi / 4, [0, 0, 1])) # try with a gnarly rotation - perm[3].primitive.transform = g.tf.random_rotation_matrix( - translate=1000) + perm[3].primitive.transform = g.tf.random_rotation_matrix(translate=1000) fields = 
set(dir(original.primitive)) ori_radius, ori_height = None, None - if 'radius' in fields: + if "radius" in fields: ori_radius = original.primitive.radius - if 'height' in fields: + if "height" in fields: ori_height = original.primitive.height for scale in [1e-2, 0.123, 0.5, 100.2]: @@ -149,42 +149,34 @@ def test_scaling(self): assert g.tf.is_rigid(p.primitive.transform) if ori_radius is not None: - assert g.np.isclose(p.primitive.radius, - ori_radius * scale) + assert g.np.isclose(p.primitive.radius, ori_radius * scale) if ori_height is not None: - assert g.np.isclose(p.primitive.height, - ori_height * scale) + assert g.np.isclose(p.primitive.height, ori_height * scale) # should be the same size - assert g.np.allclose( - p.extents, m.extents, atol=1e-3 * scale) + assert g.np.allclose(p.extents, m.extents, atol=1e-3 * scale) # should be in the same place assert g.np.allclose(p.bounds, m.bounds, atol=1e-3 * scale) def test_mesh_schema(self): # this schema should define every primitive. - schema = g.trimesh.resources.get_schema( - 'primitive/trimesh.schema.json') + schema = g.trimesh.resources.get_schema("primitive/trimesh.schema.json") # make sure a mesh passes the schema m = g.trimesh.creation.box() g.jsonschema.validate(m.to_dict(), schema) def test_primitives(self): - - kind = {i.__class__.__name__ - for i in self.primitives} + kind = {i.__class__.__name__ for i in self.primitives} # make sure our test data has every primitive - kinds = {'Box', 'Capsule', 'Cylinder', 'Sphere'} + kinds = {"Box", "Capsule", "Cylinder", "Sphere"} if has_triangle: - kinds.add('Extrusion') + kinds.add("Extrusion") assert kind == kinds # this schema should define every primitive. - schema = g.trimesh.resources.get_schema( - 'primitive/primitive.schema.json') + schema = g.trimesh.resources.get_schema("primitive/primitive.schema.json") for primitive in self.primitives: - # convert to a dict d = primitive.to_dict() # validate the output of the to-dict method @@ -192,20 +184,17 @@ def test_primitives(self): # just triple-check that we have a transform # this should have been validated by the schema - assert g.np.shape(d['transform']) == (4, 4) - assert g.trimesh.transformations.is_rigid( - d['transform']) + assert g.np.shape(d["transform"]) == (4, 4) + assert g.trimesh.transformations.is_rigid(d["transform"]) # make sure the value actually json-dumps assert len(g.json.dumps(d)) > 0 # make sure faces and vertices are correct - assert g.trimesh.util.is_shape(primitive.faces, - (-1, 3)) - assert g.trimesh.util.is_shape(primitive.vertices, - (-1, 3)) + assert g.trimesh.util.is_shape(primitive.faces, (-1, 3)) + assert g.trimesh.util.is_shape(primitive.vertices, (-1, 3)) # check dtype of faces and vertices - assert primitive.faces.dtype.kind == 'i' - assert primitive.vertices.dtype.kind == 'f' + assert primitive.faces.dtype.kind == "i" + assert primitive.vertices.dtype.kind == "f" assert primitive.volume > 0.0 assert primitive.area > 0.0 @@ -216,17 +205,11 @@ def test_primitives(self): assert as_mesh.volume > 0.0 assert as_mesh.area > 0.0 - assert g.np.allclose(primitive.extents, - as_mesh.extents) - assert g.np.allclose(primitive.bounds, - as_mesh.bounds) + assert g.np.allclose(primitive.extents, as_mesh.extents) + assert g.np.allclose(primitive.bounds, as_mesh.bounds) - assert g.np.isclose(primitive.volume, - as_mesh.volume, - rtol=.05) - assert g.np.isclose(primitive.area, - as_mesh.area, - rtol=.05) + assert g.np.isclose(primitive.volume, as_mesh.volume, rtol=0.05) + assert g.np.isclose(primitive.area, as_mesh.area, 
rtol=0.05) assert primitive.is_winding_consistent assert primitive.is_watertight @@ -234,10 +217,8 @@ def test_primitives(self): assert as_mesh.is_watertight # check that overload of dir worked - assert len([i for i in - dir(primitive.primitive) - if '_' not in i]) > 0 - if hasattr(primitive, 'direction'): + assert len([i for i in dir(primitive.primitive) if "_" not in i]) > 0 + if hasattr(primitive, "direction"): assert primitive.direction.shape == (3,) centroid = primitive.centroid.copy() @@ -245,17 +226,17 @@ def test_primitives(self): primitive.apply_translation(translation) # centroid should have translated correctly - assert g.np.allclose(primitive.centroid - centroid, - translation) + assert g.np.allclose(primitive.centroid - centroid, translation) def test_sample(self): transform = g.trimesh.transformations.random_rotation_matrix() - box = g.trimesh.primitives.Box(transform=transform, - extents=[20, 10, 100]) - for kwargs in [{'step': 8}, - {'step': [10, .4, 10]}, - {'count': 8}, - {'count': [10, 3, 5]}]: + box = g.trimesh.primitives.Box(transform=transform, extents=[20, 10, 100]) + for kwargs in [ + {"step": 8}, + {"step": [10, 0.4, 10]}, + {"count": 8}, + {"count": [10, 3, 5]}, + ]: grid = box.sample_grid(**kwargs) assert g.trimesh.util.is_shape(grid, (-1, 3)) assert (box.nearest.signed_distance(grid) > -1e-6).all() @@ -266,10 +247,8 @@ def test_box(self): """ start = [20, 10, 100] box = g.trimesh.primitives.Box(extents=start) - assert g.np.allclose(box.primitive.extents, - start) - assert g.np.allclose(box.extents, - start) + assert g.np.allclose(box.primitive.extents, start) + assert g.np.allclose(box.extents, start) if g.has_path: # check to see if outline function works assert g.np.allclose(box.as_outline().extents, start) @@ -286,7 +265,8 @@ def test_cyl_buffer(self): c = g.trimesh.primitives.Cylinder( radius=1.0, height=10.0, - transform=g.trimesh.transformations.random_rotation_matrix()) + transform=g.trimesh.transformations.random_rotation_matrix(), + ) # inflate cylinder b = c.buffer(1.0) assert g.np.isclose(b.primitive.height, 12.0) @@ -298,18 +278,17 @@ def test_cyl_buffer(self): def test_transform_attribute(self): for primitive in self.primitives: - assert hasattr(primitive, 'transform') + assert hasattr(primitive, "transform") - assert g.trimesh.util.is_shape(primitive.transform, - (4, 4)) + assert g.trimesh.util.is_shape(primitive.transform, (4, 4)) - if hasattr(primitive.primitive, 'center'): - assert g.np.allclose(primitive.primitive.center, - primitive.transform[:3, 3]) + if hasattr(primitive.primitive, "center"): + assert g.np.allclose( + primitive.primitive.center, primitive.transform[:3, 3] + ) def test_sphere_center(self): - s = g.trimesh.primitives.Sphere( - center=[0, 0, 100], radius=10.0, subdivisions=5) + s = g.trimesh.primitives.Sphere(center=[0, 0, 100], radius=10.0, subdivisions=5) assert g.np.allclose(s.center, [0, 0, 100]) s.center = [1, 1, 1] @@ -339,7 +318,8 @@ def test_copy(self): # for both Primitive objects and regular Trimesh objects. 
meshes = [ g.trimesh.primitives.Box(extents=start), - g.trimesh.creation.box(extents=start)] + g.trimesh.creation.box(extents=start), + ] for box in meshes: box.density = 0.3 @@ -348,9 +328,9 @@ def test_copy(self): box_copy = box.copy() assert box.density == box_copy.density assert g.np.allclose(box.center_mass, box_copy.center_mass) - assert box.metadata['foo'] == box_copy.metadata['foo'] + assert box.metadata["foo"] == box_copy.metadata["foo"] -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_proximity.py b/tests/test_proximity.py index 32bc58bec..93bd38dbe 100644 --- a/tests/test_proximity.py +++ b/tests/test_proximity.py @@ -5,7 +5,6 @@ class NearestTest(g.unittest.TestCase): - def test_naive(self): """ Test the naive nearest point function @@ -21,22 +20,21 @@ def test_naive(self): triangles = sphere.triangles # NOQA # do the check - closest, distance, tid = g.trimesh.proximity.closest_point_naive( - sphere, points) + closest, distance, tid = g.trimesh.proximity.closest_point_naive(sphere, points) # the distance from a sphere of radius 1.0 to a sphere of radius 2.0 # should be pretty darn close to 1.0 - assert (g.np.abs(distance - 1.0) < .01).all() + assert (g.np.abs(distance - 1.0) < 0.01).all() # the vector for the closest point should be the same as the vector # to the query point vector = g.trimesh.util.diagonal_dot(closest, points / 2.0) - assert (g.np.abs(vector - 1.0) < .01).all() + assert (g.np.abs(vector - 1.0) < 0.01).all() def test_helper(self): # just make sure the plumbing returns something for mesh in g.get_meshes(2): - points = (g.random((100, 3)) - .5) * 100 + points = (g.random((100, 3)) - 0.5) * 100 a = mesh.nearest.on_surface(points) assert a is not None @@ -45,8 +43,10 @@ def test_helper(self): assert b is not None def test_nearest_naive(self): - funs = [g.trimesh.proximity.closest_point_naive, - g.trimesh.proximity.closest_point] + funs = [ + g.trimesh.proximity.closest_point_naive, + g.trimesh.proximity.closest_point, + ] data_points = g.deque() data_dist = g.deque() @@ -61,13 +61,10 @@ def test_nearest_naive(self): assert g.np.ptp(data_points, axis=0).max() < g.tol.merge assert g.np.ptp(data_dist, axis=0).max() < g.tol.merge - log_msg = '\n'.join(f"{i}: {j}s" - for i, j in zip( - [i.__name__ for i in funs], - g.np.diff(tic))) - g.log.info( - 'Compared the following nearest point functions:\n' + - log_msg) + log_msg = "\n".join( + f"{i}: {j}s" for i, j in zip([i.__name__ for i in funs], g.np.diff(tic)) + ) + g.log.info("Compared the following nearest point functions:\n" + log_msg) def check_nearest_point_function(self, fun): # def plot_tri(tri, color='g'): @@ -94,7 +91,7 @@ def points_on_circle(count): # set the points up in space query[:, 2] = 10 # a circle of points inside-ish the triangle - query = g.np.vstack((query, query * .1)) + query = g.np.vstack((query, query * 0.1)) # loop through each triangle for triangle in triangles: @@ -108,17 +105,16 @@ def points_on_circle(count): # all of the points returned should be on the triangle we're # querying - assert all(polygon_buffer.intersects( - g.Point(i)) for i in result[:, 0:2]) + assert all(polygon_buffer.intersects(g.Point(i)) for i in result[:, 0:2]) # see what distance shapely thinks the nearest point # is for the 2D triangle and the query points - distance_shapely = g.np.array([polygon.distance(g.Point(i)) - for i in query[:, :2]]) + distance_shapely = g.np.array( + [polygon.distance(g.Point(i)) for i in query[:, :2]] + ) # see 
what distance our function returned for the nearest point - distance_ours = ((query[:, :2] - result[:, :2]) - ** 2).sum(axis=1) ** .5 + distance_ours = ((query[:, :2] - result[:, :2]) ** 2).sum(axis=1) ** 0.5 # how far was our distance from the one shapely gave distance_test = g.np.abs(distance_shapely - distance_ours) # NOQA @@ -131,10 +127,9 @@ def points_on_circle(count): # any rigid transform # chop query off to same length as triangles assert len(query) > len(triangles) - query = query[:len(triangles)] + query = query[: len(triangles)] # run the closest point query as a corresponding query - close = g.trimesh.triangles.closest_point(triangles=triangles, - points=query) + close = g.trimesh.triangles.closest_point(triangles=triangles, points=query) # distance between closest point and query point # this should stay the same regardless of frame distance = g.np.linalg.norm(close - query, axis=1) @@ -142,11 +137,11 @@ def points_on_circle(count): # transform the query points points = g.trimesh.transform_points(query, T) # transform the triangles we're checking - tri = g.trimesh.transform_points( - triangles.reshape((-1, 3)), T).reshape((-1, 3, 3)) + tri = g.trimesh.transform_points(triangles.reshape((-1, 3)), T).reshape( + (-1, 3, 3) + ) # run the closest point check - check = g.trimesh.triangles.closest_point(triangles=tri, - points=points) + check = g.trimesh.triangles.closest_point(triangles=tri, points=points) check_distance = g.np.linalg.norm(check - points, axis=1) # should be the same in any frame assert g.np.allclose(check_distance, distance) @@ -164,7 +159,7 @@ def test_coplanar_signed_distance(self): # constructed so origin is inside but also coplanar with # the nearest face - mesh = g.get_mesh('origin_inside.STL') + mesh = g.get_mesh("origin_inside.STL") # origin should be inside, so distance should be positive distance = mesh.nearest.signed_distance([[0, 0, 0]]) @@ -176,13 +171,12 @@ def test_noncoplanar_signed_distance(self): # should be well outside the box and not coplanar with a face # so the signed distance should be negative - distance = mesh.nearest.signed_distance( - [mesh.bounds[0] + [100, 100, 100]]) + distance = mesh.nearest.signed_distance([mesh.bounds[0] + [100, 100, 100]]) assert distance[0] < 0.0 def test_edge_case(self): - mesh = g.get_mesh('20mm-xyz-cube.stl') + mesh = g.get_mesh("20mm-xyz-cube.stl") assert (mesh.nearest.signed_distance([[-51, 4.7, -20.6]]) < 0.0).all() def test_acute_edge_case(self): @@ -197,22 +191,21 @@ def test_acute_edge_case(self): # -> take an even number of points n = 20 n += n % 2 - pts = g.np.transpose([g.np.zeros(n), - g.np.ones(n), - g.np.linspace(-1, 1, n)]) + pts = g.np.transpose([g.np.zeros(n), g.np.ones(n), g.np.linspace(-1, 1, n)]) # the faces facing the points should differ for first and second half of the set # check their indices for inequality faceIdxsA, faceIdxsB = g.np.split(mesh.nearest.on_surface(pts)[-1], 2) - assert (g.np.all(faceIdxsA == faceIdxsA[0]) and - g.np.all(faceIdxsB == faceIdxsB[0]) and - faceIdxsA[0] != faceIdxsB[0]) + assert ( + g.np.all(faceIdxsA == faceIdxsA[0]) + and g.np.all(faceIdxsB == faceIdxsB[0]) + and faceIdxsA[0] != faceIdxsB[0] + ) def test_candidates(self): mesh = g.trimesh.creation.random_soup(2000) points = g.random((2000, 3)) - g.trimesh.proximity.nearby_faces( - mesh=mesh, points=points) + g.trimesh.proximity.nearby_faces(mesh=mesh, points=points) def test_returns_correct_point_in_ambiguous_cases(self): mesh = g.trimesh.Trimesh( @@ -237,12 +230,15 @@ def test_unreferenced_vertex(self): # 
return correct values and ignore the unreferenced points query_point = [-1.0, -1.0, -1.0] mesh = g.trimesh.Trimesh( - vertices=[[1.0, 0.0, 0.0], - [0.0, 1.0, 0.0], - [0.0, 0.0, 1.0], - [-0.5, -0.5, -0.5]], + vertices=[ + [1.0, 0.0, 0.0], + [0.0, 1.0, 0.0], + [0.0, 0.0, 1.0], + [-0.5, -0.5, -0.5], + ], faces=[[0, 1, 2]], - process=False) + process=False, + ) proximity_query = g.trimesh.proximity.ProximityQuery(mesh) q = proximity_query.on_surface([query_point]) @@ -250,6 +246,6 @@ def test_unreferenced_vertex(self): assert all(len(i) == 1 for i in q) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_raster.py b/tests/test_raster.py index d3cf91c41..4e514273d 100644 --- a/tests/test_raster.py +++ b/tests/test_raster.py @@ -5,42 +5,33 @@ class RasterTest(g.unittest.TestCase): - def test_rasterize(self): - p = g.get_mesh('2D/wrench.dxf') + p = g.get_mesh("2D/wrench.dxf") origin = p.bounds[0] pitch = p.extents.max() / 600 resolution = g.np.ceil(p.extents / pitch).astype(int) # rasterize with filled - filled = p.rasterize(origin=origin, - pitch=pitch, - resolution=resolution, - fill=True, - width=None) + filled = p.rasterize( + origin=origin, pitch=pitch, resolution=resolution, fill=True, width=None + ) # rasterize just the outline - outline = p.rasterize(origin=origin, - pitch=pitch, - resolution=resolution, - fill=False, - width=2.0) + outline = p.rasterize( + origin=origin, pitch=pitch, resolution=resolution, fill=False, width=2.0 + ) # rasterize both - both = p.rasterize(origin=origin, - pitch=pitch, - resolution=resolution, - fill=True, - width=2.0) + both = p.rasterize( + origin=origin, pitch=pitch, resolution=resolution, fill=True, width=2.0 + ) # rasterize with two-dimensional pitch pitch = p.extents / 600 - filled_2dpitch = p.rasterize(origin=origin, - pitch=pitch, - resolution=resolution, - fill=True, - width=None) + filled_2dpitch = p.rasterize( + origin=origin, pitch=pitch, resolution=resolution, fill=True, width=None + ) # count the number of filled pixels fill_cnt = g.np.array(filled).sum() @@ -74,19 +65,17 @@ def test_nested(self): pitch = path.extents.max() / 1000 origin = path.bounds[0] - pitch - resolution = (g.np.ceil( - path.extents / pitch) + 2).astype(int) + resolution = (g.np.ceil(path.extents / pitch) + 2).astype(int) # rasterize using the settings - r = path.rasterize( - pitch=pitch, origin=origin, resolution=resolution) + r = path.rasterize(pitch=pitch, origin=origin, resolution=resolution) # it's a boolean image so filled cells times # pitch area should be about the same as the area - filled = g.np.array(r).sum() * pitch ** 2 + filled = g.np.array(r).sum() * pitch**2 assert g.np.isclose(filled, path.area, rtol=0.01) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_ray.py b/tests/test_ray.py index 061015bbb..46c4f1d6c 100644 --- a/tests/test_ray.py +++ b/tests/test_ray.py @@ -5,33 +5,29 @@ class RayTests(g.unittest.TestCase): - def test_rays(self): - meshes = [g.get_mesh(**k) - for k in g.data['ray_data']['load_kwargs']] - rays = g.data['ray_data']['rays'] - names = [m.metadata['file_name'] for m in meshes] + meshes = [g.get_mesh(**k) for k in g.data["ray_data"]["load_kwargs"]] + rays = g.data["ray_data"]["rays"] + names = [m.metadata["file_name"] for m in meshes] hit_id = [] hit_loc = [] hit_any = [] for m in meshes: - name = m.metadata['file_name'] + name = m.metadata["file_name"] 
hit_any.append(m.ray.intersects_any(**rays[name])) hit_loc.append(m.ray.intersects_location(**rays[name])[0]) hit_id.append(m.ray.intersects_id(**rays[name])) hit_any = g.np.array(hit_any, dtype=g.np.int64) - for i in g.trimesh.grouping.group( - g.np.unique(names, return_inverse=True)[1]): + for i in g.trimesh.grouping.group(g.np.unique(names, return_inverse=True)[1]): broken = hit_any[i].astype(g.np.int64).ptp(axis=0).sum() assert broken == 0 def test_rps(self): for use_embree in [True, False]: dimension = (10000, 3) - sphere = g.get_mesh('unit_sphere.STL', - use_embree=use_embree) + sphere = g.get_mesh("unit_sphere.STL", use_embree=use_embree) ray_origins = g.random(dimension) ray_directions = g.np.tile([0, 0, 1], (dimension[0], 1)) @@ -40,11 +36,9 @@ def test_rps(self): # force ray object to allocate tree before timing it # tree = sphere.ray.tree tic = [g.time.time()] - a = sphere.ray.intersects_id( - ray_origins, ray_directions) + a = sphere.ray.intersects_id(ray_origins, ray_directions) tic.append(g.time.time()) - b = sphere.ray.intersects_location( - ray_origins, ray_directions) + b = sphere.ray.intersects_location(ray_origins, ray_directions) tic.append(g.time.time()) # make sure ray functions always return numpy arrays @@ -53,9 +47,7 @@ def test_rps(self): rps = dimension[0] / g.np.diff(tic) - g.log.info('Measured %s rays/second with embree %d', - str(rps), - use_embree) + g.log.info("Measured %s rays/second with embree %d", str(rps), use_embree) def test_empty(self): """ @@ -63,8 +55,7 @@ def test_empty(self): """ for use_embree in [True, False]: dimension = (100, 3) - sphere = g.get_mesh('unit_sphere.STL', - use_embree=use_embree) + sphere = g.get_mesh("unit_sphere.STL", use_embree=use_embree) # should never hit the sphere ray_origins = g.random(dimension) ray_directions = g.np.tile([0, 1, 0], (dimension[0], 1)) @@ -73,18 +64,20 @@ def test_empty(self): # make sure ray functions always return numpy arrays # these functions return multiple results all of which # should always be a numpy array - assert all(len(i.shape) >= 0 for i in - sphere.ray.intersects_id( - ray_origins, ray_directions)) - assert all(len(i.shape) >= 0 for i in - sphere.ray.intersects_location( - ray_origins, ray_directions)) + assert all( + len(i.shape) >= 0 + for i in sphere.ray.intersects_id(ray_origins, ray_directions) + ) + assert all( + len(i.shape) >= 0 + for i in sphere.ray.intersects_location(ray_origins, ray_directions) + ) def test_contains(self): scale = 1.5 for use_embree in [True, False]: - mesh = g.get_mesh('unit_cube.STL', use_embree=use_embree) - g.log.info('Contains test ray engine: ' + str(mesh.ray.__class__)) + mesh = g.get_mesh("unit_cube.STL", use_embree=use_embree) + g.log.info("Contains test ray engine: " + str(mesh.ray.__class__)) test_on = mesh.ray.contains_points(mesh.vertices) # NOQA test_in = mesh.ray.contains_points(mesh.vertices * (1.0 / scale)) @@ -93,9 +86,7 @@ def test_contains(self): test_out = mesh.ray.contains_points(mesh.vertices * scale) assert not test_out.any() - points_way_out = ( - g.random( - (30, 3)) * 100) + 1.0 + mesh.bounds[1] + points_way_out = (g.random((30, 3)) * 100) + 1.0 + mesh.bounds[1] test_way_out = mesh.ray.contains_points(points_way_out) assert not test_way_out.any() @@ -109,22 +100,19 @@ def test_on_vertex(self): origins = g.np.zeros_like(m.vertices) vectors = m.vertices.copy() - assert m.ray.intersects_any(ray_origins=origins, - ray_directions=vectors).all() + assert m.ray.intersects_any(ray_origins=origins, ray_directions=vectors).all() - (locations, 
- index_ray, - index_tri) = m.ray.intersects_location(ray_origins=origins, - ray_directions=vectors) + (locations, index_ray, index_tri) = m.ray.intersects_location( + ray_origins=origins, ray_directions=vectors + ) - hit_count = g.np.bincount(index_ray, - minlength=len(origins)) + hit_count = g.np.bincount(index_ray, minlength=len(origins)) assert (hit_count == 1).all() def test_on_edge(self): for use_embree in [True, False]: - m = g.get_mesh('7_8ths_cube.stl', use_embree=use_embree) + m = g.get_mesh("7_8ths_cube.stl", use_embree=use_embree) points = [[4.5, 0, -23], [4.5, 0, -2], [0, 0, -1e-6], [0, 0, -1]] truth = [False, True, True, True] @@ -133,44 +121,41 @@ def test_on_edge(self): assert (result == truth).all() def test_multiple_hits(self): - """ - """ + """ """ # Set camera focal length (in pixels) - f = g.np.array([1000., 1000.]) + f = g.np.array([1000.0, 1000.0]) h, w = 256, 256 # Set up a list of ray directions - one for each pixel in our (256, # 256) output image. ray_directions = g.trimesh.util.grid_arange( - [[-h / 2, -w / 2], - [h / 2, w / 2]], - step=2.0) + [[-h / 2, -w / 2], [h / 2, w / 2]], step=2.0 + ) ray_directions = g.np.column_stack( - (ray_directions, - g.np.ones(len(ray_directions)) * f[0])) + (ray_directions, g.np.ones(len(ray_directions)) * f[0]) + ) # Initialize the camera origin to be somewhere behind the cube. - cam_t = g.np.array([0, 0, -15.]) + cam_t = g.np.array([0, 0, -15.0]) # Duplicate to ensure we have an camera_origin per ray direction ray_origins = g.np.tile(cam_t, (ray_directions.shape[0], 1)) for use_embree in [True, False]: # Generate a 1 x 1 x 1 cube using the trimesh box primitive - cube_mesh = g.trimesh.creation.box(extents=[2, 2, 2], - use_embree=use_embree) + cube_mesh = g.trimesh.creation.box(extents=[2, 2, 2], use_embree=use_embree) # Perform 256 * 256 raycasts, one for each pixel on the image # plane. We only want the 'first' hit. index_triangles, index_ray = cube_mesh.ray.intersects_id( ray_origins=ray_origins, ray_directions=ray_directions, - multiple_hits=False) + multiple_hits=False, + ) assert len(g.np.unique(index_triangles)) == 2 index_triangles, index_ray = cube_mesh.ray.intersects_id( - ray_origins=ray_origins, - ray_directions=ray_directions, - multiple_hits=True) + ray_origins=ray_origins, ray_directions=ray_directions, multiple_hits=True + ) assert len(g.np.unique(index_triangles)) > 2 def test_contain_single(self): @@ -194,18 +179,13 @@ def test_box(self): ray origin XY. 
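        As an editor's aside, a minimal sketch of the call pattern this
        exercises (illustrative only; the names `xy`, `origins` and
        `vectors` are assumptions, not the test's fixtures):

            import numpy as np
            import trimesh

            mesh = trimesh.creation.box(extents=[1.0, 1.0, 1.0])
            # a small grid of origins below the box, all rays firing +Z
            xy = np.linspace(-0.4, 0.4, 5)
            origins = np.array([[x, y, -100.0] for x in xy for y in xy])
            vectors = np.tile([0.0, 0.0, 1.0], (len(origins), 1))
            # returns (n, 3) hit points plus per-hit ray and face indices
            pos, ray, tri = mesh.ray.intersects_location(
                ray_origins=origins, ray_directions=vectors)
            # for vertical rays each hit's XY must equal its origin's XY
            assert np.allclose(pos[:, :2], origins[ray][:, :2])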
""" - for kwargs in [{'use_embree': True}, - {'use_embree': False}]: - - mesh = g.get_mesh('unit_cube.STL', **kwargs) + for kwargs in [{"use_embree": True}, {"use_embree": False}]: + mesh = g.get_mesh("unit_cube.STL", **kwargs) # grid is across meshes XY profile - origins = g.trimesh.util.grid_linspace(mesh.bounds[:, :2] + - g.np.reshape( - [-.02, .02], (-1, 1)), - 100) - origins = g.np.column_stack(( - origins, - g.np.ones(len(origins)) * -100)) + origins = g.trimesh.util.grid_linspace( + mesh.bounds[:, :2] + g.np.reshape([-0.02, 0.02], (-1, 1)), 100 + ) + origins = g.np.column_stack((origins, g.np.ones(len(origins)) * -100)) # all vectors are along Z axis vectors = g.np.ones((len(origins), 3)) * [0, 0, 1.0] @@ -213,8 +193,8 @@ def test_box(self): # (n,) int, index of original ray # (m,) int, index of mesh.faces pos, ray, tri = mesh.ray.intersects_location( - ray_origins=origins, - ray_directions=vectors) + ray_origins=origins, ray_directions=vectors + ) for p, r in zip(pos, ray): # intersect location XY should match ray origin XY @@ -228,22 +208,24 @@ def test_broken(self): Test a mesh with badly defined face normals """ - ray_origins = g.np.array([[0.12801793, 24.5030052, -5.], - [0.12801793, 24.5030052, -5.]]) - ray_directions = g.np.array([[-0.13590759, -0.98042506, 0.], - [0.13590759, 0.98042506, -0.]]) + ray_origins = g.np.array( + [[0.12801793, 24.5030052, -5.0], [0.12801793, 24.5030052, -5.0]] + ) + ray_directions = g.np.array( + [[-0.13590759, -0.98042506, 0.0], [0.13590759, 0.98042506, -0.0]] + ) - for kwargs in [{'use_embree': True}, - {'use_embree': False}]: - mesh = g.get_mesh('broken.STL', **kwargs) + for kwargs in [{"use_embree": True}, {"use_embree": False}]: + mesh = g.get_mesh("broken.STL", **kwargs) locations, index_ray, index_tri = mesh.ray.intersects_location( - ray_origins=ray_origins, ray_directions=ray_directions) + ray_origins=ray_origins, ray_directions=ray_directions + ) # should be same number of location hits assert len(locations) == len(ray_origins) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_registration.py b/tests/test_registration.py index 319bdefaa..a295a9c9a 100644 --- a/tests/test_registration.py +++ b/tests/test_registration.py @@ -5,9 +5,7 @@ class RegistrationTest(g.unittest.TestCase): - def test_procrustes(self): - # every combination of possible boolean options # a_flip and a_scale are apply-to-test-data opt = list(g.itertools.combinations([True, False] * 6, 6)) @@ -16,7 +14,7 @@ def test_procrustes(self): for reflection, translation, scale, a_flip, a_scale, weight in opt: # create random points in space - points_a = (g.random((1000, 3)) - .5) * 1000 + points_a = (g.random((1000, 3)) - 0.5) * 1000 # get a random transform from the iterator matrix = next(matrices) # apply a flip (reflection) to test data @@ -24,12 +22,14 @@ def test_procrustes(self): matrix = g.np.dot( matrix, g.trimesh.transformations.reflection_matrix( - point=[0, 0, 0], normal=[0, 1, 0])) + point=[0, 0, 0], normal=[0, 1, 0] + ), + ) # apply scale to test data if a_scale: matrix = g.np.dot( - matrix, - g.trimesh.transformations.scale_matrix(0.1235234)) + matrix, g.trimesh.transformations.scale_matrix(0.1235234) + ) # apply transform to points A points_b = g.trimesh.transform_points(points_a, matrix) @@ -40,24 +40,24 @@ def test_procrustes(self): weights = None # run the solver - (matrixN, - transformed, - cost) = g.trimesh.registration.procrustes( - points_a, points_b, - reflection=reflection, - 
translation=translation, - scale=scale, - weights=weights) + (matrixN, transformed, cost) = g.trimesh.registration.procrustes( + points_a, + points_b, + reflection=reflection, + translation=translation, + scale=scale, + weights=weights, + ) # if we're not weighting the results # should be identical with None vs all-ones - (matrixN_C, - transformed_C, - cost_C) = g.trimesh.registration.procrustes( - points_a, points_b, - reflection=reflection, - translation=translation, - scale=scale, - weights=g.np.ones(len(points_a))) + (matrixN_C, transformed_C, cost_C) = g.trimesh.registration.procrustes( + points_a, + points_b, + reflection=reflection, + translation=translation, + scale=scale, + weights=g.np.ones(len(points_a)), + ) if weight: # weights should have changed the matrix assert not g.np.allclose(matrixN, matrixN_C) @@ -70,10 +70,12 @@ def test_procrustes(self): # the points should be identical if the function # was allowed to translate in space # and there were no weights, scaling, or reflection - identical = (translation and - (not weight) and - (not a_flip or reflection) and - (not a_scale or scale)) + identical = ( + translation + and (not weight) + and (not a_flip or reflection) + and (not a_scale or scale) + ) if identical: assert cost < 0.001 @@ -108,21 +110,18 @@ def test_icp_mesh(self): m = g.trimesh.creation.box() X = m.sample(10) X = X + [0.1, 0.1, 0.1] - matrix, transformed, cost = g.trimesh.registration.icp( - X, m, scale=False) + matrix, transformed, cost = g.trimesh.registration.icp(X, m, scale=False) assert cost < 0.01 def test_icp_points(self): # see if ICP alignment works with point clouds # create random points in space - points_a = (g.random((1000, 3)) - .5) * 1000 + points_a = (g.random((1000, 3)) - 0.5) * 1000 # create a random transform # matrix = g.trimesh.transformations.random_rotation_matrix() # create a small transform # ICP will not work at all with large transforms - matrix = g.trimesh.transformations.rotation_matrix( - g.np.radians(1.0), - [0, 0, 1]) + matrix = g.trimesh.transformations.rotation_matrix(g.np.radians(1.0), [0, 0, 1]) # take a few randomly chosen points and make # sure the order is permutated @@ -130,15 +129,13 @@ def test_icp_points(self): # transform and apply index points_b = g.trimesh.transform_points(points_a[index], matrix) # tun the solver - matrixN, transformed, cost = g.trimesh.registration.icp(points_b, - points_a) + matrixN, transformed, cost = g.trimesh.registration.icp(points_b, points_a) assert cost < 1e-3 - assert g.np.allclose(matrix, - g.np.linalg.inv(matrixN)) + assert g.np.allclose(matrix, g.np.linalg.inv(matrixN)) assert g.np.allclose(transformed, points_a[index]) def test_mesh(self): - noise = .05 + noise = 0.05 extents = [6, 12, 3] # create the mesh as a simple box @@ -151,7 +148,7 @@ def test_mesh(self): mesh = mesh.permutate.noise(noise) # randomly rotation with translation transform = g.trimesh.transformations.random_rotation_matrix() - transform[:3, 3] = (g.random(3) - .5) * 1000 + transform[:3, 3] = (g.random(3) - 0.5) * 1000 mesh.apply_transform(transform) @@ -165,11 +162,7 @@ def test_mesh(self): a_check = a.copy() a_check.apply_transform(a_to_b) - assert g.np.linalg.norm( - a_check.centroid - - b.centroid) < ( - noise * - 2) + assert g.np.linalg.norm(a_check.centroid - b.centroid) < (noise * 2) # find the distance from the truth mesh to each scan vertex distance = a_check.nearest.on_surface(b.vertices)[1] @@ -177,8 +170,8 @@ def test_mesh(self): # try our registration with points points = g.trimesh.transform_points( 
- scan.sample(100), - matrix=g.trimesh.transformations.random_rotation_matrix()) + scan.sample(100), matrix=g.trimesh.transformations.random_rotation_matrix() + ) truth_to_points, cost = truth.register(points) truth.apply_transform(truth_to_points) distance = truth.nearest.on_surface(points)[1] @@ -186,46 +179,47 @@ def test_mesh(self): assert distance.mean() < noise def test_nricp(self): - # Get two meshes that have a comparable shape - source = g.get_mesh('reference.obj', process=False) - target = g.get_mesh('target.obj', process=False) + source = g.get_mesh("reference.obj", process=False) + target = g.get_mesh("target.obj", process=False) # Vertex indices of landmarks source / target - landmarks_vertex_indices = g.np.array([ - [177, 1633], - [181, 1561], - [614, 1556], - [610, 1629], - [114, 315], - [398, 413], - [812, 412], - [227, 99], - [241, 87], - [674, 86], - [660, 98], - [362, 574], - [779, 573], - ]) + landmarks_vertex_indices = g.np.array( + [ + [177, 1633], + [181, 1561], + [614, 1556], + [610, 1629], + [114, 315], + [398, 413], + [812, 412], + [227, 99], + [241, 87], + [674, 86], + [660, 98], + [362, 574], + [779, 573], + ] + ) source_markers_vertices = source.vertices[landmarks_vertex_indices[:, 0]] target_markers_vertices = target.vertices[landmarks_vertex_indices[:, 1]] - T = g.trimesh.registration.procrustes(source_markers_vertices, - target_markers_vertices)[0] - source.vertices = g.trimesh.transformations.transform_points( - source.vertices, T) + T = g.trimesh.registration.procrustes( + source_markers_vertices, target_markers_vertices + )[0] + source.vertices = g.trimesh.transformations.transform_points(source.vertices, T) # Just for the sake of using barycentric coordinates... use_barycentric_coordinates = True if use_barycentric_coordinates: source_markers_vertices = source.vertices[landmarks_vertex_indices[:, 0]] - source_markers_tids = \ - g.trimesh.proximity.closest_point( - source, source_markers_vertices)[2] - source_markers_barys = \ - g.trimesh.triangles.points_to_barycentric( - source.triangles[source_markers_tids], source_markers_vertices) + source_markers_tids = g.trimesh.proximity.closest_point( + source, source_markers_vertices + )[2] + source_markers_barys = g.trimesh.triangles.points_to_barycentric( + source.triangles[source_markers_tids], source_markers_vertices + ) source_landmarks = (source_markers_tids, source_markers_barys) else: source_landmarks = landmarks_vertex_indices[:, 0] @@ -263,12 +257,21 @@ def test_nricp(self): ] # Amberg et. 
al 2007 records_amberg_no_ldm = g.trimesh.registration.nricp_amberg( - source, target, distance_threshold=0.05, - steps=steps_amberg, return_records=True) + source, + target, + distance_threshold=0.05, + steps=steps_amberg, + return_records=True, + ) records_amberg_ldm = g.trimesh.registration.nricp_amberg( - source, target, source_landmarks=source_landmarks, - target_positions=target_markers_vertices, steps=steps_amberg, - return_records=True, distance_threshold=0.05) + source, + target, + source_landmarks=source_landmarks, + target_positions=target_markers_vertices, + steps=steps_amberg, + return_records=True, + distance_threshold=0.05, + ) try: g.trimesh.registration.nricp_amberg(source, target) except KeyError: @@ -276,29 +279,38 @@ def test_nricp(self): # Sumner and Popovic 2004 records_sumner_no_ldm = g.trimesh.registration.nricp_sumner( - source, target, distance_threshold=0.05, - steps=steps_sumner, return_records=True) + source, + target, + distance_threshold=0.05, + steps=steps_sumner, + return_records=True, + ) records_sumner_ldm = g.trimesh.registration.nricp_sumner( - source, target, source_landmarks=source_landmarks, - target_positions=target_markers_vertices, steps=steps_sumner, - return_records=True, distance_threshold=0.05) + source, + target, + source_landmarks=source_landmarks, + target_positions=target_markers_vertices, + steps=steps_sumner, + return_records=True, + distance_threshold=0.05, + ) try: g.trimesh.registration.nricp_sumner(source, target) except KeyError: raise AssertionError() # related to #1724 - d_amberg_no_ldm = \ - g.trimesh.proximity.closest_point( - target, records_amberg_no_ldm[-1])[1] - d_amberg_ldm = \ - g.trimesh.proximity.closest_point( - target, records_amberg_ldm[-1])[1] - d_sumner_no_ldm = \ - g.trimesh.proximity.closest_point( - target, records_sumner_no_ldm[-1])[1] - d_sumner_ldm = \ - g.trimesh.proximity.closest_point( - target, records_sumner_ldm[-1])[1] + d_amberg_no_ldm = g.trimesh.proximity.closest_point( + target, records_amberg_no_ldm[-1] + )[1] + d_amberg_ldm = g.trimesh.proximity.closest_point(target, records_amberg_ldm[-1])[ + 1 + ] + d_sumner_no_ldm = g.trimesh.proximity.closest_point( + target, records_sumner_no_ldm[-1] + )[1] + d_sumner_ldm = g.trimesh.proximity.closest_point(target, records_sumner_ldm[-1])[ + 1 + ] # Meshes should remain untouched assert g.np.allclose(source.vertices, source_copy.vertices) @@ -321,18 +333,26 @@ def test_nricp(self): assert d_sumner_ldm.max() > 0.05 assert d_sumner_ldm.mean() < 1e-3 - dl_amberg_no_ldm = \ - g.np.linalg.norm(records_amberg_no_ldm[-1][landmarks_vertex_indices[:, 0]] - - target_markers_vertices, axis=-1) - dl_amberg_ldm = \ - g.np.linalg.norm(records_amberg_ldm[-1][landmarks_vertex_indices[:, 0]] - - target_markers_vertices, axis=-1) - dl_sumner_no_ldm = \ - g.np.linalg.norm(records_sumner_no_ldm[-1][landmarks_vertex_indices[:, 0]] - - target_markers_vertices, axis=-1) - dl_sumner_ldm = \ - g.np.linalg.norm(records_sumner_ldm[-1][landmarks_vertex_indices[:, 0]] - - target_markers_vertices, axis=-1) + dl_amberg_no_ldm = g.np.linalg.norm( + records_amberg_no_ldm[-1][landmarks_vertex_indices[:, 0]] + - target_markers_vertices, + axis=-1, + ) + dl_amberg_ldm = g.np.linalg.norm( + records_amberg_ldm[-1][landmarks_vertex_indices[:, 0]] + - target_markers_vertices, + axis=-1, + ) + dl_sumner_no_ldm = g.np.linalg.norm( + records_sumner_no_ldm[-1][landmarks_vertex_indices[:, 0]] + - target_markers_vertices, + axis=-1, + ) + dl_sumner_ldm = g.np.linalg.norm( + 
records_sumner_ldm[-1][landmarks_vertex_indices[:, 0]] + - target_markers_vertices, + axis=-1, + ) assert dl_amberg_no_ldm.min() > 0.01 assert dl_amberg_no_ldm.max() > 0.1 @@ -354,16 +374,19 @@ def test_query_from_points(self): points = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] query_point = [[0, 0.5, 0]] qres = g.trimesh.registration._from_points( - points, query_point, return_normals=False) - assert qres['vertex_indices'][0] == 1 - assert g.np.all(qres['nearest'][0] == [0, 1, 0]) - assert 'normals' not in qres + points, query_point, return_normals=False + ) + assert qres["vertex_indices"][0] == 1 + assert g.np.all(qres["nearest"][0] == [0, 1, 0]) + assert "normals" not in qres qres = g.trimesh.registration._from_points( - points, query_point, return_normals=True) + points, query_point, return_normals=True + ) normal = g.np.ones(3) normal = normal / g.np.linalg.norm(normal) - assert g.np.allclose(qres['normals'][0], normal) or \ - g.np.allclose(qres['normals'][0], -normal) + assert g.np.allclose(qres["normals"][0], normal) or g.np.allclose( + qres["normals"][0], -normal + ) def test_query_from_mesh(self): points = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] @@ -371,22 +394,30 @@ def test_query_from_mesh(self): mesh = g.trimesh.Trimesh(vertices=points, faces=faces) query_point = [[0, 0.5, 0]] qres = g.trimesh.registration._from_mesh( - mesh, query_point, return_barycentric_coordinates=False, - return_normals=False, return_interpolated_normals=False) + mesh, + query_point, + return_barycentric_coordinates=False, + return_normals=False, + return_interpolated_normals=False, + ) - assert 'normals' not in qres - assert 'barycentric_coordinates' not in qres - assert 'interpolated_normals'not in qres + assert "normals" not in qres + assert "barycentric_coordinates" not in qres + assert "interpolated_normals" not in qres qres = g.trimesh.registration._from_mesh( - mesh, query_point, return_barycentric_coordinates=False, - return_normals=False, return_interpolated_normals=True) + mesh, + query_point, + return_barycentric_coordinates=False, + return_normals=False, + return_interpolated_normals=True, + ) - assert 'normals' not in qres - assert 'barycentric_coordinates' in qres - assert 'interpolated_normals' in qres + assert "normals" not in qres + assert "barycentric_coordinates" in qres + assert "interpolated_normals" in qres -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_remesh.py b/tests/test_remesh.py index 80e96bf9f..fe6cedda5 100644 --- a/tests/test_remesh.py +++ b/tests/test_remesh.py @@ -5,12 +5,12 @@ class SubDivideTest(g.unittest.TestCase): - def test_subdivide(self): meshes = [ - g.get_mesh('soup.stl'), # a soup of random triangles - g.get_mesh('cycloidal.ply'), # a mesh with multiple bodies - g.get_mesh('featuretype.STL')] # a mesh with a single body + g.get_mesh("soup.stl"), # a soup of random triangles + g.get_mesh("cycloidal.ply"), # a mesh with multiple bodies + g.get_mesh("featuretype.STL"), + ] # a mesh with a single body for m in meshes: sub = m.subdivide() @@ -18,12 +18,11 @@ def test_subdivide(self): assert len(sub.faces) > len(m.faces) max_edge = m.scale / 50 - sub, idx = m.subdivide_to_size( - max_edge=max_edge, return_index=True) + sub, idx = m.subdivide_to_size(max_edge=max_edge, return_index=True) assert g.np.allclose(m.area, sub.area) - edge_len = (g.np.diff( - sub.vertices[sub.edges_unique], - axis=1).reshape((-1, 3))**2).sum(axis=1)**.5 + edge_len = ( + g.np.diff(sub.vertices[sub.edges_unique], 
axis=1).reshape((-1, 3)) ** 2 + ).sum(axis=1) ** 0.5 assert (edge_len < max_edge).all() # should be the same order of magnitude size @@ -38,7 +37,8 @@ def test_subdivide(self): for vid in sub.faces.T: # find the barycentric coordinates bary = g.trimesh.triangles.points_to_barycentric( - m.triangles[idx], sub.vertices[vid]) + m.triangles[idx], sub.vertices[vid] + ) # if face indexes are correct they will be on the triangle # which means all barycentric coordinates are between 0.0-1.0 assert bary.max() < (1 + epsilon) @@ -46,20 +46,17 @@ def test_subdivide(self): # make sure it's not all zeros assert bary.ptp() > epsilon - v, f = g.trimesh.remesh.subdivide( - vertices=m.vertices, - faces=m.faces) + v, f = g.trimesh.remesh.subdivide(vertices=m.vertices, faces=m.faces) max_edge = m.scale / 50 v, f, idx = g.trimesh.remesh.subdivide_to_size( - vertices=m.vertices, - faces=m.faces, - max_edge=max_edge, - return_index=True) + vertices=m.vertices, faces=m.faces, max_edge=max_edge, return_index=True + ) ms = g.trimesh.Trimesh(vertices=v, faces=f) assert g.np.allclose(m.area, ms.area) - edge_len = (g.np.diff(ms.vertices[ms.edges_unique], - axis=1).reshape((-1, 3))**2).sum(axis=1)**.5 + edge_len = ( + g.np.diff(ms.vertices[ms.edges_unique], axis=1).reshape((-1, 3)) ** 2 + ).sum(axis=1) ** 0.5 assert (edge_len < max_edge).all() # should be one index per new face @@ -71,8 +68,7 @@ def test_subdivide(self): epsilon = 1e-3 for vid in f.T: # find the barycentric coordinates - bary = g.trimesh.triangles.points_to_barycentric( - m.triangles[idx], v[vid]) + bary = g.trimesh.triangles.points_to_barycentric(m.triangles[idx], v[vid]) # if face indexes are correct they will be on the triangle # which means all barycentric coordinates are between 0.0-1.0 assert bary.max() < (1 + epsilon) @@ -81,15 +77,13 @@ def test_subdivide(self): assert bary.ptp() > epsilon check = m.subdivide_to_size( - max_edge=m.extents.sum(), - max_iter=1, - return_index=False) + max_edge=m.extents.sum(), max_iter=1, return_index=False + ) assert check.faces.shape == m.faces.shape def test_sub(self): # try on some primitives - meshes = [g.trimesh.creation.box(), - g.trimesh.creation.icosphere()] + meshes = [g.trimesh.creation.box(), g.trimesh.creation.icosphere()] for m in meshes: s = m.subdivide(face_index=[0, len(m.faces) - 1]) @@ -111,8 +105,9 @@ def test_sub(self): def test_loop(self): meshes = [ - g.get_mesh('soup.stl'), # a soup of random triangles - g.get_mesh('featuretype.STL')] # a mesh with a single body + g.get_mesh("soup.stl"), # a soup of random triangles + g.get_mesh("featuretype.STL"), + ] # a mesh with a single body for m in meshes: sub = m.subdivide_loop(iterations=1) @@ -123,7 +118,7 @@ def test_loop(self): def test_loop_multibody(self): # a mesh with multiple bodies - mesh = g.get_mesh('cycloidal.ply') + mesh = g.get_mesh("cycloidal.ply") sub = mesh.subdivide_loop(iterations=2) # number of faces should increase @@ -164,9 +159,9 @@ def test_loop_correct(self): def test_loop_bound(self): def _get_boundary_vertices(mesh): boundary_groups = g.trimesh.grouping.group_rows( - mesh.edges_sorted, require_count=1) - return mesh.vertices[g.np.unique( - mesh.edges_sorted[boundary_groups])] + mesh.edges_sorted, require_count=1 + ) + return mesh.vertices[g.np.unique(mesh.edges_sorted[boundary_groups])] box = g.trimesh.creation.box() bottom_mask = g.np.zeros(len(box.faces), dtype=bool) @@ -180,13 +175,13 @@ def _get_boundary_vertices(mesh): sub_bottom_vrts = _get_boundary_vertices(sub) # y value of bottom boundary vertices should not be 
changed - assert g.np.isclose(bottom_vrts[:, 1].mean(), - sub_bottom_vrts[:, 1].mean(), - atol=1e-5) + assert g.np.isclose( + bottom_vrts[:, 1].mean(), sub_bottom_vrts[:, 1].mean(), atol=1e-5 + ) def test_uv(self): # get a mesh with texture - m = g.get_mesh('fuze.obj') + m = g.get_mesh("fuze.obj") # m.show() # get the shape of the initial mesh shape = m.vertices.shape @@ -241,40 +236,41 @@ def test_max_iter(self): assert r.is_watertight def test_idx_simple(self): - vertices = g.np.array( - [0, 0, 0, - 0, 1, 0, - 1, 1, 0, - 1, 0, 0]).reshape((-1, 3)) * 10 + vertices = g.np.array([0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0]).reshape((-1, 3)) * 10 faces = g.np.array( - [0, 1, 2, - 0, 2, 3, ]).reshape((-1, 3)) + [ + 0, + 1, + 2, + 0, + 2, + 3, + ] + ).reshape((-1, 3)) def test(fidx): v, f, idx = g.trimesh.remesh.subdivide( - vertices, - faces, - face_index=fidx, - return_index=True) + vertices, faces, face_index=fidx, return_index=True + ) eps = 1e-8 for fid in fidx: # get the new triangles, as indicated by the index tri_new = v[f[idx[fid]]] # this is the original triangle - original = vertices[faces[ - g.np.tile(fid, len(tri_new) * 3)]] + original = vertices[faces[g.np.tile(fid, len(tri_new) * 3)]] bary = g.trimesh.triangles.points_to_barycentric( - triangles=original, - points=tri_new.reshape((-1, 3))) + triangles=original, points=tri_new.reshape((-1, 3)) + ) assert (bary < 1 + eps).all() assert (bary > -eps).all() + test([0, 1]) test([1, 0]) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_render.py b/tests/test_render.py index 8cbac6db6..052fd0128 100644 --- a/tests/test_render.py +++ b/tests/test_render.py @@ -17,22 +17,17 @@ def test_args(self): # a viewer show() call) from trimesh import rendering - files = ['featuretype.STL', - 'fuze.obj', - 'points_bin.ply'] + files = ["featuretype.STL", "fuze.obj", "points_bin.ply"] for file_name in files: m = g.get_mesh(file_name) args = rendering.convert_to_vertexlist(m) if isinstance(m, g.trimesh.Trimesh): # try turning smoothing off and on - rendering.mesh_to_vertexlist( - m, smooth_threshold=0) - rendering.mesh_to_vertexlist( - m, smooth_threshold=g.np.inf) + rendering.mesh_to_vertexlist(m, smooth_threshold=0) + rendering.mesh_to_vertexlist(m, smooth_threshold=g.np.inf) - P30 = m.section(plane_normal=[0, 0, 1], - plane_origin=m.centroid) + P30 = m.section(plane_normal=[0, 0, 1], plane_origin=m.centroid) args = rendering.path_to_vertexlist(P30) args_auto = rendering.convert_to_vertexlist(P30) assert len(args) == 6 @@ -44,7 +39,7 @@ def test_args(self): assert len(args) == 6 assert len(args_auto) == len(args) - P21 = g.get_mesh('2D/wrench.dxf') + P21 = g.get_mesh("2D/wrench.dxf") args = rendering.path_to_vertexlist(P21) args_auto = rendering.convert_to_vertexlist(P21) assert len(args) == 6 @@ -68,6 +63,6 @@ def test_args(self): assert len(args_auto) == len(args) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_repr.py b/tests/test_repr.py index 8c9a59c91..629b311fd 100644 --- a/tests/test_repr.py +++ b/tests/test_repr.py @@ -5,40 +5,39 @@ class ReprTest(g.unittest.TestCase): - def test_repr(self): m = g.trimesh.creation.icosphere() r = str(m) - assert 'trimesh.Trimesh' in r - assert 'vertices' in r - assert 'faces' in r + assert "trimesh.Trimesh" in r + assert "vertices" in r + assert "faces" in r s = m.scene() assert isinstance(s, g.trimesh.Scene) r = str(s) - assert 'Scene' in r - assert 
'geometry' in r + assert "Scene" in r + assert "geometry" in r p = g.trimesh.PointCloud(m.vertices) r = str(p) - assert 'trimesh.PointCloud' in r - assert 'vertices' in r + assert "trimesh.PointCloud" in r + assert "vertices" in r p = g.trimesh.path.creation.rectangle([[0, 0], [1, 1]]) assert isinstance(p, g.trimesh.path.Path2D) r = str(p) - assert 'trimesh.Path2D' in r - assert 'entities' in r - assert 'vertices' in r + assert "trimesh.Path2D" in r + assert "entities" in r + assert "vertices" in r p = p.to_3D() assert isinstance(p, g.trimesh.path.Path3D) r = str(p) - assert 'trimesh.Path3D' in r - assert 'entities' in r - assert 'vertices' in r + assert "trimesh.Path3D" in r + assert "entities" in r + assert "vertices" in r -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_resolvers.py b/tests/test_resolvers.py index ba1255f47..fba82a946 100644 --- a/tests/test_resolvers.py +++ b/tests/test_resolvers.py @@ -5,50 +5,48 @@ class ResolverTest(g.unittest.TestCase): - def test_filepath_namespace(self): # check the namespaced method models = g.dir_models - subdir = '2D' + subdir = "2D" # create a resolver for the models diretory resolver = g.trimesh.resolvers.FilePathResolver(models) # should be able to get an asset - assert len(resolver.get('rabbit.obj')) > 0 + assert len(resolver.get("rabbit.obj")) > 0 # check a few file path keys - check = {'ballA.off', 'featuretype.STL'} + check = {"ballA.off", "featuretype.STL"} assert set(resolver.keys()).issuperset(check) # try a namespaced resolver ns = resolver.namespaced(subdir) assert not set(ns.keys()).issuperset(check) - assert set(ns.keys()).issuperset(['tray-easy1.dxf', - 'single_arc.dxf']) + assert set(ns.keys()).issuperset(["tray-easy1.dxf", "single_arc.dxf"]) def test_web_namespace(self): - base = 'https://example.com' - name = 'stuff' - target = 'hi.gltf' + base = "https://example.com" + name = "stuff" + target = "hi.gltf" # check with a trailing slash - a = g.trimesh.resolvers.WebResolver(base + '/') - b = g.trimesh.resolvers.WebResolver(base + '//') + a = g.trimesh.resolvers.WebResolver(base + "/") + b = g.trimesh.resolvers.WebResolver(base + "//") c = g.trimesh.resolvers.WebResolver(base) d = a.namespaced(name) # base URL's should always be the same with one trailing slash assert a.base_url == b.base_url assert b.base_url == c.base_url - assert c.base_url == base + '/' + assert c.base_url == base + "/" # check namespaced - assert d.base_url == base + '/' + name + '/' + assert d.base_url == base + "/" + name + "/" # should have correct slashes - truth = '/'.join([base, name, target]) + truth = "/".join([base, name, target]) - assert a.base_url + name + '/' + target == truth + assert a.base_url + name + "/" + target == truth assert d.base_url + target == truth def test_items(self): @@ -56,26 +54,26 @@ def test_items(self): archive = {} resolver = g.trimesh.resolvers.ZipResolver(archive) assert len(set(resolver.keys())) == 0 - resolver['hi'] = b'what' + resolver["hi"] = b"what" # should have one item - assert set(resolver.keys()) == {'hi'} + assert set(resolver.keys()) == {"hi"} # should have the right value - assert resolver['hi'] == b'what' + assert resolver["hi"] == b"what" # original archive should have been modified - assert set(archive.keys()) == {'hi'} + assert set(archive.keys()) == {"hi"} # add a subdirectory key - resolver['stuff/nah'] = b'sup' - assert set(archive.keys()) == {'hi', 'stuff/nah'} - assert set(resolver.keys()) == {'hi', 'stuff/nah'} + 
resolver["stuff/nah"] = b"sup" + assert set(archive.keys()) == {"hi", "stuff/nah"} + assert set(resolver.keys()) == {"hi", "stuff/nah"} # try namespacing - ns = resolver.namespaced('stuff') - assert ns['nah'] == b'sup' + ns = resolver.namespaced("stuff") + assert ns["nah"] == b"sup" g.log.debug(ns.keys()) - assert set(ns.keys()) == {'nah'} + assert set(ns.keys()) == {"nah"} -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_runlength.py b/tests/test_runlength.py index c63d7136c..84541cc4d 100644 --- a/tests/test_runlength.py +++ b/tests/test_runlength.py @@ -8,8 +8,13 @@ def random_rle_encoding(n=20, max_value=255, dtype=np.uint8): - return (np.random.uniform(size=(n,),) * - (max_value - 1) + 1).astype(np.uint8) + return ( + np.random.uniform( + size=(n,), + ) + * (max_value - 1) + + 1 + ).astype(np.uint8) def random_brle_encoding(n=20, max_value=255, dtype=np.uint8): @@ -61,32 +66,21 @@ def test_split_long_rle_lengths(self): def test_rle_length(self): np.testing.assert_equal( - rl.rle_length( - [0, 5, 1, 3, 0, 6]), rl.brle_length([5, 3, 6, 0])) + rl.rle_length([0, 5, 1, 3, 0, 6]), rl.brle_length([5, 3, 6, 0]) + ) def test_rle_to_brle(self): - np.testing.assert_equal( - rl.rle_to_brle([0, 5, 1, 3, 0, 10]), - [5, 3, 10, 0]) - np.testing.assert_equal( - rl.rle_to_brle([0, 5, 0, 3, 1, 10]), - [8, 10]) - np.testing.assert_equal( - rl.rle_to_brle([1, 5, 0, 3, 1, 10]), - [0, 5, 3, 10]) - np.testing.assert_equal( - rl.rle_to_brle([1, 5, 0, 2]), - [0, 5, 2, 0]) + np.testing.assert_equal(rl.rle_to_brle([0, 5, 1, 3, 0, 10]), [5, 3, 10, 0]) + np.testing.assert_equal(rl.rle_to_brle([0, 5, 0, 3, 1, 10]), [8, 10]) + np.testing.assert_equal(rl.rle_to_brle([1, 5, 0, 3, 1, 10]), [0, 5, 3, 10]) + np.testing.assert_equal(rl.rle_to_brle([1, 5, 0, 2]), [0, 5, 2, 0]) def test_rle_to_dense(self): - np.testing.assert_equal( - rl.rle_to_dense([5, 3, 4, 10]), [5] * 3 + [4] * 10) - np.testing.assert_equal( - rl.rle_to_dense([5, 300, 4, 100]), [5] * 300 + [4] * 100) + np.testing.assert_equal(rl.rle_to_dense([5, 3, 4, 10]), [5] * 3 + [4] * 10) + np.testing.assert_equal(rl.rle_to_dense([5, 300, 4, 100]), [5] * 300 + [4] * 100) def test_brle_encode_decode(self): - small = np.array( - [False] * 500 + [True] * 1000 + [False], dtype=bool) + small = np.array([False] * 500 + [True] * 1000 + [False], dtype=bool) rand = np.random.uniform(size=(10000,)) > 0.05 for original in [small, rand]: for dtype in [np.uint8, np.int64]: @@ -108,23 +102,19 @@ def test_brle_logical_not(self): notted = rl.brle_logical_not(original) dense_notted = rl.brle_to_dense(notted) dense_original = rl.brle_to_dense(original) - np.testing.assert_equal( - dense_notted, np.logical_not(dense_original)) + np.testing.assert_equal(dense_notted, np.logical_not(dense_original)) def test_merge_brle_lengths(self): - np.testing.assert_equal( - rl.merge_brle_lengths([10, 0, 10, 2]), [20, 2]) - np.testing.assert_equal( - rl.merge_brle_lengths([10, 0, 10, 2]), [20, 2]) - np.testing.assert_equal( - rl.merge_brle_lengths([10, 1, 10, 2]), [10, 1, 10, 2]) - np.testing.assert_equal( - rl.merge_brle_lengths([0, 10, 2, 3]), [0, 10, 2, 3]) + np.testing.assert_equal(rl.merge_brle_lengths([10, 0, 10, 2]), [20, 2]) + np.testing.assert_equal(rl.merge_brle_lengths([10, 0, 10, 2]), [20, 2]) + np.testing.assert_equal(rl.merge_brle_lengths([10, 1, 10, 2]), [10, 1, 10, 2]) + np.testing.assert_equal(rl.merge_brle_lengths([0, 10, 2, 3]), [0, 10, 2, 3]) def test_split_long_brle_lengths(self): 
np.testing.assert_equal( rl.split_long_brle_lengths([300, 600, 10], np.uint8), - [255, 0, 45, 255, 0, 255, 0, 90, 10]) + [255, 0, 45, 255, 0, 255, 0, 90, 10], + ) def test_brle_split_merge(self): # TODO: REMOVE RETURN @@ -142,9 +132,7 @@ def test_brle_to_rle(self): rle_data = rl.brle_to_rle(brle_data) rle_dense = rl.rle_to_dense(rle_data) np.testing.assert_equal(brle_dense, rle_dense) - np.testing.assert_equal( - rl.brle_to_rle([0, 5, 2, 0]), - [1, 5, 0, 2]) + np.testing.assert_equal(rl.brle_to_rle([0, 5, 2, 0]), [1, 5, 0, 2]) def test_dense_to_brle(self): # should be an (300, 200, 1000) array @@ -154,21 +142,25 @@ def test_dense_to_brle(self): if True: return # TODO: FIGURE OUT WHY THIS FAILS - np.testing.assert_equal(rl.dense_to_brle(x), - [300, 200, 1000, 0]) + np.testing.assert_equal(rl.dense_to_brle(x), [300, 200, 1000, 0]) np.testing.assert_equal( rl.dense_to_brle(x, np.uint8), - [255, 0, 45, 200, 255, 0, 255, 0, 255, 0, 235, 0]) + [255, 0, 45, 200, 255, 0, 255, 0, 255, 0, 235, 0], + ) def test_brle_to_dense(self): np.testing.assert_equal( rl.brle_to_dense(np.array([300, 200, 1000, 0], dtype=np.int64)), - [False] * 300 + [True] * 200 + [False] * 1000) + [False] * 300 + [True] * 200 + [False] * 1000, + ) np.testing.assert_equal( - rl.brle_to_dense(np.array( - [255, 0, 45, 200, 255, 0, 255, 0, 255, 0, 235, 0], - dtype=np.int64)), - [False] * 300 + [True] * 200 + [False] * 1000) + rl.brle_to_dense( + np.array( + [255, 0, 45, 200, 255, 0, 255, 0, 255, 0, 235, 0], dtype=np.int64 + ) + ), + [False] * 300 + [True] * 200 + [False] * 1000, + ) def test_brle_length(self): enc = random_brle_encoding(dtype=np.int64) @@ -193,13 +185,13 @@ def test_brle_mask(self): def test_rle_strip(self): for rle_data, expected_rle, expected_padding in ( - ([0, 5, 1, 3, 0, 10], [1, 3], (5, 10)), - ([1, 3, 0, 10], [1, 3], (0, 10)), - ([0, 5, 1, 3], [1, 3], (5, 0)), - ([0, 5, 1, 3, 0, 0], [1, 3], (5, 0)), - ([0, 5, 1, 3, 0, 10, 0, 5], [1, 3], (5, 15)), - ([0, 5, 0, 3, 1, 3, 0, 10, 0, 5], [1, 3], (8, 15)), - ([1, 3], [1, 3], (0, 0)), + ([0, 5, 1, 3, 0, 10], [1, 3], (5, 10)), + ([1, 3, 0, 10], [1, 3], (0, 10)), + ([0, 5, 1, 3], [1, 3], (5, 0)), + ([0, 5, 1, 3, 0, 0], [1, 3], (5, 0)), + ([0, 5, 1, 3, 0, 10, 0, 5], [1, 3], (5, 15)), + ([0, 5, 0, 3, 1, 3, 0, 10, 0, 5], [1, 3], (8, 15)), + ([1, 3], [1, 3], (0, 0)), ): actual_rle, actual_padding = rl.rle_strip(rle_data) np.testing.assert_equal(actual_rle, expected_rle) @@ -207,19 +199,19 @@ def test_rle_strip(self): def test_brle_strip(self): for brle_data, expected_brle, expected_padding in ( - ([5, 3, 10], [0, 3], [5, 10]), - ([0, 3, 10], [0, 3], [0, 10]), - ([5, 3], [0, 3], [5, 0]), - ([5, 3, 0, 0], [0, 3], (5, 0)), - ([5, 3, 10, 0, 5], [0, 3], (5, 15)), - ([5, 0, 3, 3, 10, 0, 5], [0, 3], (8, 15)), - ([0, 3], [0, 3], (0, 0)), + ([5, 3, 10], [0, 3], [5, 10]), + ([0, 3, 10], [0, 3], [0, 10]), + ([5, 3], [0, 3], [5, 0]), + ([5, 3, 0, 0], [0, 3], (5, 0)), + ([5, 3, 10, 0, 5], [0, 3], (5, 15)), + ([5, 0, 3, 3, 10, 0, 5], [0, 3], (8, 15)), + ([0, 3], [0, 3], (0, 0)), ): actual_brle, actual_padding = rl.brle_strip(brle_data) np.testing.assert_equal(actual_brle, expected_brle) np.testing.assert_equal(actual_padding, expected_padding) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_sample.py b/tests/test_sample.py index def25a4bc..5811ad5a0 100644 --- a/tests/test_sample.py +++ b/tests/test_sample.py @@ -5,9 +5,8 @@ class SampleTest(g.unittest.TestCase): - def test_sample(self): - m = 
g.get_mesh('featuretype.STL') + m = g.get_mesh("featuretype.STL") samples = m.sample(1000) @@ -28,31 +27,28 @@ def test_weights(self): weights[0] = 1.0 # sample with passed weights - points, fid = m.sample(count=100, - return_index=True, - face_weight=weights) + points, fid = m.sample(count=100, return_index=True, face_weight=weights) # all faces should be on single face assert (fid == 0).all() # oversample box to make sure weights aren't screwing # up ability to get every face when weighted by area - assert set(g.np.unique(m.sample( - 100000, return_index=True)[1])) == set(range(len(m.faces))) + assert set(g.np.unique(m.sample(100000, return_index=True)[1])) == set( + range(len(m.faces)) + ) def test_color(self): # check to see if sampling by color works # sample a textured mesh - m = g.get_mesh('fuze.obj') - points, index, color = g.trimesh.sample.sample_surface( - m, 100, sample_color=True) + m = g.get_mesh("fuze.obj") + points, index, color = g.trimesh.sample.sample_surface(m, 100, sample_color=True) assert len(points) == len(color) # sample a color mesh - m = g.get_mesh('machinist.XAML') - assert m.visual.kind == 'face' - points, index, color = g.trimesh.sample.sample_surface( - m, 100, sample_color=True) + m = g.get_mesh("machinist.XAML") + assert m.visual.kind == "face" + points, index, color = g.trimesh.sample.sample_surface(m, 100, sample_color=True) assert len(points) == len(color) def test_sample_volume(self): @@ -65,7 +61,7 @@ def test_sample_volume(self): def sample_volume_rectangular(self): # check to see if our OBB volume sampling runs - m = g.get_mesh('rabbit.obj') + m = g.get_mesh("rabbit.obj") obb = m.bounding_box_oriented # should use a box-specific volume sampling method @@ -73,7 +69,7 @@ def sample_volume_rectangular(self): assert samples.shape == (100, 3) def test_deterministic_sample(self): - m = g.get_mesh('featuretype.STL') + m = g.get_mesh("featuretype.STL") # Without seed passed should return non-deterministic results even_first, index_first = g.trimesh.sample.sample_surface(m, 10000) @@ -88,6 +84,6 @@ def test_deterministic_sample(self): assert (index_first == index_last).all() -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_scene.py b/tests/test_scene.py index 448c6017f..26e26e6f7 100644 --- a/tests/test_scene.py +++ b/tests/test_scene.py @@ -5,15 +5,14 @@ class SceneTests(g.unittest.TestCase): - def test_scene(self): - for file_name in ['cycloidal.ply', 'sphere.ply']: + for file_name in ["cycloidal.ply", "sphere.ply"]: mesh = g.get_mesh(file_name) if mesh.units is None: - mesh.units = 'in' + mesh.units = "in" scene_split = g.trimesh.scene.split_scene(mesh) - scene_split.convert_units('in') + scene_split.convert_units("in") scene_base = g.trimesh.Scene(mesh) # save hash of scene before concat @@ -23,7 +22,7 @@ def test_scene(self): assert scene_base.__hash__() == pre[1] # __hash__ is a long int which fails isinstance in Python 2 - assert type(scene_base.__hash__()).__name__ in ('int', 'long') + assert type(scene_base.__hash__()).__name__ in ("int", "long") # try out scene appending concat = scene_split + scene_base @@ -33,8 +32,9 @@ def test_scene(self): assert scene_base.__hash__() == pre[1] # make sure concatenate appended things, stuff - assert len(concat.geometry) == (len(scene_split.geometry) + - len(scene_base.geometry)) + assert len(concat.geometry) == ( + len(scene_split.geometry) + len(scene_base.geometry) + ) for s in [scene_split, scene_base]: pre = s.__hash__() @@ -64,8 
+64,9 @@ def test_scene(self): # should be autogenerated lights assert len(s.lights) > 0 # all lights should be lights - assert all(isinstance(L, g.trimesh.scene.lighting.Light) - for L in s.lights) + assert all( + isinstance(L, g.trimesh.scene.lighting.Light) for L in s.lights + ) # all lights should be added to scene graph assert all(L.name in s.graph for L in s.lights) @@ -74,16 +75,16 @@ def test_scene(self): r = s.dump() - gltf = s.export(file_type='gltf') + gltf = s.export(file_type="gltf") assert isinstance(gltf, dict) assert len(gltf) > 0 - assert len(gltf['model.gltf']) > 0 + assert len(gltf["model.gltf"]) > 0 - glb = s.export(file_type='glb') + glb = s.export(file_type="glb") assert len(glb) > 0 assert isinstance(glb, bytes) - for export_format in ['dict', 'dict64']: + for export_format in ["dict", "dict64"]: # try exporting the scene as a dict # then make sure json can serialize it e = g.json.dumps(s.export(file_type=export_format)) @@ -91,9 +92,7 @@ def test_scene(self): r = g.trimesh.load(g.json.loads(e)) # make sure the extents are similar before and after - assert g.np.allclose( - g.np.prod(s.extents), - g.np.prod(r.extents)) + assert g.np.allclose(g.np.prod(s.extents), g.np.prod(r.extents)) # move the scene to origin s.rezero() @@ -106,7 +105,7 @@ def test_scene(self): def test_scaling(self): # Test the scaling of scenes including unit conversion. - scene = g.get_mesh('cycloidal.3DXML') + scene = g.get_mesh("cycloidal.3DXML") hash_val = scene.__hash__() extents = scene.bounding_box_oriented.primitive.extents.copy() @@ -120,9 +119,8 @@ def test_scaling(self): # the oriented bounding box should scale exactly # with the scaling factor assert g.np.allclose( - scaled.bounding_box_oriented.primitive.extents / - extents, - factor) + scaled.bounding_box_oriented.primitive.extents / extents, factor + ) # check bounding primitives assert scene.bounding_box.volume > 0.0 @@ -133,72 +131,58 @@ def test_scaling(self): assert scaled.__hash__() != hash_val # 3DXML comes in as mm - assert all(m.units == 'mm' - for m in scene.geometry.values()) - assert scene.units == 'mm' + assert all(m.units == "mm" for m in scene.geometry.values()) + assert scene.units == "mm" - converted = scene.convert_units('in') + converted = scene.convert_units("in") assert g.np.allclose( converted.bounding_box_oriented.primitive.extents / extents, 1.0 / 25.4, - atol=1e-3) + atol=1e-3, + ) # shouldn't have changed the original extents - assert g.np.allclose( - extents, - scene.bounding_box_oriented.primitive.extents) + assert g.np.allclose(extents, scene.bounding_box_oriented.primitive.extents) # original shouldn't have changed - assert converted.units == 'in' - assert all(m.units == 'in' for m in converted.geometry.values()) + assert converted.units == "in" + assert all(m.units == "in" for m in converted.geometry.values()) - assert scene.units == 'mm' + assert scene.units == "mm" # we shouldn't have modified the original scene assert scene.__hash__() == hash_val assert converted.__hash__() != hash_val def test_scaling_3D(self): - scene = g.get_mesh('cycloidal.3DXML') + scene = g.get_mesh("cycloidal.3DXML") extents = scene.extents.copy() factor = [0.2, 1.3, 3.3] scaled = scene.scaled(factor) - assert g.np.allclose( - scaled.extents / - extents, - factor) + assert g.np.allclose(scaled.extents / extents, factor) factor = [3.0, 3.0, 3.0] scaled = scene.scaled(factor) - assert g.np.allclose( - scaled.extents / - extents, - factor) + assert g.np.allclose(scaled.extents / extents, factor) def test_scaling_3D_mixed(self): # 
same as test_scaling_3D but input scene contains 2D and 3D geometry - scene = g.get_mesh('scenes.zip', mixed=True) + scene = g.get_mesh("scenes.zip", mixed=True) extents = scene.extents.copy() factor = [0.2, 1.3, 3.3] scaled = scene.scaled(factor) - assert g.np.allclose( - scaled.extents / - extents, - factor) + assert g.np.allclose(scaled.extents / extents, factor) factor = [3.0, 3.0, 3.0] scaled = scene.scaled(factor) - assert g.np.allclose( - scaled.extents / - extents, - factor) + assert g.np.allclose(scaled.extents / extents, factor) def test_add_geometry(self): # list-typed geometry should create multiple nodes, @@ -225,7 +209,7 @@ def test_delete(self): # check to make sure our geometry delete cleans up a = g.trimesh.creation.icosphere() b = g.trimesh.creation.icosphere().apply_translation([2, 0, 0]) - s = g.trimesh.Scene({'a': a, 'b': b}) + s = g.trimesh.Scene({"a": a, "b": b}) assert len(s.geometry) == 2 assert len(s.graph.nodes_geometry) == 2 @@ -233,16 +217,14 @@ def test_delete(self): [s.graph[n] for n in s.graph.nodes] # delete a geometry - s.delete_geometry('a') + s.delete_geometry("a") assert len(s.geometry) == 1 assert len(s.graph.nodes_geometry) == 1 # if we screwed up the delete this will crash [s.graph[n] for n in s.graph.nodes] def test_dupe(self): - m = g.get_mesh('tube.obj', - merge_norm=True, - merge_tex=True) + m = g.get_mesh("tube.obj", merge_norm=True, merge_tex=True) assert m.body_count == 1 @@ -258,7 +240,7 @@ def test_dupe(self): assert len(c.duplicate_nodes) == 1 assert len(c.duplicate_nodes[0]) == 1 - u = s.convert_units('in', guess=True) + u = s.convert_units("in", guess=True) assert len(u.graph.nodes_geometry) == 1 assert len(u.duplicate_nodes) == 1 assert len(u.duplicate_nodes[0]) == 1 @@ -283,7 +265,7 @@ def test_dedupe(self): assert len(d.graph.nodes_geometry) == 1 def test_3DXML(self): - s = g.get_mesh('rod.3DXML') + s = g.get_mesh("rod.3DXML") assert len(s.geometry) == 3 assert len(s.graph.nodes_geometry) == 29 @@ -293,9 +275,9 @@ def test_3DXML(self): # test cache dumping and survivability of bad # non-existent geometry specified in node_geometry - s.graph.update(dupe[0][0], geometry='GARBAGE') + s.graph.update(dupe[0][0], geometry="GARBAGE") # make sure geometry was updated - assert s.graph[dupe[0][0]][1] == 'GARBAGE' + assert s.graph[dupe[0][0]][1] == "GARBAGE" # get the regenerated duplicates dupe = s.duplicate_nodes assert len(dupe) == 3 @@ -303,14 +285,14 @@ def test_3DXML(self): assert sum(len(i) for i in dupe) == 28 def test_tri(self): - scene = g.get_mesh('cycloidal.3DXML') + scene = g.get_mesh("cycloidal.3DXML") # scene should have triangles assert g.trimesh.util.is_shape(scene.triangles, (-1, 3, 3)) assert len(scene.triangles_node) == len(scene.triangles) # node name of inserted 2D geometry - node = scene.add_geometry(g.get_mesh('2D/wrench.dxf')) + node = scene.add_geometry(g.get_mesh("2D/wrench.dxf")) # should be in the graph assert node in scene.graph.nodes # should have geometry defined @@ -320,17 +302,16 @@ def test_tri(self): assert node not in scene.triangles_node # every geometry node except the one 2D thing # we inserted should be in triangles_node - assert len(set(scene.triangles_node)) == len( - scene.graph.nodes_geometry) - 1 + assert len(set(scene.triangles_node)) == len(scene.graph.nodes_geometry) - 1 def test_empty(self): - m = g.get_mesh('bunny.ply') + m = g.get_mesh("bunny.ply") # not watertight so will result in empty scene s = g.trimesh.scene.split_scene(m) assert len(s.geometry) == 0 - s = s.convert_units('inches') + s = 
s.convert_units("inches") n = s.duplicate_nodes assert len(n) == 0 @@ -339,20 +320,18 @@ def test_zipped(self): # is returned as a single scene. # allow mixed 2D and 3D geometry - m = g.get_mesh('scenes.zip', mixed=True) + m = g.get_mesh("scenes.zip", mixed=True) assert len(m.geometry) >= 6 assert len(m.graph.nodes_geometry) >= 10 - assert any(isinstance(i, g.trimesh.path.Path2D) - for i in m.geometry.values()) - assert any(isinstance(i, g.trimesh.Trimesh) - for i in m.geometry.values()) + assert any(isinstance(i, g.trimesh.path.Path2D) for i in m.geometry.values()) + assert any(isinstance(i, g.trimesh.Trimesh) for i in m.geometry.values()) - m = g.get_mesh('scenes.zip', mixed=False) + m = g.get_mesh("scenes.zip", mixed=False) assert len(m.geometry) < 6 def test_doubling(self): - s = g.get_mesh('cycloidal.3DXML') + s = g.get_mesh("cycloidal.3DXML") # make sure we parked our car where we thought assert len(s.geometry) == 13 @@ -363,8 +342,7 @@ def test_doubling(self): # new scene should have twice as much geometry assert len(r.geometry) == (2 * len(s.geometry)) - assert g.np.allclose(s.extents, - r.extents) + assert g.np.allclose(s.extents, r.extents) # duplicate node groups should be twice as long set_ori = {len(i) * 2 for i in s.duplicate_nodes} @@ -395,52 +373,48 @@ def test_empty_scene(self): def test_transform(self): # check transforming scenes scene = g.trimesh.creation.box() - assert g.np.allclose(scene.bounds, [[-.5, -.5, -.5], [.5, .5, .5]]) + assert g.np.allclose(scene.bounds, [[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]]) scene.apply_translation([1, 0, 1]) - assert g.np.allclose(scene.bounds, [[.5, -.5, .5], [1.5, .5, 1.5]]) + assert g.np.allclose(scene.bounds, [[0.5, -0.5, 0.5], [1.5, 0.5, 1.5]]) def test_material_group(self): # check scene is correctly grouped by materials - s = g.get_mesh('box.obj', group_material=True) - assert set(s.geometry.keys()) == {'Material', 'SecondMaterial'} - assert len(s.geometry['Material'].faces) == 8 - assert len(s.geometry['SecondMaterial'].faces) == 4 + s = g.get_mesh("box.obj", group_material=True) + assert set(s.geometry.keys()) == {"Material", "SecondMaterial"} + assert len(s.geometry["Material"].faces) == 8 + assert len(s.geometry["SecondMaterial"].faces) == 4 # make sure our flag does something - s = g.get_mesh('box.obj', group_material=False) - assert set(s.geometry.keys()) != {'Material', 'SecondMaterial'} + s = g.get_mesh("box.obj", group_material=False) + assert set(s.geometry.keys()) != {"Material", "SecondMaterial"} def test_strip(self): - m = g.get_mesh('cycloidal.3DXML') - assert any(g.visual.kind is not None - for g in m.geometry.values()) + m = g.get_mesh("cycloidal.3DXML") + assert any(g.visual.kind is not None for g in m.geometry.values()) m.strip_visuals() - assert all(g.visual.kind is None - for g in m.geometry.values()) + assert all(g.visual.kind is None for g in m.geometry.values()) def test_export_concat(self): # Scenes exported in mesh formats should be # concatenating the meshes somewhere. - original = g.trimesh.creation.icosphere( - radius=0.123312) + original = g.trimesh.creation.icosphere(radius=0.123312) original_hash = original.identifier_hash scene = g.trimesh.Scene() scene.add_geometry(original) with g.TemporaryDirectory() as d: - for ext in ['stl', 'ply']: - file_name = g.os.path.join(d, 'mesh.' + ext) + for ext in ["stl", "ply"]: + file_name = g.os.path.join(d, "mesh." 
+ ext) scene.export(file_name) loaded = g.trimesh.load(file_name) - assert g.np.isclose(loaded.volume, - original.volume) + assert g.np.isclose(loaded.volume, original.volume) # nothing should have changed assert original.identifier_hash == original_hash def test_exact_bounds(self): - m = g.get_mesh('cycloidal.3DXML') + m = g.get_mesh("cycloidal.3DXML") assert isinstance(m, g.trimesh.Scene) dump = m.dump(concatenate=True) @@ -450,25 +424,31 @@ def test_exact_bounds(self): assert g.np.allclose(m.bounds, dump.bounds) def test_concatenate_mixed(self): - scene = g.trimesh.Scene([g.trimesh.creation.icosphere(), - g.trimesh.path.creation.rectangle([[0, 0], [1, 1]])]) + scene = g.trimesh.Scene( + [ + g.trimesh.creation.icosphere(), + g.trimesh.path.creation.rectangle([[0, 0], [1, 1]]), + ] + ) dump = scene.dump(concatenate=True) assert isinstance(dump, g.trimesh.Trimesh) def test_append_scenes(self): - scene_0 = g.trimesh.Scene(base_frame='not_world') - scene_1 = g.trimesh.Scene(base_frame='not_world') + scene_0 = g.trimesh.Scene(base_frame="not_world") + scene_1 = g.trimesh.Scene(base_frame="not_world") scene_sum = g.trimesh.scene.scene.append_scenes( - (scene_0, scene_1), common=['not_world'], base_frame='not_world') + (scene_0, scene_1), common=["not_world"], base_frame="not_world" + ) - assert scene_sum.graph.base_frame == 'not_world' + assert scene_sum.graph.base_frame == "not_world" def test_scene_concat(self): # check that primitives get upgraded to meshes - a = g.trimesh.Scene([g.trimesh.primitives.Sphere(center=[5, 5, 5]), - g.trimesh.primitives.Box()]) + a = g.trimesh.Scene( + [g.trimesh.primitives.Sphere(center=[5, 5, 5]), g.trimesh.primitives.Box()] + ) c = a.dump(concatenate=True) assert isinstance(c, g.trimesh.Trimesh) assert g.np.allclose(c.bounds, a.bounds) @@ -477,7 +457,7 @@ def test_scene_concat(self): assert len(c) == len(a.geometry) # scene 2D - scene_2D = g.trimesh.Scene(g.get_mesh('2D/250_cycloidal.DXF').split()) + scene_2D = g.trimesh.Scene(g.get_mesh("2D/250_cycloidal.DXF").split()) concat = scene_2D.dump(concatenate=True) assert isinstance(concat, g.trimesh.path.Path2D) @@ -487,8 +467,8 @@ def test_scene_concat(self): # all Path3D objects scene_3D = g.trimesh.Scene( - [i.to_3D() for i in - g.get_mesh('2D/250_cycloidal.DXF').split()]) + [i.to_3D() for i in g.get_mesh("2D/250_cycloidal.DXF").split()] + ) dump = scene_3D.dump(concatenate=False) assert len(dump) >= 5 @@ -508,6 +488,6 @@ def test_scene_concat(self): assert isinstance(concat, g.trimesh.path.Path3D) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_scenegraph.py b/tests/test_scenegraph.py index 6a35b4d37..56f900036 100644 --- a/tests/test_scenegraph.py +++ b/tests/test_scenegraph.py @@ -7,11 +7,10 @@ def random_chr(): - return chr(ord('a') + int(round(g.random() * 25))) + return chr(ord("a") + int(round(g.random() * 25))) class GraphTests(g.unittest.TestCase): - def test_forest(self): graph = EnforcedForest() for _i in range(5000): @@ -27,48 +26,40 @@ def test_cache(self): mod.append(scene.graph.__hash__()) assert mod[-1] != mod[-2] - assert not g.np.allclose( - scene.camera_transform, - g.np.eye(4)) + assert not g.np.allclose(scene.camera_transform, g.np.eye(4)) scene.camera_transform = g.np.eye(4) mod.append(scene.graph.__hash__()) assert mod[-1] != mod[-2] - assert g.np.allclose( - scene.camera_transform, - g.np.eye(4)) + assert g.np.allclose(scene.camera_transform, g.np.eye(4)) assert mod[-1] != mod[-2] def test_successors(self): 
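        # Editor's note (a hedged sketch, not one of the original
        # assertions): successors(n) should return every node at or
        # below n in the scene graph, including n itself, so for the
        # base frame it is the full node set. Roughly:
        #
        #   scene = g.trimesh.Scene(g.trimesh.creation.box())
        #   root = scene.graph.base_frame
        #   assert scene.graph.transforms.successors(root) == set(scene.graph.nodes)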
- s = g.get_mesh('CesiumMilkTruck.glb') + s = g.get_mesh("CesiumMilkTruck.glb") assert len(s.graph.nodes_geometry) == 5 # world should be root frame - assert (s.graph.transforms.successors( - s.graph.base_frame) == set(s.graph.nodes)) + assert s.graph.transforms.successors(s.graph.base_frame) == set(s.graph.nodes) for n in s.graph.nodes: # successors should always return subset of nodes succ = s.graph.transforms.successors(n) - assert succ.issubset( - s.graph.nodes) + assert succ.issubset(s.graph.nodes) # we self-include node in successors assert n in succ # test getting a subscene from successors - ss = s.subscene('3') + ss = s.subscene("3") assert len(ss.geometry) == 1 assert len(ss.graph.nodes_geometry) == 1 - assert isinstance(s.graph.to_networkx(), - g.nx.DiGraph) + assert isinstance(s.graph.to_networkx(), g.nx.DiGraph) def test_nodes(self): # get a scene graph - graph = g.get_mesh('cycloidal.3DXML').graph + graph = g.get_mesh("cycloidal.3DXML").graph # get any non-root node - node = next(iter(set(graph.nodes).difference( - [graph.base_frame]))) + node = next(iter(set(graph.nodes).difference([graph.base_frame]))) # remove that node graph.transforms.remove_node(node) # should have dumped the cache and removed the node @@ -121,17 +112,17 @@ def test_subscene(self): assert len(s.graph.transforms.node_data) == 9 assert len(s.graph.transforms.edge_data) == 8 - ss = s.subscene('3') + ss = s.subscene("3") - assert ss.graph.base_frame == '3' - assert set(ss.graph.nodes) == {'3', '4'} + assert ss.graph.base_frame == "3" + assert set(ss.graph.nodes) == {"3", "4"} assert len(ss.graph.transforms.node_data) == 2 assert len(ss.graph.transforms.edge_data) == 1 - assert list(ss.graph.transforms.edge_data.keys()) == [('3', '4')] + assert list(ss.graph.transforms.edge_data.keys()) == [("3", "4")] def test_scene_transform(self): # get a scene graph - scene = g.get_mesh('cycloidal.3DXML') + scene = g.get_mesh("cycloidal.3DXML") # copy the original bounds of the scene's convex hull b = scene.convex_hull.bounds.tolist() @@ -150,8 +141,7 @@ def test_scene_transform(self): scene.apply_transform(T) # the mesh and scene should have the same bounds - assert g.np.allclose(m.convex_hull.bounds, - scene.convex_hull.bounds) + assert g.np.allclose(m.convex_hull.bounds, scene.convex_hull.bounds) # should have moved from original position assert not g.np.allclose(m.convex_hull.bounds, b) @@ -161,29 +151,31 @@ def test_reverse(self): s = g.trimesh.scene.Scene() s.add_geometry( g.trimesh.creation.box(), - parent_node_name='world', - node_name='foo', - transform=tf.translation_matrix([0, 0, 1])) + parent_node_name="world", + node_name="foo", + transform=tf.translation_matrix([0, 0, 1]), + ) s.add_geometry( g.trimesh.creation.box(), - parent_node_name='foo', - node_name='foo2', - transform=tf.translation_matrix([0, 0, 1])) + parent_node_name="foo", + node_name="foo2", + transform=tf.translation_matrix([0, 0, 1]), + ) assert len(s.graph.transforms.edge_data) == 2 - a = s.graph.get(frame_from='world', frame_to='foo2') + a = s.graph.get(frame_from="world", frame_to="foo2") assert len(s.graph.transforms.edge_data) == 2 # try going backward - i = s.graph.get(frame_from='foo2', frame_to='world') + i = s.graph.get(frame_from="foo2", frame_to="world") # matrix should be inverted if you're going the other way assert g.np.allclose(a[0], g.np.linalg.inv(i[0])) # try getting foo2 with shorthand - b = s.graph.get(frame_to='foo2') - c = s.graph['foo2'] + b = s.graph.get(frame_to="foo2") + c = s.graph["foo2"] # matrix should be inverted 
if you're going the other way assert g.np.allclose(a[0], c[0]) assert g.np.allclose(b[0], c[0]) @@ -201,21 +193,20 @@ def test_shortest_path(self): tf = g.trimesh.transformations # start with creating a random tree edgelist = {} - tree = g.nx.random_tree( - n=1000, seed=0, create_using=g.nx.DiGraph) + tree = g.nx.random_tree(n=1000, seed=0, create_using=g.nx.DiGraph) edges = list(tree.edges) r_choices = g.random((len(edges), 2)) r_matrices = g.random_transforms(len(edges)) for e, r_choice, r_mat in zip(edges, r_choices, r_matrices): data = {} - if r_choice[0] > .5: + if r_choice[0] > 0.5: # if a matrix is omitted but an edge exists it is # the same as passing an identity matrix - data['matrix'] = r_mat - if r_choice[1] > .4: + data["matrix"] = r_mat + if r_choice[1] > 0.4: # a geometry is not required for a node - data['geometry'] = str(int(r_choice[1] * 1e8)) + data["geometry"] = str(int(r_choice[1] * 1e8)) edgelist[e] = data # now apply the random data to an EnforcedForest @@ -224,8 +215,7 @@ def test_shortest_path(self): forest.add_edge(*k, **v) # generate a lot of random queries - queries = g.np.random.choice( - list(forest.nodes), 10000).reshape((-1, 2)) + queries = g.np.random.choice(list(forest.nodes), 10000).reshape((-1, 2)) # filter out any self-queries as networkx doesn't handle them queries = queries[queries.ptp(axis=1) > 0] @@ -249,14 +239,13 @@ def test_shortest_path(self): # now try creating this as a full scenegraph sg = g.trimesh.scene.transforms.SceneGraph() - [sg.update(frame_from=k[0], - frame_to=k[1], **kwargs) - for k, kwargs in edgelist.items()] + [ + sg.update(frame_from=k[0], frame_to=k[1], **kwargs) + for k, kwargs in edgelist.items() + ] with g.Profiler() as P: - matgeom = [sg.get( - frame_from=q[0], - frame_to=q[1]) for q in queries] + matgeom = [sg.get(frame_from=q[0], frame_to=q[1]) for q in queries] g.log.debug(P.output_text()) # all of the matrices should be rigid transforms @@ -266,8 +255,7 @@ def test_scaling_order(self): s = g.trimesh.creation.box().scene() scaling = 1.0 / 3.0 c = s.scaled(scaling) - factor = (c.geometry['geometry_0'].vertices / - s.geometry['geometry_0'].vertices) + factor = c.geometry["geometry_0"].vertices / s.geometry["geometry_0"].vertices assert g.np.allclose(factor, scaling) # should be returning itself r = s.apply_translation([10.5, 10.5, 10.5]) @@ -276,7 +264,7 @@ def test_scaling_order(self): def test_translation_cache(self): # scene with non-geometry nodes - c = g.get_mesh('cycloidal.3DXML') + c = g.get_mesh("cycloidal.3DXML") s = c.scaled(1.0 / c.extents) # get the pre-translation bounds ori = s.bounds.copy() @@ -286,7 +274,7 @@ def test_translation_cache(self): def test_translation_origin(self): # check to see if we can translate to the origin - c = g.get_mesh('cycloidal.3DXML') + c = g.get_mesh("cycloidal.3DXML") c.apply_transform(g.trimesh.transformations.random_rotation_matrix()) s = c.scaled(1.0 / c.extents) # shouldn't be at the origin @@ -296,6 +284,6 @@ def test_translation_origin(self): assert g.np.allclose(s.bounds[0], 0) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_section.py b/tests/test_section.py index a75ea9508..2b8e91710 100644 --- a/tests/test_section.py +++ b/tests/test_section.py @@ -5,32 +5,30 @@ class SectionTest(g.unittest.TestCase): - def test_section(self): - mesh = g.get_mesh('featuretype.STL') + mesh = g.get_mesh("featuretype.STL") # this hits many edge cases - step = .125 + step = 0.125 # step through mesh - z_levels = 
g.np.arange(start=mesh.bounds[0][2], - stop=mesh.bounds[1][2] + 2 * step, - step=step) + z_levels = g.np.arange( + start=mesh.bounds[0][2], stop=mesh.bounds[1][2] + 2 * step, step=step + ) # randomly order Z so first level is probably not zero z_levels = g.np.random.permutation(z_levels) # rotate around so we're not just testing XY parallel planes for angle in [0.0, g.np.radians(1.0), g.np.radians(11.11232)]: # arbitrary test rotation axis - axis = g.trimesh.unitize([1.0, 2.0, .32343]) + axis = g.trimesh.unitize([1.0, 2.0, 0.32343]) # rotate plane around axis to test along non- parallel planes - base = g.trimesh.transformations.rotation_matrix(angle=angle, - direction=axis) + base = g.trimesh.transformations.rotation_matrix(angle=angle, direction=axis) # to return to parallel to XY plane base_inv = g.np.linalg.inv(base) # transform normal to be slightly rotated - plane_normal = g.trimesh.transform_points([[0, 0, 1.0]], - base, - translate=False)[0] + plane_normal = g.trimesh.transform_points( + [[0, 0, 1.0]], base, translate=False + )[0] # store Path2D and Path3D results sections = [None] * len(z_levels) @@ -39,11 +37,11 @@ def test_section(self): for index, z in enumerate(z_levels): # move origin into rotated frame plane_origin = [0, 0, z] - plane_origin = g.trimesh.transform_points( - [plane_origin], base)[0] + plane_origin = g.trimesh.transform_points([plane_origin], base)[0] - section = mesh.section(plane_origin=plane_origin, - plane_normal=plane_normal) + section = mesh.section( + plane_origin=plane_origin, plane_normal=plane_normal + ) if section is None: # section will return None if the plane doesn't @@ -61,7 +59,7 @@ def test_section(self): planar, to_3D = section.to_planar() assert planar.is_closed - assert (len(planar.polygons_full) > 0) + assert len(planar.polygons_full) > 0 assert len(planar.centroid) == 2 sections[index] = planar @@ -69,16 +67,17 @@ def test_section(self): # try getting the sections as Path2D through # the multiplane method - paths = mesh.section_multiplane(plane_origin=[0, 0, 0], - plane_normal=plane_normal, - heights=z_levels) + paths = mesh.section_multiplane( + plane_origin=[0, 0, 0], plane_normal=plane_normal, heights=z_levels + ) # call the multiplane method directly lines, faces, T = g.trimesh.intersections.mesh_multiplane( mesh=mesh, plane_origin=[0, 0, 0], plane_normal=plane_normal, - heights=z_levels) + heights=z_levels, + ) # make sure various methods return the same results for index in range(len(z_levels)): @@ -95,7 +94,7 @@ def test_section(self): # send Path2D back to 3D using the transform returned by # section - back_3D = paths[index].to_3D(paths[index].metadata['to_3D']) + back_3D = paths[index].to_3D(paths[index].metadata["to_3D"]) # move to parallel test plane back_3D.apply_transform(base_inv) @@ -104,12 +103,13 @@ def test_section(self): assert sections_3D[index].vertices[:, 2].ptp() < 1e-8 # make sure reconstructed 3D section is at right height - assert g.np.isclose(back_3D.vertices[:, 2].mean(), - sections_3D[index].vertices[:, 2].mean()) + assert g.np.isclose( + back_3D.vertices[:, 2].mean(), + sections_3D[index].vertices[:, 2].mean(), + ) # make sure reconstruction is at z of frame - assert g.np.isclose(back_3D.vertices[:, 2].mean(), - z_levels[index]) + assert g.np.isclose(back_3D.vertices[:, 2].mean(), z_levels[index]) def test_multi_index(self): # make sure returned face indexes on a section are correct @@ -122,21 +122,21 @@ def test_multi_index(self): # compute the multiple crosss sections with `section_multiplane` multi = 
mesh.section_multiplane(origin, normal, heights) # get the face indexes this should have hit by checking the normal - idx = set(g.np.nonzero(g.np.isclose( - g.np.dot(mesh.face_normals, normal), 0, atol=1e-4))[0]) + idx = set( + g.np.nonzero(g.np.isclose(g.np.dot(mesh.face_normals, normal), 0, atol=1e-4))[ + 0 + ] + ) # make sure all indexes are set correctly - assert all(set(m.metadata['face_index'] == idx) - for m in multi) + assert all(set(m.metadata["face_index"] == idx) for m in multi) class PlaneLine(g.unittest.TestCase): - def test_planes(self): count = 10 z = g.np.linspace(-1, 1, count) - plane_origins = g.np.column_stack(( - g.random((count, 2)), z)) + plane_origins = g.np.column_stack((g.random((count, 2)), z)) plane_normals = g.np.tile([0, 0, -1], (count, 1)) line_origins = g.np.tile([0, 0, 0], (count, 1)) @@ -146,13 +146,13 @@ def test_planes(self): plane_origins=plane_origins, plane_normals=plane_normals, line_origins=line_origins, - line_directions=line_directions) + line_directions=line_directions, + ) assert valid.all() assert (g.np.abs(i[:, 2] - z) < g.tol.merge).all() class SliceTest(g.unittest.TestCase): - def test_slice(self): mesh = g.trimesh.creation.box() @@ -162,8 +162,7 @@ def test_slice(self): plane_origin = mesh.bounds[1] - 0.05 plane_normal = mesh.bounds[1] - sliced = mesh.slice_plane(plane_origin=plane_origin, - plane_normal=plane_normal) + sliced = mesh.slice_plane(plane_origin=plane_origin, plane_normal=plane_normal) assert g.np.isclose(sliced.bounds[0], mesh.bounds[1] - 0.15).all() assert g.np.isclose(sliced.bounds[1], mesh.bounds[1]).all() @@ -174,22 +173,21 @@ def test_slice(self): plane_origin = mesh.bounds[1] - 0.05 plane_normal = g.np.array([0, 0, 1]) - sliced = mesh.slice_plane(plane_origin=plane_origin, - plane_normal=plane_normal) + sliced = mesh.slice_plane(plane_origin=plane_origin, plane_normal=plane_normal) assert g.np.isclose( - sliced.bounds[0], mesh.bounds[0] + g.np.array([0, 0, 0.95])).all() + sliced.bounds[0], mesh.bounds[0] + g.np.array([0, 0, 0.95]) + ).all() assert g.np.isclose(sliced.bounds[1], mesh.bounds[1]).all() assert len(sliced.faces) == 14 # non- watertight more complex mesh - bunny = g.get_mesh('bunny.ply') + bunny = g.get_mesh("bunny.ply") origin = bunny.bounds.mean(axis=0) normal = g.trimesh.unitize([1, 1, 2]) - sliced = bunny.slice_plane(plane_origin=origin, - plane_normal=normal) + sliced = bunny.slice_plane(plane_origin=origin, plane_normal=normal) assert len(sliced.faces) > 0 # check the projections manually @@ -202,22 +200,23 @@ def test_slice(self): plane_origins = [mesh.bounds[1] - 0.05, mesh.bounds[1] - 0.05] plane_normals = [g.np.array([0, 0, 1]), g.np.array([0, 1, 0])] - sliced = mesh.slice_plane(plane_origin=plane_origins, - plane_normal=plane_normals) + sliced = mesh.slice_plane(plane_origin=plane_origins, plane_normal=plane_normals) assert g.np.isclose( - sliced.bounds[0], mesh.bounds[0] + g.np.array([0, 0.95, 0.95])).all() + sliced.bounds[0], mesh.bounds[0] + g.np.array([0, 0.95, 0.95]) + ).all() assert g.np.isclose(sliced.bounds[1], mesh.bounds[1]).all() assert len(sliced.faces) == 11 # Try with more complicated mesh and make sure we get correct projections # and some faces - origins = [bunny.bounds.mean(axis=0), bunny.bounds.mean( - axis=0) + 0.01 * g.trimesh.unitize([1, 1, 2])] + origins = [ + bunny.bounds.mean(axis=0), + bunny.bounds.mean(axis=0) + 0.01 * g.trimesh.unitize([1, 1, 2]), + ] normals = [g.trimesh.unitize([1, 1, 2]), -g.trimesh.unitize([1, 1, 2])] - sliced = bunny.slice_plane(plane_origin=origins, - 
plane_normal=normals) + sliced = bunny.slice_plane(plane_origin=origins, plane_normal=normals) assert len(sliced.faces) > 0 for o, n in zip(origins, normals): @@ -228,25 +227,23 @@ def test_slice(self): # Test cap on more complicated watertight mesh to make sure the # resulting mesh is still watertight and slice is correct - featuretype = g.get_mesh('featuretype.STL') + featuretype = g.get_mesh("featuretype.STL") - origins = [featuretype.center_mass, featuretype.center_mass - + 0.01 * g.trimesh.unitize([1, 0, 2])] + origins = [ + featuretype.center_mass, + featuretype.center_mass + 0.01 * g.trimesh.unitize([1, 0, 2]), + ] normals = [g.trimesh.unitize([1, 1, 1]), g.trimesh.unitize([1, 2, 3])] def test_slice_onplane(self): - m = g.get_mesh('featuretype.STL') + m = g.get_mesh("featuretype.STL") # on a plane with a lot of coplanar triangles n = g.np.array([0, 0, 1.0]) o = g.np.array([0, 0, 1.0]) # slice the mesh in two pieces - a = m.slice_plane(plane_origin=o, - plane_normal=-n, - cap=True) - b = m.slice_plane(plane_origin=o, - plane_normal=n, - cap=True) + a = m.slice_plane(plane_origin=o, plane_normal=-n, cap=True) + b = m.slice_plane(plane_origin=o, plane_normal=n, cap=True) # both slices should be watertiight assert a.is_watertight @@ -255,16 +252,15 @@ def test_slice_onplane(self): assert g.np.isclose(m.volume, a.volume + b.volume) def test_slice_submesh(self): - bunny = g.get_mesh('bunny.ply') + bunny = g.get_mesh("bunny.ply") # Find the faces on the body. - neck_plane_origin = g.np.array( - [-0.0441905, 0.124347, 0.0235287]) - neck_plane_normal = g.np.array( - [0.35534835, -0.93424839, -0.03012456]) + neck_plane_origin = g.np.array([-0.0441905, 0.124347, 0.0235287]) + neck_plane_normal = g.np.array([0.35534835, -0.93424839, -0.03012456]) - dots = g.np.einsum('i,ij->j', neck_plane_normal, - (bunny.vertices - neck_plane_origin).T) + dots = g.np.einsum( + "i,ij->j", neck_plane_normal, (bunny.vertices - neck_plane_origin).T + ) signs = g.np.zeros(len(bunny.vertices), dtype=g.np.int8) signs[dots < -g.tol.merge] = 1 signs[dots > g.tol.merge] = -1 @@ -279,9 +275,11 @@ def test_slice_submesh(self): slicing_plane_origin = bunny.bounds.mean(axis=0) slicing_plane_normal = g.trimesh.unitize([1, 1, 2]) - sliced = bunny.slice_plane(plane_origin=slicing_plane_origin, - plane_normal=slicing_plane_normal, - face_index=body_face_index) + sliced = bunny.slice_plane( + plane_origin=slicing_plane_origin, + plane_normal=slicing_plane_normal, + face_index=body_face_index, + ) # Ideally we would assert that the triangles in `body_face_index` were # sliced if they are on in front of side of the slicing plane, and the @@ -294,25 +292,29 @@ def test_slice_submesh(self): def test_textured_mesh(self): # Create a plane mesh with UV == xy plane = g.trimesh.Trimesh( - vertices=g.np.array([[0., 0., 0.], [1., 0., 0.], [1., 1., 0.], [0., 1., 0.]]), - faces=g.np.array([[0, 1, 2], [2, 3, 0]])) + vertices=g.np.array( + [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]] + ), + faces=g.np.array([[0, 1, 2], [2, 3, 0]]), + ) plane.visual = g.trimesh.visual.TextureVisuals( - uv=plane.vertices[:, :2], material=g.trimesh.visual.material.empty_material()) + uv=plane.vertices[:, :2], material=g.trimesh.visual.material.empty_material() + ) # We cut the plane and check that the new UV match the new vertices - origin = g.np.array([0.5, 0.5, 0.]) + origin = g.np.array([0.5, 0.5, 0.0]) normal = g.trimesh.unitize([2, 1, 2]) sliced = plane.slice_plane(plane_origin=origin, plane_normal=normal) assert 
g.np.isclose(sliced.vertices[:, :2], sliced.visual.uv).all() # Test scenario when plane does not cut - origin = g.np.array([-1., -1., 0.]) + origin = g.np.array([-1.0, -1.0, 0.0]) normal = g.trimesh.unitize([1, 1, 2]) sliced = plane.slice_plane(plane_origin=origin, plane_normal=normal) assert g.np.isclose(sliced.vertices[:, :2], sliced.visual.uv).all() # Test cut with no new vertices - origin = g.np.array([0.5, 0.5, 0.]) + origin = g.np.array([0.5, 0.5, 0.0]) normal = g.trimesh.unitize([2, -2, 1]) sliced = plane.slice_plane(plane_origin=origin, plane_normal=normal) assert g.np.isclose(sliced.vertices[:, :2], sliced.visual.uv).all() @@ -324,16 +326,16 @@ def test_cap_coplanar(self): if not g.has_earcut: return - s = g.get_mesh('cap.zip') + s = g.get_mesh("cap.zip") mesh = next(iter(s.geometry.values())) plane_origin = [0, 0, 5000] plane_normal = [0, 0, -1] assert mesh.is_watertight - newmesh = mesh.slice_plane(plane_origin=plane_origin, - plane_normal=plane_normal, - cap=True) + newmesh = mesh.slice_plane( + plane_origin=plane_origin, plane_normal=plane_normal, cap=True + ) assert newmesh.is_watertight def test_slice_exit(self): @@ -367,19 +369,21 @@ def test_cap_nohit(self): return from trimesh.transformations import random_rotation_matrix + for _i in range(100): box1 = g.trimesh.primitives.Box( - extents=[10, 20, 30], - transform=random_rotation_matrix()) + extents=[10, 20, 30], transform=random_rotation_matrix() + ) box2 = g.trimesh.primitives.Box( - extents=[10, 20, 30], - transform=random_rotation_matrix()) + extents=[10, 20, 30], transform=random_rotation_matrix() + ) result = g.trimesh.intersections.slice_mesh_plane( mesh=box2, plane_normal=-box1.face_normals, plane_origin=box1.vertices[box1.faces[:, 1]], - cap=True) + cap=True, + ) assert len(result.faces) > 0 def test_cap(self): @@ -394,14 +398,12 @@ def test_cap(self): plane_normal = mesh.bounds[1] # Same test with capping (should only add three more triangles) - sliced_capped = mesh.slice_plane(plane_origin=plane_origin, - plane_normal=plane_normal, - cap=True) + sliced_capped = mesh.slice_plane( + plane_origin=plane_origin, plane_normal=plane_normal, cap=True + ) assert len(sliced_capped.faces) == 8 - assert g.np.isclose( - sliced_capped.bounds[0], - mesh.bounds[1] - 0.15).all() + assert g.np.isclose(sliced_capped.bounds[0], mesh.bounds[1] - 0.15).all() assert g.np.isclose(sliced_capped.bounds[1], mesh.bounds[1]).all() assert sliced_capped.is_watertight @@ -411,13 +413,14 @@ def test_cap(self): plane_normal = g.np.array([0, 0, 1]) # Same test with capping (should only add six triangles) - sliced_capped = mesh.slice_plane(plane_origin=plane_origin, - plane_normal=plane_normal, - cap=True) + sliced_capped = mesh.slice_plane( + plane_origin=plane_origin, plane_normal=plane_normal, cap=True + ) assert len(sliced_capped.faces) == 20 assert g.np.isclose( - sliced_capped.bounds[0], mesh.bounds[0] + g.np.array([0, 0, 0.95])).all() + sliced_capped.bounds[0], mesh.bounds[0] + g.np.array([0, 0, 0.95]) + ).all() assert g.np.isclose(sliced_capped.bounds[1], mesh.bounds[1]).all() assert sliced_capped.is_watertight @@ -428,26 +431,28 @@ def test_cap(self): # Test cap for multiple slices to check watertightness # (should add nine triangles) - sliced_capped = mesh.slice_plane(plane_origin=plane_origins, - plane_normal=plane_normals, - cap=True) + sliced_capped = mesh.slice_plane( + plane_origin=plane_origins, plane_normal=plane_normals, cap=True + ) assert g.np.isclose( - sliced_capped.bounds[0], mesh.bounds[0] + g.np.array([0, 0.95, 
0.95])).all() + sliced_capped.bounds[0], mesh.bounds[0] + g.np.array([0, 0.95, 0.95]) + ).all() assert g.np.isclose(sliced_capped.bounds[1], mesh.bounds[1]).all() assert sliced_capped.is_watertight # Test cap on more complicated watertight mesh to make sure the # resulting mesh is still watertight and slice is correct - featuretype = g.get_mesh('featuretype.STL') + featuretype = g.get_mesh("featuretype.STL") - origins = [featuretype.center_mass, featuretype.center_mass - + 0.01 * g.trimesh.unitize([1, 0, 2])] + origins = [ + featuretype.center_mass, + featuretype.center_mass + 0.01 * g.trimesh.unitize([1, 0, 2]), + ] normals = [g.trimesh.unitize([1, 0, 1]), g.trimesh.unitize([1, 0, 0])] sliced_capped = featuretype.slice_plane( - plane_origin=origins, - plane_normal=normals, - cap=True) + plane_origin=origins, plane_normal=normals, cap=True + ) assert len(sliced_capped.faces) > 0 assert sliced_capped.is_winding_consistent @@ -458,6 +463,6 @@ def test_cap(self): assert g.np.isclose(dot.min(), 0.0) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_segments.py b/tests/test_segments.py index 3a1275599..45daa87b1 100644 --- a/tests/test_segments.py +++ b/tests/test_segments.py @@ -5,7 +5,6 @@ class SegmentsTest(g.unittest.TestCase): - def test_param(self): from trimesh.path import segments @@ -34,64 +33,52 @@ def test_param(self): def test_colinear(self): from trimesh.path import segments - seg = g.np.array([[[0., 0., 0.], - [0., 1., 0.]], - - [[0., 1., 0.], - [0., 0., 0.]], - - [[0., 0., 0.], - [0., 2., 0.]], - - [[0., 0.5, 0.], - [0., 0.75, 0.]], - - [[0., 2.1, 0.], - [0., 2.2, 0.]], - - [[0., 2.0, 0.], - [0., 2.3, 0.]], - - [[0., 0., 0.], - [1., 1., 0.]]]) + seg = g.np.array( + [ + [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]], + [[0.0, 1.0, 0.0], [0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0], [0.0, 2.0, 0.0]], + [[0.0, 0.5, 0.0], [0.0, 0.75, 0.0]], + [[0.0, 2.1, 0.0], [0.0, 2.2, 0.0]], + [[0.0, 2.0, 0.0], [0.0, 2.3, 0.0]], + [[0.0, 0.0, 0.0], [1.0, 1.0, 0.0]], + ] + ) # get the unit direction vector for the segments - unit = g.trimesh.unitize(g.np.diff( - seg, axis=1).reshape((-1, 3))) + unit = g.trimesh.unitize(g.np.diff(seg, axis=1).reshape((-1, 3))) L = segments.colinear_pairs(seg[:3]) assert len(L) == 3 # make sure all pairs are really colinear dots = [g.np.dot(*row) for row in unit[L]] - assert (g.np.isclose(dots, 1.0) | - g.np.isclose(dots, -1)).all() + assert (g.np.isclose(dots, 1.0) | g.np.isclose(dots, -1)).all() L = segments.colinear_pairs(seg) dots = [g.np.dot(*row) for row in unit[L]] - assert (g.np.isclose(dots, 1.0) | - g.np.isclose(dots, -1)).all() + assert (g.np.isclose(dots, 1.0) | g.np.isclose(dots, -1)).all() epsilon = 1e-6 # length should only include vectors with one # vertex closer than epsilon n = segments.colinear_pairs(seg, length=epsilon) dots = [g.np.dot(*row) for row in unit[L]] - assert (g.np.isclose(dots, 1.0) | - g.np.isclose(dots, -1)).all() + assert (g.np.isclose(dots, 1.0) | g.np.isclose(dots, -1)).all() for pair in n: val = seg[pair] close = g.np.append( - (val[0] - val[1]).ptp(axis=1), - (val[0] - val[1][::-1]).ptp(axis=1)).min() + (val[0] - val[1]).ptp(axis=1), (val[0] - val[1][::-1]).ptp(axis=1) + ).min() assert close < epsilon def test_extrude(self): from trimesh.path.segments import extrude + # hand tuned segments - manual = g.np.column_stack(( - g.np.zeros((3, 2)), - [[0, 1], [0, -1], [1, 2]])).reshape((-1, 2, 2)) + manual = g.np.column_stack( + (g.np.zeros((3, 2)), [[0, 1], [0, -1], [1, 2]]) 
+ ).reshape((-1, 2, 2)) for seg in [manual, g.random((10, 2, 2))]: height = 1.22 @@ -105,6 +92,7 @@ def test_extrude(self): def test_resample(self): from trimesh.path.segments import length, resample + # create some random segments seg = g.random((1000, 2, 3)) # set a maximum segment length @@ -127,6 +115,7 @@ def test_resample(self): def test_svg(self): from trimesh.path.segments import to_svg + # create some 2D segments seg = g.random((1000, 2, 2)) # make one of the segments a duplicate @@ -134,26 +123,26 @@ def test_svg(self): # create an SVG path string svg = to_svg(seg, merge=False) # should be one move and one line per segment - assert svg.count('M') == len(seg) - assert svg.count('L') == len(seg) + assert svg.count("M") == len(seg) + assert svg.count("L") == len(seg) # try with a transform svg = to_svg(seg, matrix=g.np.eye(3), merge=False) - assert svg.count('M') == len(seg) - assert svg.count('L') == len(seg) + assert svg.count("M") == len(seg) + assert svg.count("L") == len(seg) # remove the duplicate segments svg = to_svg(seg, matrix=g.np.eye(3), merge=True) - assert svg.count('M') < len(seg) - assert svg.count('L') < len(seg) + assert svg.count("M") < len(seg) + assert svg.count("L") < len(seg) try: to_svg(g.random((100, 2, 3))) except ValueError: return - raise ValueError('to_svg accepted wrong input!') + raise ValueError("to_svg accepted wrong input!") -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_simplify.py b/tests/test_simplify.py index 50aaf26fc..f9eacd69c 100644 --- a/tests/test_simplify.py +++ b/tests/test_simplify.py @@ -5,9 +5,7 @@ class SimplifyTest(g.unittest.TestCase): - def polygon_simplify(self, polygon, arc_count): - # loading the polygon will make all arcs discrete path = g.trimesh.load_path(polygon) @@ -22,18 +20,17 @@ def polygon_simplify(self, polygon, arc_count): for _garbage in range(2): # the simplified version shouldn't have lost area - assert g.np.allclose(path.area, - simplified.area, - rtol=1e-2) + assert g.np.allclose(path.area, simplified.area, rtol=1e-2) # see if we fit as many arcs as existed in the original drawing - new_count = sum(int(type(i).__name__ == 'Arc') - for i in simplified.entities) + new_count = sum(int(type(i).__name__ == "Arc") for i in simplified.entities) if new_count != arc_count: g.log.debug(new_count, arc_count) if arc_count > 1: - g.log.info(f'originally were {arc_count} arcs, simplify found {new_count}') + g.log.info( + f"originally were {arc_count} arcs, simplify found {new_count}" + ) assert new_count > 0 assert new_count <= arc_count @@ -44,46 +41,39 @@ def polygon_simplify(self, polygon, arc_count): assert path.__hash__() == hash_pre def test_simplify(self): - - for file_name in ['2D/cycloidal.dxf', - '2D/125_cycloidal.DXF', - '2D/spline_1.dxf']: - + for file_name in ["2D/cycloidal.dxf", "2D/125_cycloidal.DXF", "2D/spline_1.dxf"]: original = g.get_mesh(file_name) split = original.split() - assert g.np.allclose(original.area, - sum(i.area for i in split)) + assert g.np.allclose(original.area, sum(i.area for i in split)) for drawing in split: # we split so there should be only one polygon per drawing now assert len(drawing.polygons_full) == 1 polygon = drawing.polygons_full[0] - arc_count = sum(int(type(i).__name__ == 'Arc') - for i in drawing.entities) + arc_count = sum(int(type(i).__name__ == "Arc") for i in drawing.entities) - self.polygon_simplify(polygon=polygon, - arc_count=arc_count) + self.polygon_simplify(polygon=polygon, 
arc_count=arc_count) def test_spline(self): """ Test basic spline simplification of Path2D objects """ - scene = g.get_mesh('cycloidal.3DXML') - m = scene.geometry['disc_cam_A'] + scene = g.get_mesh("cycloidal.3DXML") + m = scene.geometry["disc_cam_A"] path_3D = m.outline(m.facets[m.facets_area.argmax()]) path_2D, to_3D = path_3D.to_planar() - simple = g.trimesh.path.simplify.simplify_spline(path_2D, - smooth=.01, - verbose=True) - assert g.np.isclose(path_2D.area, simple.area, rtol=.01) + simple = g.trimesh.path.simplify.simplify_spline( + path_2D, smooth=0.01, verbose=True + ) + assert g.np.isclose(path_2D.area, simple.area, rtol=0.01) # check the kwargs simple = path_2D.simplify_spline(smooth=0.01) - assert g.np.isclose(path_2D.area, simple.area, rtol=.01) + assert g.np.isclose(path_2D.area, simple.area, rtol=0.01) def test_merge_colinear(self): num = 100 @@ -91,12 +81,12 @@ def test_merge_colinear(self): direction = g.trimesh.unitize([1, g.np.random.rand()]) points = direction * dists.reshape((-1, 1)) merged = g.trimesh.path.simplify.merge_colinear(points, scale=1000) - g.log.debug('direction:', direction) - g.log.debug('points:', points.shape) - g.log.debug('merged:', merged.shape) + g.log.debug("direction:", direction) + g.log.debug("points:", points.shape) + g.log.debug("merged:", merged.shape) assert merged.shape[0] == 2 -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_smooth.py b/tests/test_smooth.py index c1b83e88e..6b0144e5a 100644 --- a/tests/test_smooth.py +++ b/tests/test_smooth.py @@ -5,9 +5,8 @@ class SmoothTest(g.unittest.TestCase): - def test_smooth(self): - m = g.get_mesh('chair_model.obj', force='mesh') + m = g.get_mesh("chair_model.obj", force="mesh") s = m.smoothed() ori = g.np.hstack((m.visual.uv, m.vertices)) @@ -20,6 +19,6 @@ def test_smooth(self): # g.texture_equal(m, s) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_smoothing.py b/tests/test_smoothing.py index 7c1285ba9..b9efc4ab3 100644 --- a/tests/test_smoothing.py +++ b/tests/test_smoothing.py @@ -10,8 +10,7 @@ def test_smooth(self): Load a collada scene with pycollada. 
""" m = g.trimesh.creation.icosahedron() - m.vertices, m.faces = g.trimesh.remesh.subdivide_to_size( - m.vertices, m.faces, 0.1) + m.vertices, m.faces = g.trimesh.remesh.subdivide_to_size(m.vertices, m.faces, 0.1) s = m.copy() q = m.copy() @@ -23,8 +22,7 @@ def test_smooth(self): assert m.is_volume # Equal Weights - lap = g.trimesh.smoothing.laplacian_calculation( - mesh=m, equal_weight=True) + lap = g.trimesh.smoothing.laplacian_calculation(mesh=m, equal_weight=True) g.trimesh.smoothing.filter_laplacian(s, 0.5, 10, False, True, lap) g.trimesh.smoothing.filter_laplacian(q, 0.5, 10, True, True, lap) @@ -79,6 +77,6 @@ def test_smooth(self): assert g.np.isclose(v.volume, m.volume, rtol=0.1) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_splines.py b/tests/test_splines.py index 5b58ed418..204caa35c 100644 --- a/tests/test_splines.py +++ b/tests/test_splines.py @@ -5,10 +5,9 @@ class SplineTests(g.unittest.TestCase): - def test_bezier_example(self): # path with a bunch of bezier spline - p = g.get_mesh('2D/MIL.svg') + p = g.get_mesh("2D/MIL.svg") # should have one body assert len(p.polygons_full) == 1 @@ -16,13 +15,14 @@ def test_bezier_example(self): truth = 12696.6 # perimeter should be about right if it was discretized properly - if not g.np.isclose(p.polygons_full[0].exterior.length, - truth, - atol=100.0): - raise ValueError('perimeter wrong: {} != {}'.format( - truth, p.polygons_full[0].exterior.length)) + if not g.np.isclose(p.polygons_full[0].exterior.length, truth, atol=100.0): + raise ValueError( + "perimeter wrong: {} != {}".format( + truth, p.polygons_full[0].exterior.length + ) + ) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_stl.py b/tests/test_stl.py index 1e9e1a81e..2cc48187a 100644 --- a/tests/test_stl.py +++ b/tests/test_stl.py @@ -8,97 +8,92 @@ class STLTests(g.unittest.TestCase): - def test_header(self): - m = g.get_mesh('featuretype.STL') + m = g.get_mesh("featuretype.STL") # make sure we have the right mesh assert g.np.isclose(m.volume, 11.627733431196749, atol=1e-6) # should have saved the header from the STL file - assert len(m.metadata['header']) > 0 + assert len(m.metadata["header"]) > 0 # should have saved the STL face attributes - assert len(m.face_attributes['stl']) == len(m.faces) + assert len(m.face_attributes["stl"]) == len(m.faces) assert len(m.faces) > 1000 # add a non-correlated face attribute, which should be ignored - m.face_attributes['nah'] = 10 + m.face_attributes["nah"] = 10 # remove all faces except three random ones m.update_faces([1, 3, 4]) # faces and face attributes should be untouched assert len(m.faces) == 3 - assert len(m.face_attributes['stl']) == 3 + assert len(m.face_attributes["stl"]) == 3 # attribute that wasn't len(m.faces) shouldn't have been touched - assert m.face_attributes['nah'] == 10 + assert m.face_attributes["nah"] == 10 def test_attrib(self): - m = g.get_mesh('featuretype.STL') + m = g.get_mesh("featuretype.STL") len_vertices = len(m.vertices) # assign some random vertex attributes random = g.random(len(m.vertices)) - m.vertex_attributes['random'] = random - m.vertex_attributes['nah'] = 20 + m.vertex_attributes["random"] = random + m.vertex_attributes["nah"] = 20 # should have saved the STL face attributes - assert len(m.face_attributes['stl']) == len(m.faces) + assert len(m.face_attributes["stl"]) == len(m.faces) assert len(m.faces) > 1000 # add a non-correlated face 
attribute, which should be ignored - m.face_attributes['nah'] = 10 + m.face_attributes["nah"] = 10 # remove all faces except three random ones m.update_faces([1, 3, 4]) # faces and face attributes should be untouched assert len(m.faces) == 3 - assert len(m.face_attributes['stl']) == 3 + assert len(m.face_attributes["stl"]) == 3 # attribute that wasn't len(m.faces) shouldn't have been touched - assert m.face_attributes['nah'] == 10 + assert m.face_attributes["nah"] == 10 # check all vertices are still in place - assert m.vertex_attributes['nah'] == 20 - assert g.np.allclose(random, m.vertex_attributes['random']) + assert m.vertex_attributes["nah"] == 20 + assert g.np.allclose(random, m.vertex_attributes["random"]) assert len(m.vertices) == len_vertices # remove all vertices except four v_mask = [0, 1, 2, 3] m.update_vertices(v_mask) # make sure things are still correct - assert m.vertex_attributes['nah'] == 20 - assert g.np.allclose(m.vertex_attributes['random'], random[v_mask]) + assert m.vertex_attributes["nah"] == 20 + assert g.np.allclose(m.vertex_attributes["random"], random[v_mask]) assert len(m.vertices) == len(v_mask) def test_ascii_multibody(self): - s = g.get_mesh('multibody.stl') + s = g.get_mesh("multibody.stl") assert len(s.geometry) == 2 - assert set(s.geometry.keys()) == {'bodya', 'bodyb'} + assert set(s.geometry.keys()) == {"bodya", "bodyb"} def test_empty(self): # demo files to check - empty_files = ['stl_empty_ascii.stl', - 'stl_empty_bin.stl'] + empty_files = ["stl_empty_ascii.stl", "stl_empty_bin.stl"] for empty_file in empty_files: - e = g.get_mesh('emptyIO/' + empty_file) + e = g.get_mesh("emptyIO/" + empty_file) # result should be an empty scene without vertices assert isinstance(e, g.trimesh.Scene) - assert not hasattr(e, 'vertices') + assert not hasattr(e, "vertices") # create export try: - e.export(file_type='ply') + e.export(file_type="ply") except BaseException: return raise ValueError("Shouldn't export empty scenes!") def test_vertex_order(self): - for stl in ['featuretype.STL', - 'ADIS16480.STL', - '1002_tray_bottom.STL']: + for stl in ["featuretype.STL", "ADIS16480.STL", "1002_tray_bottom.STL"]: # removing doubles should respect the vertex order m_raw = g.get_mesh(stl, process=False) - m_proc = g.get_mesh(stl, process=True, - keep_vertex_order=True) + m_proc = g.get_mesh(stl, process=True, keep_vertex_order=True) verts_raw = g.trimesh.grouping.hashable_rows(m_raw.vertices) verts_proc = g.trimesh.grouping.hashable_rows(m_proc.vertices) @@ -117,14 +112,14 @@ def test_vertex_order(self): # of course mesh needs to have same faces as before assert g.np.allclose( - g.np.sort(tris_raw, axis=0), - g.np.sort(tris_proc, axis=0)) + g.np.sort(tris_raw, axis=0), g.np.sort(tris_proc, axis=0) + ) def test_ascii_keyword(self): - m = g.get_mesh('ascii.stl.zip', force='mesh') + m = g.get_mesh("ascii.stl.zip", force="mesh") assert m.is_watertight -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_svg.py b/tests/test_svg.py index d6275c5d0..04127806c 100644 --- a/tests/test_svg.py +++ b/tests/test_svg.py @@ -5,31 +5,25 @@ class ExportTest(g.unittest.TestCase): - def test_svg(self): for d in g.get_2D(): if g.np.isclose(d.area, 0.0): continue # export and reload the exported SVG loaded = g.trimesh.load( - g.trimesh.util.wrap_as_stream( - d.export(file_type='svg')), - file_type='svg') + g.trimesh.util.wrap_as_stream(d.export(file_type="svg")), file_type="svg" + ) # we only have line and arc primitives as 
SVG # export and import - if all(i.__class__.__name__ in ['Line', 'Arc'] - for i in d.entities): + if all(i.__class__.__name__ in ["Line", "Arc"] for i in d.entities): # perimeter should stay the same-ish # on export/import - assert g.np.isclose(d.length, - loaded.length, - rtol=.01) + assert g.np.isclose(d.length, loaded.length, rtol=0.01) assert len(d.entities) == len(loaded.entities) - path_str = g.trimesh.path.exchange.svg_io.export_svg( - d, return_path=True) + path_str = g.trimesh.path.exchange.svg_io.export_svg(d, return_path=True) assert isinstance(path_str, str) assert len(path_str) > 0 @@ -38,10 +32,10 @@ def test_layer(self): # create two disjoint circles and apply layers a = g.trimesh.load_path(Point([0, 0]).buffer(1)) - a.apply_layer('ACIRCLE') + a.apply_layer("ACIRCLE") b = g.trimesh.load_path(Point([2, 0]).buffer(1)) - b.apply_layer('BCIRCLE') + b.apply_layer("BCIRCLE") assert id(a.entities[0]._metadata) != id(b.entities[0]._metadata) @@ -52,26 +46,23 @@ def test_layer(self): assert g.np.isclose(c.area, a.area + b.area) # export C with just layer of A - aX = g.trimesh.load(g.io_wrap( - c.export(file_type='svg', - only_layers=['ACIRCLE'])), - file_type='svg') + aX = g.trimesh.load( + g.io_wrap(c.export(file_type="svg", only_layers=["ACIRCLE"])), file_type="svg" + ) # export C with all layers - cX = g.trimesh.load(g.io_wrap( - c.export(file_type='svg', - only_layers=None)), - file_type='svg') + cX = g.trimesh.load( + g.io_wrap(c.export(file_type="svg", only_layers=None)), file_type="svg" + ) assert len(cX.entities) == len(c.entities) # should have skipped the layers assert len(aX.entities) == 1 # make - aR = g.trimesh.load(g.io_wrap(c.export( - file_type='dxf', - only_layers=['ACIRCLE'])), - file_type='dxf') + aR = g.trimesh.load( + g.io_wrap(c.export(file_type="dxf", only_layers=["ACIRCLE"])), file_type="dxf" + ) assert g.np.isclose(aR.area, a.area) @@ -79,49 +70,38 @@ def test_trans(self): from trimesh.path.exchange.svg_io import transform_to_matrices as tf # empty strings shouldn't have matrix - assert len(tf('')) == 0 + assert len(tf("")) == 0 # check translate with different whitespace - a = tf('translate(1.1, 2.2 )') + a = tf("translate(1.1, 2.2 )") assert len(a) == 1 - assert g.np.allclose(a[0], - [[1, 0, 1.1], - [0, 1, 2.2], - [0, 0, 1]]) - a = tf(' translate(1.1 1.2 ) ') + assert g.np.allclose(a[0], [[1, 0, 1.1], [0, 1, 2.2], [0, 0, 1]]) + a = tf(" translate(1.1 1.2 ) ") assert len(a) == 1 - assert g.np.allclose(a[0], - [[1, 0, 1.1], - [0, 1, 1.2], - [0, 0, 1]]) + assert g.np.allclose(a[0], [[1, 0, 1.1], [0, 1, 1.2], [0, 0, 1]]) - a = tf(' translate(1.1 1.2 ) ' + - 'matrix ( {} {} {} {} {} {})'.format(*g.np.arange(6))) + a = tf( + " translate(1.1 1.2 ) " + + "matrix ( {} {} {} {} {} {})".format(*g.np.arange(6)) + ) assert len(a) == 2 # check the translate - assert g.np.allclose(a[0], - [[1, 0, 1.1], - [0, 1, 1.2], - [0, 0, 1]]) + assert g.np.allclose(a[0], [[1, 0, 1.1], [0, 1, 1.2], [0, 0, 1]]) # check the matrix string - assert g.np.allclose(a[1], - [[0, 2, 4], - [1, 3, 5], - [0, 0, 1]]) + assert g.np.allclose(a[1], [[0, 2, 4], [1, 3, 5], [0, 0, 1]]) def test_roundtrip(self): """ Check to make sure a roundtrip from both a Scene and a Path2D results in the same file on both sides """ - for fn in ['2D/250_cycloidal.DXF', '2D/tray-easy1.dxf']: + for fn in ["2D/250_cycloidal.DXF", "2D/tray-easy1.dxf"]: p = g.get_mesh(fn) assert isinstance(p, g.trimesh.path.Path2D) # load the exported SVG r = g.trimesh.load( - g.trimesh.util.wrap_as_stream( - p.export(file_type='svg')), - 
file_type='svg') + g.trimesh.util.wrap_as_stream(p.export(file_type="svg")), file_type="svg" + ) assert isinstance(r, g.trimesh.path.Path2D) assert g.np.isclose(r.length, p.length) assert g.np.isclose(r.area, p.area) @@ -129,11 +109,9 @@ def test_roundtrip(self): assert set(r.metadata.keys()) == set(p.metadata.keys()) s = g.trimesh.scene.split_scene(p) - as_svg = s.export(file_type='svg') + as_svg = s.export(file_type="svg") assert isinstance(s, g.trimesh.Scene) - r = g.trimesh.load( - g.trimesh.util.wrap_as_stream(as_svg), - file_type='svg') + r = g.trimesh.load(g.trimesh.util.wrap_as_stream(as_svg), file_type="svg") assert isinstance(r, g.trimesh.Scene) assert s.metadata == r.metadata @@ -148,9 +126,9 @@ def test_roundtrip(self): assert g.np.isclose(a.area, b.area) assert a.body_count == b.body_count - assert r.metadata['file_path'].endswith(fn[3:]) + assert r.metadata["file_path"].endswith(fn[3:]) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_texture.py b/tests/test_texture.py index 090080f5c..6637b6651 100644 --- a/tests/test_texture.py +++ b/tests/test_texture.py @@ -5,7 +5,6 @@ class TextureTest(g.unittest.TestCase): - def test_uv_to_color(self): try: import PIL.Image @@ -15,8 +14,7 @@ def test_uv_to_color(self): # n_vertices = 100 uv = g.np.array([[0.25, 0.2], [0.4, 0.5]], dtype=float) texture = g.np.arange(96, dtype=g.np.uint8).reshape(8, 4, 3) - colors = g.trimesh.visual.uv_to_color( - uv, PIL.Image.fromarray(texture)) + colors = g.trimesh.visual.uv_to_color(uv, PIL.Image.fromarray(texture)) colors_expected = [[75, 76, 77, 255], [51, 52, 53, 255]] @@ -24,21 +22,22 @@ def test_uv_to_color(self): def test_bad_uv(self): # get a textured OBJ - m = g.get_mesh('fuze.obj', force='mesh') + m = g.get_mesh("fuze.obj", force="mesh") # add malformed UV coordinates m.visual.uv = m.visual.uv[:100] m.merge_vertices() def test_order_kwarg(self): - for file_name in ['ico4.obj', 'ico4uv.obj']: + for file_name in ["ico4.obj", "ico4uv.obj"]: # get the location of the model file file_path = g.get_path(file_name) with open(file_path) as f: # get the raw ordered vertices from the file with basic string # ops v_raw = g.np.array( - [line[2:].split() for line in f if line.startswith('v ')], - dtype=g.np.float64) + [line[2:].split() for line in f if line.startswith("v ")], + dtype=g.np.float64, + ) # load them with maximal correspondence captain a = g.trimesh.load(file_path, process=False, maintain_order=True) @@ -51,17 +50,18 @@ def test_order_kwarg(self): assert a.vertices.shape != v_raw.shape def test_fuze(self): - # create a local web server to test remote assets with g.serve_meshes() as address: # see if web resolvers work tex = g.trimesh.exchange.load.load_remote( - url=address + '/fuze.obj', process=False) + url=address + "/fuze.obj", process=False + ) g.check_fuze(tex) # see if web + zip resolvers work scene = g.trimesh.exchange.load.load_remote( - url=address + '/fuze.zip', process=False) + url=address + "/fuze.zip", process=False + ) # zip files get loaded into a scene assert len(scene.geometry) == 1 @@ -70,12 +70,12 @@ def test_fuze(self): # obj with texture, assets should be loaded # through a FilePathResolver - m = g.get_mesh('fuze.obj', process=False) + m = g.get_mesh("fuze.obj", process=False) g.check_fuze(tex) # obj with texture, assets should be loaded # through a ZipResolver into a scene - scene = g.get_mesh('fuze.zip', process=False) + scene = g.get_mesh("fuze.zip", process=False) # zip files get loaded 
into a scene
         assert len(scene.geometry) == 1
 
@@ -83,11 +83,11 @@ def test_fuze(self):
         g.check_fuze(m)
 
         # the PLY should have textures defined
-        m = g.get_mesh('fuze.ply', process=False)
+        m = g.get_mesh("fuze.ply", process=False)
         g.check_fuze(m)
 
         # ASCII PLY should have textures defined
-        m = g.get_mesh('fuze_ascii.ply', process=False)
+        m = g.get_mesh("fuze_ascii.ply", process=False)
         g.check_fuze(m)
 
         # textured meshes should subdivide OK-ish
@@ -97,9 +97,7 @@ def test_fuze(self):
         # load without doing the vertex separation
         # will look like garbage but represents original
         # and skips "disconnect vertices with different UV"
-        b = g.get_mesh('fuze.ply',
-                       process=False,
-                       fix_texture=False)
+        b = g.get_mesh("fuze.ply", process=False, fix_texture=False)
 
         assert len(b.vertices) == 502
         assert len(b.visual.uv) == 502
@@ -111,32 +109,32 @@ def test_upsize(self):
         try:
             from PIL import Image
         except BaseException:
-            g.log.warning('no PIL, not testing power_resize!')
+            g.log.warning("no PIL, not testing power_resize!")
             return
 
         # shortcut for the function
         resize = g.trimesh.visual.texture.power_resize
 
-        img = Image.new('RGB', (10, 20))
+        img = Image.new("RGB", (10, 20))
         assert img.size == (10, 20)
         assert resize(img).size == (16, 32)
         assert resize(img, square=True).size == (32, 32)
 
         # check with one value on-size
-        img = Image.new('RGB', (10, 32))
+        img = Image.new("RGB", (10, 32))
         assert img.size == (10, 32)
         assert resize(img).size == (16, 32)
         assert resize(img, square=True).size == (32, 32)
 
         # check early exit path
-        img = Image.new('RGB', (32, 32))
+        img = Image.new("RGB", (32, 32))
         assert img.size == (32, 32)
         assert resize(img).size == (32, 32)
         assert resize(img, square=True).size == (32, 32)
 
     def test_concatenate(self):
         # test concatenation with texture
-        a = g.get_mesh('fuze.obj')
+        a = g.get_mesh("fuze.obj")
         b = a.copy()
         b.apply_translation([b.extents[0] * 1.25, 0, 0])
 
@@ -144,19 +142,22 @@ def test_concatenate(self):
         assert len(c.vertices) > len(a.vertices)
         assert len(c.visual.uv) == len(c.vertices)
         # should have deduplicated image texture
-        assert g.np.allclose(c.visual.material.image.size,
-                             a.visual.material.image.size)
+        assert g.np.allclose(c.visual.material.image.size, a.visual.material.image.size)
 
     def test_concatentate_multi(self):
-        colors = [[255, 0, 0, 255],
-                  [0, 255, 0, 255],
-                  [0, 0, 255, 255],
-                  [100, 100, 100, 255]]
-        funcs = [g.trimesh.creation.box,
-                 g.trimesh.creation.icosphere,
-                 g.trimesh.creation.capsule]
-
-        fuze = g.get_mesh('fuze.obj')
+        colors = [
+            [255, 0, 0, 255],
+            [0, 255, 0, 255],
+            [0, 0, 255, 255],
+            [100, 100, 100, 255],
+        ]
+        funcs = [
+            g.trimesh.creation.box,
+            g.trimesh.creation.icosphere,
+            g.trimesh.creation.capsule,
+        ]
+
+        fuze = g.get_mesh("fuze.obj")
         fuze.apply_scale(1.0 / fuze.extents.max())
         fuze.apply_translation([-2, 0, 0] - fuze.bounds[0])
 
@@ -177,14 +178,13 @@ def test_concatentate_multi(self):
 
         # convert texture back to color
         roundtrip = c.visual.to_color()
-        assert roundtrip.kind == 'vertex'
+        assert roundtrip.kind == "vertex"
         vertex_c = roundtrip.vertex_colors
         # get the unique colors
         unique = vertex_c[g.trimesh.grouping.unique_rows(vertex_c)[0]]
 
         # roundtripped colors should be a superset of original colors
-        assert {tuple(c) for c in unique}.issuperset(
-            {tuple(c) for c in colors})
+        assert {tuple(c) for c in unique}.issuperset({tuple(c) for c in colors})
 
     def test_to_tex(self):
         m = g.trimesh.creation.box()
@@ -198,7 +198,7 @@ def test_to_tex(self):
 
     def test_uv_none(self):
         # setting UV coordinates to None should work
-        m =
g.get_mesh("fuze.obj") m.visual.uv = None assert m.visual.uv is None @@ -208,13 +208,13 @@ def test_uv_none(self): def test_pbr_export(self): # try loading a textured box - m = next(iter(g.get_mesh('BoxTextured.glb').geometry.values())) + m = next(iter(g.get_mesh("BoxTextured.glb").geometry.values())) # make sure material copy doesn't crash m.visual.copy() with g.TemporaryDirectory() as d: # exports by path allow files to be written - path = g.os.path.join(d, 'box.obj') + path = g.os.path.join(d, "box.obj") m.export(path) # try reloading r = g.trimesh.load(path) @@ -228,7 +228,7 @@ def test_pbr_material_fusion(self): # one with PBR textures # one with emissive textures # and one with specular glossiness color values - m = g.get_mesh("pbr_cubes_emissive_spec_gloss.zip", force='mesh') + m = g.get_mesh("pbr_cubes_emissive_spec_gloss.zip", force="mesh") assert isinstance(m, g.trimesh.Trimesh) @@ -239,7 +239,6 @@ def test_pbr_material_fusion(self): assert mat_m.emissiveTexture is not None - -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_thickness.py b/tests/test_thickness.py index 988834931..c5f02403a 100644 --- a/tests/test_thickness.py +++ b/tests/test_thickness.py @@ -5,7 +5,6 @@ class ThicknessTest(g.unittest.TestCase): - def test_sphere_thickness(self): m = g.trimesh.creation.box() @@ -17,15 +16,15 @@ def test_sphere_thickness(self): points=samples, exterior=False, normals=m.face_normals[faces], - method='max_sphere') + method="max_sphere", + ) assert (thickness > -g.trimesh.tol.merge).all() # check thickness at a specific point - point = g.np.array([[0.5, 0., 0.]]) + point = g.np.array([[0.5, 0.0, 0.0]]) point_thickness = g.trimesh.proximity.thickness( - m, point, - exterior=False, - method='max_sphere') + m, point, exterior=False, method="max_sphere" + ) # its a unit cube assert g.np.allclose(point_thickness, 1.0) @@ -41,17 +40,16 @@ def test_ray_thickness(self): points=samples, exterior=False, normals=m.face_normals[faces], - method='ray') + method="ray", + ) assert (thickness > -g.trimesh.tol.merge).all() # check thickness at a specific point - point = g.np.array([[0.5, 0., 0.]]) + point = g.np.array([[0.5, 0.0, 0.0]]) point_thickness = g.trimesh.proximity.thickness( - mesh=m, - points=point, - exterior=False, - method='ray') + mesh=m, points=point, exterior=False, method="ray" + ) assert g.np.allclose(point_thickness, 1.0) def test_sphere_reach(self): @@ -65,7 +63,8 @@ def test_sphere_reach(self): points=samples, exterior=True, normals=m.face_normals[faces], - method='max_sphere') + method="max_sphere", + ) assert g.np.isinf(reach).all() def test_ray_reach(self): @@ -79,24 +78,21 @@ def test_ray_reach(self): points=samples, exterior=True, normals=m.face_normals[faces], - method='ray') + method="ray", + ) assert g.np.isinf(reach).all() def test_known(self): # an axis aligned plate part - m = g.get_mesh('1002_tray_bottom.STL') + m = g.get_mesh("1002_tray_bottom.STL") # points on the surface samples = m.sample(1000) # compute thicknesses using sphere method - rs = g.trimesh.proximity.thickness(mesh=m, - points=samples, - method='max_sphere') + rs = g.trimesh.proximity.thickness(mesh=m, points=samples, method="max_sphere") # compute thicknesses using pure ray tests - ra = g.trimesh.proximity.thickness(mesh=m, - points=samples, - method='ray') + ra = g.trimesh.proximity.thickness(mesh=m, points=samples, method="ray") # mesh is axis aligned plate so thickness is min extent truth = m.extents.min() @@ -104,6 
+100,6 @@ def test_known(self): assert g.np.isclose(g.np.median(ra), truth) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_trackball.py b/tests/test_trackball.py index 61012f6de..df878f494 100644 --- a/tests/test_trackball.py +++ b/tests/test_trackball.py @@ -7,13 +7,9 @@ class TrackballTest(g.unittest.TestCase): - def test_resize(self): trackball = Trackball( - pose=g.np.eye(4), - size=(640, 480), - scale=1.0, - target=g.np.array([0, 0, 0]) + pose=g.np.eye(4), size=(640, 480), scale=1.0, target=g.np.array([0, 0, 0]) ) trackball.down((320, 240)) trackball.drag((321, 240)) @@ -27,10 +23,7 @@ def test_resize(self): def test_drag_rotate(self): trackball = Trackball( - pose=g.np.eye(4), - size=(640, 480), - scale=1.0, - target=g.np.array([0, 0, 0]) + pose=g.np.eye(4), size=(640, 480), scale=1.0, target=g.np.array([0, 0, 0]) ) # rotates around y-axis @@ -71,10 +64,7 @@ def test_drag_rotate(self): def test_drag_roll(self): trackball = Trackball( - pose=g.np.eye(4), - size=(640, 480), - scale=1.0, - target=g.np.array([0, 0, 0]) + pose=g.np.eye(4), size=(640, 480), scale=1.0, target=g.np.array([0, 0, 0]) ) # rotates around z-axis @@ -98,10 +88,7 @@ def test_drag_roll(self): def test_drag_pan(self): trackball = Trackball( - pose=g.np.eye(4), - size=(640, 480), - scale=1.0, - target=g.np.array([0, 0, 0]) + pose=g.np.eye(4), size=(640, 480), scale=1.0, target=g.np.array([0, 0, 0]) ) # translate to x @@ -116,10 +103,7 @@ def test_drag_pan(self): def test_drag_zoom(self): pose = g.trimesh.transformations.translation_matrix([0, 0, 1]) trackball = Trackball( - pose=pose, - size=(640, 480), - scale=1.0, - target=g.np.array([0, 0, 0]) + pose=pose, size=(640, 480), scale=1.0, target=g.np.array([0, 0, 0]) ) # translate to x @@ -135,10 +119,7 @@ def test_drag_zoom(self): def test_scroll(self): pose = g.trimesh.transformations.translation_matrix([0, 0, 1]) trackball = Trackball( - pose=pose, - size=(640, 480), - scale=1.0, - target=g.np.array([0, 0, 0]) + pose=pose, size=(640, 480), scale=1.0, target=g.np.array([0, 0, 0]) ) g.np.testing.assert_allclose(trackball.pose[:3, :3], g.np.eye(3)) g.np.testing.assert_allclose(trackball.pose[:3, 3], [0, 0, 1]) @@ -148,10 +129,7 @@ def test_scroll(self): def test_rotate(self): trackball = Trackball( - pose=g.np.eye(4), - size=(640, 480), - scale=1.0, - target=g.np.array([0, 0, 0]) + pose=g.np.eye(4), size=(640, 480), scale=1.0, target=g.np.array([0, 0, 0]) ) # rotates around y-axis trackball.rotate(g.np.deg2rad(1)) @@ -168,6 +146,6 @@ def test_rotate(self): g.np.testing.assert_allclose(trackball.pose[3, :], [0, 0, 0, 1]) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_triangles.py b/tests/test_triangles.py index 586431144..d94821d63 100644 --- a/tests/test_triangles.py +++ b/tests/test_triangles.py @@ -5,33 +5,36 @@ class TrianglesTest(g.unittest.TestCase): - def test_barycentric(self): for m in g.get_meshes(4): # a simple test which gets the barycentric coordinate at each of the three # vertices, checks to make sure the barycentric is [1,0,0] for the vertex # and then converts back to cartesian and makes sure the original points # are the same as the conversion and back - for method in ['cross', 'cramer']: + for method in ["cross", "cramer"]: for i in range(3): barycentric = g.trimesh.triangles.points_to_barycentric( - m.triangles, m.triangles[:, i], method=method) - assert (g.np.abs(barycentric - - 
g.np.roll([1.0, 0, 0], i)) < 1e-8).all() + m.triangles, m.triangles[:, i], method=method + ) + assert ( + g.np.abs(barycentric - g.np.roll([1.0, 0, 0], i)) < 1e-8 + ).all() points = g.trimesh.triangles.barycentric_to_points( - m.triangles, barycentric) + m.triangles, barycentric + ) assert (g.np.abs(points - m.triangles[:, i]) < 1e-8).all() def test_closest(self): closest = g.trimesh.triangles.closest_point( - triangles=g.data['triangles']['triangles'], - points=g.data['triangles']['points']) + triangles=g.data["triangles"]["triangles"], + points=g.data["triangles"]["points"], + ) - comparison = (closest - g.data['triangles']['closest']).all() + comparison = (closest - g.data["triangles"]["closest"]).all() assert (comparison < 1e-8).all() - g.log.info('finished closest check on %d triangles', len(closest)) + g.log.info("finished closest check on %d triangles", len(closest)) def test_closest_obtuse(self): # simple triangle in the xy-plane with an obtuse corner at vertex A @@ -50,14 +53,15 @@ def test_closest_obtuse(self): radius = 3 nPtsOnCircle = 100 alphas = g.np.linspace( - g.np.pi / nPtsOnCircle, - g.np.pi * 2 - g.np.pi / nPtsOnCircle, - nPtsOnCircle) - ptsOnCircle = g.np.transpose( - [g.np.cos(alphas), g.np.sin(alphas), g.np.zeros(nPtsOnCircle)]) * radius + g.np.pi / nPtsOnCircle, g.np.pi * 2 - g.np.pi / nPtsOnCircle, nPtsOnCircle + ) + ptsOnCircle = ( + g.np.transpose([g.np.cos(alphas), g.np.sin(alphas), g.np.zeros(nPtsOnCircle)]) + * radius + ) def norm(v): - return g.np.sqrt(g.np.einsum('...i,...i', v, v)) + return g.np.sqrt(g.np.einsum("...i,...i", v, v)) def distToLine(o, v, p): return norm((o - p) - g.np.dot(o - p, v) * v) @@ -80,37 +84,30 @@ def distPointToEdge(U, V, P): # edge [U, V], point P # get closest points from trimesh and compute distances to the circle # points tm_dists = norm( - ptsOnCircle - - g.trimesh.triangles.closest_point( - [ABC] * - nPtsOnCircle, - ptsOnCircle)) + ptsOnCircle + - g.trimesh.triangles.closest_point([ABC] * nPtsOnCircle, ptsOnCircle) + ) # compute naive point-to-edge distances for all points and take the min of # the three edges - gt_dists = g.np.float32([[distPointToEdge(ABC[i], ABC[(i + 1) % 3], pt) - for i in range(3)] for pt in ptsOnCircle]).min(axis=1) + gt_dists = g.np.float32( + [ + [distPointToEdge(ABC[i], ABC[(i + 1) % 3], pt) for i in range(3)] + for pt in ptsOnCircle + ] + ).min(axis=1) diff_dists = tm_dists - gt_dists assert g.np.dot(diff_dists, diff_dists) < g.tol.merge def test_degenerate(self): - tri = [[[0, 0, 0], - [1, 0, 0], - [-.5, 0, 0]], - [[0, 0, 0], - [0, 0, 0], - [10, 10, 0]], - [[0, 0, 0], - [0, 0, 2], - [0, 0, 2.2]], - [[0, 0, 0], - [1, 0, 0], - [0, 1, 0]]] - - tri_gt = [False, - False, - False, - True] + tri = [ + [[0, 0, 0], [1, 0, 0], [-0.5, 0, 0]], + [[0, 0, 0], [0, 0, 0], [10, 10, 0]], + [[0, 0, 0], [0, 0, 2], [0, 0, 2.2]], + [[0, 0, 0], [1, 0, 0], [0, 1, 0]], + ] + + tri_gt = [False, False, False, True] r = g.trimesh.triangles.nondegenerate(tri) assert len(r) == len(tri) @@ -118,8 +115,7 @@ def test_degenerate(self): def test_angles(self): # a zero- area triangle - tris = g.np.array( - [[[0, 0, 0], [1, 0, 0], [1, 0, 0]]], dtype=g.np.float64) + tris = g.np.array([[[0, 0, 0], [1, 0, 0], [1, 0, 0]]], dtype=g.np.float64) angles = g.trimesh.triangles.angles(tris) # degenerate angles should be zero, not NaN g.log.debug(angles) @@ -127,39 +123,35 @@ def test_angles(self): # an equilateral triangle tris = g.np.array( - [[[-1, 0, 0], - [1, 0, 0], - [0, g.np.sqrt(3), 0]]], dtype=g.np.float64) + [[[-1, 0, 0], [1, 0, 0], [0, 
g.np.sqrt(3), 0]]], dtype=g.np.float64 + ) angles = g.trimesh.triangles.angles(tris) # degenerate angles should be zero, not NaN assert g.np.allclose(angles, g.np.radians(60)) # an equilateral triangle transformed into space tris = g.trimesh.transform_points( - g.np.array( - [[-1, 0, 0], - [1, 0, 0], - [0, g.np.sqrt(3), 0]], dtype=g.np.float64), - g.trimesh.transformations.random_rotation_matrix()).reshape((-1, 3, 3)) + g.np.array([[-1, 0, 0], [1, 0, 0], [0, g.np.sqrt(3), 0]], dtype=g.np.float64), + g.trimesh.transformations.random_rotation_matrix(), + ).reshape((-1, 3, 3)) angles = g.trimesh.triangles.angles(tris) # all angles should be 60 degrees assert g.np.allclose(angles, g.np.radians(60)) # an 3-4-5 right triangle tris = g.trimesh.transform_points( - g.np.array( - [[0, 0, 0], - [3, 0, 0], - [0, 4, 0]], dtype=g.np.float64), - g.trimesh.transformations.random_rotation_matrix()).reshape((-1, 3, 3)) + g.np.array([[0, 0, 0], [3, 0, 0], [0, 4, 0]], dtype=g.np.float64), + g.trimesh.transformations.random_rotation_matrix(), + ).reshape((-1, 3, 3)) # get angles angles = g.trimesh.triangles.angles(tris) # make sure they match a 3-4-5 assert g.np.allclose( g.np.sort(angles.ravel()), - [g.np.arcsin(3.0 / 5), g.np.arcsin(4.0 / 5), g.np.pi / 2]) + [g.np.arcsin(3.0 / 5), g.np.arcsin(4.0 / 5), g.np.pi / 2], + ) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_typed.py b/tests/test_typed.py index bd50c835d..f4c580d50 100644 --- a/tests/test_typed.py +++ b/tests/test_typed.py @@ -7,5 +7,6 @@ def _check(values: ArrayLike) -> NDArray[int64]: return (np.array(values, dtype=float64) * 100).astype(int64) + def _run() -> NDArray[int64]: return _check(values=[1, 2]) diff --git a/tests/test_units.py b/tests/test_units.py index 4f1d23d4d..6878a641b 100644 --- a/tests/test_units.py +++ b/tests/test_units.py @@ -5,36 +5,33 @@ class UnitsTest(g.unittest.TestCase): - def test_units(self): - # make sure unit conversions fail for fake units - fake_units = 'blorbs' - fake_units = 'in' + fake_units = "blorbs" + fake_units = "in" try: - c = g.trimesh.units.unit_conversion('inches', # NOQA - fake_units) + c = g.trimesh.units.unit_conversion("inches", fake_units) # NOQA raise AssertionError() except BaseException: pass - m = g.get_mesh('featuretype.STL') + m = g.get_mesh("featuretype.STL") self.assertTrue(m.units is None) - m.units = 'in' - self.assertTrue(m.units == 'in') + m.units = "in" + self.assertTrue(m.units == "in") extents_pre = m.extents - m.convert_units('mm') + m.convert_units("mm") scale = g.np.divide(m.extents, extents_pre) self.assertTrue(g.np.allclose(scale, 25.4)) - self.assertTrue(m.units == 'mm') + self.assertTrue(m.units == "mm") def test_conversion(self): # test conversions on a multibody STL in a scene # a multibody STL with a unit hint in filename - m = g.get_mesh('counter.unitsmm.STL') + m = g.get_mesh("counter.unitsmm.STL") # nothing should be set assert m.units is None @@ -47,26 +44,22 @@ def test_conversion(self): # should extract units from file name without # raising a ValueError - c = s.convert_units('in', guess=False) + c = s.convert_units("in", guess=False) # should have converted mm -> in, 1/25.4 # extents should scale exactly with unit conversion - assert g.np.allclose(extents_pre / c.extents, - 25.4, - atol=.01) + assert g.np.allclose(extents_pre / c.extents, 25.4, atol=0.01) def test_path(self): - p = g.get_mesh('2D/tray-easy1.dxf') + p = g.get_mesh("2D/tray-easy1.dxf") # should be inches - assert 'in' in 
p.units + assert "in" in p.units extents_pre = p.extents - p.convert_units('mm') + p.convert_units("mm") # should have converted in -> mm 25.4 # extents should scale exactly with unit conversion - assert g.np.allclose(p.extents / extents_pre, - 25.4, - atol=.01) + assert g.np.allclose(p.extents / extents_pre, 25.4, atol=0.01) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_unwrap.py b/tests/test_unwrap.py index 8bb52be5a..5bc1b16ed 100644 --- a/tests/test_unwrap.py +++ b/tests/test_unwrap.py @@ -5,32 +5,29 @@ class UnwrapTest(g.unittest.TestCase): - def test_image(self): try: import xatlas # noqa except BaseException: - g.log.info('not testing unwrap as no `xatlas`') + g.log.info("not testing unwrap as no `xatlas`") return - a = g.get_mesh('bunny.ply', force="mesh") + a = g.get_mesh("bunny.ply", force="mesh") u = a.unwrap() assert u.visual.uv.shape == (len(u.vertices), 2) - checkerboard = g.np.kron( - [[1, 0] * 4, [0, 1] * 4] * 4, g.np.ones((10, 10))) + checkerboard = g.np.kron([[1, 0] * 4, [0, 1] * 4] * 4, g.np.ones((10, 10))) try: from PIL import Image except BaseException: return - image = Image.fromarray( - (checkerboard * 255).astype(g.np.uint8)) + image = Image.fromarray((checkerboard * 255).astype(g.np.uint8)) u = a.unwrap(image=image) # make sure image was attached correctly assert u.visual.material.image.size == image.size -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_upstream.py b/tests/test_upstream.py index 13aebdeaa..1a312db6d 100644 --- a/tests/test_upstream.py +++ b/tests/test_upstream.py @@ -21,6 +21,6 @@ def test_shapely(self): assert g.np.isclose(string.length, 1.0) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_urdf.py b/tests/test_urdf.py index 45b3623a5..6cabbc631 100644 --- a/tests/test_urdf.py +++ b/tests/test_urdf.py @@ -17,7 +17,7 @@ def test_export(self): # a viewer show() call) from trimesh.exchange import urdf - mesh = g.get_mesh('featuretype.STL') + mesh = g.get_mesh("featuretype.STL") out_dir = g.tempfile.mkdtemp() @@ -31,6 +31,6 @@ def test_export(self): g.shutil.rmtree(out_dir) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_vector.py b/tests/test_vector.py index 61cdb1f4f..bf489fff0 100644 --- a/tests/test_vector.py +++ b/tests/test_vector.py @@ -5,13 +5,12 @@ class SphericalTests(g.unittest.TestCase): - def test_spherical(self): """ Convert vectors to spherical coordinates """ # random unit vectors - v = g.trimesh.unitize(g.random((1000, 3)) - .5) + v = g.trimesh.unitize(g.random((1000, 3)) - 0.5) # (n, 2) angles in radians spherical = g.trimesh.util.vector_to_spherical(v) # back to unit vectors @@ -21,12 +20,10 @@ def test_spherical(self): class HemisphereTests(g.unittest.TestCase): - def test_hemisphere(self): for dimension in [2, 3]: # random unit vectors - v = g.trimesh.unitize( - g.random((10000, dimension)) - .5) + v = g.trimesh.unitize(g.random((10000, dimension)) - 0.5) # add some on- axis points v[:dimension] = g.np.eye(dimension) @@ -37,15 +34,16 @@ def test_hemisphere(self): resigned = g.trimesh.util.vector_hemisphere(v) # after resigning, negative vectors should equal positive - check = (abs(g.np.diff(resigned.reshape((-1, 2, dimension)), - axis=1).sum(axis=2)) < - g.trimesh.constants.tol.zero).all() + check = ( + 
abs(g.np.diff(resigned.reshape((-1, 2, dimension)), axis=1).sum(axis=2)) + < g.trimesh.constants.tol.zero + ).all() assert check a, s = g.trimesh.util.vector_hemisphere(v, return_sign=True) assert g.np.allclose(v, a * s.reshape((-1, 1))) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_vertices.py b/tests/test_vertices.py index b17e3189a..5c979cb85 100644 --- a/tests/test_vertices.py +++ b/tests/test_vertices.py @@ -5,12 +5,9 @@ class VerticesTest(g.unittest.TestCase): - def test_vertex_faces(self): - # One watertight, one not; also various sizes - meshes = [g.get_mesh('featuretype.STL'), - g.get_mesh('cycloidal.ply')] + meshes = [g.get_mesh("featuretype.STL"), g.get_mesh("cycloidal.ply")] for m in meshes: # make sure every @@ -22,12 +19,10 @@ def test_vertex_faces(self): # choose some random vertices and make sure their # face indices are correct - rand_vertices = g.np.random.randint( - low=0, high=len(m.vertices), size=100) + rand_vertices = g.np.random.randint(low=0, high=len(m.vertices), size=100) for v in rand_vertices: v_faces = g.np.where(m.faces == v)[0][::-1] - assert ( - g.np.all(v_faces == m.vertex_faces[v][m.vertex_faces[v] >= 0])) + assert g.np.all(v_faces == m.vertex_faces[v][m.vertex_faces[v] >= 0]) # make mesh degenerate m.faces[0] = [0, 0, 0] @@ -40,6 +35,6 @@ def test_vertex_faces(self): assert all(i in face for face in faces) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_viewer.py b/tests/test_viewer.py index 2af6732b1..f48d896be 100644 --- a/tests/test_viewer.py +++ b/tests/test_viewer.py @@ -5,13 +5,13 @@ class ViewerTest(g.unittest.TestCase): - def test_viewer(self): # if the runner has not asked to include rendering exit if not g.include_rendering: return from pyglet import gl + # set a GL config that fixes a depth buffer issue in xvfb # this should raise an exception if pyglet can't get a library window_conf = gl.Config(double_buffer=True, depth_size=24) @@ -24,12 +24,11 @@ def test_viewer(self): scene = mesh.scene() # run the actual render call - png = scene.save_image(resolution=[1920, 1080], - window_conf=window_conf) + png = scene.save_image(resolution=[1920, 1080], window_conf=window_conf) assert len(png) > 0 -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_visual.py b/tests/test_visual.py index 64a4d7ba8..00261c41c 100644 --- a/tests/test_visual.py +++ b/tests/test_visual.py @@ -5,9 +5,8 @@ class VisualTest(g.unittest.TestCase): - def test_face_subset_texture_visuals(self): - m = g.get_mesh('fuze.obj', force='mesh') + m = g.get_mesh("fuze.obj", force="mesh") face_index = g.np.random.choice(len(m.faces), len(m.triangles) // 2) idx = m.faces[g.np.unique(face_index)].flatten() @@ -21,11 +20,11 @@ def test_face_subset_texture_visuals(self): def test_face_subset_color_visuals(self): import trimesh - m = g.get_mesh('torus.STL') + + m = g.get_mesh("torus.STL") vertex_colors = g.np.random.randint(0, 255, size=(len(m.vertices), 3)) - m.visual = trimesh.visual.ColorVisuals( - mesh=m, vertex_colors=vertex_colors) + m.visual = trimesh.visual.ColorVisuals(mesh=m, vertex_colors=vertex_colors) face_index = g.np.random.choice(len(m.faces), len(m.triangles) // 2) idx = m.faces[g.np.unique(face_index)].flatten() @@ -55,6 +54,6 @@ def test_face_subset_color_visuals(self): # assert distances.max() < 1e-8 -if __name__ == '__main__': +if 
__name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/tests/test_voxel.py b/tests/test_voxel.py index 685625db1..fe172f39d 100644 --- a/tests/test_voxel.py +++ b/tests/test_voxel.py @@ -5,21 +5,22 @@ class VoxelGridTest(g.unittest.TestCase): - def test_voxel(self): """ Test that voxels work at all """ - for m in [g.get_mesh('featuretype.STL'), - g.trimesh.primitives.Box(), - g.trimesh.primitives.Sphere()]: - for pitch in [.1, .1 - g.tol.merge]: + for m in [ + g.get_mesh("featuretype.STL"), + g.trimesh.primitives.Box(), + g.trimesh.primitives.Sphere(), + ]: + for pitch in [0.1, 0.1 - g.tol.merge]: surface = m.voxelized(pitch=pitch) # make sure the voxelized pitch is similar to passed assert g.np.allclose(surface.pitch, pitch) - for fill_method in ('base', 'orthographic'): + for fill_method in ("base", "orthographic"): solid = surface.copy().fill(method=fill_method) assert len(surface.encoding.dense.shape) == 3 @@ -35,11 +36,8 @@ def test_voxel(self): assert isinstance(box_surface, g.trimesh.Trimesh) assert abs(box_solid.volume - solid.volume) < g.tol.merge - assert g.trimesh.util.is_shape( - surface.sparse_indices, (-1, 3)) - assert len( - solid.sparse_indices) >= len( - surface.sparse_indices) + assert g.trimesh.util.is_shape(surface.sparse_indices, (-1, 3)) + assert len(solid.sparse_indices) >= len(surface.sparse_indices) assert solid.sparse_indices.shape == solid.points.shape outside = m.bounds[1] + m.scale for vox in surface, solid: @@ -51,11 +49,13 @@ def test_voxel(self): cubes = surface.marching_cubes assert cubes.area > 0.0 except ImportError: - g.log.info('no skimage, skipping marching cubes test') + g.log.info("no skimage, skipping marching cubes test") - g.log.info('Mesh volume was %f, voxelized volume was %f', - m.volume, - surface.volume) + g.log.info( + "Mesh volume was %f, voxelized volume was %f", + m.volume, + surface.volume, + ) def test_marching(self): """ @@ -64,7 +64,7 @@ def test_marching(self): try: from skimage import measure # NOQA except ImportError: - g.log.warning('no skimage, skipping marching cubes test') + g.log.warning("no skimage, skipping marching cubes test") return # make sure offset is correct @@ -72,8 +72,9 @@ def test_marching(self): mesh = g.trimesh.voxel.ops.matrix_to_marching_cubes(matrix=matrix) assert mesh.is_watertight - mesh = g.trimesh.voxel.ops.matrix_to_marching_cubes( - matrix=matrix).apply_scale(3.0) + mesh = g.trimesh.voxel.ops.matrix_to_marching_cubes(matrix=matrix).apply_scale( + 3.0 + ) assert mesh.is_watertight def test_marching_points(self): @@ -83,7 +84,7 @@ def test_marching_points(self): try: from skimage import measure # NOQA except ImportError: - g.log.warning('no skimage, skipping marching cubes test') + g.log.warning("no skimage, skipping marching cubes test") return # get some points on the surface of an icosahedron @@ -91,14 +92,13 @@ def test_marching_points(self): # make the pitch proportional to scale pitch = points.ptp(axis=0).min() / 10 # run marching cubes - mesh = g.trimesh.voxel.ops.points_to_marching_cubes( - points=points, pitch=pitch) + mesh = g.trimesh.voxel.ops.points_to_marching_cubes(points=points, pitch=pitch) # mesh should have faces assert len(mesh.faces) > 0 # mesh should be roughly centered - assert (mesh.bounds[0] < -.5).all() - assert (mesh.bounds[1] > .5).all() + assert (mesh.bounds[0] < -0.5).all() + assert (mesh.bounds[1] > 0.5).all() def test_local(self): """ @@ -110,31 +110,22 @@ def test_local(self): # it should have some stuff voxel = creation.local_voxelize( - 
mesh=mesh, - point=[.5, .5, .5], - pitch=.1, - radius=5, - fill=True) + mesh=mesh, point=[0.5, 0.5, 0.5], pitch=0.1, radius=5, fill=True + ) assert len(voxel.shape) == 3 # try it when it definitely doesn't hit anything empty = creation.local_voxelize( - mesh=mesh, - point=[10, 10, 10], - pitch=.1, - radius=5, - fill=True) + mesh=mesh, point=[10, 10, 10], pitch=0.1, radius=5, fill=True + ) # shouldn't have hit anything assert empty is None # try it when it is in the center of a volume creation.local_voxelize( - mesh=mesh, - point=[0, 0, 0], - pitch=.1, - radius=2, - fill=True) + mesh=mesh, point=[0, 0, 0], pitch=0.1, radius=2, fill=True + ) def test_points_to_from_indices(self): # indices = (points - origin) / pitch @@ -145,23 +136,27 @@ def test_points_to_from_indices(self): # points -> indices indices2 = g.trimesh.voxel.ops.points_to_indices( - points=points, origin=origin, pitch=pitch) + points=points, origin=origin, pitch=pitch + ) g.np.testing.assert_allclose(indices, indices2, atol=0, rtol=0) # indices -> points points2 = g.trimesh.voxel.ops.indices_to_points( - indices=indices, origin=origin, pitch=pitch) - g.np.testing.assert_allclose( - g.np.array(indices) * pitch + origin, points2, atol=0, rtol=0) + indices=indices, origin=origin, pitch=pitch + ) g.np.testing.assert_allclose( - points, points2, atol=pitch / 2 * 1.01, rtol=0) + g.np.array(indices) * pitch + origin, points2, atol=0, rtol=0 + ) + g.np.testing.assert_allclose(points, points2, atol=pitch / 2 * 1.01, rtol=0) # indices -> points -> indices (this must be consistent) points2 = g.trimesh.voxel.ops.indices_to_points( - indices=indices, origin=origin, pitch=pitch) + indices=indices, origin=origin, pitch=pitch + ) indices2 = g.trimesh.voxel.ops.points_to_indices( - points=points2, origin=origin, pitch=pitch) + points=points2, origin=origin, pitch=pitch + ) g.np.testing.assert_allclose(indices, indices2, atol=0, rtol=0) def test_as_boxes(self): @@ -172,40 +167,37 @@ def test_as_boxes(self): matrix = g.np.eye(9, dtype=bool).reshape((-1, 3, 3)) centers = g.trimesh.voxel.ops.matrix_to_points( - matrix=matrix, pitch=pitch, origin=origin) - v = voxel.VoxelGrid(matrix).apply_scale( - pitch).apply_translation(origin) + matrix=matrix, pitch=pitch, origin=origin + ) + v = voxel.VoxelGrid(matrix).apply_scale(pitch).apply_translation(origin) boxes1 = v.as_boxes() boxes2 = g.trimesh.voxel.ops.multibox(centers).apply_scale(pitch) colors = [g.trimesh.visual.DEFAULT_COLOR] * matrix.sum() * 12 for boxes in [boxes1, boxes2]: - g.np.testing.assert_allclose( - boxes.visual.face_colors, colors, atol=0, rtol=0) + g.np.testing.assert_allclose(boxes.visual.face_colors, colors, atol=0, rtol=0) # check assigning a single color color = [255, 0, 0, 255] boxes1 = v.as_boxes(colors=color) - boxes2 = g.trimesh.voxel.ops.multibox( - centers=centers, colors=color).apply_scale(pitch) + boxes2 = g.trimesh.voxel.ops.multibox(centers=centers, colors=color).apply_scale( + pitch + ) colors = g.np.array([color] * len(centers) * 12) for boxes in [boxes1, boxes2]: - g.np.testing.assert_allclose( - boxes.visual.face_colors, colors, atol=0, rtol=0) + g.np.testing.assert_allclose(boxes.visual.face_colors, colors, atol=0, rtol=0) # check matrix colors - colors = color * g.np.ones(g.np.append(v.shape, 4), - dtype=g.np.uint8) + colors = color * g.np.ones(g.np.append(v.shape, 4), dtype=g.np.uint8) boxes = v.as_boxes(colors=colors) - assert g.np.allclose( - boxes.visual.face_colors, color, atol=0, rtol=0) + assert g.np.allclose(boxes.visual.face_colors, color, atol=0, rtol=0) 
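The round trip exercised above follows `indices = (points - origin) / pitch`. As a minimal sketch of that public API, using only the `trimesh` calls these tests already rely on, a freshly voxelized box behaves like:

    import trimesh

    mesh = trimesh.creation.box()
    vox = mesh.voxelized(pitch=0.1)
    points = vox.points                  # centers of the filled voxels
    assert vox.is_filled(points).all()   # voxel centers are filled by definition
    indices = vox.points_to_indices(points)
    boxes = vox.as_boxes()               # one cube per filled voxel, as a single mesh
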
def test_is_filled(self): """More rigorous test of VoxelGrid.is_filled.""" n = 10 matrix = g.np.random.uniform(size=(n + 1,) * 3) > 0.5 not_matrix = g.np.logical_not(matrix) - pitch = 1. / n + pitch = 1.0 / n origin = g.np.random.uniform(size=(3,)) vox = g.trimesh.voxel.VoxelGrid(matrix) vox = vox.apply_scale(pitch).apply_translation(origin) @@ -214,8 +206,7 @@ def test_is_filled(self): for a, b in ((vox, not_vox), (not_vox, vox)): points = a.points # slight jitter - shouldn't change indices - points += ( - g.np.random.uniform(size=points.shape) - 1) * 0.4 * pitch + points += (g.np.random.uniform(size=points.shape) - 1) * 0.4 * pitch g.np.random.shuffle(points) # all points are filled, and no empty points are filled @@ -235,8 +226,7 @@ def test_vox_sphere(self): # epsilon from zero eps = 1e-4 # should all be contained - grid = g.trimesh.util.grid_linspace( - [[eps] * 3, [9 - eps] * 3], 11) * scale + grid = g.trimesh.util.grid_linspace([[eps] * 3, [9 - eps] * 3], 11) * scale assert vox.is_filled(grid).all() # push it outside the filled area @@ -247,10 +237,8 @@ def test_roundtrip(self): # try exporting and reloading in the "binvox" format m = g.trimesh.creation.box() v = m.voxelized(pitch=0.1) - e = v.export(file_type='binvox') - r = g.trimesh.load( - file_obj=g.trimesh.util.wrap_as_stream(e), - file_type='binvox') + e = v.export(file_type="binvox") + r = g.trimesh.load(file_obj=g.trimesh.util.wrap_as_stream(e), file_type="binvox") assert v.filled_count == r.filled_count assert g.np.allclose(r.bounds, v.bounds) @@ -281,6 +269,7 @@ def _test_equiv(self, v0, v1, query_points=None): query_points: (optional) points as which `points_to_indices` and `is_filled` are tested for consistency. """ + def array_as_set(array2d): return {tuple(x) for x in array2d} @@ -294,8 +283,7 @@ def array_as_set(array2d): self.assertEqual(v0.volume, v1.volume) g.np.testing.assert_equal(v0.encoding.dense, v1.encoding.dense) # points will be in different order, but should contain same coords - g.np.testing.assert_equal( - array_as_set(v0.points), array_as_set(v1.points)) + g.np.testing.assert_equal(array_as_set(v0.points), array_as_set(v1.points)) # g.np.testing.assert_equal(v0.origin, v1.origin) # g.np.testing.assert_equal(v0.pitch, v1.pitch) if query_points is not None: @@ -303,27 +291,30 @@ def array_as_set(array2d): indices1 = v1.points_to_indices(query_points) g.np.testing.assert_equal(indices0, indices1) g.np.testing.assert_allclose( - v0.points_to_indices(v0.indices_to_points(indices0)), indices0) + v0.points_to_indices(v0.indices_to_points(indices0)), indices0 + ) g.np.testing.assert_allclose( - v1.points_to_indices(v1.indices_to_points(indices1)), indices1) + v1.points_to_indices(v1.indices_to_points(indices1)), indices1 + ) g.np.testing.assert_equal( - v0.is_filled(query_points), - v1.is_filled(query_points)) + v0.is_filled(query_points), v1.is_filled(query_points) + ) def test_voxel_rle(self): from trimesh.voxel import encoding as enc + np = g.np voxel = g.trimesh.voxel shape = (4, 4, 4) - rle_obj = enc.RunLengthEncoding(np.array([ - 0, 8, 1, 40, 0, 16], dtype=np.uint8), dtype=bool) - brle_obj = enc.BinaryRunLengthEncoding(np.array([ - 8, 40, 16], dtype=np.uint8)) + rle_obj = enc.RunLengthEncoding( + np.array([0, 8, 1, 40, 0, 16], dtype=np.uint8), dtype=bool + ) + brle_obj = enc.BinaryRunLengthEncoding(np.array([8, 40, 16], dtype=np.uint8)) v_rle = voxel.VoxelGrid(rle_obj.reshape(shape)) self.assertEqual(v_rle.filled_count, 40) g.np.testing.assert_equal( - v_rle.encoding.dense, - g.np.reshape([0] * 8 + [1] * 
40 + [0] * 16, shape)) + v_rle.encoding.dense, g.np.reshape([0] * 8 + [1] * 40 + [0] * 16, shape) + ) v_brle = voxel.VoxelGrid(brle_obj.reshape(shape)) query_points = g.np.random.uniform(size=(100, 3), high=4) @@ -331,19 +322,19 @@ def test_voxel_rle(self): def test_hollow(self): if not g.has_binvox: - g.log.warning('no binvox to test!') + g.log.warning("no binvox to test!") return filled = g.trimesh.primitives.Sphere().voxelized( - pitch=0.1, - method='binvox', - exact=True) + pitch=0.1, method="binvox", exact=True + ) hollow = filled.copy().hollow() self.assertLess(hollow.filled_count, filled.filled_count) self.assertGreater(hollow.filled_count, 0) def test_fill(self): from trimesh.voxel.morphology import fillers + hollow = g.trimesh.primitives.Sphere().voxelized(pitch=0.1).hollow() for key in fillers: @@ -352,18 +343,17 @@ def test_fill(self): def test_strip(self): if not g.has_binvox: - g.log.warning('no binvox to test!') + g.log.warning("no binvox to test!") return octant = g.trimesh.primitives.Sphere().voxelized( - pitch=0.1, - method='binvox', - exact=True) + pitch=0.1, method="binvox", exact=True + ) dense = octant.encoding.dense.copy() nx, ny, nz = octant.shape - dense[:nx // 2] = 0 - dense[:, :ny // 2] = 0 - dense[:, :, nz // 2:] = 0 + dense[: nx // 2] = 0 + dense[:, : ny // 2] = 0 + dense[:, :, nz // 2 :] = 0 octant.encoding = dense stripped = octant.copy().strip() self.assertEqual(octant.filled_count, stripped.filled_count) @@ -373,22 +363,21 @@ def test_strip(self): def test_binvox_with_dimension(self): if not g.has_binvox: - g.log.warning('no binvox to test!') + g.log.warning("no binvox to test!") return dim = 10 octant = g.trimesh.primitives.Sphere().voxelized( - pitch=None, - dimension=dim, - method='binvox', - exact=True) + pitch=None, dimension=dim, method="binvox", exact=True + ) assert octant.shape == (dim, dim, dim) def test_transform_cache(self): encoding = [ [[0, 0, 0], [0, 1, 0], [0, 0, 0]], [[0, 1, 1], [0, 1, 0], [1, 1, 0]], - [[0, 0, 0], [0, 1, 0], [0, 0, 0]]] + [[0, 0, 0], [0, 1, 0], [0, 0, 0]], + ] vg = g.trimesh.voxel.VoxelGrid(g.np.asarray(encoding)) scale = g.np.asarray([12, 23, 24]) @@ -408,6 +397,6 @@ def test_transform_cache(self): assert g.np.allclose(vg.scale, scale) -if __name__ == '__main__': +if __name__ == "__main__": g.trimesh.util.attach_to_log() g.unittest.main() diff --git a/trimesh/__init__.py b/trimesh/__init__.py index 09aca216c..83673dd3c 100644 --- a/trimesh/__init__.py +++ b/trimesh/__init__.py @@ -32,24 +32,27 @@ except BaseException as E: # raise a useful error if path hasn't loaded from .exceptions import ExceptionWrapper + path = ExceptionWrapper(E) -__all__ = ["__version__", - 'Trimesh', - 'PointCloud', - 'Scene', - 'voxel', - 'unitize', - 'bounds', - 'nsphere', - 'collision', - 'smoothing', - 'tol', - 'path', - 'load', - 'load_mesh', - 'load_path', - 'load_remote', - 'primitives', - 'transform_points', - 'available_formats'] +__all__ = [ + "__version__", + "Trimesh", + "PointCloud", + "Scene", + "voxel", + "unitize", + "bounds", + "nsphere", + "collision", + "smoothing", + "tol", + "path", + "load", + "load_mesh", + "load_path", + "load_remote", + "primitives", + "transform_points", + "available_formats", +] diff --git a/trimesh/base.py b/trimesh/base.py index cdb3f5b62..e1b1ef694 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -863,9 +863,7 @@ def triangles(self) -> NDArray[float64]: # use of advanced indexing on our tracked arrays will # trigger a change flag which means the hash will have to be # recomputed. 
We can escape this check by viewing the array. - triangles = self.vertices.view(np.ndarray)[self.faces] - - return triangles + return self.vertices.view(np.ndarray)[self.faces] @caching.cache_decorator def triangles_tree(self) -> Index: @@ -877,8 +875,7 @@ def triangles_tree(self) -> Index: tree : rtree.index Each triangle in self.faces has a rectangular cell """ - tree = triangles.bounds_tree(self.triangles) - return tree + return triangles.bounds_tree(self.triangles) @caching.cache_decorator def triangles_center(self) -> NDArray[float64]: @@ -890,8 +887,7 @@ def triangles_center(self) -> NDArray[float64]: triangles_center : (len(self.faces), 3) float Center of each triangular face """ - triangles_center = self.triangles.mean(axis=1) - return triangles_center + return self.triangles.mean(axis=1) @caching.cache_decorator def triangles_cross(self) -> NDArray[float64]: @@ -1118,15 +1114,14 @@ def units(self) -> Optional[str]: units : str Unit system mesh is in, or None if not defined """ - if "units" in self.metadata: - return self.metadata["units"] - else: - return None + return self.metadata.get("units", None) @units.setter def units(self, value: str) -> None: - value = str(value).lower() - self.metadata["units"] = value + """ + Define the units of the current mesh. + """ + self.metadata["units"] = str(value).lower() def convert_units(self, desired: str, guess: bool = False) -> "Trimesh": """ @@ -2378,7 +2373,12 @@ def convex_hull(self) -> "Trimesh": hull = convex.convex_hull(self) return hull - def sample(self, count, return_index=False, face_weight=None): + def sample( + self, + count: int, + return_index: bool = False, + face_weight: Optional[NDArray[float64]] = None, + ): """ Return random samples distributed across the surface of the mesh @@ -2699,7 +2699,7 @@ def area_faces(self) -> NDArray[float64]: return area_faces @caching.cache_decorator - def mass_properties(self) -> Dict: + def mass_properties(self) -> triangles.MassProperties: """ Returns the mass properties of the current mesh. 
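For orientation, a minimal sketch of the property whose return type is annotated here; the key-style access is an assumption based on the dict-shaped result ("mass", "center_mass", "inertia") trimesh has historically returned:

    import trimesh

    mesh = trimesh.creation.icosphere()
    props = mesh.mass_properties
    # density defaults to 1.0 unless overridden on the mesh
    mass = props["mass"]
    center_mass = props["center_mass"]
    inertia = props["inertia"]  # (3, 3) tensor about the center of mass
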
@@ -2720,14 +2720,13 @@ def mass_properties(self) -> Dict: # if the density or center of mass was overridden they will be put into data density = self._data.data.get("density", None) center_mass = self._data.data.get("center_mass", None) - mass = triangles.mass_properties( + return triangles.mass_properties( triangles=self.triangles, crosses=self.triangles_cross, density=density, center_mass=center_mass, skip_inertia=False, ) - return mass def invert(self) -> None: """ diff --git a/trimesh/boolean.py b/trimesh/boolean.py index 8a02123bf..ba6cb2913 100644 --- a/trimesh/boolean.py +++ b/trimesh/boolean.py @@ -22,7 +22,7 @@ def difference(meshes, engine=None, **kwargs): ---------- difference : a - (other meshes), **kwargs for a Trimesh """ - result = _engines[engine](meshes, operation='difference', **kwargs) + result = _engines[engine](meshes, operation="difference", **kwargs) return result @@ -41,7 +41,7 @@ def union(meshes, engine=None, **kwargs): ---------- union : a + (other meshes), **kwargs for a Trimesh """ - result = _engines[engine](meshes, operation='union', **kwargs) + result = _engines[engine](meshes, operation="union", **kwargs) return result @@ -61,7 +61,7 @@ def intersection(meshes, engine=None, **kwargs): intersection : **kwargs for a Trimesh object of the volume that is contained by all meshes """ - result = _engines[engine](meshes, operation='intersection', **kwargs) + result = _engines[engine](meshes, operation="intersection", **kwargs) return result @@ -86,12 +86,14 @@ def boolean_automatic(meshes, operation, **kwargs): elif interfaces.scad.exists: result = interfaces.scad.boolean(meshes, operation, **kwargs) else: - raise ValueError('No backends available for boolean operations!') + raise ValueError("No backends available for boolean operations!") return result # which backend boolean engines -_engines = {None: boolean_automatic, - 'auto': boolean_automatic, - 'scad': interfaces.scad.boolean, - 'blender': interfaces.blender.boolean} +_engines = { + None: boolean_automatic, + "auto": boolean_automatic, + "scad": interfaces.scad.boolean, + "blender": interfaces.blender.boolean, +} diff --git a/trimesh/bounds.py b/trimesh/bounds.py index 377a2aaa3..6de31e823 100644 --- a/trimesh/bounds.py +++ b/trimesh/bounds.py @@ -10,6 +10,7 @@ except BaseException as E: # raise the exception when someone tries to use it from . import exceptions + ConvexHull = exceptions.ExceptionWrapper(E) optimize = exceptions.ExceptionWrapper(E) @@ -23,7 +24,7 @@ _flip.flags.writeable = False -def oriented_bounds_2D(points, qhull_options='QbB'): +def oriented_bounds_2D(points, qhull_options="QbB"): """ Find an oriented bounding box for an array of 2D points. 
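A short usage sketch for the function reformatted below, assuming its documented return values of a (3, 3) homogeneous matrix and the rectangle extents:

    import numpy as np
    import trimesh

    points = np.random.random((100, 2))
    to_origin, extents = trimesh.bounds.oriented_bounds_2D(points)
    # applying `to_origin` centers the minimal-area rectangle
    # with side lengths `extents` at the origin
    moved = trimesh.transformations.transform_points(points, to_origin)
    assert np.allclose(moved.ptp(axis=0), extents)
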
@@ -45,8 +46,7 @@ def oriented_bounds_2D(points, qhull_options='QbB'): # create a convex hull object of our points # 'QbB' is a qhull option which has it scale the input to unit # box to avoid precision issues with very large/small meshes - convex = ConvexHull( - points, qhull_options=qhull_options) + convex = ConvexHull(points, qhull_options=qhull_options) # (n,2,3) line segments hull_edges = convex.points[convex.simplices] @@ -56,10 +56,9 @@ def oriented_bounds_2D(points, qhull_options='QbB'): # unit vector direction of the edges of the hull polygon # filter out zero- magnitude edges via check_valid edge_vectors = hull_edges[:, 1] - hull_edges[:, 0] - edge_norm = np.sqrt(np.dot(edge_vectors ** 2, [1, 1])) + edge_norm = np.sqrt(np.dot(edge_vectors**2, [1, 1])) edge_nonzero = edge_norm > 1e-10 - edge_vectors = edge_vectors[edge_nonzero] / \ - edge_norm[edge_nonzero].reshape((-1, 1)) + edge_vectors = edge_vectors[edge_nonzero] / edge_norm[edge_nonzero].reshape((-1, 1)) # create a set of perpendicular vectors perp_vectors = np.fliplr(edge_vectors) * [-1.0, 1.0] @@ -73,14 +72,10 @@ def oriented_bounds_2D(points, qhull_options='QbB'): y = np.dot(perp_vectors, hull_points.T) # reduce the projections to maximum and minimum per edge vector - bounds = np.column_stack((x.min(axis=1), - y.min(axis=1), - x.max(axis=1), - y.max(axis=1))) + bounds = np.column_stack((x.min(axis=1), y.min(axis=1), x.max(axis=1), y.max(axis=1))) # calculate the extents and area for each edge vector pair - extents = np.diff(bounds.reshape((-1, 2, 2)), - axis=1).reshape((-1, 2)) + extents = np.diff(bounds.reshape((-1, 2, 2)), axis=1).reshape((-1, 2)) area = np.prod(extents, axis=1) area_min = area.argmin() @@ -89,7 +84,7 @@ def oriented_bounds_2D(points, qhull_options='QbB'): # find the (3,3) homogeneous transformation which moves the input # points to have a bounding box centered at the origin - offset = -bounds[area_min][:2] - (rectangle * .5) + offset = -bounds[area_min][:2] - (rectangle * 0.5) theta = np.arctan2(*edge_vectors[area_min][::-1]) transform = transformations.planar_matrix(offset, theta) @@ -105,11 +100,7 @@ def oriented_bounds_2D(points, qhull_options='QbB'): return transform, rectangle -def oriented_bounds(obj, - angle_digits=1, - ordered=True, - normal=None, - coplanar_tol=1e-12): +def oriented_bounds(obj, angle_digits=1, ordered=True, normal=None, coplanar_tol=1e-12): """ Find the oriented bounding box for a Trimesh @@ -163,7 +154,7 @@ def oriented_bounds_coplanar(points): _, _, vh = np.linalg.svd(points_demeaned, full_matrices=False) points_2d = np.matmul(points_demeaned, vh.T) if np.any(np.abs(points_2d[:, 2]) > coplanar_tol): - raise ValueError('Points must be coplanar') + raise ValueError("Points must be coplanar") # Construct a homogeneous matrix representing the transformation above to_2d = np.eye(4) @@ -175,15 +166,14 @@ def oriented_bounds_coplanar(points): # Make extents 3D extents = np.append(extents_2d, 0.0) # convert transformation from 2D to 3D and combine - to_origin = np.matmul( - transformations.planar_matrix_to_3D(to_origin_2d), to_2d) + to_origin = np.matmul(transformations.planar_matrix_to_3D(to_origin_2d), to_2d) return to_origin, extents try: # extract a set of convex hull vertices and normals from the input # we bother to do this to avoid recomputing the full convex hull if # possible - if hasattr(obj, 'convex_hull'): + if hasattr(obj, "convex_hull"): # if we have been passed a mesh, use its existing convex hull to pull from # cache rather than recomputing. 
This version of the cached convex hull has # normals pointing in arbitrary directions (straight from qhull) @@ -199,14 +189,13 @@ def oriented_bounds_coplanar(points): elif util.is_shape(points, (-1, 3)): hull = convex.convex_hull(points, repair=False) else: - raise ValueError('Points are not (n,3) or (n,2)!') + raise ValueError("Points are not (n,3) or (n,2)!") else: - raise ValueError( - 'Oriented bounds must be passed a mesh or a set of points!') + raise ValueError("Oriented bounds must be passed a mesh or a set of points!") except QhullError: # Try to recover from Qhull error if due to mesh being less than 3 # dimensional - if hasattr(obj, 'vertices'): + if hasattr(obj, "vertices"): points = obj.vertices.view(np.ndarray) elif util.is_sequence(obj): points = np.asanyarray(obj) @@ -224,15 +213,15 @@ def oriented_bounds_coplanar(points): # convert face normals to spherical coordinates on the upper hemisphere # the vector_hemisphere call effectivly merges negative but otherwise # identical vectors - spherical_coords = util.vector_to_spherical( - util.vector_hemisphere(hull_normals)) + spherical_coords = util.vector_to_spherical(util.vector_hemisphere(hull_normals)) # the unique_rows call on merge angles gets unique spherical directions to check # we get a substantial speedup in the transformation matrix creation # inside the loop by converting to angles ahead of time - spherical_unique = grouping.unique_rows( - spherical_coords, digits=angle_digits)[0] - matrices = [transformations.spherical_matrix(*s).T - for s in spherical_coords[spherical_unique]] + spherical_unique = grouping.unique_rows(spherical_coords, digits=angle_digits)[0] + matrices = [ + transformations.spherical_matrix(*s).T + for s in spherical_coords[spherical_unique] + ] normals = util.spherical_to_vector(spherical_coords[spherical_unique]) else: # if explicit normal was passed use it and skip the grouping @@ -265,10 +254,11 @@ def oriented_bounds_coplanar(points): edge_vert = projected[:, :2][edges] # now get them as unit vectors edge_vectors = edge_vert[:, 1, :] - edge_vert[:, 0, :] - edge_norm = np.sqrt(np.dot(edge_vectors ** 2, [1, 1])) + edge_norm = np.sqrt(np.dot(edge_vectors**2, [1, 1])) edge_nonzero = edge_norm > 1e-10 - edge_vectors = edge_vectors[edge_nonzero] / \ - edge_norm[edge_nonzero].reshape((-1, 1)) + edge_vectors = edge_vectors[edge_nonzero] / edge_norm[edge_nonzero].reshape( + (-1, 1) + ) # create a set of perpendicular vectors perp_vectors = np.fliplr(edge_vectors) * [-1.0, 1.0] @@ -279,8 +269,7 @@ def oriented_bounds_coplanar(points): # are extremely fast so in practice this usually ends up being fine x = np.dot(edge_vectors, edge_vert[:, 0, :2].T) y = np.dot(perp_vectors, edge_vert[:, 0, :2].T) - area = ((x.max(axis=1) - x.min(axis=1)) * - (y.max(axis=1) - y.min(axis=1))).min() + area = ((x.max(axis=1) - x.min(axis=1)) * (y.max(axis=1) - y.min(axis=1))).min() # the volume is 2D area plus the projected height volume = area * projected[:, 2].ptp() @@ -305,7 +294,7 @@ def oriented_bounds_coplanar(points): # transform points using our matrix to find the translation transformed = transformations.transform_points(vertices, to_origin) - box_center = (transformed.min(axis=0) + transformed.ptp(axis=0) * .5) + box_center = transformed.min(axis=0) + transformed.ptp(axis=0) * 0.5 to_origin[:3, 3] = -box_center # return ordered 3D extents @@ -327,8 +316,7 @@ def oriented_bounds_coplanar(points): # apply the order to the extents min_extents = min_extents[order] - log.debug('oriented_bounds checked %d vectors in 
%0.4fs', - len(matrices), now() - tic) + log.debug("oriented_bounds checked %d vectors in %0.4fs", len(matrices), now() - tic) return to_origin, min_extents @@ -384,10 +372,8 @@ def volume_from_angles(spherical, return_data=False): else: volume (float) """ - to_2D = transformations.spherical_matrix( - *spherical, axes='rxyz') - projected = transformations.transform_points( - hull, matrix=to_2D) + to_2D = transformations.spherical_matrix(*spherical, axes="rxyz") + projected = transformations.transform_points(hull, matrix=to_2D) height = projected[:, 2].ptp() try: @@ -395,19 +381,18 @@ def volume_from_angles(spherical, return_data=False): except ValueError: return np.inf - volume = np.pi * height * (radius ** 2) + volume = np.pi * height * (radius**2) if return_data: - center_3D = np.append( - center_2D, projected[:, 2].min() + (height * .5)) + center_3D = np.append(center_2D, projected[:, 2].min() + (height * 0.5)) transform = np.dot( - np.linalg.inv(to_2D), - transformations.translation_matrix(center_3D)) + np.linalg.inv(to_2D), transformations.translation_matrix(center_3D) + ) return transform, radius, height return volume # we've been passed a mesh with radial symmetry # use center mass and symmetry axis and go home early - if hasattr(obj, 'symmetry') and obj.symmetry == 'radial': + if hasattr(obj, "symmetry") and obj.symmetry == "radial": # find our origin if obj.is_watertight: # set origin to center of mass @@ -416,42 +401,38 @@ def volume_from_angles(spherical, return_data=False): # convex hull should be watertight origin = obj.convex_hull.center_mass # will align symmetry axis with Z and move origin to zero - to_2D = geometry.plane_transform( - origin=origin, - normal=obj.symmetry_axis) + to_2D = geometry.plane_transform(origin=origin, normal=obj.symmetry_axis) # transform vertices to plane to check - on_plane = transformations.transform_points( - obj.vertices, to_2D) + on_plane = transformations.transform_points(obj.vertices, to_2D) # cylinder height is overall Z span height = on_plane[:, 2].ptp() # center mass is correct on plane, but position # along symmetry axis may be wrong so slide it slide = transformations.translation_matrix( - [0, 0, (height / 2.0) - on_plane[:, 2].max()]) + [0, 0, (height / 2.0) - on_plane[:, 2].max()] + ) to_2D = np.dot(slide, to_2D) # radius is maximum radius radius = (on_plane[:, :2] ** 2).sum(axis=1).max() ** 0.5 # save kwargs - result = {'height': height, - 'radius': radius, - 'transform': np.linalg.inv(to_2D)} + result = {"height": height, "radius": radius, "transform": np.linalg.inv(to_2D)} return result # get the points on the convex hull of the result hull = convex.hull_points(obj) if not util.is_shape(hull, (-1, 3)): - raise ValueError('Input must be reducable to 3D points!') + raise ValueError("Input must be reducable to 3D points!") # sample a hemisphere so local hill climbing can do its thing samples = util.grid_linspace([[0, 0], [np.pi, np.pi]], sample_count) # if it's rotationally symmetric the bounding cylinder # is almost certainly along one of the PCI vectors - if hasattr(obj, 'principal_inertia_vectors'): + if hasattr(obj, "principal_inertia_vectors"): # add the principal inertia vectors if we have a mesh samples = np.vstack( - (samples, - util.vector_to_spherical(obj.principal_inertia_vectors))) + (samples, util.vector_to_spherical(obj.principal_inertia_vectors)) + ) tic = [now()] # the projected volume at each sample @@ -463,25 +444,19 @@ def volume_from_angles(spherical, return_data=False): # since we already explored the global 
space, set the bounds to be # just around the sample that had the lowest volume step = 2 * np.pi / sample_count - bounds = [(best[0] - step, best[0] + step), - (best[1] - step, best[1] + step)] + bounds = [(best[0] - step, best[0] + step), (best[1] - step, best[1] + step)] # run the local optimization - r = optimize.minimize(volume_from_angles, - best, - tol=angle_tol, - method='SLSQP', - bounds=bounds) + r = optimize.minimize( + volume_from_angles, best, tol=angle_tol, method="SLSQP", bounds=bounds + ) tic.append(now()) - log.debug('Performed search in %f and minimize in %f', *np.diff(tic)) + log.debug("Performed search in %f and minimize in %f", *np.diff(tic)) # actually chunk the information about the cylinder - transform, radius, height = volume_from_angles( - r['x'], return_data=True) + transform, radius, height = volume_from_angles(r["x"], return_data=True) - result = {'transform': transform, - 'radius': radius, - 'height': height} + result = {"transform": transform, "radius": radius, "height": height} return result @@ -504,7 +479,7 @@ def to_extents(bounds): """ bounds = np.asanyarray(bounds, dtype=np.float64) if bounds.shape != (2, 3): - raise ValueError('bounds must be (2, 3)') + raise ValueError("bounds must be (2, 3)") extents = bounds.ptp(axis=0) transform = np.eye(4) @@ -534,17 +509,37 @@ def corners(bounds): if util.is_shape(bounds, (2, 2)): bounds = np.column_stack((bounds, [0, 0])) elif not util.is_shape(bounds, (2, 3)): - raise ValueError('bounds must be (2,2) or (2,3)!') + raise ValueError("bounds must be (2,2) or (2,3)!") minx, miny, minz, maxx, maxy, maxz = np.arange(6) - corner_index = np.array([minx, miny, minz, - maxx, miny, minz, - maxx, maxy, minz, - minx, maxy, minz, - minx, miny, maxz, - maxx, miny, maxz, - maxx, maxy, maxz, - minx, maxy, maxz]).reshape((-1, 3)) + corner_index = np.array( + [ + minx, + miny, + minz, + maxx, + miny, + minz, + maxx, + maxy, + minz, + minx, + maxy, + minz, + minx, + miny, + maxz, + maxx, + miny, + maxz, + maxx, + maxy, + maxz, + minx, + maxy, + maxz, + ] + ).reshape((-1, 3)) corners = bounds.reshape(-1)[corner_index] return corners @@ -571,13 +566,13 @@ def contains(bounds, points): points = np.asanyarray(points, dtype=np.float64) if len(bounds) != 2: - raise ValueError('bounds must be (2,dimension)!') + raise ValueError("bounds must be (2,dimension)!") if not util.is_shape(points, (-1, bounds.shape[1])): - raise ValueError('bounds shape must match points!') + raise ValueError("bounds shape must match points!") # run the simple check points_inside = np.logical_and( - (points > bounds[0]).all(axis=1), - (points < bounds[1]).all(axis=1)) + (points > bounds[0]).all(axis=1), (points < bounds[1]).all(axis=1) + ) return points_inside diff --git a/trimesh/collision.py b/trimesh/collision.py index fd546de8a..a2fea0e22 100644 --- a/trimesh/collision.py +++ b/trimesh/collision.py @@ -26,10 +26,7 @@ def __init__(self, names, contact): The contact in question. """ self.names = set(names) - self._inds = { - names[0]: contact.b1, - names[1]: contact.b2 - } + self._inds = {names[0]: contact.b1, names[1]: contact.b2} self._normal = contact.normal self._point = contact.pos self._depth = contact.penetration_depth @@ -105,13 +102,10 @@ def __init__(self, names, result): The distance query result. 
""" self.names = set(names) - self._inds = { - names[0]: result.b1, - names[1]: result.b2 - } + self._inds = {names[0]: result.b1, names[1]: result.b2} self._points = { names[0]: result.nearest_points[0], - names[1]: result.nearest_points[1] + names[1]: result.nearest_points[1], } self._distance = result.min_distance @@ -171,8 +165,7 @@ def __init__(self): Initialize a mesh-mesh collision manager. """ if fcl is None: - raise ValueError( - 'No FCL Available! Please install the python-fcl library') + raise ValueError("No FCL Available! Please install the python-fcl library") # {name: {geom:, obj}} self._objs = {} # {id(bvh) : str, name} @@ -182,10 +175,7 @@ def __init__(self): self._manager = fcl.DynamicAABBTreeCollisionManager() self._manager.setup() - def add_object(self, - name, - mesh, - transform=None): + def add_object(self, name, mesh, transform=None): """ Add an object to the collision manager. @@ -207,7 +197,7 @@ def add_object(self, transform = np.eye(4) transform = np.asanyarray(transform, dtype=np.float32) if transform.shape != (4, 4): - raise ValueError('transform must be (4,4)!') + raise ValueError("transform must be (4,4)!") # create BVH/Convex geom = self._get_fcl_obj(mesh) @@ -219,8 +209,7 @@ def add_object(self, # Add collision object to set if name in self._objs: self._manager.unregisterObject(self._objs[name]) - self._objs[name] = {'obj': o, - 'geom': geom} + self._objs[name] = {"obj": o, "geom": geom} # store the name of the geometry self._names[id(geom)] = name @@ -238,14 +227,14 @@ def remove_object(self, name): The identifier for the object """ if name in self._objs: - self._manager.unregisterObject(self._objs[name]['obj']) - self._manager.update(self._objs[name]['obj']) + self._manager.unregisterObject(self._objs[name]["obj"]) + self._manager.update(self._objs[name]["obj"]) # remove objects from _objs - geom_id = id(self._objs.pop(name)['geom']) + geom_id = id(self._objs.pop(name)["geom"]) # remove names self._names.pop(geom_id) else: - raise ValueError(f'{name} not in collision manager!') + raise ValueError(f"{name} not in collision manager!") def set_transform(self, name, transform): """ @@ -260,18 +249,16 @@ def set_transform(self, name, transform): A new homogeneous transform matrix for the object """ if name in self._objs: - o = self._objs[name]['obj'] + o = self._objs[name]["obj"] o.setRotation(transform[:3, :3]) o.setTranslation(transform[:3, 3]) self._manager.update(o) else: - raise ValueError(f'{name} not in collision manager!') + raise ValueError(f"{name} not in collision manager!") - def in_collision_single(self, - mesh, - transform=None, - return_names=False, - return_data=False): + def in_collision_single( + self, mesh, transform=None, return_names=False, return_data=False + ): """ Check a single object for collisions against all objects in the manager. 
@@ -311,9 +298,9 @@ def in_collision_single(self, # Collide with manager's objects cdata = fcl.CollisionData() if return_names or return_data: - cdata = fcl.CollisionData(request=fcl.CollisionRequest( - num_max_contacts=100000, - enable_contact=True)) + cdata = fcl.CollisionData( + request=fcl.CollisionRequest(num_max_contacts=100000, enable_contact=True) + ) self._manager.collide(o, cdata, fcl.defaultCollisionCallback) result = cdata.result.is_collision @@ -328,7 +315,7 @@ def in_collision_single(self, cg = contact.o2 name = self._extract_name(cg) - names = (name, '__external') + names = (name, "__external") if cg == contact.o2: names = tuple(reversed(names)) @@ -372,8 +359,9 @@ def in_collision_internal(self, return_names=False, return_data=False): """ cdata = fcl.CollisionData() if return_names or return_data: - cdata = fcl.CollisionData(request=fcl.CollisionRequest( - num_max_contacts=100000, enable_contact=True)) + cdata = fcl.CollisionData( + request=fcl.CollisionRequest(num_max_contacts=100000, enable_contact=True) + ) self._manager.collide(cdata, fcl.defaultCollisionCallback) @@ -383,8 +371,7 @@ def in_collision_internal(self, return_names=False, return_data=False): contact_data = [] if return_names or return_data: for contact in cdata.result.contacts: - names = (self._extract_name(contact.o1), - self._extract_name(contact.o2)) + names = (self._extract_name(contact.o1), self._extract_name(contact.o2)) if return_names: objs_in_collision.add(tuple(sorted(names))) @@ -400,8 +387,7 @@ def in_collision_internal(self, return_names=False, return_data=False): else: return result - def in_collision_other(self, other_manager, - return_names=False, return_data=False): + def in_collision_other(self, other_manager, return_names=False, return_data=False): """ Check if any object from this manager collides with any object from another manager. @@ -432,12 +418,9 @@ def in_collision_other(self, other_manager, cdata = fcl.CollisionData() if return_names or return_data: cdata = fcl.CollisionData( - request=fcl.CollisionRequest( - num_max_contacts=100000, - enable_contact=True)) - self._manager.collide(other_manager._manager, - cdata, - fcl.defaultCollisionCallback) + request=fcl.CollisionRequest(num_max_contacts=100000, enable_contact=True) + ) + self._manager.collide(other_manager._manager, cdata, fcl.defaultCollisionCallback) result = cdata.result.is_collision objs_in_collision = set() @@ -445,11 +428,15 @@ def in_collision_other(self, other_manager, if return_names or return_data: for contact in cdata.result.contacts: reverse = False - names = (self._extract_name(contact.o1), - other_manager._extract_name(contact.o2)) + names = ( + self._extract_name(contact.o1), + other_manager._extract_name(contact.o2), + ) if names[0] is None: - names = (self._extract_name(contact.o2), - other_manager._extract_name(contact.o1)) + names = ( + self._extract_name(contact.o2), + other_manager._extract_name(contact.o1), + ) reverse = True if return_names: @@ -468,11 +455,9 @@ def in_collision_other(self, other_manager, else: return result - def min_distance_single(self, - mesh, - transform=None, - return_name=False, - return_data=False): + def min_distance_single( + self, mesh, transform=None, return_name=False, return_data=False + ): """ Get the minimum distance between a single object and any object in the manager. 
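Similarly, a small sketch of the distance query being reformatted, with the expected gap worked out by hand: two unit boxes offset 5.0 along X leave 5.0 - 0.5 - 0.5 = 4.0 between their nearest faces.

    import trimesh

    manager = trimesh.collision.CollisionManager()
    manager.add_object("a", trimesh.creation.box())
    other = trimesh.creation.box().apply_translation([5.0, 0.0, 0.0])
    distance, name = manager.min_distance_single(other, return_name=True)
    assert abs(distance - 4.0) < 1e-6  # `name` is "a", the closest managed object
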
@@ -508,16 +493,13 @@ def min_distance_single(self, o = fcl.CollisionObject(geom, t) # Collide with manager's objects - ddata = fcl.DistanceData( - fcl.DistanceRequest( - enable_signed_distance=True)) + ddata = fcl.DistanceData(fcl.DistanceRequest(enable_signed_distance=True)) if return_data: ddata = fcl.DistanceData( fcl.DistanceRequest( - enable_nearest_points=True, - enable_signed_distance=True + enable_nearest_points=True, enable_signed_distance=True ), - fcl.DistanceResult() + fcl.DistanceResult(), ) self._manager.distance(o, ddata, fcl.defaultDistanceCallback) @@ -533,7 +515,7 @@ def min_distance_single(self, name = self._extract_name(cg) - names = (name, '__external') + names = (name, "__external") if cg == ddata.result.o2: names = tuple(reversed(names)) data = DistanceData(names, ddata.result) @@ -568,16 +550,14 @@ def min_distance_internal(self, return_names=False, return_data=False): data : DistanceData Extra data about the distance query """ - ddata = fcl.DistanceData( - fcl.DistanceRequest( - enable_signed_distance=True)) + ddata = fcl.DistanceData(fcl.DistanceRequest(enable_signed_distance=True)) if return_data: ddata = fcl.DistanceData( fcl.DistanceRequest( enable_nearest_points=True, enable_signed_distance=True, ), - fcl.DistanceResult() + fcl.DistanceResult(), ) self._manager.distance(ddata, fcl.defaultDistanceCallback) @@ -586,8 +566,10 @@ def min_distance_internal(self, return_names=False, return_data=False): names, data = None, None if return_names or return_data: - names = (self._extract_name(ddata.result.o1), - self._extract_name(ddata.result.o2)) + names = ( + self._extract_name(ddata.result.o1), + self._extract_name(ddata.result.o2), + ) data = DistanceData(names, ddata.result) names = tuple(sorted(names)) @@ -600,8 +582,7 @@ def min_distance_internal(self, return_names=False, return_data=False): else: return distance - def min_distance_other(self, other_manager, - return_names=False, return_data=False): + def min_distance_other(self, other_manager, return_names=False, return_data=False): """ Get the minimum distance between any pair of objects, one in each manager. 
@@ -628,33 +609,33 @@ def min_distance_other(self, other_manager, data : DistanceData Extra data about the distance query """ - ddata = fcl.DistanceData( - fcl.DistanceRequest( - enable_signed_distance=True)) + ddata = fcl.DistanceData(fcl.DistanceRequest(enable_signed_distance=True)) if return_data: ddata = fcl.DistanceData( fcl.DistanceRequest( enable_nearest_points=True, enable_signed_distance=True, ), - fcl.DistanceResult() + fcl.DistanceResult(), ) - self._manager.distance(other_manager._manager, - ddata, - fcl.defaultDistanceCallback) + self._manager.distance(other_manager._manager, ddata, fcl.defaultDistanceCallback) distance = ddata.result.min_distance names, data = None, None if return_names or return_data: reverse = False - names = (self._extract_name(ddata.result.o1), - other_manager._extract_name(ddata.result.o2)) + names = ( + self._extract_name(ddata.result.o1), + other_manager._extract_name(ddata.result.o2), + ) if names[0] is None: reverse = True - names = (self._extract_name(ddata.result.o2), - other_manager._extract_name(ddata.result.o1)) + names = ( + self._extract_name(ddata.result.o2), + other_manager._extract_name(ddata.result.o1), + ) dnames = tuple(names) if reverse: @@ -724,10 +705,8 @@ def mesh_to_BVH(mesh): BVH of input geometry """ bvh = fcl.BVHModel() - bvh.beginModel(num_tris_=len(mesh.faces), - num_vertices_=len(mesh.vertices)) - bvh.addSubModel(verts=mesh.vertices, - triangles=mesh.faces) + bvh.beginModel(num_tris_=len(mesh.faces), num_vertices_=len(mesh.vertices)) + bvh.addSubModel(verts=mesh.vertices, triangles=mesh.faces) bvh.endModel() return bvh @@ -746,8 +725,9 @@ def mesh_to_convex(mesh): convex : fcl.Convex Convex of input geometry """ - fs = np.concatenate((3 * np.ones((len(mesh.faces), 1), dtype=np.int64), mesh.faces), - axis=1) + fs = np.concatenate( + (3 * np.ones((len(mesh.faces), 1), dtype=np.int64), mesh.faces), axis=1 + ) return fcl.Convex(mesh.vertices, len(fs), fs.flatten()) @@ -771,7 +751,7 @@ def scene_to_collision(scene): objects = {} for node in scene.graph.nodes_geometry: T, geometry = scene.graph[node] - objects[node] = manager.add_object(name=node, - mesh=scene.geometry[geometry], - transform=T) + objects[node] = manager.add_object( + name=node, mesh=scene.geometry[geometry], transform=T + ) return manager, objects diff --git a/trimesh/comparison.py b/trimesh/comparison.py index 02ac50ebc..57c4452de 100644 --- a/trimesh/comparison.py +++ b/trimesh/comparison.py @@ -15,13 +15,16 @@ # how many significant figures to use for each # field of the identifier based on hand-tuning id_sigfig = np.array( - [5, # area - 10, # euler number - 5, # area/volume ratio - 2, # convex/mesh area ratio - 2, # convex area/volume ratio - 3, # max radius squared / area - 1]) # sign of triangle count for mirrored + [ + 5, # area + 10, # euler number + 5, # area/volume ratio + 2, # convex/mesh area ratio + 2, # convex area/volume ratio + 3, # max radius squared / area + 1, + ] +) # sign of triangle count for mirrored def identifier_simple(mesh): @@ -68,8 +71,9 @@ def identifier_simple(mesh): if mesh.is_volume: # side length of a cube ratio # 1.0 for cubes, different values for other things - identifier[2] = (((mesh_area / 6.0) ** (1.0 / 2.0)) / - (mesh.volume ** (1.0 / 3.0))) + identifier[2] = ((mesh_area / 6.0) ** (1.0 / 2.0)) / ( + mesh.volume ** (1.0 / 3.0) + ) else: # if we don't have a watertight mesh add information about the # convex hull which is slow to compute and unreliable @@ -86,12 +90,13 @@ def identifier_simple(mesh): identifier[3] = mesh_area 
/ hull_area # cube side length ratio for the hull if hull_volume > 1e-12: - identifier[4] = (((hull_area / 6.0) ** (1.0 / 2.0)) / - (hull_volume ** (1.0 / 3.0))) + identifier[4] = ((hull_area / 6.0) ** (1.0 / 2.0)) / ( + hull_volume ** (1.0 / 3.0) + ) # calculate maximum mesh radius vertices = mesh.vertices - mesh.centroid # add in max radius^2 to area ratio - R2 = np.dot((vertices ** 2), [1, 1, 1]).max() + R2 = np.dot((vertices**2), [1, 1, 1]).max() identifier[5] = R2 / mesh_area # mirrored meshes will look identical in terms of @@ -106,8 +111,7 @@ def identifier_simple(mesh): variance = edges_length.std() / edges_length.mean() if variance > 0.25: # the length of each edge in faces - norms = edges_length[ - mesh.edges_unique_inverse].reshape((-1, 3)) + norms = edges_length[mesh.edges_unique_inverse].reshape((-1, 3)) # stack edge length and get the relative difference stack = np.diff(np.column_stack((norms, norms[:, 0])), axis=1) pick_idx = np.abs(stack).argmin(axis=1) @@ -135,11 +139,10 @@ def identifier_hash(identifier): """ # convert identifier to integers and order of magnitude - as_int, multiplier = util.sigfig_int( - identifier, id_sigfig) + as_int, multiplier = util.sigfig_int(identifier, id_sigfig) # make all scales positive if (multiplier < 0).any(): multiplier += np.abs(multiplier.min()) - data = (as_int * (10 ** multiplier)).astype(np.int64) + data = (as_int * (10**multiplier)).astype(np.int64) return sha256(data.tobytes()).hexdigest() diff --git a/trimesh/convex.py b/trimesh/convex.py index 6e0ccc05d..396589e73 100644 --- a/trimesh/convex.py +++ b/trimesh/convex.py @@ -18,6 +18,7 @@ from scipy.spatial import ConvexHull except ImportError as E: from .exceptions import ExceptionWrapper + ConvexHull = ExceptionWrapper(E) try: @@ -26,7 +27,7 @@ QhullError = BaseException -def convex_hull(obj, qhull_options='QbB Pp Qt', repair=True): +def convex_hull(obj, qhull_options="QbB Pp Qt", repair=True): """ Get a new Trimesh object representing the convex hull of the current mesh attempting to return a watertight mesh with correct @@ -55,16 +56,14 @@ def convex_hull(obj, qhull_options='QbB Pp Qt', repair=True): # will remove subclassing points = np.asarray(obj, dtype=np.float64) if not util.is_shape(points, (-1, 3)): - raise ValueError('Object must be Trimesh or (n,3) points!') + raise ValueError("Object must be Trimesh or (n,3) points!") try: hull = ConvexHull(points, qhull_options=qhull_options) except QhullError: - util.log.debug( - 'Failed to compute convex hull: retrying with `QJ`', - exc_info=True) + util.log.debug("Failed to compute convex hull: retrying with `QJ`", exc_info=True) # try with "joggle" enabled - hull = ConvexHull(points, qhull_options='QJ') + hull = ConvexHull(points, qhull_options="QJ") # hull object doesn't remove unreferenced vertices # create a mask to re- index faces for only referenced vertices @@ -78,10 +77,7 @@ def convex_hull(obj, qhull_options='QbB Pp Qt', repair=True): if not repair: # create the Trimesh object for the convex hull - return Trimesh(vertices=vertices, - faces=faces, - process=True, - validate=False) + return Trimesh(vertices=vertices, faces=faces, process=True, validate=False) # qhull returns faces with random winding # calculate the returned normal of each face @@ -103,9 +99,7 @@ def convex_hull(obj, qhull_options='QbB Pp Qt', repair=True): # should have a positive dot product with the normal of that face # if it doesn't it is probably backwards # note that this sometimes gets screwed up by precision issues - centroid = 
np.average(triangles_center, - weights=triangles_area, - axis=0) + centroid = np.average(triangles_center, weights=triangles_area, axis=0) # a vector from the centroid to a point on each face test_vector = triangles_center - centroid # check the projection against face normals @@ -117,18 +111,22 @@ def convex_hull(obj, qhull_options='QbB Pp Qt', repair=True): normals[backwards] *= -1.0 # save the work we did to the cache so it doesn't have to be recomputed - initial_cache = {'triangles_cross': crosses, - 'triangles_center': triangles_center, - 'area_faces': triangles_area, - 'centroid': centroid} + initial_cache = { + "triangles_cross": crosses, + "triangles_center": triangles_center, + "area_faces": triangles_area, + "centroid": centroid, + } # create the Trimesh object for the convex hull - convex = Trimesh(vertices=vertices, - faces=faces, - face_normals=normals, - initial_cache=initial_cache, - process=True, - validate=False) + convex = Trimesh( + vertices=vertices, + faces=faces, + face_normals=normals, + initial_cache=initial_cache, + process=True, + validate=False, + ) # we did the gross case above, but sometimes precision issues # leave some faces backwards anyway @@ -139,8 +137,7 @@ def convex_hull(obj, qhull_options='QbB Pp Qt', repair=True): # sometimes the QbB option will cause precision issues # so try the hull again without it and # check for qhull_options is None to avoid infinite recursion - if (qhull_options is not None and - not convex.is_winding_consistent): + if qhull_options is not None and not convex.is_winding_consistent: return convex_hull(convex, qhull_options=None) return convex @@ -218,7 +215,7 @@ def is_convex(mesh): return convex -def hull_points(obj, qhull_options='QbB Pp'): +def hull_points(obj, qhull_options="QbB Pp"): """ Try to extract a convex set of points from multiple input formats. @@ -232,12 +229,12 @@ def hull_points(obj, qhull_options='QbB Pp'): -------- points: (o,d) convex set of points """ - if hasattr(obj, 'convex_hull'): + if hasattr(obj, "convex_hull"): return obj.convex_hull.vertices initial = np.asanyarray(obj, dtype=np.float64) if len(initial.shape) != 2: - raise ValueError('points must be (n, dimension)!') + raise ValueError("points must be (n, dimension)!") hull = ConvexHull(initial, qhull_options=qhull_options) points = hull.points[hull.vertices] diff --git a/trimesh/creation.py b/trimesh/creation.py index 26f059aaf..864c65981 100644 --- a/trimesh/creation.py +++ b/trimesh/creation.py @@ -32,11 +32,7 @@ _tri_earcut = exceptions.ExceptionWrapper(E) -def revolve(linestring, - angle=None, - sections=None, - transform=None, - **kwargs): +def revolve(linestring, angle=None, sections=None, transform=None, **kwargs): """ Revolve a 2D line string around the 2D Y axis, with a result with the 2D Y axis pointing along the 3D Z axis. 
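To make the revolve contract concrete, a hedged sketch: the profile is an arbitrary rectangle touching the 2D Y axis at both ends, so per the strict check in the function below the full 360-degree sweep should close into a watertight surface of revolution.

    import numpy as np
    import trimesh

    # (radius, height) profile starting and ending at zero radius
    linestring = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 2.0], [0.0, 2.0]])
    cylinder = trimesh.creation.revolve(linestring, sections=64)
    assert cylinder.is_watertight
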
@@ -72,7 +68,7 @@ def revolve(linestring, # linestring must be ordered 2D points if len(linestring.shape) != 2 or linestring.shape[1] != 2: - raise ValueError('linestring must be 2D!') + raise ValueError("linestring must be 2D!") if angle is None: # default to closing the revolution @@ -101,17 +97,18 @@ def revolve(linestring, # use the 2D Y component as the height along revolution height = linestring[:, 1] # a lot of tiling to get our 3D vertices - vertices = np.column_stack(( - np.tile(points, (1, per)).reshape((-1, 2)) * - np.tile(radius, len(points)).reshape((-1, 1)), - np.tile(height, len(points)))) + vertices = np.column_stack( + ( + np.tile(points, (1, per)).reshape((-1, 2)) + * np.tile(radius, len(points)).reshape((-1, 1)), + np.tile(height, len(points)), + ) + ) if closed: # should be a duplicate set of vertices if tol.strict: - assert util.allclose(vertices[:per], - vertices[-per:], - atol=1e-8) + assert util.allclose(vertices[:per], vertices[-per:], atol=1e-8) # chop off duplicate vertices vertices = vertices[:-per] @@ -125,8 +122,7 @@ def revolve(linestring, # start with a quad for every segment # this is a superset which will then be reduced - quad = np.array([0, per, 1, - 1, per, per + 1]) + quad = np.array([0, per, 1, 1, per, per + 1]) # stack the faces for a single slice of the revolution single = np.tile(quad, per).reshape((-1, 3)) # `per` is basically the stride of the vertices @@ -138,34 +134,27 @@ def revolve(linestring, # how much to offset each slice # note arange multiplied by vertex stride # but tiled by the number of faces we actually have - offset = np.tile(np.arange(slices) * per, - (len(single), 1)).T.reshape((-1, 1)) + offset = np.tile(np.arange(slices) * per, (len(single), 1)).T.reshape((-1, 1)) # stack a single slice into N slices stacked = np.tile(single.ravel(), slices).reshape((-1, 3)) if tol.strict: # make sure we didn't screw up stacking operation - assert np.allclose( - stacked.reshape( - (-1, single.shape[0], 3)) - single, 0) + assert np.allclose(stacked.reshape((-1, single.shape[0], 3)) - single, 0) # offset stacked and wrap vertices faces = (stacked + offset) % len(vertices) - - - #if 'process' not in kwargs: + # if 'process' not in kwargs: # kwargs['process'] = False # create the mesh from our vertices and faces - mesh = Trimesh(vertices=vertices, - faces=faces, - **kwargs) + mesh = Trimesh(vertices=vertices, faces=faces, **kwargs) # strict checks run only in unit tests - if (tol.strict and - (np.allclose(radius[[0, -1]], 0.0) or - np.allclose(linestring[0], linestring[-1]))): + if tol.strict and ( + np.allclose(radius[[0, -1]], 0.0) or np.allclose(linestring[0], linestring[-1]) + ): # if revolved curve starts and ends with zero radius # it should really be a valid volume, unless the sign # reversed on the input linestring @@ -176,10 +165,7 @@ def revolve(linestring, return mesh -def extrude_polygon(polygon, - height, - transform=None, - **kwargs): +def extrude_polygon(polygon, height, transform=None, **kwargs): """ Extrude a 2D shapely polygon into a 3D mesh @@ -202,18 +188,13 @@ def extrude_polygon(polygon, # create a triangulation from the polygon vertices, faces = triangulate_polygon(polygon, **kwargs) # extrude that triangulation along Z - mesh = extrude_triangulation(vertices=vertices, - faces=faces, - height=height, - transform=transform, - **kwargs) + mesh = extrude_triangulation( + vertices=vertices, faces=faces, height=height, transform=transform, **kwargs + ) return mesh -def sweep_polygon(polygon, - path, - angles=None, - **kwargs): +def 
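A sketch of `revolve` as reformatted above, assuming the current `trimesh.creation` API; the profile starts and ends at zero radius, which is exactly the case the strict-mode assertions above expect to produce a valid volume (the `sweep_polygon` hunk continues below):

```python
import numpy as np

import trimesh

# revolve a half-circle profile around the 2D Y axis: effectively a UV sphere
theta = np.linspace(0.0, np.pi, 32)
profile = np.column_stack((np.sin(theta), np.cos(theta)))
sphere = trimesh.creation.revolve(profile, sections=64)
print(sphere.is_volume)
```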
sweep_polygon(polygon, path, angles=None, **kwargs): """ Extrude a 2D shapely polygon into a 3D mesh along an arbitrary 3D path. Doesn't handle sharp curvature well. @@ -238,12 +219,11 @@ def sweep_polygon(polygon, path = np.asanyarray(path, dtype=np.float64) if not util.is_shape(path, (-1, 3)): - raise ValueError('Path must be (n, 3)!') + raise ValueError("Path must be (n, 3)!") # Extract 2D vertices and triangulation verts_2d = np.array(polygon.exterior.coords)[:-1] - base_verts_2d, faces_2d = triangulate_polygon( - polygon, **kwargs) + base_verts_2d, faces_2d = triangulate_polygon(polygon, **kwargs) n = len(verts_2d) # Create basis for first planar polygon cap @@ -255,10 +235,8 @@ def sweep_polygon(polygon, # Compute 3D locations of those vertices verts_3d = np.c_[verts_2d, np.zeros(n)] verts_3d = tf.transform_points(verts_3d, tf_mat) - base_verts_3d = np.c_[base_verts_2d, - np.zeros(len(base_verts_2d))] - base_verts_3d = tf.transform_points(base_verts_3d, - tf_mat) + base_verts_3d = np.c_[base_verts_2d, np.zeros(len(base_verts_2d))] + base_verts_3d = tf.transform_points(base_verts_3d, tf_mat) # keep matching sequence of vertices and 0- indexed faces vertices = [base_verts_3d] @@ -282,17 +260,14 @@ def sweep_polygon(polygon, # Rotate if needed if angles is not None: - tf_mat = tf.rotation_matrix(angles[i], - norms[i], - path[i]) - verts_3d_prev = tf.transform_points(verts_3d_prev, - tf_mat) + tf_mat = tf.rotation_matrix(angles[i], norms[i], path[i]) + verts_3d_prev = tf.transform_points(verts_3d_prev, tf_mat) # Project vertices onto plane in 3D - ds = np.einsum('ij,j->i', (path[i + 1] - verts_3d_prev), norms[i]) + ds = np.einsum("ij,j->i", (path[i + 1] - verts_3d_prev), norms[i]) ds = ds / np.dot(v1s[i], norms[i]) - verts_3d_new = np.einsum('i,j->ij', ds, v1s[i]) + verts_3d_prev + verts_3d_new = np.einsum("i,j->ij", ds, v1s[i]) + verts_3d_prev # Add to face and vertex lists new_faces = [[i + n, (i + 1) % n, i] for i in range(n)] @@ -312,22 +287,19 @@ def sweep_polygon(polygon, # Create final cap x, y, z = util.generate_basis(path[-1] - path[-2]) vecs = verts_3d - path[-1] - coords = np.c_[np.einsum('ij,j->i', vecs, x), - np.einsum('ij,j->i', vecs, y)] + coords = np.c_[np.einsum("ij,j->i", vecs, x), np.einsum("ij,j->i", vecs, y)] base_verts_2d, faces_2d = triangulate_polygon(Polygon(coords), **kwargs) - base_verts_3d = (np.einsum('i,j->ij', base_verts_2d[:, 0], x) + - np.einsum('i,j->ij', base_verts_2d[:, 1], y)) + path[-1] + base_verts_3d = ( + np.einsum("i,j->ij", base_verts_2d[:, 0], x) + + np.einsum("i,j->ij", base_verts_2d[:, 1], y) + ) + path[-1] faces = np.vstack((faces, faces_2d + len(vertices))) vertices = np.vstack((vertices, base_verts_3d)) return Trimesh(vertices, faces) -def extrude_triangulation(vertices, - faces, - height, - transform=None, - **kwargs): +def extrude_triangulation(vertices, faces, height, transform=None, **kwargs): """ Extrude a 2D triangulation into a watertight mesh. 
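A hedged sketch of the `sweep_polygon` hunk above (the `extrude_triangulation` docstring continues below); the square cross-section and quarter-turn path are illustrative only, and per the docstring sharp curvature is not handled well:

```python
import numpy as np
from shapely.geometry import Polygon

import trimesh

# sweep a small square cross-section along a gently curving 3D path
section = Polygon([(-0.05, -0.05), (0.05, -0.05), (0.05, 0.05), (-0.05, 0.05)])
theta = np.linspace(0.0, np.pi / 2, 20)
path = np.column_stack((np.cos(theta), np.sin(theta), theta))
swept = trimesh.creation.sweep_polygon(section, path)
```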
@@ -352,15 +324,14 @@ def extrude_triangulation(vertices, faces = np.asanyarray(faces, dtype=np.int64) if not util.is_shape(vertices, (-1, 2)): - raise ValueError('Vertices must be (n,2)') + raise ValueError("Vertices must be (n,2)") if not util.is_shape(faces, (-1, 3)): - raise ValueError('Faces must be (n,3)') + raise ValueError("Faces must be (n,3)") if np.abs(height) < tol.merge: - raise ValueError('Height must be nonzero!') + raise ValueError("Height must be nonzero!") # check the winding of the first few triangles - signs = np.array([np.cross(*i) for i in - np.diff(vertices[faces[:10]], axis=1)]) + signs = np.array([np.cross(*i) for i in np.diff(vertices[faces[:10]], axis=1)]) # make sure the triangulation is aligned with the sign of # the height we've been passed if len(signs) > 0 and np.sign(signs.mean()) != np.sign(height): @@ -372,8 +343,7 @@ def extrude_triangulation(vertices, # edges which only occur once are on the boundary of the polygon # since the triangulation may have subdivided the boundary of the # shapely polygon, we need to find it again - edges_unique = grouping.group_rows( - edges_sorted, require_count=1) + edges_unique = grouping.group_rows(edges_sorted, require_count=1) # (n, 2, 2) set of line segments (positions, not references) boundary = vertices[edges[edges_unique]] @@ -381,11 +351,8 @@ def extrude_triangulation(vertices, # we are creating two vertical triangles for every 2D line segment # on the boundary of the 2D triangulation vertical = np.tile(boundary.reshape((-1, 2)), 2).reshape((-1, 2)) - vertical = np.column_stack((vertical, - np.tile([0, height, 0, height], - len(boundary)))) - vertical_faces = np.tile([3, 1, 2, 2, 1, 0], - (len(boundary), 1)) + vertical = np.column_stack((vertical, np.tile([0, height, 0, height], len(boundary)))) + vertical_faces = np.tile([3, 1, 2, 2, 1, 0], (len(boundary), 1)) vertical_faces += np.arange(len(boundary)).reshape((-1, 1)) * 4 vertical_faces = vertical_faces.reshape((-1, 3)) @@ -394,28 +361,21 @@ def extrude_triangulation(vertices, # a sequence of zero- indexed faces, which will then be appended # with offsets to create the final mesh - faces_seq = [faces[:, ::-1], - faces.copy(), - vertical_faces] - vertices_seq = [vertices_3D, - vertices_3D.copy() + [0.0, 0, height], - vertical] + faces_seq = [faces[:, ::-1], faces.copy(), vertical_faces] + vertices_seq = [vertices_3D, vertices_3D.copy() + [0.0, 0, height], vertical] # append sequences into flat nicely indexed arrays vertices, faces = util.append_faces(vertices_seq, faces_seq) if transform is not None: # apply transform here to avoid later bookkeeping - vertices = tf.transform_points( - vertices, transform) + vertices = tf.transform_points(vertices, transform) # if the transform flips the winding flip faces back # so that the normals will be facing outwards if tf.flips_winding(transform): # fliplr makes arrays non-contiguous faces = np.ascontiguousarray(np.fliplr(faces)) # create mesh object with passed keywords - mesh = Trimesh(vertices=vertices, - faces=faces, - **kwargs) + mesh = Trimesh(vertices=vertices, faces=faces, **kwargs) # only check in strict mode (unit tests) if tol.strict: assert mesh.volume > 0.0 @@ -423,10 +383,7 @@ def extrude_triangulation(vertices, return mesh -def triangulate_polygon(polygon, - triangle_args=None, - engine=None, - **kwargs): +def triangulate_polygon(polygon, triangle_args=None, engine=None, **kwargs): """ Given a shapely polygon create a triangulation using a python interface to `triangle.c` or mapbox-earcut. 
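To see the boundary-wall logic above in action — edges that occur exactly once in the triangulation become the vertical quads — here is a minimal sketch via `extrude_polygon`, which chains `triangulate_polygon` and `extrude_triangulation` (whose docstring continues below); the washer shape is purely illustrative:

```python
from shapely.geometry import Point

import trimesh

# an annular washer: the hole's boundary edges also occur exactly
# once, so both the outer and inner walls are generated
washer = Point(0, 0).buffer(1.0).difference(Point(0, 0).buffer(0.5))
mesh = trimesh.creation.extrude_polygon(washer, height=0.25)
print(mesh.is_watertight, mesh.volume > 0.0)
```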
@@ -449,38 +406,44 @@ def triangulate_polygon(polygon, faces : (n, 3) int Index of vertices that make up triangles """ - if engine is None or engine == 'earcut': + if engine is None or engine == "earcut": # get vertices as sequence where exterior # is the first value vertices = [np.array(polygon.exterior.coords)] - vertices.extend(np.array(i.coords) - for i in polygon.interiors) + vertices.extend(np.array(i.coords) for i in polygon.interiors) # record the index from the length of each vertex array rings = np.cumsum([len(v) for v in vertices]) # stack vertices into (n, 2) float array vertices = np.vstack(vertices) # run triangulation - faces = _tri_earcut(vertices, rings).reshape( - (-1, 3)).astype(np.int64).reshape((-1, 3)) + faces = ( + _tri_earcut(vertices, rings) + .reshape((-1, 3)) + .astype(np.int64) + .reshape((-1, 3)) + ) return vertices, faces - elif engine == 'triangle': + elif engine == "triangle": from triangle import triangulate + # set default triangulation arguments if not specified if triangle_args is None: - triangle_args = 'p' + triangle_args = "p" # turn the polygon in to vertices, segments, and holes arg = _polygon_to_kwargs(polygon) # run the triangulation result = triangulate(arg, triangle_args) - return result['vertices'], result['triangles'] + return result["vertices"], result["triangles"] else: - log.warning('try running `pip install mapbox-earcut`' + - 'or explicitly pass:\n' + - '`triangulate_polygon(*args, engine="triangle")`\n' + - 'to use the non-FSF-approved-license triangle engine') - raise ValueError('no valid triangulation engine!') + log.warning( + "try running `pip install mapbox-earcut`" + + "or explicitly pass:\n" + + '`triangulate_polygon(*args, engine="triangle")`\n' + + "to use the non-FSF-approved-license triangle engine" + ) + raise ValueError("no valid triangulation engine!") def _polygon_to_kwargs(polygon): @@ -500,7 +463,7 @@ def _polygon_to_kwargs(polygon): """ if not polygon.is_valid: - raise ValueError('invalid shapely polygon passed!') + raise ValueError("invalid shapely polygon passed!") def round_trip(start, length): """ @@ -537,8 +500,7 @@ def add_boundary(boundary, start): # the points, but this is more robust (to things like concavity), if # slower. 
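A sketch of the two triangulation engines handled above, assuming the optional dependencies named in the hunk (`mapbox-earcut` for the default path, `triangle` for the non-FSF-approved engine):

```python
from shapely.geometry import Polygon

import trimesh

square = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])

# default engine: mapbox-earcut
v, f = trimesh.creation.triangulate_polygon(square)

# explicit triangle.c engine with its own argument string ('p' by default)
v, f = trimesh.creation.triangulate_polygon(
    square, engine="triangle", triangle_args="p"
)
```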
test = Polygon(cleaned) - holes.append(np.array( - test.representative_point().coords)[0]) + holes.append(np.array(test.representative_point().coords)[0]) return len(cleaned) @@ -554,7 +516,7 @@ def add_boundary(boundary, start): try: start += add_boundary(interior, start) except BaseException: - log.warning('invalid interior, continuing') + log.warning("invalid interior, continuing") continue # create clean (n,2) float array of vertices @@ -566,14 +528,13 @@ def add_boundary(boundary, start): # strip it out for the triangulation if vertices.shape[1] == 3: vertices = vertices[:, :2] - result = {'vertices': vertices, - 'segments': facets} + result = {"vertices": vertices, "segments": facets} # holes in meshpy lingo are a (h, 2) list of (x,y) points # which are inside the region of the hole # we added a hole for the exterior, which we slice away here holes = np.array(holes)[1:] if len(holes) > 0: - result['holes'] = holes + result["holes"] = holes return result @@ -598,26 +559,26 @@ def box(extents=None, transform=None, bounds=None, **kwargs): Mesh of a cuboid """ # vertices of the cube - vertices = np.array([0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, - 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1], - order='C', - dtype=np.float64).reshape((-1, 3)) + vertices = np.array( + [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1], + order="C", + dtype=np.float64, + ).reshape((-1, 3)) # resize cube based on passed extents if bounds is not None: - if transform is not None or extents is not None: - raise ValueError('`bounds` overrides `extents`/`transform`!') + raise ValueError("`bounds` overrides `extents`/`transform`!") bounds = np.array(bounds, dtype=np.float64) if bounds.shape != (2, 3): - raise ValueError('`bounds` must be (2, 3) float!') + raise ValueError("`bounds` must be (2, 3) float!") extents = bounds.ptp(axis=0) vertices *= extents vertices += bounds[0] elif extents is not None: extents = np.asanyarray(extents, dtype=np.float64) if extents.shape != (3,): - raise ValueError('Extents must be (3,)!') + raise ValueError("Extents must be (3,)!") vertices -= 0.5 vertices *= extents else: @@ -625,27 +586,93 @@ def box(extents=None, transform=None, bounds=None, **kwargs): extents = np.asarray((1.0, 1.0, 1.0), dtype=np.float64) # hardcoded face indices - faces = [1, 3, 0, 4, 1, 0, 0, 3, 2, 2, 4, 0, 1, 7, 3, 5, 1, 4, - 5, 7, 1, 3, 7, 2, 6, 4, 2, 2, 7, 6, 6, 5, 4, 7, 5, 6] - faces = np.array(faces, order='C', dtype=np.int64).reshape((-1, 3)) - - face_normals = [-1, 0, 0, 0, -1, 0, -1, 0, 0, 0, 0, -1, 0, 0, 1, 0, -1, - 0, 0, 0, 1, 0, 1, 0, 0, 0, -1, 0, 1, 0, 1, 0, 0, 1, 0, 0] - face_normals = np.asanyarray(face_normals, - order='C', - dtype=np.float64).reshape(-1, 3) - - if 'metadata' not in kwargs: - kwargs['metadata'] = {} - kwargs['metadata'].update( - {'shape': 'box', - 'extents': extents}) - - box = Trimesh(vertices=vertices, - faces=faces, - face_normals=face_normals, - process=False, - **kwargs) + faces = [ + 1, + 3, + 0, + 4, + 1, + 0, + 0, + 3, + 2, + 2, + 4, + 0, + 1, + 7, + 3, + 5, + 1, + 4, + 5, + 7, + 1, + 3, + 7, + 2, + 6, + 4, + 2, + 2, + 7, + 6, + 6, + 5, + 4, + 7, + 5, + 6, + ] + faces = np.array(faces, order="C", dtype=np.int64).reshape((-1, 3)) + + face_normals = [ + -1, + 0, + 0, + 0, + -1, + 0, + -1, + 0, + 0, + 0, + 0, + -1, + 0, + 0, + 1, + 0, + -1, + 0, + 0, + 0, + 1, + 0, + 1, + 0, + 0, + 0, + -1, + 0, + 1, + 0, + 1, + 0, + 0, + 1, + 0, + 0, + ] + face_normals = np.asanyarray(face_normals, order="C", dtype=np.float64).reshape(-1, 3) + + if "metadata" not in kwargs: + 
kwargs["metadata"] = {} + kwargs["metadata"].update({"shape": "box", "extents": extents}) + + box = Trimesh( + vertices=vertices, faces=faces, face_normals=face_normals, process=False, **kwargs + ) # do the transform here to preserve face normals if transform is not None: @@ -668,21 +695,114 @@ def icosahedron(**kwargs): ico : trimesh.Trimesh Icosahederon centered at the origin. """ - t = (1.0 + 5.0**.5) / 2.0 - vertices = [-1, t, 0, 1, t, 0, -1, -t, 0, 1, -t, 0, 0, -1, t, 0, 1, t, - 0, -1, -t, 0, 1, -t, t, 0, -1, t, 0, 1, -t, 0, -1, -t, 0, 1] - faces = [0, 11, 5, 0, 5, 1, 0, 1, 7, 0, 7, 10, 0, 10, 11, - 1, 5, 9, 5, 11, 4, 11, 10, 2, 10, 7, 6, 7, 1, 8, - 3, 9, 4, 3, 4, 2, 3, 2, 6, 3, 6, 8, 3, 8, 9, - 4, 9, 5, 2, 4, 11, 6, 2, 10, 8, 6, 7, 9, 8, 1] + t = (1.0 + 5.0**0.5) / 2.0 + vertices = [ + -1, + t, + 0, + 1, + t, + 0, + -1, + -t, + 0, + 1, + -t, + 0, + 0, + -1, + t, + 0, + 1, + t, + 0, + -1, + -t, + 0, + 1, + -t, + t, + 0, + -1, + t, + 0, + 1, + -t, + 0, + -1, + -t, + 0, + 1, + ] + faces = [ + 0, + 11, + 5, + 0, + 5, + 1, + 0, + 1, + 7, + 0, + 7, + 10, + 0, + 10, + 11, + 1, + 5, + 9, + 5, + 11, + 4, + 11, + 10, + 2, + 10, + 7, + 6, + 7, + 1, + 8, + 3, + 9, + 4, + 3, + 4, + 2, + 3, + 2, + 6, + 3, + 6, + 8, + 3, + 8, + 9, + 4, + 9, + 5, + 2, + 4, + 11, + 6, + 2, + 10, + 8, + 6, + 7, + 9, + 8, + 1, + ] # scale vertices so each vertex radius is 1.0 vertices = np.reshape(vertices, (-1, 3)) / np.sqrt(2.0 + t) faces = np.reshape(faces, (-1, 3)) - return Trimesh(vertices=vertices, - faces=faces, - process=kwargs.pop('process', False), - **kwargs) + return Trimesh( + vertices=vertices, faces=faces, process=kwargs.pop("process", False), **kwargs + ) def icosphere(subdivisions=3, radius=1.0, **kwargs): @@ -711,24 +831,27 @@ def icosphere(subdivisions=3, radius=1.0, **kwargs): for _ in range(subdivisions): ico = ico.subdivide() vectors = ico.vertices - scalar = np.sqrt(np.dot(vectors ** 2, [1, 1, 1])) + scalar = np.sqrt(np.dot(vectors**2, [1, 1, 1])) unit = vectors / scalar.reshape((-1, 1)) ico.vertices += unit * (radius - scalar).reshape((-1, 1)) if "color" in kwargs: warnings.warn( - '`icosphere(color=...)` is deprecated and will ' + - 'be removed in June 2024: replace with Trimesh constructor ' + - 'kewyword argument `icosphere(face_colors=...)`', - category=DeprecationWarning, stacklevel=2) + "`icosphere(color=...)` is deprecated and will " + + "be removed in June 2024: replace with Trimesh constructor " + + "kewyword argument `icosphere(face_colors=...)`", + category=DeprecationWarning, + stacklevel=2, + ) kwargs["face_colors"] = kwargs.pop("color") - return Trimesh(vertices=ico.vertices, - faces=ico.faces, - metadata={'shape': 'sphere', - 'radius': radius}, - process=kwargs.pop('process', False), - **kwargs) + return Trimesh( + vertices=ico.vertices, + faces=ico.faces, + metadata={"shape": "sphere", "radius": radius}, + process=kwargs.pop("process", False), + **kwargs, + ) def uv_sphere(radius=1.0, count=None, transform=None, **kwargs): @@ -764,18 +887,16 @@ def uv_sphere(radius=1.0, count=None, transform=None, **kwargs): linestring = np.column_stack((np.sin(theta), -np.cos(theta))) * radius # revolve the curve to create a volume - return revolve(linestring=linestring, - sections=count[1], - transform=transform, - metadata={'shape': 'sphere', - 'radius': radius}, - **kwargs) - - -def capsule(height=1.0, - radius=1.0, - count=None, - transform=None): + return revolve( + linestring=linestring, + sections=count[1], + transform=transform, + metadata={"shape": "sphere", "radius": radius}, + 
**kwargs, + ) + + +def capsule(height=1.0, radius=1.0, count=None, transform=None): """ Create a mesh of a capsule, or a cylinder with hemispheric ends. @@ -814,19 +935,15 @@ def capsule(height=1.0, linestring[:half][:, 1] -= height / 2.0 linestring[half:][:, 1] += height / 2.0 - return revolve(linestring, - sections=count[1], - transform=transform, - metadata={'shape': 'capsule', - 'height': height, - 'radius': radius}) + return revolve( + linestring, + sections=count[1], + transform=transform, + metadata={"shape": "capsule", "height": height, "radius": radius}, + ) -def cone(radius, - height, - sections=None, - transform=None, - **kwargs): +def cone(radius, height, sections=None, transform=None, **kwargs): """ Create a mesh of a cone along Z centered at the origin. @@ -849,30 +966,19 @@ def cone(radius, Resulting mesh of a cone """ # create the 2D outline of a cone - linestring = [[0, 0], - [radius, 0], - [0, height]] + linestring = [[0, 0], [radius, 0], [0, height]] # revolve the profile to create a cone - if 'metadata' not in kwargs: - kwargs['metadata'] = {} - kwargs['metadata'].update( - {'shape': 'cone', - 'radius': radius, - 'height': height}) - cone = revolve(linestring=linestring, - sections=sections, - transform=transform, - **kwargs) + if "metadata" not in kwargs: + kwargs["metadata"] = {} + kwargs["metadata"].update({"shape": "cone", "radius": radius, "height": height}) + cone = revolve( + linestring=linestring, sections=sections, transform=transform, **kwargs + ) return cone -def cylinder(radius, - height=None, - sections=None, - segment=None, - transform=None, - **kwargs): +def cylinder(radius, height=None, sections=None, segment=None, transform=None, **kwargs): """ Create a mesh of a cylinder along Z centered at the origin. @@ -902,34 +1008,23 @@ def cylinder(radius, transform, height = _segment_to_cylinder(segment=segment) if height is None: - raise ValueError('either `height` or `segment` must be passed!') + raise ValueError("either `height` or `segment` must be passed!") half = abs(float(height)) / 2.0 # create a profile to revolve - linestring = [[0, -half], - [radius, -half], - [radius, half], - [0, half]] - if 'metadata' not in kwargs: - kwargs['metadata'] = {} - kwargs['metadata'].update( - {'shape': 'cylinder', - 'height': height, - 'radius': radius}) + linestring = [[0, -half], [radius, -half], [radius, half], [0, half]] + if "metadata" not in kwargs: + kwargs["metadata"] = {} + kwargs["metadata"].update({"shape": "cylinder", "height": height, "radius": radius}) # generate cylinder through simple revolution - return revolve(linestring=linestring, - sections=sections, - transform=transform, - **kwargs) - - -def annulus(r_min, - r_max, - height=None, - sections=None, - transform=None, - segment=None, - **kwargs): + return revolve( + linestring=linestring, sections=sections, transform=transform, **kwargs + ) + + +def annulus( + r_min, r_max, height=None, sections=None, transform=None, segment=None, **kwargs +): """ Create a mesh of an annular cylinder along Z centered at the origin. 
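All of the solids in the hunks here are thin wrappers that build a 2D profile and hand it to `revolve`; a brief sketch of the post-patch signatures (the `annulus` docstring continues below), with the `segment` form standing in for `height` plus `transform`:

```python
import trimesh

cap = trimesh.creation.capsule(height=1.0, radius=0.25)
cone = trimesh.creation.cone(radius=0.5, height=1.0, sections=64)
# a cylinder positioned between two 3D points via `segment`
cyl = trimesh.creation.cylinder(radius=0.1, segment=[[0, 0, 0], [0, 0, 2.0]])
```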
@@ -960,39 +1055,36 @@ def annulus(r_min, transform, height = _segment_to_cylinder(segment=segment) if height is None: - raise ValueError('either `height` or `segment` must be passed!') + raise ValueError("either `height` or `segment` must be passed!") r_min = abs(float(r_min)) # if center radius is zero this is a cylinder if r_min < tol.merge: - return cylinder(radius=r_max, - height=height, - sections=sections, - transform=transform, - **kwargs) + return cylinder( + radius=r_max, height=height, sections=sections, transform=transform, **kwargs + ) r_max = abs(float(r_max)) # we're going to center at XY plane so take half the height half = abs(float(height)) / 2.0 # create counter-clockwise rectangle - linestring = [[r_min, -half], - [r_max, -half], - [r_max, half], - [r_min, half], - [r_min, -half]] - - if 'metadata' not in kwargs: - kwargs['metadata'] = {} - kwargs['metadata'].update( - {'shape': 'annulus', - 'r_min': r_min, - 'r_max': r_max, - 'height': height}) + linestring = [ + [r_min, -half], + [r_max, -half], + [r_max, half], + [r_min, half], + [r_min, -half], + ] + + if "metadata" not in kwargs: + kwargs["metadata"] = {} + kwargs["metadata"].update( + {"shape": "annulus", "r_min": r_min, "r_max": r_max, "height": height} + ) # revolve the curve - annulus = revolve(linestring=linestring, - sections=sections, - transform=transform, - **kwargs) + annulus = revolve( + linestring=linestring, sections=sections, transform=transform, **kwargs + ) return annulus @@ -1016,7 +1108,7 @@ def _segment_to_cylinder(segment): """ segment = np.asanyarray(segment, dtype=np.float64) if segment.shape != (2, 3): - raise ValueError('segment must be 2 3D points!') + raise ValueError("segment must be 2 3D points!") vector = segment[1] - segment[0] # override height with segment length height = np.linalg.norm(vector) @@ -1051,11 +1143,13 @@ def random_soup(face_count=100): return soup -def axis(origin_size=0.04, - transform=None, - origin_color=None, - axis_radius=None, - axis_length=None): +def axis( + origin_size=0.04, + transform=None, + origin_color=None, + axis_radius=None, + axis_length=None, +): """ Return an XYZ axis marker as a Trimesh, which represents position and orientation. 
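A sketch of the `axis` marker whose hunk begins above, assuming the current `trimesh.transformations` helpers; as the docstring notes, the cylinder radius and length default from `origin_size` when not given:

```python
import trimesh

# an RGB (XYZ) axis marker at a random pose; axis_radius and
# axis_length are derived from origin_size unless passed explicitly
pose = trimesh.transformations.random_rotation_matrix()
marker = trimesh.creation.axis(origin_size=0.04, transform=pose)
```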
If you set the origin size the other parameters @@ -1094,58 +1188,48 @@ def axis(origin_size=0.04, axis_length = origin_size * 10.0 # generate a ball for the origin - axis_origin = uv_sphere(radius=origin_size, - count=[10, 10]) + axis_origin = uv_sphere(radius=origin_size, count=[10, 10]) axis_origin.apply_transform(transform) # apply color to the origin ball axis_origin.visual.face_colors = origin_color # create the cylinder for the z-axis - translation = tf.translation_matrix( - [0, 0, axis_length / 2]) + translation = tf.translation_matrix([0, 0, axis_length / 2]) z_axis = cylinder( - radius=axis_radius, - height=axis_length, - transform=transform.dot(translation)) + radius=axis_radius, height=axis_length, transform=transform.dot(translation) + ) # XYZ->RGB, Z is blue z_axis.visual.face_colors = [0, 0, 255] # create the cylinder for the y-axis - translation = tf.translation_matrix( - [0, 0, axis_length / 2]) - rotation = tf.rotation_matrix(np.radians(-90), - [1, 0, 0]) + translation = tf.translation_matrix([0, 0, axis_length / 2]) + rotation = tf.rotation_matrix(np.radians(-90), [1, 0, 0]) y_axis = cylinder( radius=axis_radius, height=axis_length, - transform=transform.dot(rotation).dot(translation)) + transform=transform.dot(rotation).dot(translation), + ) # XYZ->RGB, Y is green y_axis.visual.face_colors = [0, 255, 0] # create the cylinder for the x-axis - translation = tf.translation_matrix( - [0, 0, axis_length / 2]) - rotation = tf.rotation_matrix(np.radians(90), - [0, 1, 0]) + translation = tf.translation_matrix([0, 0, axis_length / 2]) + rotation = tf.rotation_matrix(np.radians(90), [0, 1, 0]) x_axis = cylinder( radius=axis_radius, height=axis_length, - transform=transform.dot(rotation).dot(translation)) + transform=transform.dot(rotation).dot(translation), + ) # XYZ->RGB, X is red x_axis.visual.face_colors = [255, 0, 0] # append the sphere and three cylinders - marker = util.concatenate([axis_origin, - x_axis, - y_axis, - z_axis]) + marker = util.concatenate([axis_origin, x_axis, y_axis, z_axis]) return marker -def camera_marker(camera, - marker_height=0.4, - origin_size=None): +def camera_marker(camera, marker_height=0.4, origin_size=None): """ Create a visual marker for a camera object, including an axis and FOV. 
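A sketch of `camera_marker` usage (its docstring continues below), assuming a default `Scene` camera; as the ImportError fallback above notes, the FOV path is only included when `shapely` is available:

```python
import trimesh

scene = trimesh.Scene([trimesh.creation.box()])
# a list: the axis marker, plus a Path3D FOV visualization when possible
geoms = trimesh.creation.camera_marker(scene.camera, marker_height=0.4)
scene.add_geometry(geoms)
```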
@@ -1176,8 +1260,7 @@ def camera_marker(camera, from .path.exchange.load import load_path except ImportError: # they probably don't have shapely installed - log.warning('unable to create FOV visualization!', - exc_info=True) + log.warning("unable to create FOV visualization!", exc_info=True) return meshes # calculate vertices from camera FOV angles @@ -1187,26 +1270,16 @@ def camera_marker(camera, # combine the points into the vertices of an FOV visualization points = np.array( - [(0, 0, 0), - (-x, -y, z), - (x, -y, z), - (x, y, z), - (-x, y, z)], - dtype=float) + [(0, 0, 0), (-x, -y, z), (x, -y, z), (x, y, z), (-x, y, z)], dtype=float + ) # create line segments for the FOV visualization # a segment from the origin to each bound of the FOV - segments = np.column_stack( - (np.zeros_like(points), points)).reshape( - (-1, 3)) + segments = np.column_stack((np.zeros_like(points), points)).reshape((-1, 3)) # add a loop for the outside of the FOV then reshape # the whole thing into multiple line segments - segments = np.vstack((segments, - points[[1, 2, - 2, 3, - 3, 4, - 4, 1]])).reshape((-1, 2, 3)) + segments = np.vstack((segments, points[[1, 2, 2, 3, 3, 4, 4, 1]])).reshape((-1, 2, 3)) # add a single Path3D object for all line segments meshes.append(load_path(segments)) @@ -1239,8 +1312,7 @@ def truncated_prisms(tris, origin=None, normal=None): transform = plane_transform(origin=origin, normal=normal) # transform the triangles to the specified plane - transformed = tf.transform_points( - tris.reshape((-1, 3)), transform).reshape((-1, 9)) + transformed = tf.transform_points(tris.reshape((-1, 3)), transform).reshape((-1, 9)) # stack triangles such that every other one is repeated vs = np.column_stack((transformed, transformed)).reshape((-1, 3, 3)) @@ -1248,28 +1320,29 @@ def truncated_prisms(tris, origin=None, normal=None): vs[1::2, :, 2] = 0 # reshape triangles to a flat array of points and transform back to # original frame - vertices = tf.transform_points( - vs.reshape((-1, 3)), matrix=np.linalg.inv(transform)) + vertices = tf.transform_points(vs.reshape((-1, 3)), matrix=np.linalg.inv(transform)) # face indexes for a *single* truncated triangular prism - f = np.array([[2, 1, 0], - [3, 4, 5], - [0, 1, 4], - [1, 2, 5], - [2, 0, 3], - [4, 3, 0], - [5, 4, 1], - [3, 5, 2]]) + f = np.array( + [ + [2, 1, 0], + [3, 4, 5], + [0, 1, 4], + [1, 2, 5], + [2, 0, 3], + [4, 3, 0], + [5, 4, 1], + [3, 5, 2], + ] + ) # find the projection of each triangle with the normal vector - cross = np.dot([0, 0, 1], triangles.cross( - transformed.reshape((-1, 3, 3))).T) + cross = np.dot([0, 0, 1], triangles.cross(transformed.reshape((-1, 3, 3))).T) # stack faces into one prism per triangle f_seq = np.tile(f, (len(transformed), 1)).reshape((-1, len(f), 3)) # if the normal of the triangle was positive flip the winding f_seq[cross > 0] = np.fliplr(f) # offset stacked faces to create correct indices - faces = (f_seq + (np.arange(len(f_seq)) * - 6).reshape((-1, 1, 1))).reshape((-1, 3)) + faces = (f_seq + (np.arange(len(f_seq)) * 6).reshape((-1, 1, 1))).reshape((-1, 3)) # create a mesh from the data mesh = Trimesh(vertices=vertices, faces=faces, process=False) @@ -1277,12 +1350,14 @@ def truncated_prisms(tris, origin=None, normal=None): return mesh -def torus(major_radius, - minor_radius, - major_sections=32, - minor_sections=32, - transform=None, - **kwargs): +def torus( + major_radius, + minor_radius, + major_sections=32, + minor_sections=32, + transform=None, + **kwargs, +): """Create a mesh of a torus around Z centered at the 
origin. Parameters @@ -1308,19 +1383,17 @@ def torus(major_radius, Mesh of a torus """ phi = np.linspace(0, 2 * np.pi, minor_sections, endpoint=False) - linestring = np.column_stack((minor_radius * np.cos(phi), - minor_radius * np.sin(phi))) \ - + [major_radius, 0] + linestring = np.column_stack( + (minor_radius * np.cos(phi), minor_radius * np.sin(phi)) + ) + [major_radius, 0] - if 'metadata' not in kwargs: - kwargs['metadata'] = {} - kwargs['metadata'].update( - {'shape': 'torus', - 'major_radius': major_radius, - 'minor_radius': minor_radius}) + if "metadata" not in kwargs: + kwargs["metadata"] = {} + kwargs["metadata"].update( + {"shape": "torus", "major_radius": major_radius, "minor_radius": minor_radius} + ) # generate torus through simple revolution - return revolve(linestring=linestring, - sections=major_sections, - transform=transform, - **kwargs) + return revolve( + linestring=linestring, sections=major_sections, transform=transform, **kwargs + ) diff --git a/trimesh/curvature.py b/trimesh/curvature.py index 40a9d7b6e..49d9f04b1 100644 --- a/trimesh/curvature.py +++ b/trimesh/curvature.py @@ -12,6 +12,7 @@ from scipy.sparse import coo_matrix except ImportError as E: from . import exceptions + coo_matrix = exceptions.ExceptionWrapper(E) @@ -24,10 +25,10 @@ def face_angles_sparse(mesh): sparse : scipy.sparse.coo_matrix matrix is float shaped (len(vertices), len(faces)) """ - matrix = coo_matrix(( - mesh.face_angles.flatten(), - (mesh.faces_sparse.row, mesh.faces_sparse.col)), - mesh.faces_sparse.shape) + matrix = coo_matrix( + (mesh.face_angles.flatten(), (mesh.faces_sparse.row, mesh.faces_sparse.col)), + mesh.faces_sparse.shape, + ) return matrix @@ -75,7 +76,7 @@ def discrete_gaussian_curvature_measure(mesh, points, radius): points = np.asanyarray(points, dtype=np.float64) if not util.is_shape(points, (-1, 3)): - raise ValueError('points must be (n,3)!') + raise ValueError("points must be (n,3)!") nearest = mesh.kdtree.query_ball_point(points, radius) gauss_curv = [mesh.vertex_defects[vertices].sum() for vertices in nearest] @@ -107,24 +108,20 @@ def discrete_mean_curvature_measure(mesh, points, radius): points = np.asanyarray(points, dtype=np.float64) if not util.is_shape(points, (-1, 3)): - raise ValueError('points must be (n,3)!') + raise ValueError("points must be (n,3)!") # axis aligned bounds - bounds = np.column_stack((points - radius, - points + radius)) + bounds = np.column_stack((points - radius, points + radius)) # line segments that intersect axis aligned bounding box - candidates = [list(mesh.face_adjacency_tree.intersection(b)) - for b in bounds] + candidates = [list(mesh.face_adjacency_tree.intersection(b)) for b in bounds] mean_curv = np.empty(len(points)) for i, (x, x_candidates) in enumerate(zip(points, candidates)): endpoints = mesh.vertices[mesh.face_adjacency_edges[x_candidates]] lengths = line_ball_intersection( - endpoints[:, 0], - endpoints[:, 1], - center=x, - radius=radius) + endpoints[:, 0], endpoints[:, 1], center=x, radius=radius + ) angles = mesh.face_adjacency_angles[x_candidates] signs = np.where(mesh.face_adjacency_convex[x_candidates], 1, -1) mean_curv[i] = (lengths * angles * signs).sum() / 2 @@ -155,9 +152,9 @@ def line_ball_intersection(start_points, end_points, center, radius): L = end_points - start_points oc = start_points - center # o-c r = radius - ldotl = np.einsum('ij, ij->i', L, L) # l.l - ldotoc = np.einsum('ij, ij->i', L, oc) # l.(o-c) - ocdotoc = np.einsum('ij, ij->i', oc, oc) # (o-c).(o-c) + ldotl = np.einsum("ij, ij->i", L, L) # 
l.l + ldotoc = np.einsum("ij, ij->i", L, oc) # l.(o-c) + ocdotoc = np.einsum("ij, ij->i", oc, oc) # (o-c).(o-c) discrims = ldotoc**2 - ldotl * (ocdotoc - r**2) # If discriminant is non-positive, then we have zero length diff --git a/trimesh/exceptions.py b/trimesh/exceptions.py index 1e4500262..05894d003 100644 --- a/trimesh/exceptions.py +++ b/trimesh/exceptions.py @@ -24,11 +24,11 @@ def __getattribute__(self, *args, **kwargs): # will raise when this object is accessed like an object # if it's asking for our class type return None # this allows isinstance() checks to not re-raise - if args[0] == '__class__': + if args[0] == "__class__": return None.__class__ # otherwise raise our original exception - raise super().__getattribute__('exception') + raise super().__getattribute__("exception") def __call__(self, *args, **kwargs): # will raise when this object is called like a function - raise super().__getattribute__('exception') + raise super().__getattribute__("exception") diff --git a/trimesh/exchange/binvox.py b/trimesh/exchange/binvox.py index 8bcbd7b51..50ed691c2 100644 --- a/trimesh/exchange/binvox.py +++ b/trimesh/exchange/binvox.py @@ -17,9 +17,8 @@ from ..base import Trimesh # find the executable for binvox in PATH -binvox_encoder = util.which('binvox') -Binvox = collections.namedtuple( - 'Binvox', ['rle_data', 'shape', 'translate', 'scale']) +binvox_encoder = util.which("binvox") +Binvox = collections.namedtuple("Binvox", ["rle_data", "shape", "translate", "scale"]) def parse_binvox_header(fp): @@ -49,18 +48,16 @@ def parse_binvox_header(fp): """ line = fp.readline().strip() - if hasattr(line, 'decode'): - binvox = b'#binvox' - space = b' ' + if hasattr(line, "decode"): + binvox = b"#binvox" + space = b" " else: - binvox = '#binvox' - space = ' ' + binvox = "#binvox" + space = " " if not line.startswith(binvox): - raise OSError('Not a binvox file') - shape = tuple( - int(s) for s in fp.readline().strip().split(space)[1:]) - translate = tuple( - float(s) for s in fp.readline().strip().split(space)[1:]) + raise OSError("Not a binvox file") + shape = tuple(int(s) for s in fp.readline().strip().split(space)[1:]) + translate = tuple(float(s) for s in fp.readline().strip().split(space)[1:]) scale = float(fp.readline().strip().split(space)[1]) fp.readline() return shape, translate, scale @@ -99,12 +96,12 @@ def parse_binvox(fp, writeable=False): return Binvox(rle_data, shape, translate, scale) -_binvox_header = '''#binvox 1 +_binvox_header = """#binvox 1 dim {sx} {sy} {sz} translate {tx} {ty} {tz} scale {scale} data -''' +""" def binvox_header(shape, translate, scale): @@ -123,8 +120,7 @@ def binvox_header(shape, translate, scale): """ sx, sy, sz = (int(s) for s in shape) tx, ty, tz = translate - return _binvox_header.format( - sx=sx, sy=sy, sz=sz, tx=tx, ty=ty, tz=tz, scale=scale) + return _binvox_header.format(sx=sx, sy=sy, sz=sz, tx=tx, ty=ty, tz=tz, scale=scale) def binvox_bytes(rle_data, shape, translate=(0, 0, 0), scale=1): @@ -147,15 +143,13 @@ def binvox_bytes(rle_data, shape, translate=(0, 0, 0), scale=1): Suitable for writing to binary file """ if rle_data.dtype != np.uint8: - raise ValueError( - "rle_data.dtype must be np.uint8, got %s" % rle_data.dtype) + raise ValueError("rle_data.dtype must be np.uint8, got %s" % rle_data.dtype) header = binvox_header(shape, translate, scale).encode() return header + rle_data.tobytes() -def voxel_from_binvox( - rle_data, shape, translate=None, scale=1.0, axis_order='xzy'): +def voxel_from_binvox(rle_data, shape, translate=None, scale=1.0, 
axis_order="xzy"): """ Factory for building from data associated with binvox files. @@ -196,28 +190,24 @@ def voxel_from_binvox( # translate = np.asanyarray(translate) * scale) # translate = [0, 0, 0] transform = transformations.scale_and_translate( - scale=scale / (np.array(shape) - 1), - translate=translate) + scale=scale / (np.array(shape) - 1), translate=translate + ) - if axis_order == 'xzy': + if axis_order == "xzy": perm = (0, 2, 1) shape = tuple(shape[p] for p in perm) encoding = encoding.reshape(shape).transpose(perm) - elif axis_order is None or axis_order == 'xyz': + elif axis_order is None or axis_order == "xyz": encoding = encoding.reshape(shape) else: - raise ValueError( - "Invalid axis_order '%s': must be None, 'xyz' or 'xzy'") + raise ValueError("Invalid axis_order '%s': must be None, 'xyz' or 'xzy'") assert encoding.shape == shape return VoxelGrid(encoding, transform) -def load_binvox(file_obj, - resolver=None, - axis_order='xzy', - file_type=None): +def load_binvox(file_obj, resolver=None, axis_order="xzy", file_type=None): """ Load trimesh `VoxelGrid` instance from file. @@ -236,19 +226,19 @@ def load_binvox(file_obj, result : trimesh.voxel.VoxelGrid Loaded voxel data """ - if file_type is not None and file_type != 'binvox': - raise ValueError( - 'file_type must be None or binvox, got %s' % file_type) + if file_type is not None and file_type != "binvox": + raise ValueError("file_type must be None or binvox, got %s" % file_type) data = parse_binvox(file_obj, writeable=True) return voxel_from_binvox( rle_data=data.rle_data, shape=data.shape, translate=data.translate, scale=data.scale, - axis_order=axis_order) + axis_order=axis_order, + ) -def export_binvox(voxel, axis_order='xzy'): +def export_binvox(voxel, axis_order="xzy"): """ Export `trimesh.voxel.VoxelGrid` instance to bytes @@ -269,19 +259,18 @@ def export_binvox(voxel, axis_order='xzy'): """ translate = voxel.translation scale = voxel.scale * (np.array(voxel.shape) - 1) - neg_scale, = np.where(scale < 0) + (neg_scale,) = np.where(scale < 0) encoding = voxel.encoding.flip(neg_scale) scale = np.abs(scale) if not util.allclose(scale[0], scale[1:], 1e-6 * scale[0] + 1e-8): - raise ValueError('Can only export binvox with uniform scale') + raise ValueError("Can only export binvox with uniform scale") scale = scale[0] - if axis_order == 'xzy': + if axis_order == "xzy": encoding = encoding.transpose((0, 2, 1)) - elif axis_order != 'xyz': + elif axis_order != "xyz": raise ValueError('Invalid axis_order: must be one of ("xyz", "xzy")') rle_data = encoding.flat.run_length_data(dtype=np.uint8) - return binvox_bytes( - rle_data, shape=voxel.shape, translate=translate, scale=scale) + return binvox_bytes(rle_data, shape=voxel.shape, translate=translate, scale=scale) class Binvoxer: @@ -324,49 +313,50 @@ class Binvoxer: """ SUPPORTED_INPUT_TYPES = ( - 'ug', - 'obj', - 'off', - 'dfx', - 'xgl', - 'pov', - 'brep', - 'ply', - 'jot', + "ug", + "obj", + "off", + "dfx", + "xgl", + "pov", + "brep", + "ply", + "jot", ) SUPPORTED_OUTPUT_TYPES = ( - 'binvox', - 'hips', - 'mira', - 'vtk', - 'raw', - 'schematic', - 'msh', + "binvox", + "hips", + "mira", + "vtk", + "raw", + "schematic", + "msh", ) def __init__( - self, - dimension=32, - file_type='binvox', - z_buffer_carving=True, - z_buffer_voting=True, - dilated_carving=False, - exact=True, - bounding_box=None, - remove_internal=False, - center=False, - rotate_x=0, - rotate_z=0, - wireframe=False, - fit=False, - block_id=None, - use_material_block_id=False, - use_offscreen_pbuffer=False, - 
downsample_factor=None, - downsample_threshold=None, - verbose=False, - binvox_path=None): + self, + dimension=32, + file_type="binvox", + z_buffer_carving=True, + z_buffer_voting=True, + dilated_carving=False, + exact=True, + bounding_box=None, + remove_internal=False, + center=False, + rotate_x=0, + rotate_z=0, + wireframe=False, + fit=False, + block_id=None, + use_material_block_id=False, + use_offscreen_pbuffer=False, + downsample_factor=None, + downsample_threshold=None, + verbose=False, + binvox_path=None, + ): """ Configure the voxelizer. @@ -421,65 +411,71 @@ def __init__( encoder = binvox_path if encoder is None: - raise OSError(' '.join([ - 'No `binvox_path` provided and no binvox executable found', - 'on PATH, please go to https://www.patrickmin.com/binvox/ and', - 'download the appropriate version.'])) + raise OSError( + " ".join( + [ + "No `binvox_path` provided and no binvox executable found", + "on PATH, please go to https://www.patrickmin.com/binvox/ and", + "download the appropriate version.", + ] + ) + ) if dimension > 1024 and not exact: - raise ValueError( - 'Maximum dimension using exact is 1024, got %d' % dimension) + raise ValueError("Maximum dimension using exact is 1024, got %d" % dimension) if file_type not in Binvoxer.SUPPORTED_OUTPUT_TYPES: raise ValueError( - f'file_type {file_type} not in set of supported output types {str(Binvoxer.SUPPORTED_OUTPUT_TYPES)}') - args = [encoder, '-d', str(dimension), '-t', file_type] + f"file_type {file_type} not in set of supported output types {str(Binvoxer.SUPPORTED_OUTPUT_TYPES)}" + ) + args = [encoder, "-d", str(dimension), "-t", file_type] if exact: - args.append('-e') + args.append("-e") if z_buffer_carving: if z_buffer_voting: pass else: - args.append('-c') + args.append("-c") elif z_buffer_voting: - args.append('-v') + args.append("-v") else: raise ValueError( - 'One of `z_buffer_carving` or `z_buffer_voting` must be True') + "One of `z_buffer_carving` or `z_buffer_voting` must be True" + ) if dilated_carving: - args.append('-dc') + args.append("-dc") # Additional parameters if bounding_box is not None: if len(bounding_box) != 6: - raise ValueError('bounding_box must have 6 elements') - args.append('-bb') + raise ValueError("bounding_box must have 6 elements") + args.append("-bb") args.extend(str(b) for b in bounding_box) if remove_internal: - args.append('-ri') + args.append("-ri") if center: - args.append('-cb') - args.extend(('-rotx',) * rotate_x) - args.extend(('-rotz',) * rotate_z) + args.append("-cb") + args.extend(("-rotx",) * rotate_x) + args.extend(("-rotz",) * rotate_z) if wireframe: - args.append('-aw') + args.append("-aw") if fit: - args.append('-fit') + args.append("-fit") if block_id is not None: - args.extend(('-bi', block_id)) + args.extend(("-bi", block_id)) if use_material_block_id: - args.append('-mb') + args.append("-mb") if use_offscreen_pbuffer: - args.append('-pb') + args.append("-pb") if downsample_factor is not None: times = np.log2(downsample_factor) if int(times) != times: raise ValueError( - 'downsample_factor must be a power of 2, got %d' - % downsample_factor) - args.extend(('-down',) * int(times)) + "downsample_factor must be a power of 2, got %d" % downsample_factor + ) + args.extend(("-down",) * int(times)) if downsample_threshold is not None: - args.extend(('-dmin', str(downsample_threshold))) - args.append('PATH') + args.extend(("-dmin", str(downsample_threshold))) + args.append("PATH") self._args = args self._file_type = file_type @@ -516,10 +512,11 @@ def __call__(self, path, 
overwrite=False): ext = ext[1:].lower() if ext not in Binvoxer.SUPPORTED_INPUT_TYPES: raise ValueError( - f'file_type {ext} not in set of supported input types {str(Binvoxer.SUPPORTED_INPUT_TYPES)}') - out_path = f'{head}.{self._file_type}' + f"file_type {ext} not in set of supported input types {str(Binvoxer.SUPPORTED_INPUT_TYPES)}" + ) + out_path = f"{head}.{self._file_type}" if os.path.isfile(out_path) and not overwrite: - raise OSError('Attempted to voxelize object at existing path') + raise OSError("Attempted to voxelize object at existing path") self._args[-1] = path # generalizes to python2 and python3 @@ -533,10 +530,7 @@ def __call__(self, path, overwrite=False): return out_path -def voxelize_mesh(mesh, - binvoxer=None, - export_type='off', - **binvoxer_kwargs): +def voxelize_mesh(mesh, binvoxer=None, export_type="off", **binvoxer_kwargs): """ Interface for voxelizing Trimesh object via the binvox tool. @@ -558,22 +552,21 @@ def voxelize_mesh(mesh, `VoxelGrid` object resulting. """ if not isinstance(mesh, Trimesh): - raise ValueError('mesh must be Trimesh instance, got %s' % str(mesh)) + raise ValueError("mesh must be Trimesh instance, got %s" % str(mesh)) if binvoxer is None: binvoxer = Binvoxer(**binvoxer_kwargs) elif len(binvoxer_kwargs) > 0: - raise ValueError('Cannot provide binvoxer and binvoxer_kwargs') - if binvoxer.file_type != 'binvox': - raise ValueError( - 'Only "binvox" binvoxer `file_type` currently supported') + raise ValueError("Cannot provide binvoxer and binvoxer_kwargs") + if binvoxer.file_type != "binvox": + raise ValueError('Only "binvox" binvoxer `file_type` currently supported') with TemporaryDirectory() as folder: - model_path = os.path.join(folder, 'model.%s' % export_type) - with open(model_path, 'wb') as fp: + model_path = os.path.join(folder, "model.%s" % export_type) + with open(model_path, "wb") as fp: mesh.export(fp, file_type=export_type) out_path = binvoxer(model_path) - with open(out_path, 'rb') as fp: + with open(out_path, "rb") as fp: out_model = load_binvox(fp) return out_model -_binvox_loaders = {'binvox': load_binvox} +_binvox_loaders = {"binvox": load_binvox} diff --git a/trimesh/exchange/dae.py b/trimesh/exchange/dae.py index dbf10b2fb..9e0595483 100644 --- a/trimesh/exchange/dae.py +++ b/trimesh/exchange/dae.py @@ -12,10 +12,7 @@ _EYE.flags.writeable = False -def load_collada(file_obj, - resolver=None, - ignore_broken=True, - **kwargs): +def load_collada(file_obj, resolver=None, ignore_broken=True, **kwargs): """ Load a COLLADA (.dae) file into a list of trimesh kwargs. 
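Before the COLLADA loader continues below, a sketch that round-trips a voxel grid through the binvox codec from the hunks above, entirely in memory; it assumes `voxelized` yields the uniform scale `export_binvox` requires and uses the default 'xzy' axis order on both sides:

```python
import io

import trimesh
from trimesh.exchange.binvox import export_binvox, load_binvox

vox = trimesh.creation.box(extents=[1, 1, 1]).voxelized(pitch=0.05)
data = export_binvox(vox)  # header plus run-length payload as bytes
restored = load_binvox(io.BytesIO(data))
print(vox.shape, restored.shape)
```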
@@ -40,19 +37,19 @@ def load_collada(file_obj, import collada if ignore_broken: - ignores = [collada.common.DaeError, - collada.common.DaeIncompleteError, - collada.common.DaeMalformedError, - collada.common.DaeBrokenRefError, - collada.common.DaeUnsupportedError, - collada.common.DaeIncompleteError] + ignores = [ + collada.common.DaeError, + collada.common.DaeIncompleteError, + collada.common.DaeMalformedError, + collada.common.DaeBrokenRefError, + collada.common.DaeUnsupportedError, + collada.common.DaeIncompleteError, + ] else: ignores = None # load scene using pycollada - c = collada.Collada( - file_obj, - ignore=ignores) + c = collada.Collada(file_obj, ignore=ignores) # Create material map from Material ID to trimesh material material_map = {} @@ -67,18 +64,18 @@ def load_collada(file_obj, # list of dict graph = [] for node in c.scene.nodes: - _parse_node(node=node, - parent_matrix=_EYE, - material_map=material_map, - meshes=meshes, - meshes_count=meshes_count, - graph=graph, - resolver=resolver) + _parse_node( + node=node, + parent_matrix=_EYE, + material_map=material_map, + meshes=meshes, + meshes_count=meshes_count, + graph=graph, + resolver=resolver, + ) # create kwargs for load_kwargs - result = {'class': 'Scene', - 'graph': graph, - 'geometry': meshes} + result = {"class": "Scene", "graph": graph, "geometry": meshes} return result @@ -105,57 +102,58 @@ def export_collada(mesh, **kwargs): c = collada.Collada() nodes = [] for i, m in enumerate(meshes): - # Load uv, colors, materials uv = None colors = None mat = _unparse_material(None) if m.visual.defined: - if m.visual.kind == 'texture': + if m.visual.kind == "texture": mat = _unparse_material(m.visual.material) uv = m.visual.uv - elif m.visual.kind == 'vertex': + elif m.visual.kind == "vertex": colors = (m.visual.vertex_colors / 255.0)[:, :3] c.effects.append(mat.effect) c.materials.append(mat) # Create geometry object vertices = collada.source.FloatSource( - 'verts-array', m.vertices.flatten(), ('X', 'Y', 'Z')) + "verts-array", m.vertices.flatten(), ("X", "Y", "Z") + ) normals = collada.source.FloatSource( - 'normals-array', m.vertex_normals.flatten(), ('X', 'Y', 'Z')) + "normals-array", m.vertex_normals.flatten(), ("X", "Y", "Z") + ) input_list = collada.source.InputList() - input_list.addInput(0, 'VERTEX', '#verts-array') - input_list.addInput(1, 'NORMAL', '#normals-array') + input_list.addInput(0, "VERTEX", "#verts-array") + input_list.addInput(1, "NORMAL", "#normals-array") arrays = [vertices, normals] - if ((uv is not None) and (len(uv) > 0)): + if (uv is not None) and (len(uv) > 0): texcoords = collada.source.FloatSource( - 'texcoords-array', uv.flatten(), ('U', 'V')) - input_list.addInput(2, 'TEXCOORD', '#texcoords-array') + "texcoords-array", uv.flatten(), ("U", "V") + ) + input_list.addInput(2, "TEXCOORD", "#texcoords-array") arrays.append(texcoords) if colors is not None: idx = 2 if uv: idx = 3 - colors = collada.source.FloatSource('colors-array', - colors.flatten(), ('R', 'G', 'B')) - input_list.addInput(idx, 'COLOR', '#colors-array') + colors = collada.source.FloatSource( + "colors-array", colors.flatten(), ("R", "G", "B") + ) + input_list.addInput(idx, "COLOR", "#colors-array") arrays.append(colors) - geom = collada.geometry.Geometry( - c, uuid.uuid4().hex, uuid.uuid4().hex, arrays - ) + geom = collada.geometry.Geometry(c, uuid.uuid4().hex, uuid.uuid4().hex, arrays) indices = np.repeat(m.faces.flatten(), len(arrays)) - matref = f'material{i}' + matref = f"material{i}" triset = geom.createTriangleSet(indices, 
input_list, matref) geom.primitives.append(triset) c.geometries.append(geom) matnode = collada.scene.MaterialNode(matref, mat, inputs=[]) geomnode = collada.scene.GeometryNode(geom, [matnode]) - node = collada.scene.Node(f'node{i}', children=[geomnode]) + node = collada.scene.Node(f"node{i}", children=[geomnode]) nodes.append(node) - scene = collada.scene.Scene('scene', nodes) + scene = collada.scene.Scene("scene", nodes) c.scenes.append(scene) c.scene = scene @@ -165,13 +163,9 @@ def export_collada(mesh, **kwargs): return b.read() -def _parse_node(node, - parent_matrix, - material_map, - meshes, - meshes_count, - graph, - resolver=None): +def _parse_node( + node, parent_matrix, material_map, meshes, meshes_count, graph, resolver=None +): """ Recursively parse COLLADA scene nodes. """ @@ -198,58 +192,54 @@ def _parse_node(node, if isinstance(primitive, collada.triangleset.TriangleSet): vertex = primitive.vertex vertex_index = primitive.vertex_index - vertices = vertex[vertex_index].reshape( - len(vertex_index) * 3, 3) + vertices = vertex[vertex_index].reshape(len(vertex_index) * 3, 3) # Get normals if present normals = None if primitive.normal is not None: normal = primitive.normal normal_index = primitive.normal_index - normals = normal[normal_index].reshape( - len(normal_index) * 3, 3) + normals = normal[normal_index].reshape(len(normal_index) * 3, 3) # Get colors if present colors = None s = primitive.sources - if ('COLOR' in s and len(s['COLOR']) - > 0 and len(primitive.index) > 0): - color = s['COLOR'][0][4].data - color_index = primitive.index[:, :, s['COLOR'][0][0]] - colors = color[color_index].reshape( - len(color_index) * 3, -1) + if "COLOR" in s and len(s["COLOR"]) > 0 and len(primitive.index) > 0: + color = s["COLOR"][0][4].data + color_index = primitive.index[:, :, s["COLOR"][0][0]] + colors = color[color_index].reshape(len(color_index) * 3, -1) - faces = np.arange( - vertices.shape[0]).reshape( - vertices.shape[0] // 3, 3) + faces = np.arange(vertices.shape[0]).reshape(vertices.shape[0] // 3, 3) # Get UV coordinates if possible vis = None if primitive.material in local_material_map: - material = copy.copy( - local_material_map[primitive.material]) + material = copy.copy(local_material_map[primitive.material]) uv = None if len(primitive.texcoordset) > 0: texcoord = primitive.texcoordset[0] texcoord_index = primitive.texcoord_indexset[0] uv = texcoord[texcoord_index].reshape( - (len(texcoord_index) * 3, 2)) - vis = visual.texture.TextureVisuals( - uv=uv, material=material) + (len(texcoord_index) * 3, 2) + ) + vis = visual.texture.TextureVisuals(uv=uv, material=material) - geom_name = unique_name(geometry.id, - contains=meshes, - counts=meshes_count) + geom_name = unique_name(geometry.id, contains=meshes, counts=meshes_count) meshes[geom_name] = { - 'vertices': vertices, - 'faces': faces, - 'vertex_normals': normals, - 'vertex_colors': colors, - 'visual': vis} - - graph.append({'frame_to': geom_name, - 'matrix': parent_matrix, - 'geometry': geom_name}) + "vertices": vertices, + "faces": faces, + "vertex_normals": normals, + "vertex_colors": colors, + "visual": vis, + } + + graph.append( + { + "frame_to": geom_name, + "matrix": parent_matrix, + "geometry": geom_name, + } + ) # recurse down tree for nodes with children elif isinstance(node, collada.scene.Node): @@ -265,7 +255,8 @@ def _parse_node(node, meshes=meshes, meshes_count=meshes_count, graph=graph, - resolver=resolver) + resolver=resolver, + ) elif isinstance(node, collada.scene.CameraNode): # TODO: convert collada cameras 
to trimesh cameras @@ -298,10 +289,10 @@ def _parse_material(effect, resolver): if isinstance(effect.diffuse, collada.material.Map): try: baseColorTexture = _load_texture( - effect.diffuse.sampler.surface.image.path, resolver) + effect.diffuse.sampler.surface.image.path, resolver + ) except BaseException: - log.debug('unable to load base texture', - exc_info=True) + log.debug("unable to load base texture", exc_info=True) elif effect.diffuse is not None: baseColorFactor = effect.diffuse @@ -311,17 +302,19 @@ def _parse_material(effect, resolver): if isinstance(effect.emission, collada.material.Map): try: emissiveTexture = _load_texture( - effect.diffuse.sampler.surface.image.path, resolver) + effect.diffuse.sampler.surface.image.path, resolver + ) except BaseException: - log.warning('unable to load emissive texture', - exc_info=True) + log.warning("unable to load emissive texture", exc_info=True) elif effect.emission is not None: emissiveFactor = effect.emission[:3] # Compute roughness roughnessFactor = 1.0 - if (not isinstance(effect.shininess, collada.material.Map) - and effect.shininess is not None): + if ( + not isinstance(effect.shininess, collada.material.Map) + and effect.shininess is not None + ): roughnessFactor = np.sqrt(2.0 / (2.0 + effect.shininess)) # Compute metallic factor @@ -332,16 +325,18 @@ def _parse_material(effect, resolver): if effect.bumpmap is not None: try: normalTexture = _load_texture( - effect.bumpmap.sampler.surface.image.path, resolver) + effect.bumpmap.sampler.surface.image.path, resolver + ) except BaseException: - log.warning('unable to load bumpmap', - exc_info=True) + log.warning("unable to load bumpmap", exc_info=True) # Compute opacity - if (effect.transparent is not None - and not isinstance(effect.transparent, collada.material.Map)): + if effect.transparent is not None and not isinstance( + effect.transparent, collada.material.Map + ): baseColorFactor = tuple( - np.append(baseColorFactor[:3], float(effect.transparent[3]))) + np.append(baseColorFactor[:3], float(effect.transparent[3])) + ) return visual.material.PBRMaterial( emissiveFactor=emissiveFactor, @@ -350,7 +345,8 @@ def _parse_material(effect, resolver): baseColorTexture=baseColorTexture, baseColorFactor=baseColorFactor, metallicFactor=metallicFactor, - roughnessFactor=roughnessFactor) + roughnessFactor=roughnessFactor, + ) def _unparse_material(material): @@ -368,28 +364,25 @@ def _unparse_material(material): emission = material.emissiveFactor if emission is not None: - emission = [float(emission[0]), float(emission[1]), - float(emission[2]), 1.0] + emission = [float(emission[0]), float(emission[1]), float(emission[2]), 1.0] shininess = material.roughnessFactor if shininess is not None: shininess = 2.0 / shininess**2 - 2.0 effect = collada.material.Effect( - uuid.uuid4().hex, params=[], shadingtype='phong', - diffuse=diffuse, emission=emission, - specular=[1.0, 1.0, 1.0, 1.0], shininess=float(shininess) - ) - material = collada.material.Material( - uuid.uuid4().hex, 'pbrmaterial', effect + uuid.uuid4().hex, + params=[], + shadingtype="phong", + diffuse=diffuse, + emission=emission, + specular=[1.0, 1.0, 1.0, 1.0], + shininess=float(shininess), ) + material = collada.material.Material(uuid.uuid4().hex, "pbrmaterial", effect) else: - effect = collada.material.Effect( - uuid.uuid4().hex, params=[], shadingtype='phong' - ) - material = collada.material.Material( - uuid.uuid4().hex, 'defaultmaterial', effect - ) + effect = collada.material.Effect(uuid.uuid4().hex, params=[], shadingtype="phong") + 
material = collada.material.Material(uuid.uuid4().hex, "defaultmaterial", effect) return material @@ -413,35 +406,30 @@ def load_zae(file_obj, resolver=None, **kwargs): """ # a dict, {file name : file object} - archive = util.decompress(file_obj, - file_type='zip') + archive = util.decompress(file_obj, file_type="zip") # load the first file with a .dae extension - file_name = next(i for i in archive.keys() - if i.lower().endswith('.dae')) + file_name = next(i for i in archive.keys() if i.lower().endswith(".dae")) # a resolver so the loader can load textures / etc resolver = visual.resolvers.ZipResolver(archive) # run the regular collada loader - loaded = load_collada(archive[file_name], - resolver=resolver, - **kwargs) + loaded = load_collada(archive[file_name], resolver=resolver, **kwargs) return loaded # only provide loaders if `pycollada` is installed _collada_loaders = {} _collada_exporters = {} -if util.has_module('collada'): - - _collada_loaders['dae'] = load_collada - _collada_loaders['zae'] = load_zae - _collada_exporters['dae'] = export_collada +if util.has_module("collada"): + _collada_loaders["dae"] = load_collada + _collada_loaders["zae"] = load_zae + _collada_exporters["dae"] = export_collada else: # store an exception to raise later from ..exceptions import ExceptionWrapper - _exc = ExceptionWrapper( - ImportError('missing `pip install pycollada`')) - _collada_loaders.update({'dae': _exc, 'zae': _exc}) - _collada_exporters['dae'] = _exc + + _exc = ExceptionWrapper(ImportError("missing `pip install pycollada`")) + _collada_loaders.update({"dae": _exc, "zae": _exc}) + _collada_exporters["dae"] = _exc diff --git a/trimesh/exchange/export.py b/trimesh/exchange/export.py index fa060a8e8..a3b310bb4 100644 --- a/trimesh/exchange/export.py +++ b/trimesh/exchange/export.py @@ -16,11 +16,7 @@ from .xyz import _xyz_exporters -def export_mesh(mesh, - file_obj, - file_type=None, - resolver=None, - **kwargs): +def export_mesh(mesh, file_obj, file_type=None, resolver=None, **kwargs): """ Export a Trimesh object to a file- like object, or to a filename @@ -50,13 +46,13 @@ def export_mesh(mesh, if util.is_string(file_obj): if file_type is None: # get file type from file name - file_type = (str(file_obj).split('.')[-1]).lower() + file_type = (str(file_obj).split(".")[-1]).lower() if file_type in _mesh_exporters: was_opened = True file_name = file_obj # get full path of file before opening file_path = os.path.abspath(os.path.expanduser(file_obj)) - file_obj = open(file_path, 'wb') + file_obj = open(file_path, "wb") if resolver is None: # create a resolver which can write files to the path resolver = resolvers.FilePathResolver(file_path) @@ -65,22 +61,25 @@ def export_mesh(mesh, file_type = str(file_type).lower() if file_type not in _mesh_exporters: - raise ValueError('%s exporter not available!', file_type) + raise ValueError("%s exporter not available!", file_type) if isinstance(mesh, (list, tuple, set, np.ndarray)): faces = 0 for m in mesh: faces += len(m.faces) - log.debug('Exporting %d meshes with a total of %d faces as %s', - len(mesh), faces, file_type.upper()) - elif hasattr(mesh, 'faces'): + log.debug( + "Exporting %d meshes with a total of %d faces as %s", + len(mesh), + faces, + file_type.upper(), + ) + elif hasattr(mesh, "faces"): # if the mesh has faces log the number - log.debug('Exporting %d faces as %s', len(mesh.faces), - file_type.upper()) + log.debug("Exporting %d faces as %s", len(mesh.faces), file_type.upper()) # OBJ files save assets everywhere - if file_type == 'obj': - 
kwargs['resolver'] = resolver + if file_type == "obj": + kwargs["resolver"] = resolver # run the exporter export = _mesh_exporters[file_type](mesh, **kwargs) @@ -88,8 +87,8 @@ def export_mesh(mesh, # if the export is multiple files (i.e. GLTF) if isinstance(export, dict): # if we have a filename rename the default GLTF - if file_name is not None and 'model.gltf' in export: - export[os.path.basename(file_name)] = export.pop('model.gltf') + if file_name is not None and "model.gltf" in export: + export[os.path.basename(file_name)] = export.pop("model.gltf") # write the files if a resolver has been passed if resolver is not None: @@ -98,7 +97,7 @@ def export_mesh(mesh, return export - if hasattr(file_obj, 'write'): + if hasattr(file_obj, "write"): result = util.write_encoded(file_obj, export) else: result = export @@ -115,7 +114,7 @@ def export_dict64(mesh): Export a mesh as a dictionary, with data encoded to base64. """ - return export_dict(mesh, encoding='base64') + return export_dict(mesh, encoding="base64") def export_dict(mesh, encoding=None): @@ -147,19 +146,19 @@ def encode(item, dtype=None): # sometimes there are giant datastructures we don't # care about in metadata which causes exports to be # extremely slow, so skip all but known good keys - meta_keys = ['units', 'file_name', 'file_path'] + meta_keys = ["units", "file_name", "file_path"] metadata = {k: v for k, v in mesh.metadata.items() if k in meta_keys} export = { - 'metadata': metadata, - 'faces': encode(mesh.faces), - 'face_normals': encode(mesh.face_normals), - 'vertices': encode(mesh.vertices) + "metadata": metadata, + "faces": encode(mesh.faces), + "face_normals": encode(mesh.face_normals), + "vertices": encode(mesh.vertices), } - if mesh.visual.kind == 'face': - export['face_colors'] = encode(mesh.visual.face_colors) - elif mesh.visual.kind == 'vertex': - export['vertex_colors'] = encode(mesh.visual.vertex_colors) + if mesh.visual.kind == "face": + export["face_colors"] = encode(mesh.visual.face_colors) + elif mesh.visual.kind == "vertex": + export["vertex_colors"] = encode(mesh.visual.vertex_colors) return export @@ -180,47 +179,49 @@ def scene_to_dict(scene, use_base64=False, include_metadata=True): """ # save some basic data about the scene - export = {'graph': scene.graph.to_edgelist(), - 'geometry': {}, - 'scene_cache': {'bounds': scene.bounds.tolist(), - 'extents': scene.extents.tolist(), - 'centroid': scene.centroid.tolist(), - 'scale': scene.scale}} + export = { + "graph": scene.graph.to_edgelist(), + "geometry": {}, + "scene_cache": { + "bounds": scene.bounds.tolist(), + "extents": scene.extents.tolist(), + "centroid": scene.centroid.tolist(), + "scale": scene.scale, + }, + } if include_metadata: try: # jsonify will convert numpy arrays to lists recursively # a little silly round-tripping to json but it is pretty fast - export['metadata'] = json.loads(util.jsonify(scene.metadata)) + export["metadata"] = json.loads(util.jsonify(scene.metadata)) except BaseException: - log.warning('failed to serialize metadata', exc_info=True) + log.warning("failed to serialize metadata", exc_info=True) # encode arrays with base64 or not if use_base64: - file_type = 'dict64' + file_type = "dict64" else: - file_type = 'dict' + file_type = "dict" # if the mesh has an export method use it # otherwise put the mesh itself into the export object for geometry_name, geometry in scene.geometry.items(): - if hasattr(geometry, 'export'): + if hasattr(geometry, "export"): # export the data - exported = {'data': 
geometry.export(file_type=file_type), - 'file_type': file_type} - export['geometry'][geometry_name] = exported + exported = { + "data": geometry.export(file_type=file_type), + "file_type": file_type, + } + export["geometry"][geometry_name] = exported else: # case where mesh object doesn't have exporter # might be that someone replaced the mesh with a URL - export['geometry'][geometry_name] = geometry + export["geometry"][geometry_name] = geometry return export -def export_scene(scene, - file_obj, - file_type=None, - resolver=None, - **kwargs): +def export_scene(scene, file_obj, file_type=None, resolver=None, **kwargs): """ Export a snapshot of the current scene. @@ -247,42 +248,41 @@ def export_scene(scene, # if we weren't passed a file type extract from file_obj if file_type is None: if util.is_string(file_obj): - file_type = str(file_obj).split('.')[-1] + file_type = str(file_obj).split(".")[-1] else: - raise ValueError('file_type not specified!') + raise ValueError("file_type not specified!") # always remove whitepace and leading characters - file_type = file_type.strip().lower().lstrip('.') + file_type = file_type.strip().lower().lstrip(".") # now handle our different scene export types - if file_type == 'gltf': + if file_type == "gltf": data = export_gltf(scene, **kwargs) - elif file_type == 'glb': + elif file_type == "glb": data = export_glb(scene, **kwargs) - elif file_type == 'dict': + elif file_type == "dict": data = scene_to_dict(scene, *kwargs) - elif file_type == 'obj': + elif file_type == "obj": # if we are exporting by name automatically create a # resolver which lets the exporter write assets like # the materials and textures next to the exported mesh if resolver is None and util.is_string(file_obj): resolver = resolvers.FilePathResolver(file_obj) data = export_obj(scene, resolver=resolver, **kwargs) - elif file_type == 'dict64': + elif file_type == "dict64": data = scene_to_dict(scene, use_base64=True) - elif file_type == 'svg': + elif file_type == "svg": from trimesh.path.exchange import svg_io + data = svg_io.export_svg(scene, **kwargs) - elif file_type == 'ply': - data = _mesh_exporters['ply']( - scene.dump(concatenate=True), **kwargs) - elif file_type == 'stl': + elif file_type == "ply": + data = _mesh_exporters["ply"](scene.dump(concatenate=True), **kwargs) + elif file_type == "stl": data = export_stl(scene.dump(concatenate=True), **kwargs) - elif file_type == '3mf': - data = _mesh_exporters['3mf'](scene, **kwargs) + elif file_type == "3mf": + data = _mesh_exporters["3mf"](scene, **kwargs) else: - raise ValueError( - f'unsupported export format: {file_type}') + raise ValueError(f"unsupported export format: {file_type}") # now write the data or return bytes of result if isinstance(data, dict): @@ -295,7 +295,7 @@ def export_scene(scene, # the requested "gltf" bare_path = os.path.split(file_obj)[-1] for name, blob in data.items(): - if name == 'model.gltf': + if name == "model.gltf": # write the root data to specified file resolver.write(bare_path, blob) else: @@ -303,14 +303,13 @@ def export_scene(scene, resolver.write(name, blob) return data - if hasattr(file_obj, 'write'): + if hasattr(file_obj, "write"): # if it's just a regular file object return util.write_encoded(file_obj, data) elif util.is_string(file_obj): # assume strings are file paths - file_path = os.path.expanduser( - os.path.abspath(file_obj)) - with open(file_path, 'wb') as f: + file_path = os.path.expanduser(os.path.abspath(file_obj)) + with open(file_path, "wb") as f: util.write_encoded(f, data) # no 
writeable file object so return data @@ -318,13 +317,14 @@ def export_scene(scene, _mesh_exporters = { - 'stl': export_stl, - 'dict': export_dict, - 'glb': export_glb, - 'obj': export_obj, - 'gltf': export_gltf, - 'dict64': export_dict64, - 'stl_ascii': export_stl_ascii} + "stl": export_stl, + "dict": export_dict, + "glb": export_glb, + "obj": export_obj, + "gltf": export_gltf, + "dict64": export_dict64, + "stl_ascii": export_stl_ascii, +} _mesh_exporters.update(_ply_exporters) _mesh_exporters.update(_off_exporters) _mesh_exporters.update(_collada_exporters) diff --git a/trimesh/exchange/load.py b/trimesh/exchange/load.py index 33f3e79ea..80ca60bb5 100644 --- a/trimesh/exchange/load.py +++ b/trimesh/exchange/load.py @@ -47,8 +47,7 @@ def mesh_formats(): i.e. 'stl', 'ply', etc. """ # filter out exceptionmodule loaders - return {k for k, v in mesh_loaders.items() - if not isinstance(v, ExceptionWrapper)} + return {k for k, v in mesh_loaders.items() if not isinstance(v, ExceptionWrapper)} def available_formats(): @@ -69,11 +68,7 @@ def available_formats(): return loaders -def load(file_obj, - file_type=None, - resolver=None, - force=None, - **kwargs): +def load(file_obj, file_type=None, resolver=None, force=None, **kwargs): """ Load a mesh or vectorized path into objects like Trimesh, Path2D, Path3D, Scene @@ -100,19 +95,17 @@ def load(file_obj, # check to see if we're trying to load something # that is already a native trimesh Geometry subclass if isinstance(file_obj, Geometry): - log.info('Load called on %s object, returning input', - file_obj.__class__.__name__) + log.info("Load called on %s object, returning input", file_obj.__class__.__name__) return file_obj # parse the file arguments into clean loadable form - (file_obj, # file- like object - file_type, # str, what kind of file - metadata, # dict, any metadata from file name - opened, # bool, did we open the file ourselves - resolver # object to load referenced resources - ) = parse_file_args(file_obj=file_obj, - file_type=file_type, - resolver=resolver) + ( + file_obj, # file- like object + file_type, # str, what kind of file + metadata, # dict, any metadata from file name + opened, # bool, did we open the file ourselves + resolver, # object to load referenced resources + ) = parse_file_args(file_obj=file_obj, file_type=file_type, resolver=resolver) try: if isinstance(file_obj, dict): @@ -121,34 +114,24 @@ def load(file_obj, loaded = load_kwargs(kwargs) elif file_type in path_formats(): # path formats get loaded with path loader - loaded = load_path(file_obj, - file_type=file_type, - **kwargs) + loaded = load_path(file_obj, file_type=file_type, **kwargs) elif file_type in mesh_loaders: # mesh loaders use mesh loader - loaded = load_mesh(file_obj, - file_type=file_type, - resolver=resolver, - **kwargs) + loaded = load_mesh(file_obj, file_type=file_type, resolver=resolver, **kwargs) elif file_type in compressed_loaders: # for archives, like ZIP files - loaded = load_compressed(file_obj, - file_type=file_type, - **kwargs) + loaded = load_compressed(file_obj, file_type=file_type, **kwargs) elif file_type in voxel_loaders: loaded = voxel_loaders[file_type]( - file_obj, - file_type=file_type, - resolver=resolver, - **kwargs) + file_obj, file_type=file_type, resolver=resolver, **kwargs + ) else: - if file_type in ['svg', 'dxf']: + if file_type in ["svg", "dxf"]: # call the dummy function to raise the import error # this prevents the exception from being super opaque load_path() else: - raise ValueError('File type: %s not supported' % - 
file_type) + raise ValueError("File type: %s not supported" % file_type) finally: # close any opened files even if we crashed out if opened: @@ -164,18 +147,15 @@ def load(file_obj, file_obj.close() # combine a scene into a single mesh - if force == 'mesh' and isinstance(loaded, Scene): + if force == "mesh" and isinstance(loaded, Scene): return util.concatenate(loaded.dump()) - if force == 'scene' and not isinstance(loaded, Scene): + if force == "scene" and not isinstance(loaded, Scene): return Scene(loaded) return loaded -def load_mesh(file_obj, - file_type=None, - resolver=None, - **kwargs): +def load_mesh(file_obj, file_type=None, resolver=None, **kwargs): """ Load a mesh file into a Trimesh object @@ -195,24 +175,20 @@ def load_mesh(file_obj, """ # parse the file arguments into clean loadable form - (file_obj, # file- like object - file_type, # str, what kind of file - metadata, # dict, any metadata from file name - opened, # bool, did we open the file ourselves - resolver # object to load referenced resources - ) = parse_file_args(file_obj=file_obj, - file_type=file_type, - resolver=resolver) + ( + file_obj, # file- like object + file_type, # str, what kind of file + metadata, # dict, any metadata from file name + opened, # bool, did we open the file ourselves + resolver, # object to load referenced resources + ) = parse_file_args(file_obj=file_obj, file_type=file_type, resolver=resolver) try: # make sure we keep passed kwargs to loader # but also make sure loader keys override passed keys loader = mesh_loaders[file_type] tic = now() - results = loader(file_obj, - file_type=file_type, - resolver=resolver, - **kwargs) + results = loader(file_obj, file_type=file_type, resolver=resolver, **kwargs) if not isinstance(results, list): results = [results] @@ -225,7 +201,8 @@ def load_mesh(file_obj, loaded = loaded[0] # show the repr for loaded, loader used, and time log.debug( - f'loaded {str(loaded)} using `{loader.__name__}` in {now() - tic:0.4f}s') + f"loaded {str(loaded)} using `{loader.__name__}` in {now() - tic:0.4f}s" + ) finally: # if we failed to load close file if opened: @@ -234,11 +211,7 @@ def load_mesh(file_obj, return loaded -def load_compressed(file_obj, - file_type=None, - resolver=None, - mixed=False, - **kwargs): +def load_compressed(file_obj, file_type=None, resolver=None, mixed=False, **kwargs): """ Given a compressed archive load all the geometry that we can from it. 
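
The reformatted hunks above leave the loader dispatch intact: `load` routes mesh
formats through `load_mesh`, archive formats through `load_compressed`, and then
applies `force` as a post-processing step. A minimal sketch of that behavior,
assuming hypothetical local files `box.stl` and `models.zip`:

    import trimesh

    # force="mesh" concatenates a loaded Scene into a single mesh,
    # force="scene" wraps a lone Trimesh in a Scene (per the hunk above)
    mesh = trimesh.load("box.stl", force="mesh")
    scene = trimesh.load("box.stl", force="scene")

    # ZIP / tar archives dispatch to `load_compressed`, which runs the
    # regular loaders on every supported file found inside the archive
    archive = trimesh.load("models.zip")
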
@@ -260,19 +233,17 @@ def load_compressed(file_obj, """ # parse the file arguments into clean loadable form - (file_obj, # file- like object - file_type, # str, what kind of file - metadata, # dict, any metadata from file name - opened, # bool, did we open the file ourselves - resolver # object to load referenced resources - ) = parse_file_args(file_obj=file_obj, - file_type=file_type, - resolver=resolver) + ( + file_obj, # file- like object + file_type, # str, what kind of file + metadata, # dict, any metadata from file name + opened, # bool, did we open the file ourselves + resolver, # object to load referenced resources + ) = parse_file_args(file_obj=file_obj, file_type=file_type, resolver=resolver) try: # a dict of 'name' : file-like object - files = util.decompress(file_obj=file_obj, - file_type=file_type) + files = util.decompress(file_obj=file_obj, file_type=file_type) # store loaded geometries as a list geometries = [] @@ -280,18 +251,17 @@ def load_compressed(file_obj, resolver = resolvers.ZipResolver(files) # try to save the files with meaningful metadata - if 'file_path' in metadata: - archive_name = metadata['file_path'] + if "file_path" in metadata: + archive_name = metadata["file_path"] else: - archive_name = 'archive' + archive_name = "archive" # populate our available formats if mixed: available = available_formats() else: # all types contained in ZIP archive - contains = {util.split_extension(n).lower() - for n in files.keys()} + contains = {util.split_extension(n).lower() for n in files.keys()} # if there are no mesh formats available if contains.isdisjoint(mesh_formats()): available = path_formats() @@ -302,29 +272,31 @@ def load_compressed(file_obj, for name, data in files.items(): try: # only load formats that we support - compressed_type = util.split_extension( - name).lower() + compressed_type = util.split_extension(name).lower() # if file has metadata type include it - if compressed_type in 'yaml': + if compressed_type in "yaml": import yaml + meta_archive[name] = yaml.safe_load(data) - elif compressed_type in 'json': + elif compressed_type in "json": import json + meta_archive[name] = json.loads(data) if compressed_type not in available: # don't raise an exception, just try the next one continue # store the file name relative to the archive - metadata['file_name'] = (archive_name + '/' + - os.path.basename(name)) + metadata["file_name"] = archive_name + "/" + os.path.basename(name) # load the individual geometry - loaded = load(file_obj=data, - file_type=compressed_type, - resolver=resolver, - metadata=metadata, - **kwargs) + loaded = load( + file_obj=data, + file_type=compressed_type, + resolver=resolver, + metadata=metadata, + **kwargs, + ) # some loaders return multiple geometries if util.is_sequence(loaded): @@ -334,8 +306,7 @@ def load_compressed(file_obj, # if the loader has returned a single geometry geometries.append(loaded) except BaseException: - log.debug('failed to load file in zip', - exc_info=True) + log.debug("failed to load file in zip", exc_info=True) finally: # if we opened the file in this function @@ -388,18 +359,15 @@ def load_remote(url, **kwargs): # will be wrong so try to clean up the URL # urllib is Python 3 only import urllib + # remove the url-safe encoding then split off query params - file_type = urllib.parse.unquote( - url).split('?', 1)[0].split('/')[-1].strip() + file_type = urllib.parse.unquote(url).split("?", 1)[0].split("/")[-1].strip() except BaseException: # otherwise just use the last chunk of URL - file_type = 
url.split('/')[-1].split('?', 1)[0] + file_type = url.split("/")[-1].split("?", 1)[0] # actually load the data from the retrieved bytes - loaded = load(file_obj=file_obj, - file_type=file_type, - resolver=resolver, - **kwargs) + loaded = load(file_obj=file_obj, file_type=file_type, resolver=resolver, **kwargs) return loaded @@ -407,6 +375,7 @@ def load_kwargs(*args, **kwargs): """ Load geometry from a properly formatted dict or kwargs """ + def handle_scene(): """ Load a scene from our kwargs. @@ -416,9 +385,8 @@ def handle_scene(): graph: list of dict, kwargs for scene.graph.update base_frame: str, base frame of graph """ - graph = kwargs.get('graph', None) - geometry = {k: load_kwargs(v) for - k, v in kwargs['geometry'].items()} + graph = kwargs.get("graph", None) + geometry = {k: load_kwargs(v) for k, v in kwargs["geometry"].items()} if graph is not None: scene = Scene() @@ -431,18 +399,19 @@ def handle_scene(): else: scene = Scene(geometry) - if 'base_frame' in kwargs: - scene.graph.base_frame = kwargs['base_frame'] - metadata = kwargs.get('metadata') + if "base_frame" in kwargs: + scene.graph.base_frame = kwargs["base_frame"] + metadata = kwargs.get("metadata") if isinstance(metadata, dict): - scene.metadata.update(kwargs['metadata']) + scene.metadata.update(kwargs["metadata"]) elif isinstance(metadata, str): # some ways someone might have encoded a string # note that these aren't evaluated until we # actually call the lambda in the loop candidates = [ lambda: json.loads(metadata), - lambda: json.loads(metadata.replace("'", '"'))] + lambda: json.loads(metadata.replace("'", '"')), + ] for c in candidates: try: scene.metadata.update(c()) @@ -450,7 +419,7 @@ def handle_scene(): except BaseException: pass elif metadata is not None: - log.warning('unloadable metadata') + log.warning("unloadable metadata") return scene @@ -459,8 +428,7 @@ def handle_mesh(): Handle the keyword arguments for a Trimesh object """ # if they've been serialized as a dict - if (isinstance(kwargs['vertices'], dict) or - isinstance(kwargs['faces'], dict)): + if isinstance(kwargs["vertices"], dict) or isinstance(kwargs["faces"], dict): return Trimesh(**misc.load_dict(kwargs)) # otherwise just load that puppy return Trimesh(**kwargs) @@ -469,16 +437,16 @@ def handle_export(): """ Handle an exported mesh. 
""" - data, file_type = kwargs['data'], kwargs['file_type'] + data, file_type = kwargs["data"], kwargs["file_type"] if not isinstance(data, dict): data = util.wrap_as_stream(data) - k = mesh_loaders[file_type]( - data, file_type=file_type) + k = mesh_loaders[file_type](data, file_type=file_type) return Trimesh(**k) def handle_path(): from ..path import Path2D, Path3D - shape = np.shape(kwargs['vertices']) + + shape = np.shape(kwargs["vertices"]) if len(shape) < 2: return Path2D() if shape[1] == 2: @@ -486,26 +454,25 @@ def handle_path(): elif shape[1] == 3: return Path3D(**kwargs) else: - raise ValueError('Vertices must be 2D or 3D!') + raise ValueError("Vertices must be 2D or 3D!") def handle_pointcloud(): return PointCloud(**kwargs) # if we've been passed a single dict instead of kwargs # substitute the dict for kwargs - if (len(kwargs) == 0 and - len(args) == 1 and - isinstance(args[0], dict)): + if len(kwargs) == 0 and len(args) == 1 and isinstance(args[0], dict): kwargs = args[0] # (function, tuple of expected keys) # order is important handlers = ( - (handle_scene, ('geometry',)), - (handle_mesh, ('vertices', 'faces')), - (handle_path, ('entities', 'vertices')), - (handle_pointcloud, ('vertices',)), - (handle_export, ('file_type', 'data'))) + (handle_scene, ("geometry",)), + (handle_mesh, ("vertices", "faces")), + (handle_path, ("entities", "vertices")), + (handle_pointcloud, ("vertices",)), + (handle_export, ("file_type", "data")), + ) # filter out keys with a value of None kwargs = {k: v for k, v in kwargs.items() if v is not None} @@ -517,15 +484,12 @@ def handle_pointcloud(): # exit the loop as we found one break else: - raise ValueError(f'unable to determine type: {kwargs.keys()}') + raise ValueError(f"unable to determine type: {kwargs.keys()}") return handler() -def parse_file_args(file_obj, - file_type, - resolver=None, - **kwargs): +def parse_file_args(file_obj, file_type, resolver=None, **kwargs): """ Given a file_obj and a file_type try to magically convert arguments to a file-like object and a lowercase string of @@ -577,16 +541,15 @@ def parse_file_args(file_obj, """ metadata = {} opened = False - if ('metadata' in kwargs and - isinstance(kwargs['metadata'], dict)): - metadata.update(kwargs['metadata']) + if "metadata" in kwargs and isinstance(kwargs["metadata"], dict): + metadata.update(kwargs["metadata"]) if util.is_pathlib(file_obj): # convert pathlib objects to string file_obj = str(file_obj.absolute()) if util.is_file(file_obj) and file_type is None: - raise ValueError('file_type must be set for file objects!') + raise ValueError("file_type must be set for file objects!") if util.is_string(file_obj): try: # os.path.isfile will return False incorrectly @@ -603,38 +566,35 @@ def parse_file_args(file_obj, if resolver is None: resolver = resolvers.FilePathResolver(file_path) # save the file name and path to metadata - metadata['file_path'] = file_path - metadata['file_name'] = os.path.basename(file_obj) + metadata["file_path"] = file_path + metadata["file_name"] = os.path.basename(file_obj) # if file_obj is a path that exists use extension as file_type if file_type is None: - file_type = util.split_extension( - file_path, - special=['tar.gz', 'tar.bz2']) + file_type = util.split_extension(file_path, special=["tar.gz", "tar.bz2"]) # actually open the file - file_obj = open(file_path, 'rb') + file_obj = open(file_path, "rb") opened = True else: - if '{' in file_obj: + if "{" in file_obj: # if a dict bracket is in the string, its probably a straight # JSON - file_type = 
'json' - elif 'https://' in file_obj or 'http://' in file_obj: + file_type = "json" + elif "https://" in file_obj or "http://" in file_obj: # we've been passed a URL, warn to use explicit function # and don't do network calls via magical pipeline - raise ValueError( - f'use load_remote to load URL: {file_obj}') + raise ValueError(f"use load_remote to load URL: {file_obj}") elif file_type is None: - raise ValueError(f'string is not a file: {file_obj}') + raise ValueError(f"string is not a file: {file_obj}") if file_type is None: file_type = file_obj.__class__.__name__ - if util.is_string(file_type) and '.' in file_type: + if util.is_string(file_type) and "." in file_type: # if someone has passed the whole filename as the file_type # use the file extension as the file_type - if 'file_path' not in metadata: - metadata['file_path'] = file_type - metadata['file_name'] = os.path.basename(file_type) + if "file_path" not in metadata: + metadata["file_path"] = file_type + metadata["file_name"] = os.path.basename(file_type) file_type = util.split_extension(file_type) if resolver is None and os.path.exists(file_type): resolver = resolvers.FilePathResolver(file_type) @@ -643,19 +603,23 @@ def parse_file_args(file_obj, file_type = file_type.lower() # if we still have no resolver try using file_obj name - if (resolver is None and - hasattr(file_obj, 'name') and - file_obj.name is not None and - len(file_obj.name) > 0): + if ( + resolver is None + and hasattr(file_obj, "name") + and file_obj.name is not None + and len(file_obj.name) > 0 + ): resolver = resolvers.FilePathResolver(file_obj.name) return file_obj, file_type, metadata, opened, resolver # loader functions for compressed extensions -compressed_loaders = {'zip': load_compressed, - 'tar.bz2': load_compressed, - 'tar.gz': load_compressed} +compressed_loaders = { + "zip": load_compressed, + "tar.bz2": load_compressed, + "tar.gz": load_compressed, +} # map file_type to loader function mesh_loaders = {} diff --git a/trimesh/exchange/misc.py b/trimesh/exchange/misc.py index a8dd9373d..f74d1ffaa 100644 --- a/trimesh/exchange/misc.py +++ b/trimesh/exchange/misc.py @@ -31,43 +31,46 @@ def load_dict(data, **kwargs): -face_normals: (n,3) float (optional) """ if data is None: - raise ValueError('data passed to load_dict was None!') - if util.is_instance_named(data, 'Trimesh'): + raise ValueError("data passed to load_dict was None!") + if util.is_instance_named(data, "Trimesh"): return data if util.is_string(data): - if '{' not in data: - raise ValueError('Object is not a JSON encoded dictionary!') - data = json.loads(data.decode('utf-8')) + if "{" not in data: + raise ValueError("Object is not a JSON encoded dictionary!") + data = json.loads(data.decode("utf-8")) elif util.is_file(data): data = json.load(data) # what shape should the data be to be usable - mesh_data = {'vertices': (-1, 3), - 'faces': (-1, (3, 4)), - 'face_normals': (-1, 3), - 'face_colors': (-1, (3, 4)), - 'vertex_normals': (-1, 3), - 'vertex_colors': (-1, (3, 4))} + mesh_data = { + "vertices": (-1, 3), + "faces": (-1, (3, 4)), + "face_normals": (-1, 3), + "face_colors": (-1, (3, 4)), + "vertex_normals": (-1, 3), + "vertex_colors": (-1, (3, 4)), + } # now go through data structure and if anything is encoded as base64 # pull it back into numpy arrays if isinstance(data, dict): loaded = {} - data = util.decode_keys(data, 'utf-8') + data = util.decode_keys(data, "utf-8") for key, shape in mesh_data.items(): if key in data: loaded[key] = util.encoded_to_array(data[key]) if not 
util.is_shape(loaded[key], shape): - raise ValueError('Shape of %s is %s, not %s!', - key, - str(loaded[key].shape), - str(shape)) + raise ValueError( + "Shape of %s is %s, not %s!", + key, + str(loaded[key].shape), + str(shape), + ) if len(key) == 0: - raise ValueError('Unable to extract any mesh data!') + raise ValueError("Unable to extract any mesh data!") return loaded else: - raise ValueError('%s object passed to dict loader!', - data.__class__.__name__) + raise ValueError("%s object passed to dict loader!", data.__class__.__name__) def load_meshio(file_obj, file_type=None, **kwargs): @@ -97,41 +100,36 @@ def load_meshio(file_obj, file_type=None, **kwargs): mesh = None for file_format in file_formats: try: - mesh = meshio.read( - file_obj.name, - file_format=file_format) + mesh = meshio.read(file_obj.name, file_format=file_format) break except BaseException: - util.log.debug('failed to load', exc_info=True) + util.log.debug("failed to load", exc_info=True) if mesh is None: - raise ValueError('Failed to load file!') + raise ValueError("Failed to load file!") # save data as kwargs for a trimesh.Trimesh result = {} # pass kwargs to mesh constructor result.update(kwargs) # add vertices - result['vertices'] = mesh.points + result["vertices"] = mesh.points try: # add faces - result['faces'] = mesh.get_cells_type("triangle") + result["faces"] = mesh.get_cells_type("triangle") except BaseException: - util.log.warning('unable to get faces', exc_info=True) - result['faces'] = [] + util.log.warning("unable to get faces", exc_info=True) + result["faces"] = [] return result -_misc_loaders = {'dict': load_dict, - 'dict64': load_dict, - 'json': load_dict} +_misc_loaders = {"dict": load_dict, "dict64": load_dict, "json": load_dict} try: import meshio + # add meshio loaders here - _meshio_loaders = { - k[1:]: load_meshio for k in - meshio.extension_to_filetypes.keys()} + _meshio_loaders = {k[1:]: load_meshio for k in meshio.extension_to_filetypes.keys()} _misc_loaders.update(_meshio_loaders) except BaseException: _meshio_loaders = {} diff --git a/trimesh/exchange/obj.py b/trimesh/exchange/obj.py index 22dae35ca..569610fbf 100644 --- a/trimesh/exchange/obj.py +++ b/trimesh/exchange/obj.py @@ -11,6 +11,7 @@ # if someone tries to use Image re-raise # the import error so they can debug easily from ..exceptions import ExceptionWrapper + Image = ExceptionWrapper(E) from .. import util @@ -20,12 +21,14 @@ from ..visual.texture import TextureVisuals, unmerge_faces -def load_obj(file_obj, - resolver=None, - group_material=True, - skip_materials=False, - maintain_order=False, - **kwargs): +def load_obj( + file_obj, + resolver=None, + group_material=True, + skip_materials=False, + maintain_order=False, + **kwargs, +): """ Load a Wavefront OBJ file into kwargs for a trimesh.Scene object. 
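
The `load_obj` hunks that follow are also formatting-only; the keyword arguments
in the signature above keep their existing meaning. A short usage sketch, with a
hypothetical `model.obj` path, showing how those kwargs pass through
`trimesh.load` down into this loader:

    import trimesh

    # group_material=False splits geometry per object chunk instead of
    # merging all face chunks that share a material;
    # skip_materials=True skips the `mtllib` -> MTL -> texture lookup;
    # maintain_order=True keeps vertex order stable when faces are
    # unmerged to handle per-corner texture or normal indices
    loaded = trimesh.load(
        "model.obj",
        group_material=False,
        skip_materials=True,
        maintain_order=True,
    )
    # a single geometry comes back as a Trimesh, multiple as a Scene
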
@@ -59,33 +62,30 @@ def load_obj(file_obj, # add leading and trailing newlines so we can use the # same logic even if they jump directly in to data lines - text = '\n{}\n'.format(text.strip().replace('\r\n', '\n')) + text = "\n{}\n".format(text.strip().replace("\r\n", "\n")) # remove backslash continuation characters and merge them into the same # line - text = text.replace('\\\n', '') + text = text.replace("\\\n", "") # Load Materials materials = {} - mtl_position = text.find('mtllib') + mtl_position = text.find("mtllib") if not skip_materials and mtl_position >= 0: # take the line of the material file after `mtllib` # which should be the file location of the .mtl file - mtl_path = text[mtl_position + 6:text.find('\n', mtl_position)].strip() + mtl_path = text[mtl_position + 6 : text.find("\n", mtl_position)].strip() try: # use the resolver to get the data - material_kwargs = parse_mtl(resolver[mtl_path], - resolver=resolver) + material_kwargs = parse_mtl(resolver[mtl_path], resolver=resolver) # turn parsed kwargs into material objects - materials = {k: SimpleMaterial(**v) - for k, v in material_kwargs.items()} + materials = {k: SimpleMaterial(**v) for k, v in material_kwargs.items()} except (OSError, TypeError): # usually the resolver couldn't find the asset - log.debug(f'unable to load materials from: {mtl_path}') + log.debug(f"unable to load materials from: {mtl_path}") except BaseException: # something else happened so log a warning - log.debug(f'unable to load materials from: {mtl_path}', - exc_info=True) + log.debug(f"unable to load materials from: {mtl_path}", exc_info=True) # extract vertices from raw text v, vn, vt, vc = _parse_vertices(text=text) @@ -103,11 +103,11 @@ def load_obj(file_obj, # no faces but points given # return point cloud if not len(face_tuples) and v is not None: - pc = {'vertices': v} + pc = {"vertices": v} if vn is not None: - pc['vertex_normals'] = vn + pc["vertex_normals"] = vn if vc is not None: - pc['vertex_colors'] = vc + pc["vertex_colors"] = vc return pc # Load Faces @@ -130,38 +130,38 @@ def load_obj(file_obj, # maxsplit=1 means that it can stop working # after it finds the first newline # passed as arg as it's not a kwarg in python2 - face_lines = [i.split('\n', 1)[0].strip() - for i in re.split('^f', chunk, flags=re.MULTILINE)[1:]] + face_lines = [ + i.split("\n", 1)[0].strip() + for i in re.split("^f", chunk, flags=re.MULTILINE)[1:] + ] # check every face for mixed tri-quad-ngon - columns = len(face_lines[0].replace('/', ' ').split()) - flat_array = all(columns == len(f.replace('/', ' ').split()) - for f in face_lines) + columns = len(face_lines[0].replace("/", " ").split()) + flat_array = all(columns == len(f.replace("/", " ").split()) for f in face_lines) # make sure we have the right number of values for vectorized if flat_array: # the fastest way to get to a numpy array # processes the whole string at once into a 1D array - array = np.fromstring(' '.join(face_lines).replace('/', ' '), - sep=' ', dtype=np.int64) + array = np.fromstring( + " ".join(face_lines).replace("/", " "), sep=" ", dtype=np.int64 + ) # also wavefront is 1-indexed (vs 0-indexed) so offset # only applies to positive indices array[array > 0] -= 1 # everything is a nice 2D array faces, faces_tex, faces_norm = _parse_faces_vectorized( - array=array, - columns=columns, - sample_line=face_lines[0]) + array=array, columns=columns, sample_line=face_lines[0] + ) else: # if we had something annoying like mixed in quads # or faces that differ per-line we have to loop # i.e. 
something like: # '31407 31406 31408', # '32303/2469 32304/2469 32305/2469', - log.debug('faces have mixed data: using slow fallback!') - faces, faces_tex, faces_norm = _parse_faces_fallback( - face_lines) + log.debug("faces have mixed data: using slow fallback!") + faces, faces_tex, faces_norm = _parse_faces_fallback(face_lines) if group_material: name = material @@ -179,12 +179,14 @@ def load_obj(file_obj, # where each face if faces_norm is not None and len(faces_norm) == len(faces): new_faces, mask_v, mask_vt, mask_vn = unmerge_faces( - faces, faces_tex, faces_norm, maintain_faces=maintain_order) + faces, faces_tex, faces_norm, maintain_faces=maintain_order + ) else: mask_vn = None # no face normals but face texturre new_faces, mask_v, mask_vt = unmerge_faces( - faces, faces_tex, maintain_faces=maintain_order) + faces, faces_tex, maintain_faces=maintain_order + ) if tol.strict: # we should NOT have messed up the faces @@ -198,12 +200,11 @@ def load_obj(file_obj, # want materials without UV coordinates uv = vt[mask_vt] except BaseException: - log.debug('index failed on UV coordinates, skipping!') + log.debug("index failed on UV coordinates, skipping!") uv = None # mask vertices and use new faces - mesh.update({'vertices': v[mask_v].copy(), - 'faces': new_faces}) + mesh.update({"vertices": v[mask_v].copy(), "faces": new_faces}) else: # otherwise just use unmasked vertices @@ -214,9 +215,8 @@ def load_obj(file_obj, if vn is not None and np.shape(faces_norm) == faces.shape: # do the crazy unmerging logic for split indices new_faces, mask_v, mask_vn = unmerge_faces( - faces, - faces_norm, - maintain_faces=maintain_order) + faces, faces_norm, maintain_faces=maintain_order + ) else: # generate the mask so we only include # referenced vertices in every new mesh @@ -231,43 +231,41 @@ def load_obj(file_obj, mask_vn = None # start with vertices and faces - mesh.update({'faces': new_faces, - 'vertices': v[mask_v].copy()}) + mesh.update({"faces": new_faces, "vertices": v[mask_v].copy()}) # if colors and normals are OK save them if vc is not None: try: # may fail on a malformed color mask - mesh['vertex_colors'] = vc[mask_v] + mesh["vertex_colors"] = vc[mask_v] except BaseException: - log.debug('failed to load vertex_colors', - exc_info=True) + log.debug("failed to load vertex_colors", exc_info=True) if mask_vn is not None: try: # may fail on a malformed mask normals = vn[mask_vn] - if normals.shape != mesh['vertices'].shape: - raise ValueError('incorrect normals {} != {}'.format( - str(normals.shape), - str(mesh['vertices'].shape))) - mesh['vertex_normals'] = normals + if normals.shape != mesh["vertices"].shape: + raise ValueError( + "incorrect normals {} != {}".format( + str(normals.shape), str(mesh["vertices"].shape) + ) + ) + mesh["vertex_normals"] = normals except BaseException: - log.debug('failed to load vertex_normals', - exc_info=True) + log.debug("failed to load vertex_normals", exc_info=True) visual = None if material in materials: # use the material with the UV coordinates - visual = TextureVisuals( - uv=uv, material=materials[material]) - elif uv is not None and len(uv) == len(mesh['vertices']): + visual = TextureVisuals(uv=uv, material=materials[material]) + elif uv is not None and len(uv) == len(mesh["vertices"]): # create a texture with an empty materials visual = TextureVisuals(uv=uv) elif material is not None: # case where material is specified but not available - log.debug(f'specified material ({material}) not loaded!') + log.debug(f"specified material ({material}) not loaded!") # 
assign the visual - mesh['visual'] = visual + mesh["visual"] = visual # store geometry by name geometry[name] = mesh @@ -276,13 +274,10 @@ def load_obj(file_obj, return next(iter(geometry.values())) # add an identity transform for every geometry - graph = [{'geometry': k, - 'frame_to': k} - for k in geometry.keys()] + graph = [{"geometry": k, "frame_to": k} for k in geometry.keys()] # convert to scene kwargs - result = {'geometry': geometry, - 'graph': graph} + result = {"geometry": geometry, "graph": graph} return result @@ -314,10 +309,7 @@ def parse_mtl(mtl, resolver=None): lines = str.splitlines(str(mtl).strip()) # remap OBJ property names to kwargs for SimpleMaterial - mapped = {'kd': 'diffuse', - 'ka': 'ambient', - 'ks': 'specular', - 'ns': 'glossiness'} + mapped = {"kd": "diffuse", "ka": "ambient", "ks": "specular", "ns": "glossiness"} for line in lines: # split by white space @@ -328,27 +320,26 @@ def parse_mtl(mtl, resolver=None): # the first value is the parameter name key = split[0].lower() # start a new material - if key == 'newmtl': + if key == "newmtl": # material name extracted from line like: # newmtl material_0 if material is not None: # save the old material by old name and remove key - materials[material.pop('newmtl')] = material + materials[material.pop("newmtl")] = material # start a fresh new material - material = {'newmtl': ' '.join(split[1:])} + material = {"newmtl": " ".join(split[1:])} - elif key == 'map_kd': + elif key == "map_kd": # represents the file name of the texture image - index = line.lower().index('map_kd') + 6 + index = line.lower().index("map_kd") + 6 file_name = line[index:].strip() try: file_data = resolver.get(file_name) # load the bytes into a PIL image # an image file name - material['image'] = Image.open( - util.wrap_as_stream(file_data)) + material["image"] = Image.open(util.wrap_as_stream(file_data)) except BaseException: - log.debug('failed to load image', exc_info=True) + log.debug("failed to load image", exc_info=True) elif key in mapped.keys(): try: @@ -362,14 +353,14 @@ def parse_mtl(mtl, resolver=None): # also store key by OBJ name material[key] = value except BaseException: - log.debug('failed to convert color!', exc_info=True) + log.debug("failed to convert color!", exc_info=True) # pass everything as kwargs to material constructor elif material is not None: # save any other unspecified keys material[key] = split[1:] # reached EOF so save any existing materials if material: - materials[material.pop('newmtl')] = material + materials[material.pop("newmtl")] = material return materials @@ -419,7 +410,7 @@ def _parse_faces_vectorized(array, columns, sample_line): # count how many delimiters are in the first face line # to see if our second value is texture or normals # do splitting to clip off leading/trailing slashes - count = ''.join(i.strip('/') for i in sample_line.split()).count('/') + count = "".join(i.strip("/") for i in sample_line.split()).count("/") if count == columns: # case where each face line looks like: # ' 75//139 76//141 77//141' @@ -431,7 +422,7 @@ def _parse_faces_vectorized(array, columns, sample_line): # which is vertex/texture faces_tex = array[:, index + 1] else: - log.debug(f'face lines are weird: {sample_line}') + log.debug(f"face lines are weird: {sample_line}") elif columns == 9: # if we have three values per vertex # second value is always texture @@ -464,39 +455,37 @@ def _parse_faces_fallback(lines): for line in lines: # remove leading newlines then # take first bit before newline then split by whitespace - 
split = line.strip().split('\n')[0].split() + split = line.strip().split("\n")[0].split() # split into: ['76/558/76', '498/265/498', '456/267/456'] len_split = len(split) if len_split == 3: pass elif len_split == 4: # triangulate quad face - split = [split[0], - split[1], - split[2], - split[2], - split[3], - split[0]] + split = [split[0], split[1], split[2], split[2], split[3], split[0]] elif len_split > 4: # triangulate polygon as a triangles fan collect = [] # we need a flat list so append inside # a list comprehension collect_append = collect.append - [[collect_append(split[0]), - collect_append(split[i + 1]), - collect_append(split[i + 2])] - for i in range(len(split) - 2)] + [ + [ + collect_append(split[0]), + collect_append(split[i + 1]), + collect_append(split[i + 2]), + ] + for i in range(len(split) - 2) + ] split = collect else: - log.debug( - f'face needs more values 3>{len(split)} skipping!') + log.debug(f"face needs more values 3>{len(split)} skipping!") continue # f is like: '76/558/76' for f in split: # vertex, vertex texture, vertex normal - split = f.split('/') + split = f.split("/") # we always have a vertex reference v.append(int(split[0])) @@ -553,28 +542,31 @@ def _parse_vertices(text): # up to the location of out our first vertex but we # are going to use this check for "do we have texture" # determination later so search the whole stupid file - starts = {k: text.find(f'\n{k} ') for k in - ['v', 'vt', 'vn']} + starts = {k: text.find(f"\n{k} ") for k in ["v", "vt", "vn"]} # no valid values so exit early if not any(v >= 0 for v in starts.values()): return None, None, None, None # find the last position of each valid value - ends = {k: text.find( - '\n', text.rfind(f'\n{k} ') + 2 + len(k)) - for k, v in starts.items() if v >= 0} + ends = { + k: text.find("\n", text.rfind(f"\n{k} ") + 2 + len(k)) + for k, v in starts.items() + if v >= 0 + } # take the first and last position of any vertex property start = min(s for s in starts.values() if s >= 0) end = max(e for e in ends.values() if e >= 0) # get the chunk of test that contains vertex data - chunk = text[start:end].replace('+e', 'e').replace('-e', 'e') + chunk = text[start:end].replace("+e", "e").replace("-e", "e") # get the clean-ish data from the file as python lists - data = {k: [i.split('\n', 1)[0] - for i in chunk.split(f'\n{k} ')[1:]] - for k, v in starts.items() if v >= 0} + data = { + k: [i.split("\n", 1)[0] for i in chunk.split(f"\n{k} ")[1:]] + for k, v in starts.items() + if v >= 0 + } # count the number of data values per row on a sample row per_row = {k: len(v[0].split()) for k, v in data.items()} @@ -583,8 +575,7 @@ def _parse_vertices(text): result = defaultdict(lambda: None) for k, value in data.items(): # use joining and fromstring to get as numpy array - array = np.fromstring( - ' '.join(value), sep=' ', dtype=np.float64) + array = np.fromstring(" ".join(value), sep=" ", dtype=np.float64) # what should our shape be shape = (len(value), per_row[k]) # check shape of flat data @@ -597,13 +588,13 @@ def _parse_vertices(text): try: # try to get result through reshaping result[k] = np.fromstring( - ' '.join(i.split()[:count] for i in value), - sep=' ', dtype=np.float64).reshape(shape) + " ".join(i.split()[:count] for i in value), sep=" ", dtype=np.float64 + ).reshape(shape) except BaseException: pass # vertices - v = result['v'] + v = result["v"] # vertex colors are stored next to vertices vc = None if v is not None and v.shape[1] >= 6: @@ -614,24 +605,24 @@ def _parse_vertices(text): v = v[:, :3] # vertex 
texture or None - vt = result['vt'] + vt = result["vt"] if vt is not None: # sometimes UV coordinates come in as UVW vt = vt[:, :2] # vertex normals or None - vn = result['vn'] + vn = result["vn"] # check will generally only be run in unit tests # so we are allowed to do things that are slow if tol.strict: # check to make sure our subsetting # didn't miss any vertices or data - assert len(v) == text.count('\nv ') + assert len(v) == text.count("\nv ") # make sure optional data matches file too if vn is not None: - assert len(vn) == text.count('\nvn ') + assert len(vn) == text.count("\nvn ") if vt is not None: - assert len(vt) == text.count('\nvt ') + assert len(vt) == text.count("\nvt ") return v, vn, vt, vc @@ -653,7 +644,7 @@ def _group_by_material(face_tuples): """ # store the chunks grouped by material - grouped = defaultdict(lambda: ['', '', []]) + grouped = defaultdict(lambda: ["", "", []]) # loop through existring for material, obj, chunk in face_tuples: grouped[material][0] = material @@ -662,7 +653,7 @@ def _group_by_material(face_tuples): grouped[material][2].append(chunk) # go back and do a join to make a single string for k in grouped.keys(): - grouped[k][2] = '\n'.join(grouped[k][2]) + grouped[k][2] = "\n".join(grouped[k][2]) # return as list return list(grouped.values()) @@ -687,7 +678,7 @@ def _preprocess_faces(text): Tuples of (material, object, data-chunk) """ # see which chunk is relevant - starters = ['\nusemtl ', '\no ', '\nf ', '\ng ', '\ns '] + starters = ["\nusemtl ", "\no ", "\nf ", "\ng ", "\ns "] f_start = len(text) # first index of material, object, face, group, or smoother for st in starters: @@ -700,7 +691,7 @@ def _preprocess_faces(text): if search < f_start: f_start = search # index in blob of the newline after the last face - f_end = text.find('\n', text.rfind('\nf ') + 3) + f_end = text.find("\n", text.rfind("\nf ") + 3) # get the chunk of the file that has face information if f_end >= 0: # clip to the newline after the last face @@ -711,23 +702,18 @@ def _preprocess_faces(text): if tol.strict: # check to make sure our subsetting didn't miss any faces - assert f_chunk.count('\nf ') == text.count('\nf ') + assert f_chunk.count("\nf ") == text.count("\nf ") # two things cause new meshes to be created: # objects and materials # re.finditer was faster than find in a loop # find the index of every material change - idx_mtl = np.array([m.start(0) for m in re.finditer( - 'usemtl ', f_chunk)], dtype=int) + idx_mtl = np.array([m.start(0) for m in re.finditer("usemtl ", f_chunk)], dtype=int) # find the index of every new object - idx_obj = np.array([m.start(0) for m in re.finditer( - '\no ', f_chunk)], dtype=int) + idx_obj = np.array([m.start(0) for m in re.finditer("\no ", f_chunk)], dtype=int) # find all the indexes where we want to split - splits = np.unique(np.concatenate(( - [0, len(f_chunk)], - idx_mtl, - idx_obj))) + splits = np.unique(np.concatenate(([0, len(f_chunk)], idx_mtl, idx_obj))) # track the current material and object ID current_obj = None @@ -737,33 +723,35 @@ def _preprocess_faces(text): for start, end in zip(splits[:-1], splits[1:]): # ensure there's always a trailing newline - chunk = f_chunk[start:end].strip() + '\n' - if chunk.startswith('o '): - current_obj, chunk = chunk.split('\n', 1) + chunk = f_chunk[start:end].strip() + "\n" + if chunk.startswith("o "): + current_obj, chunk = chunk.split("\n", 1) current_obj = current_obj[2:].strip() - elif chunk.startswith('usemtl'): - current_mtl, chunk = chunk.split('\n', 1) + elif 
chunk.startswith("usemtl"): + current_mtl, chunk = chunk.split("\n", 1) current_mtl = current_mtl[6:].strip() # Discard the g tag line in the list of faces - elif chunk.startswith('g '): - _, chunk = chunk.split('\n', 1) + elif chunk.startswith("g "): + _, chunk = chunk.split("\n", 1) # If we have an f at the beginning of a line # then add it to the list of faces chunks - if chunk.startswith('f ') or '\nf' in chunk: + if chunk.startswith("f ") or "\nf" in chunk: face_tuples.append((current_mtl, current_obj, chunk)) return face_tuples -def export_obj(mesh, - include_normals=None, - include_color=True, - include_texture=True, - return_texture=False, - write_texture=True, - resolver=None, - digits=8, - mtl_name=None, - header='https://github.com/mikedh/trimesh'): +def export_obj( + mesh, + include_normals=None, + include_color=True, + include_texture=True, + return_texture=False, + write_texture=True, + resolver=None, + digits=8, + mtl_name=None, + header="https://github.com/mikedh/trimesh", +): """ Export a mesh as a Wavefront OBJ file. TODO: scenes with textured meshes @@ -803,41 +791,44 @@ def export_obj(mesh, """ # store the multiple options for formatting # vertex indexes for faces - face_formats = {('v',): '{}', - ('v', 'vn'): '{}//{}', - ('v', 'vt'): '{}/{}', - ('v', 'vn', 'vt'): '{}/{}/{}'} + face_formats = { + ("v",): "{}", + ("v", "vn"): "{}//{}", + ("v", "vt"): "{}/{}", + ("v", "vn", "vt"): "{}/{}/{}", + } # check the input - if util.is_instance_named(mesh, 'Trimesh'): + if util.is_instance_named(mesh, "Trimesh"): meshes = [mesh] - elif util.is_instance_named(mesh, 'Scene'): + elif util.is_instance_named(mesh, "Scene"): meshes = mesh.dump() - elif util.is_instance_named(mesh, 'PointCloud'): + elif util.is_instance_named(mesh, "PointCloud"): meshes = [mesh] else: - raise ValueError('must be Trimesh or Scene!') + raise ValueError("must be Trimesh or Scene!") # collect lines to export objects = deque([]) # keep track of the number of each export element - counts = {'v': 0, 'vn': 0, 'vt': 0} + counts = {"v": 0, "vn": 0, "vt": 0} # collect materials as we go materials = {} materials_name = set() for current in meshes: # we are going to reference face_formats with this - face_type = ['v'] + face_type = ["v"] # OBJ includes vertex color as RGB elements on the same line - if (include_color and - current.visual.kind in ['vertex', 'face'] and - len(current.visual.vertex_colors)): - + if ( + include_color + and current.visual.kind in ["vertex", "face"] + and len(current.visual.vertex_colors) + ): # create a stacked blob with position and color - v_blob = np.column_stack(( - current.vertices, - to_float(current.visual.vertex_colors[:, :3]))) + v_blob = np.column_stack( + (current.vertices, to_float(current.visual.vertex_colors[:, :3])) + ) else: # otherwise just export vertices v_blob = current.vertices @@ -845,90 +836,89 @@ def export_obj(mesh, # add the first vertex key and convert the array # add the vertices export = deque( - ['v ' + util.array_to_string( - v_blob, - col_delim=' ', - row_delim='\nv ', - digits=digits)]) + [ + "v " + + util.array_to_string( + v_blob, col_delim=" ", row_delim="\nv ", digits=digits + ) + ] + ) # if include_normals is None then # only include if they're already stored if include_normals is None: - include_normals = 'vertex_normals' in current._cache.cache + include_normals = "vertex_normals" in current._cache.cache if include_normals: try: converted = util.array_to_string( current.vertex_normals, - col_delim=' ', - row_delim='\nvn ', - digits=digits) + 
col_delim=" ", + row_delim="\nvn ", + digits=digits, + ) # if vertex normals are stored in cache export them - face_type.append('vn') - export.append('vn ' + converted) + face_type.append("vn") + export.append("vn " + converted) except BaseException: - log.debug('failed to convert vertex normals', - exc_info=True) + log.debug("failed to convert vertex normals", exc_info=True) # collect materials into a dict - if include_texture and hasattr(current.visual, 'uv'): + if include_texture and hasattr(current.visual, "uv"): try: # get a SimpleMaterial material = current.visual.material - if hasattr(material, 'to_simple'): + if hasattr(material, "to_simple"): material = material.to_simple() # hash the material to avoid duplicates hashed = hash(material) if hashed not in materials: # get a unique name for the material - name = util.unique_name( - material.name, materials_name) + name = util.unique_name(material.name, materials_name) # add the name to our collection materials_name.add(name) # convert material to an OBJ MTL - materials[hashed] = material.to_obj( - name=name) + materials[hashed] = material.to_obj(name=name) # get the name of the current material as-stored tex_name = materials[hashed][1] # export the UV coordinates - if len(np.shape(getattr(current.visual, 'uv', None))) == 2: + if len(np.shape(getattr(current.visual, "uv", None))) == 2: converted = util.array_to_string( - current.visual.uv, - col_delim=' ', - row_delim='\nvt ', - digits=digits) + current.visual.uv, col_delim=" ", row_delim="\nvt ", digits=digits + ) # if vertex texture exists and is the right shape - face_type.append('vt') + face_type.append("vt") # add the uv coordinates - export.append('vt ' + converted) + export.append("vt " + converted) # add the directive to use the exported material - export.appendleft(f'usemtl {tex_name}') + export.appendleft(f"usemtl {tex_name}") except BaseException: - log.debug('failed to convert UV coordinates', - exc_info=True) + log.debug("failed to convert UV coordinates", exc_info=True) # the format for a single vertex reference of a face face_format = face_formats[tuple(face_type)] # add the exported faces to the export if available - if hasattr(current, 'faces'): - export.append('f ' + util.array_to_string( - current.faces + 1 + counts['v'], - col_delim=' ', - row_delim='\nf ', - value_format=face_format)) + if hasattr(current, "faces"): + export.append( + "f " + + util.array_to_string( + current.faces + 1 + counts["v"], + col_delim=" ", + row_delim="\nf ", + value_format=face_format, + ) + ) # offset our vertex position - counts['v'] += len(current.vertices) + counts["v"] += len(current.vertices) # add object name if found in metadata - if 'name' in current.metadata: - export.appendleft( - '\no {}'.format(current.metadata['name'])) + if "name" in current.metadata: + export.appendleft("\no {}".format(current.metadata["name"])) # add this object - objects.append('\n'.join(export)) - + objects.append("\n".join(export)) # collect files like images to write mtl_data = {} @@ -940,35 +930,35 @@ def export_obj(mesh, # values are (data, name) for data, _ in materials.values(): for file_name, file_data in data.items(): - if file_name.lower().endswith('.mtl'): + if file_name.lower().endswith(".mtl"): # collect mtl lines into single file mtl_lib.append(file_data) elif file_name not in mtl_data: # things like images mtl_data[file_name] = file_data else: - log.warning(f'not writing {file_name}') + log.warning(f"not writing {file_name}") if mtl_name is None: # if no name passed set a default - 
mtl_name = 'material.mtl' + mtl_name = "material.mtl" # prepend a header to the MTL text if requested if header is not None: - prepend = f'# {header}\n\n'.encode() + prepend = f"# {header}\n\n".encode() else: - prepend = b'' + prepend = b"" # save the material data - mtl_data[mtl_name] = prepend + b'\n\n'.join(mtl_lib) + mtl_data[mtl_name] = prepend + b"\n\n".join(mtl_lib) # add the reference to the MTL file - objects.appendleft(f'mtllib {mtl_name}') + objects.appendleft(f"mtllib {mtl_name}") if header is not None: # add a created-with header to the top of the file - objects.appendleft(f'# {header}') + objects.appendleft(f"# {header}") # combine elements into a single string - text = '\n'.join(objects) + text = "\n".join(objects) # if we have a resolver and have asked to write texture if write_texture and resolver is not None and len(materials) > 0: @@ -982,4 +972,4 @@ def export_obj(mesh, return text -_obj_loaders = {'obj': load_obj} +_obj_loaders = {"obj": load_obj} diff --git a/trimesh/exchange/off.py b/trimesh/exchange/off.py index cf8bfb2c6..03bcc10e0 100644 --- a/trimesh/exchange/off.py +++ b/trimesh/exchange/off.py @@ -23,14 +23,12 @@ def load_off(file_obj, **kwargs): text = file_obj.read() # will magically survive weird encoding sometimes # comment strip will handle all cases of commenting - text = util.comment_strip( - util.decode_text(text)).strip() + text = util.comment_strip(util.decode_text(text)).strip() # split the first key - _, header, raw = re.split('(COFF|OFF)', text, maxsplit=1) - if header.upper() not in ['OFF', 'COFF']: - raise NameError( - f'Not an OFF file! Header was: `{header}`') + _, header, raw = re.split("(COFF|OFF)", text, maxsplit=1) + if header.upper() not in ["OFF", "COFF"]: + raise NameError(f"Not an OFF file! Header was: `{header}`") # split into lines and remove whitespace splits = [i.strip() for i in str.splitlines(str(raw))] @@ -41,24 +39,21 @@ def load_off(file_obj, **kwargs): header = np.array(splits[0].split(), dtype=np.int64) vertex_count, face_count = header[:2] - vertices = np.array([ - i.split()[:3] for i in - splits[1: vertex_count + 1]], - dtype=np.float64) + vertices = np.array( + [i.split()[:3] for i in splits[1 : vertex_count + 1]], dtype=np.float64 + ) # will fail if incorrect number of vertices loaded vertices = vertices.reshape((vertex_count, 3)) # get lines with face data - faces = [i.split() for i in - splits[vertex_count + 1:vertex_count + face_count + 1]] + faces = [i.split() for i in splits[vertex_count + 1 : vertex_count + face_count + 1]] # the first value is count - faces = [line[1:int(line[0]) + 1] for line in faces] + faces = [line[1 : int(line[0]) + 1] for line in faces] faces = triangulate_quads(faces) # save data as kwargs for a trimesh.Trimesh - kwargs = {'vertices': vertices, - 'faces': faces} + kwargs = {"vertices": vertices, "faces": faces} return kwargs @@ -82,17 +77,19 @@ def export_off(mesh, digits=10): # make sure specified digits is an int digits = int(digits) # prepend a 3 (face count) to each face - faces_stacked = np.column_stack((np.ones(len(mesh.faces)) * 3, - mesh.faces)).astype(np.int64) - export = 'OFF\n' + faces_stacked = np.column_stack((np.ones(len(mesh.faces)) * 3, mesh.faces)).astype( + np.int64 + ) + export = "OFF\n" # the header is vertex count, face count, another number - export += str(len(mesh.vertices)) + ' ' + str(len(mesh.faces)) + ' 0\n' - export += util.array_to_string( - mesh.vertices, col_delim=' ', row_delim='\n', digits=digits) + '\n' - export += util.array_to_string( - faces_stacked, 
col_delim=' ', row_delim='\n') + export += str(len(mesh.vertices)) + " " + str(len(mesh.faces)) + " 0\n" + export += ( + util.array_to_string(mesh.vertices, col_delim=" ", row_delim="\n", digits=digits) + + "\n" + ) + export += util.array_to_string(faces_stacked, col_delim=" ", row_delim="\n") return export -_off_loaders = {'off': load_off} -_off_exporters = {'off': export_off} +_off_loaders = {"off": load_off} +_off_exporters = {"off": export_off} diff --git a/trimesh/exchange/openctm.py b/trimesh/exchange/openctm.py index 323833fb1..871a04b0d 100644 --- a/trimesh/exchange/openctm.py +++ b/trimesh/exchange/openctm.py @@ -37,17 +37,17 @@ _ctm_loaders = {} try: - # try to find the shared library - _ctm_lib_name = ctypes.util.find_library('openctm') - if os.name == 'nt': + _ctm_lib_name = ctypes.util.find_library("openctm") + if os.name == "nt": _ctm_loader = ctypes.WinDLL else: _ctm_loader = ctypes.CDLL if _ctm_lib_name is None or len(_ctm_lib_name) == 0: - raise ImportError('libopenctm library not found!') + raise ImportError("libopenctm library not found!") except BaseException as E: from ..exceptions import ExceptionWrapper + _ctm_lib_name = None _ctm_loader = ExceptionWrapper(E) @@ -125,7 +125,7 @@ def load_ctm(file_obj, file_type=None, **kwargs): # !!load file from name # this should be replaced with something that # actually uses the file object data to support streams - name = str(file_obj.name).encode('utf-8') + name = str(file_obj.name).encode("utf-8") ctmLoad(ctm, name) err = ctmGetError(ctm) @@ -136,27 +136,24 @@ def load_ctm(file_obj, file_type=None, **kwargs): vertex_count = ctmGetInteger(ctm, CTM_VERTEX_COUNT) vertex_ctm = ctmGetFloatArray(ctm, CTM_VERTICES) # use fromiter to avoid loop - vertices = np.fromiter(vertex_ctm, - dtype=np.float64, - count=vertex_count * 3).reshape((-1, 3)) + vertices = np.fromiter(vertex_ctm, dtype=np.float64, count=vertex_count * 3).reshape( + (-1, 3) + ) # get faces face_count = ctmGetInteger(ctm, CTM_TRIANGLE_COUNT) face_ctm = ctmGetIntegerArray(ctm, CTM_INDICES) - faces = np.fromiter(face_ctm, - dtype=np.int64, - count=face_count * 3).reshape((-1, 3)) + faces = np.fromiter(face_ctm, dtype=np.int64, count=face_count * 3).reshape((-1, 3)) # create kwargs for trimesh constructor - result = {'vertices': vertices, - 'faces': faces} + result = {"vertices": vertices, "faces": faces} # get face normals if available if ctmGetInteger(ctm, CTM_HAS_NORMALS) == CTM_TRUE: normals_ctm = ctmGetFloatArray(ctm, CTM_NORMALS) - normals = np.fromiter(normals_ctm, - dtype=np.float64, - count=face_count * 3).reshape((-1, 3)) - result['face_normals'] = normals + normals = np.fromiter( + normals_ctm, dtype=np.float64, count=face_count * 3 + ).reshape((-1, 3)) + result["face_normals"] = normals # free context ctmFreeContext(ctm) @@ -166,4 +163,4 @@ def load_ctm(file_obj, file_type=None, **kwargs): if _ctm_lib_name is not None: # we have a library so add load_ctm - _ctm_loaders = {'ctm': load_ctm} + _ctm_loaders = {"ctm": load_ctm} diff --git a/trimesh/exchange/ply.py b/trimesh/exchange/ply.py index 7587adcf5..e41958817 100644 --- a/trimesh/exchange/ply.py +++ b/trimesh/exchange/ply.py @@ -11,39 +11,41 @@ # from ply specification, and additional dtypes found in the wild _dtypes = { - 'char': 'i1', - 'uchar': 'u1', - 'short': 'i2', - 'ushort': 'u2', - 'int': 'i4', - 'int8': 'i1', - 'int16': 'i2', - 'int32': 'i4', - 'int64': 'i8', - 'uint': 'u4', - 'uint8': 'u1', - 'uint16': 'u2', - 'uint32': 'u4', - 'uint64': 'u8', - 'float': 'f4', - 'float16': 'f2', - 'float32': 'f4', - 
'float64': 'f8', - 'double': 'f8'} + "char": "i1", + "uchar": "u1", + "short": "i2", + "ushort": "u2", + "int": "i4", + "int8": "i1", + "int16": "i2", + "int32": "i4", + "int64": "i8", + "uint": "u4", + "uint8": "u1", + "uint16": "u2", + "uint32": "u4", + "uint64": "u8", + "float": "f4", + "float16": "f2", + "float32": "f4", + "float64": "f8", + "double": "f8", +} # Inverse of the above dict, collisions on numpy type were removed _inverse_dtypes = { - 'i1': 'char', - 'u1': 'uchar', - 'i2': 'short', - 'u2': 'ushort', - 'i4': 'int', - 'i8': 'int64', - 'u4': 'uint', - 'u8': 'uint64', - 'f4': 'float', - 'f2': 'float16', - 'f8': 'double'} + "i1": "char", + "u1": "uchar", + "i2": "short", + "u2": "ushort", + "i4": "int", + "i8": "int64", + "u4": "uint", + "u8": "uint64", + "f4": "float", + "f2": "float16", + "f8": "double", +} def _numpy_type_to_ply_type(_numpy_type): @@ -61,12 +63,9 @@ def _numpy_type_to_ply_type(_numpy_type): return _inverse_dtypes[_numpy_type.str[1:]] -def load_ply(file_obj, - resolver=None, - fix_texture=True, - prefer_color=None, - *args, - **kwargs): +def load_ply( + file_obj, resolver=None, fix_texture=True, prefer_color=None, *args, **kwargs +): """ Load a PLY file from an open file object. @@ -104,21 +103,20 @@ def load_ply(file_obj, try: # soft dependency import PIL.Image + # if an image name is passed try to load it if image_name is not None: data = resolver.get(image_name) image = PIL.Image.open(util.wrap_as_stream(data)) except ImportError: - log.debug('textures require `pip install pillow`') + log.debug("textures require `pip install pillow`") except BaseException: - log.warning('unable to load image!', exc_info=True) + log.warning("unable to load image!", exc_info=True) # translate loaded PLY elements to kwargs kwargs = _elements_to_kwargs( - image=image, - elements=elements, - fix_texture=fix_texture, - prefer_color=prefer_color) + image=image, elements=elements, fix_texture=fix_texture, prefer_color=prefer_color + ) return kwargs @@ -142,11 +140,9 @@ def _add_attributes_to_dtype(dtype, attributes): if data.ndim == 1: dtype.append((name, data.dtype)) else: - attribute_dtype = data.dtype if len( - data.dtype) == 0 else data.dtype[0] - dtype.append((f'{name}_count', 'u1')) - dtype.append( - (name, _numpy_type_to_ply_type(attribute_dtype), data.shape[1])) + attribute_dtype = data.dtype if len(data.dtype) == 0 else data.dtype[0] + dtype.append((f"{name}_count", "u1")) + dtype.append((name, _numpy_type_to_ply_type(attribute_dtype), data.shape[1])) return dtype @@ -168,11 +164,11 @@ def _add_attributes_to_header(header, attributes): """ for name, data in attributes.items(): if data.ndim == 1: - header.append( - f'property {_numpy_type_to_ply_type(data.dtype)} {name}\n') + header.append(f"property {_numpy_type_to_ply_type(data.dtype)} {name}\n") else: header.append( - f'property list uchar {_numpy_type_to_ply_type(data.dtype)} {name}\n') + f"property list uchar {_numpy_type_to_ply_type(data.dtype)} {name}\n" + ) return header @@ -194,7 +190,7 @@ def _add_attributes_to_data_array(data_array, attributes): """ for name, data in attributes.items(): if data.ndim > 1: - data_array[f'{name}_count'] = data.shape[1] * np.ones(data.shape[0]) + data_array[f"{name}_count"] = data.shape[1] * np.ones(data.shape[0]) data_array[name] = data return data_array @@ -215,17 +211,14 @@ def _assert_attributes_valid(attributes): """ for data in attributes.values(): if data.ndim not in [1, 2]: - raise ValueError('PLY attributes are limited to 1 or 2 dimensions') + raise ValueError("PLY 
attributes are limited to 1 or 2 dimensions") # Inelegant test for structured arrays, reference: # https://numpy.org/doc/stable/user/basics.rec.html if data.dtype.names is not None: - raise ValueError('PLY attributes must be of a single datatype') + raise ValueError("PLY attributes must be of a single datatype") -def export_ply(mesh, - encoding='binary', - vertex_normal=None, - include_attributes=True): +def export_ply(mesh, encoding="binary", vertex_normal=None, include_attributes=True): """ Export a mesh in the PLY format. @@ -243,117 +236,114 @@ def export_ply(mesh, """ # evaluate input args # allow a shortcut for binary - if encoding == 'binary': - encoding = 'binary_little_endian' - elif encoding not in ['binary_little_endian', 'ascii']: - raise ValueError('encoding must be binary or ascii') + if encoding == "binary": + encoding = "binary_little_endian" + elif encoding not in ["binary_little_endian", "ascii"]: + raise ValueError("encoding must be binary or ascii") # if vertex normals aren't specifically asked for # only export them if they are stored in cache if vertex_normal is None: - vertex_normal = 'vertex_normals' in mesh._cache + vertex_normal = "vertex_normals" in mesh._cache # if we want to include mesh attributes in the export if include_attributes: - if hasattr(mesh, 'vertex_attributes'): + if hasattr(mesh, "vertex_attributes"): # make sure to export texture coordinates as well if hasattr(mesh, "visual") and hasattr(mesh.visual, "uv"): mesh.vertex_attributes["s"] = mesh.visual.uv[:, 0] mesh.vertex_attributes["t"] = mesh.visual.uv[:, 1] _assert_attributes_valid(mesh.vertex_attributes) - if hasattr(mesh, 'face_attributes'): + if hasattr(mesh, "face_attributes"): _assert_attributes_valid(mesh.face_attributes) # custom numpy dtypes for exporting - dtype_face = [('count', ''][int('big' in encoding)] + endian = ["<", ">"][int("big" in encoding)] elements = collections.OrderedDict() # store file name of TextureFiles in the header @@ -396,54 +386,47 @@ def _parse_header(file_obj): raw = file_obj.readline() if raw is None: raise ValueError("Header not terminated properly!") - raw = raw.decode('utf-8').strip() + raw = raw.decode("utf-8").strip() line = raw.split() # we're done - if 'end_header' in line: + if "end_header" in line: break # elements are groups of properties - if 'element' in line[0]: + if "element" in line[0]: # we got a new element so add it name, length = line[1:] elements[name] = { - 'length': int(length), - 'properties': collections.OrderedDict()} + "length": int(length), + "properties": collections.OrderedDict(), + } # a property is a member of an element - elif 'property' in line[0]: + elif "property" in line[0]: # is the property a simple single value, like: # `propert float x` if len(line) == 3: dtype, field = line[1:] - elements[name]['properties'][ - str(field)] = endian + _dtypes[dtype] + elements[name]["properties"][str(field)] = endian + _dtypes[dtype] # is the property a painful list, like: # `property list uchar int vertex_indices` - elif 'list' in line[1]: + elif "list" in line[1]: dtype_count, dtype, field = line[2:] - elements[name]['properties'][ - str(field)] = ( - endian + - _dtypes[dtype_count] + - ', ($LIST,)' + - endian + - _dtypes[dtype]) + elements[name]["properties"][str(field)] = ( + endian + _dtypes[dtype_count] + ", ($LIST,)" + endian + _dtypes[dtype] + ) # referenced as a file name - elif 'texturefile' in raw.lower(): + elif "texturefile" in raw.lower(): # textures come listed like: # `comment TextureFile fuze_uv.jpg` - index = 
raw.lower().index('texturefile') + 11 + index = raw.lower().index("texturefile") + 11 # use the value from raw to preserve whitespace image_name = raw[index:].strip() return elements, is_ascii, image_name -def _elements_to_kwargs(elements, - fix_texture, - image, - prefer_color=None): +def _elements_to_kwargs(elements, fix_texture, image, prefer_color=None): """ Given an elements data structure, extract the keyword arguments that a Trimesh object constructor will expect. @@ -467,37 +450,35 @@ def _elements_to_kwargs(elements, Keyword arguments for Trimesh constructor """ # store the raw ply structure as an internal key in metadata - kwargs = {'metadata': {'_ply_raw': elements}} + kwargs = {"metadata": {"_ply_raw": elements}} - if 'vertex' in elements and elements['vertex']['length']: - vertices = np.column_stack( - [elements['vertex']['data'][i] - for i in 'xyz']) + if "vertex" in elements and elements["vertex"]["length"]: + vertices = np.column_stack([elements["vertex"]["data"][i] for i in "xyz"]) if not util.is_shape(vertices, (-1, 3)): - raise ValueError('Vertices were not (n,3)!') + raise ValueError("Vertices were not (n,3)!") else: # return empty geometry if there are no vertices - kwargs['geometry'] = {} + kwargs["geometry"] = {} return kwargs try: - vertex_normals = np.column_stack([elements['vertex']['data'][j] - for j in ('nx', 'ny', 'nz')]) + vertex_normals = np.column_stack( + [elements["vertex"]["data"][j] for j in ("nx", "ny", "nz")] + ) if len(vertex_normals) == len(vertices): - kwargs['vertex_normals'] = vertex_normals + kwargs["vertex_normals"] = vertex_normals except BaseException: pass - if 'face' in elements and elements['face']['length']: - face_data = elements['face']['data'] + if "face" in elements and elements["face"]["length"]: + face_data = elements["face"]["data"] else: # some PLY files only include vertices face_data = None faces = None # what keys do in-the-wild exporters use for vertices - index_names = ['vertex_index', - 'vertex_indices'] + index_names = ["vertex_index", "vertex_indices"] texcoord = None if util.is_shape(face_data, (-1, (3, 4))): @@ -509,11 +490,11 @@ def _elements_to_kwargs(elements, faces = face_data[i] break # if faces have UV coordinates defined use them - if 'texcoord' in face_data: - texcoord = face_data['texcoord'] + if "texcoord" in face_data: + texcoord = face_data["texcoord"] elif isinstance(face_data, np.ndarray): - face_blob = elements['face']['data'] + face_blob = elements["face"]["data"] # some exporters set this name to 'vertex_index' # and some others use 'vertex_indices' but we really # don't care about the name unless there are multiple @@ -526,10 +507,10 @@ def _elements_to_kwargs(elements, name = i break # get faces - faces = face_blob[name]['f1'] + faces = face_blob[name]["f1"] try: - texcoord = face_blob['texcoord']['f1'] + texcoord = face_blob["texcoord"]["f1"] except (ValueError, KeyError): # accessing numpy arrays with named fields # incorrectly is a ValueError @@ -544,16 +525,15 @@ def _elements_to_kwargs(elements, if texcoord is None: # ply has no clear definition of how texture coordinates are stored, # unfortunately there are many common names that we need to try - texcoord_names = [('texture_u', 'texture_v'), ('u', 'v'), ('s', 't')] + texcoord_names = [("texture_u", "texture_v"), ("u", "v"), ("s", "t")] for names in texcoord_names: # If texture coordinates are defined with vertices try: - t_u = elements['vertex']['data'][names[0]] - t_v = elements['vertex']['data'][names[1]] - texcoord = np.stack(( - 
t_u[faces.reshape(-1)], - t_v[faces.reshape(-1)]), axis=-1).reshape( - (faces.shape[0], -1)) + t_u = elements["vertex"]["data"][names[0]] + t_v = elements["vertex"]["data"][names[1]] + texcoord = np.stack( + (t_u[faces.reshape(-1)], t_v[faces.reshape(-1)]), axis=-1 + ).reshape((faces.shape[0], -1)) # stop trying once succeeded break except (ValueError, KeyError): @@ -565,10 +545,11 @@ def _elements_to_kwargs(elements, # PLY stores texture coordinates per-face which is # slightly annoying, as we have to then figure out # which vertices have the same position but different UV - if (texcoord is not None and - len(shape) == 2 and - texcoord.shape == (faces.shape[0], faces.shape[1] * 2)): - + if ( + texcoord is not None + and len(shape) == 2 + and texcoord.shape == (faces.shape[0], faces.shape[1] * 2) + ): # vertices with the same position but different # UV coordinates can't be merged without it # looking like it went through a woodchipper @@ -589,7 +570,8 @@ def _elements_to_kwargs(elements, # to only merge vertices where the position # AND uv coordinate are the same faces, mask_v, mask_vt = unmerge_faces( - faces, inverse.reshape(faces.shape)) + faces, inverse.reshape(faces.shape) + ) # apply the mask to get resulting vertices vertices = vertices[mask_v] # apply the mask to get UV coordinates @@ -601,24 +583,24 @@ def _elements_to_kwargs(elements, uv[faces.reshape(-1)] = texcoord.reshape((-1, 2)) # create the visuals object for the texture - kwargs['visual'] = visual.texture.TextureVisuals( - uv=uv, image=image) + kwargs["visual"] = visual.texture.TextureVisuals(uv=uv, image=image) elif texcoord is not None: # create a texture with an empty material from ..visual.texture import TextureVisuals + uv = np.zeros((len(vertices), 2)) uv[faces.reshape(-1)] = texcoord.reshape((-1, 2)) - kwargs['visual'] = TextureVisuals(uv=uv) + kwargs["visual"] = TextureVisuals(uv=uv) # faces were not none so assign them - kwargs['faces'] = faces + kwargs["faces"] = faces # kwargs for Trimesh or PointCloud - kwargs['vertices'] = vertices + kwargs["vertices"] = vertices # if both vertex and face color are defined pick the one - if 'face' in elements: - kwargs['face_colors'] = _element_colors(elements['face']) - if 'vertex' in elements: - kwargs['vertex_colors'] = _element_colors(elements['vertex']) + if "face" in elements: + kwargs["face_colors"] = _element_colors(elements["face"]) + if "vertex" in elements: + kwargs["vertex_colors"] = _element_colors(elements["vertex"]) return kwargs @@ -640,9 +622,8 @@ def _element_colors(element): signal : float Estimate of range """ - keys = ['red', 'green', 'blue', 'alpha'] - candidate_colors = [element['data'][i] - for i in keys if i in element['properties']] + keys = ["red", "green", "blue", "alpha"] + candidate_colors = [element["data"][i] for i in keys if i in element["properties"]] if len(candidate_colors) >= 3: return np.column_stack(candidate_colors) return None @@ -666,8 +647,8 @@ def _load_element_different(properties, data): start = 0 for name, dt in properties.items(): length = 1 - if '$LIST' in dt: - dt = dt.split('($LIST,)')[-1] + if "$LIST" in dt: + dt = dt.split("($LIST,)")[-1] # the first entry in a list-property is the number of elements # in the list length = int(row[start]) @@ -680,11 +661,15 @@ def _load_element_different(properties, data): # if the shape of any array is (n, 1) we want to # squeeze/concatenate it into (n,) - squeeze = {k: np.array(v, dtype='object') - for k, v in edata.items()} + squeeze = {k: np.array(v, dtype="object") for k, v in 
edata.items()} # squeeze and convert any clean 2D arrays - squeeze.update({k: v.squeeze().astype(edata[k][0].dtype) - for k, v in squeeze.items() if len(v.shape) == 2}) + squeeze.update( + { + k: v.squeeze().astype(edata[k][0].dtype) + for k, v in squeeze.items() + if len(v.shape) == 2 + } + ) return squeeze @@ -714,18 +699,17 @@ def _load_element_single(properties, data): # of items we actually have exit the loop early if current >= len(first): break - if '$LIST' in dt: - dtype = dt.split('($LIST,)')[-1] + if "$LIST" in dt: + dtype = dt.split("($LIST,)")[-1] # the first entry in a list-property # is the number of elements in the list length = int(first[current]) - columns[name] = data[ - :, current + 1:current + 1 + length].astype(dtype) + columns[name] = data[:, current + 1 : current + 1 + length].astype(dtype) # offset by length of array plus one for each uint index current += length + 1 else: - columns[name] = data[:, current:current + 1].astype(dt) + columns[name] = data[:, current : current + 1].astype(dt) current += 1 return columns @@ -746,21 +730,21 @@ def _ply_ascii(elements, file_obj): """ # get the file contents as a string - text = str(file_obj.read().decode('utf-8')) + text = str(file_obj.read().decode("utf-8")) # split by newlines lines = str.splitlines(text) # get each line as an array split by whitespace - array = [np.fromstring(i, sep=' ') for i in lines] + array = [np.fromstring(i, sep=" ") for i in lines] # store the line position in the file row_pos = 0 # loop through data we need for key, values in elements.items(): # if the element is empty ignore it - if 'length' not in values or values['length'] == 0: + if "length" not in values or values["length"] == 0: continue - data = array[row_pos:row_pos + values['length']] - row_pos += values['length'] + data = array[row_pos : row_pos + values["length"]] + row_pos += values["length"] # try stacking the data, which simplifies column-wise access. this is only # possible, if all rows have the same length. try: @@ -770,22 +754,19 @@ def _ply_ascii(elements, file_obj): col_count_equal = False # number of list properties in this element - list_count = sum( - 1 for dt in values['properties'].values() if '$LIST' in dt) + list_count = sum(1 for dt in values["properties"].values() if "$LIST" in dt) if col_count_equal and list_count <= 1: # all rows have the same length and we only have at most one list # property where all entries have the same length. this means we can # use the quick numpy-based loading. - element_data = _load_element_single( - values['properties'], data) + element_data = _load_element_single(values["properties"], data) else: # there are lists of differing lengths. we need to fall back to loading # the data by iterating all rows and checking for list-lengths. this is # slower than the variant above. 
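# A minimal sketch (made-up rows, an editor illustration rather than part of
# the hunk) of why the equal-length fast path above works: the parsed ASCII
# rows stack into a single 2D array, so a property such as
# `property list uchar int vertex_indices` is just a column slice.
import numpy as np

rows = [np.fromstring(s, sep=" ") for s in ("3 0 1 2", "3 2 3 0")]
data = np.vstack(rows)
faces = data[:, 1:4].astype(np.int64)  # column 0 holds the list length
assert faces.tolist() == [[0, 1, 2], [2, 3, 0]]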
- element_data = _load_element_different( - values['properties'], data) + element_data = _load_element_different(values["properties"], data) - elements[key]['data'] = element_data + elements[key]["data"] = element_data def _ply_binary(elements, file_obj): @@ -814,15 +795,15 @@ def populate_listsize(file_obj, elements): p_current = file_obj.tell() elem_pop = [] for element_key, element in elements.items(): - props = element['properties'] - prior_data = '' + props = element["properties"] + prior_data = "" for k, dtype in props.items(): prop_pop = [] - if '$LIST' in dtype: + if "$LIST" in dtype: # every list field has two data types: # the list length (single value), and the list data (multiple) # here we are only reading the single value for list length - field_dtype = np.dtype(dtype.split(',')[0]) + field_dtype = np.dtype(dtype.split(",")[0]) if len(prior_data) == 0: offset = 0 else: @@ -834,8 +815,8 @@ def populate_listsize(file_obj, elements): prop_pop.append(k) break size = np.frombuffer(blob, dtype=field_dtype)[0] - props[k] = props[k].replace('$LIST', str(size)) - prior_data += props[k] + ',' + props[k] = props[k].replace("$LIST", str(size)) + prior_data += props[k] + "," if len(prop_pop) > 0: # if a property was empty remove it for pop in prop_pop: @@ -846,9 +827,9 @@ def populate_listsize(file_obj, elements): elem_pop.append(element_key) continue # get the size of the items in bytes - itemsize = np.dtype(', '.join(props.values())).itemsize + itemsize = np.dtype(", ".join(props.values())).itemsize # offset the file based on read size - p_current += element['length'] * itemsize + p_current += element["length"] * itemsize # move the file back to where we found it file_obj.seek(p_start) # if there were elements without properties remove them @@ -861,15 +842,14 @@ def populate_data(file_obj, elements): read the data and add it to a 'data' field in the element. 
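# A hedged sketch (made-up dtype, not taken from the patch) of what
# populate_data relies on: once '$LIST' has been replaced by a concrete
# count, each binary element row is a numpy structured dtype, e.g. a
# triangle stored as a uchar count followed by three int32 indices.
import numpy as np

dtype = np.dtype([("count", "<u1"), ("index", "<i4", (3,))])
blob = np.array([(3, [0, 1, 2])], dtype=dtype).tobytes()
assert np.frombuffer(blob, dtype=dtype)["index"].tolist() == [[0, 1, 2]]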
""" for key in elements.keys(): - items = list(elements[key]['properties'].items()) + items = list(elements[key]["properties"].items()) dtype = np.dtype(items) - data = file_obj.read(elements[key]['length'] * dtype.itemsize) + data = file_obj.read(elements[key]["length"] * dtype.itemsize) try: - elements[key]['data'] = np.frombuffer( - data, dtype=dtype) + elements[key]["data"] = np.frombuffer(data, dtype=dtype) except BaseException: - log.warning(f'PLY failed to populate: {key}') - elements[key]['data'] = None + log.warning(f"PLY failed to populate: {key}") + elements[key]["data"] = None return elements def _elements_size(elements): @@ -879,8 +859,8 @@ def _elements_size(elements): """ size = 0 for element in elements.values(): - dtype = np.dtype(','.join(element['properties'].values())) - size += element['length'] * dtype.itemsize + dtype = np.dtype(",".join(element["properties"].values())) + size += element["length"] * dtype.itemsize return size # some elements are passed where the list dimensions @@ -897,7 +877,7 @@ def _elements_size(elements): # if the number of bytes is not the same the file is probably corrupt if size_file != size_elements: - raise ValueError('PLY is unexpected length!') + raise ValueError("PLY is unexpected length!") # with everything populated and a reasonable confidence the file # is intact, read the data fields described by the header @@ -924,17 +904,21 @@ def export_draco(mesh, bits=28): data : str or bytes DRC file bytes """ - with tempfile.NamedTemporaryFile(suffix='.ply') as temp_ply: + with tempfile.NamedTemporaryFile(suffix=".ply") as temp_ply: temp_ply.write(export_ply(mesh)) temp_ply.flush() - with tempfile.NamedTemporaryFile(suffix='.drc') as encoded: - subprocess.check_output([draco_encoder, - '-qp', - str(int(bits)), - '-i', - temp_ply.name, - '-o', - encoded.name]) + with tempfile.NamedTemporaryFile(suffix=".drc") as encoded: + subprocess.check_output( + [ + draco_encoder, + "-qp", + str(int(bits)), + "-i", + temp_ply.name, + "-o", + encoded.name, + ] + ) encoded.seek(0) data = encoded.read() return data @@ -955,24 +939,25 @@ def load_draco(file_obj, **kwargs): Keyword arguments to construct a Trimesh object """ - with tempfile.NamedTemporaryFile(suffix='.drc') as temp_drc: + with tempfile.NamedTemporaryFile(suffix=".drc") as temp_drc: temp_drc.write(file_obj.read()) temp_drc.flush() - with tempfile.NamedTemporaryFile(suffix='.ply') as temp_ply: + with tempfile.NamedTemporaryFile(suffix=".ply") as temp_ply: subprocess.check_output( - [draco_decoder, '-i', temp_drc.name, '-o', temp_ply.name]) + [draco_decoder, "-i", temp_drc.name, "-o", temp_ply.name] + ) temp_ply.seek(0) kwargs = load_ply(temp_ply) return kwargs -_ply_loaders = {'ply': load_ply} -_ply_exporters = {'ply': export_ply} +_ply_loaders = {"ply": load_ply} +_ply_exporters = {"ply": export_ply} -draco_encoder = util.which('draco_encoder') -draco_decoder = util.which('draco_decoder') +draco_encoder = util.which("draco_encoder") +draco_decoder = util.which("draco_decoder") if draco_decoder is not None: - _ply_loaders['drc'] = load_draco + _ply_loaders["drc"] = load_draco if draco_encoder is not None: - _ply_exporters['drc'] = export_draco + _ply_exporters["drc"] = export_draco diff --git a/trimesh/exchange/stl.py b/trimesh/exchange/stl.py index ff08b757b..a979c345a 100644 --- a/trimesh/exchange/stl.py +++ b/trimesh/exchange/stl.py @@ -12,12 +12,11 @@ class HeaderError(Exception): # everything in STL is always Little Endian # this works natively on Little Endian systems, but blows up on Big 
Endians # so we always specify byteorder -_stl_dtype = np.dtype([('normals', '' - * len(batch) + '' * len(batch) ) f.write( fragment.format(*batch.flatten()).encode( @@ -308,9 +305,8 @@ def model_id(x): ) with xf.element("triangles"): xf.flush() - for i in range( - 0, len(m.faces), batch_size): - batch = m.faces[i: i + batch_size] + for i in range(0, len(m.faces), batch_size): + batch = m.faces[i : i + batch_size] fragment = ( '' * len(batch) @@ -332,7 +328,7 @@ def model_id(x): "id": model_id(node), "name": node, "type": "model", - "p:UUID": str(uuid.uuid4()) + "p:UUID": str(uuid.uuid4()), } with xf.element("object", **attribs): with xf.element("components"): @@ -363,16 +359,16 @@ def model_id(x): transform = " ".join( str(i) for i in np.array(data["matrix"])[:3, :4].T.flatten() ) - uuid_tag = "{{{}}}UUID".format(model_nsmap['p']) + uuid_tag = "{{{}}}UUID".format(model_nsmap["p"]) xf.write( etree.Element( "item", { "objectid": model_id(node), "transform": transform, - uuid_tag: str(uuid.uuid4()) + uuid_tag: str(uuid.uuid4()), }, - nsmap=model_nsmap + nsmap=model_nsmap, ) ) @@ -397,9 +393,7 @@ def model_id(x): ) as xf: xf.write_declaration() # xml namespaces - nsmap = { - None: "http://schemas.openxmlformats.org/package/2006/content-types" - } + nsmap = {None: "http://schemas.openxmlformats.org/package/2006/content-types"} # stream elements types = [ @@ -415,11 +409,7 @@ def model_id(x): ] with xf.element("Types", nsmap=nsmap): for ext, ctype in types: - xf.write( - etree.Element( - "Default", - Extension=ext, - ContentType=ctype)) + xf.write(etree.Element("Default", Extension=ext, ContentType=ctype)) return file_obj.getvalue() @@ -438,11 +428,9 @@ def _attrib_to_transform(attrib): """ transform = np.eye(4, dtype=np.float64) - if 'transform' in attrib: + if "transform" in attrib: # wangle their transform format - values = np.array( - attrib['transform'].split(), - dtype=np.float64).reshape((4, 3)).T + values = np.array(attrib["transform"].split(), dtype=np.float64).reshape((4, 3)).T transform[:3, :4] = values return transform @@ -451,9 +439,11 @@ def _attrib_to_transform(attrib): try: import networkx as nx from lxml import etree - _three_loaders = {'3mf': load_3MF} - _3mf_exporters = {'3mf': export_3MF} + + _three_loaders = {"3mf": load_3MF} + _3mf_exporters = {"3mf": export_3MF} except BaseException as E: from ..exceptions import ExceptionWrapper - _three_loaders = {'3mf': ExceptionWrapper(E)} - _3mf_exporters = {'3mf': ExceptionWrapper(E)} + + _three_loaders = {"3mf": ExceptionWrapper(E)} + _3mf_exporters = {"3mf": ExceptionWrapper(E)} diff --git a/trimesh/exchange/urdf.py b/trimesh/exchange/urdf.py index fbfc77b31..df9e8f6dd 100644 --- a/trimesh/exchange/urdf.py +++ b/trimesh/exchange/urdf.py @@ -6,11 +6,7 @@ from ..version import __version__ -def export_urdf(mesh, - directory, - scale=1.0, - color=None, - **kwargs): +def export_urdf(mesh, directory, scale=1.0, color=None, **kwargs): """ Convert a Trimesh object into a URDF package for physics simulation. 
This breaks the mesh into convex pieces and @@ -41,37 +37,34 @@ def export_urdf(mesh, name = os.path.basename(fullpath) _, ext = os.path.splitext(name) - if ext != '': - raise ValueError('URDF path must be a directory!') + if ext != "": + raise ValueError("URDF path must be a directory!") # Create directory if needed if not os.path.exists(fullpath): os.mkdir(fullpath) elif not os.path.isdir(fullpath): - raise ValueError('URDF path must be a directory!') + raise ValueError("URDF path must be a directory!") # Perform a convex decomposition try: convex_pieces = mesh.convex_decomposition() except BaseException: - log.error('problem with convex decomposition, using hull', - exc_info=True) + log.error("problem with convex decomposition, using hull", exc_info=True) convex_pieces = [mesh.convex_hull] # Get the effective density of the mesh - effective_density = mesh.volume / sum([ - m.volume for m in convex_pieces]) + effective_density = mesh.volume / sum([m.volume for m in convex_pieces]) # open an XML tree - root = et.Element('robot', name='root') + root = et.Element("robot", name="root") # Loop through all pieces, adding each as a link prev_link_name = None for i, piece in enumerate(convex_pieces): - # Save each nearly convex mesh out to a file - piece_name = f'{name}_convex_piece_{i}' - piece_filename = f'{piece_name}.obj' + piece_name = f"{name}_convex_piece_{i}" + piece_filename = f"{piece_name}.obj" piece_filepath = os.path.join(fullpath, piece_filename) export_mesh(piece, piece_filepath) @@ -79,89 +72,92 @@ def export_urdf(mesh, piece.center_mass = mesh.center_mass piece.density = effective_density * mesh.density - link_name = f'link_{piece_name}' - geom_name = f'{piece_filename}' - I = [['{:.2E}'.format(y) for y in x] # NOQA - for x in piece.moment_inertia] + link_name = f"link_{piece_name}" + geom_name = f"{piece_filename}" + I = [["{:.2E}".format(y) for y in x] for x in piece.moment_inertia] # NOQA # Write the link out to the XML Tree - link = et.SubElement(root, 'link', name=link_name) + link = et.SubElement(root, "link", name=link_name) # Inertial information - inertial = et.SubElement(link, 'inertial') - et.SubElement(inertial, 'origin', xyz="0 0 0", rpy="0 0 0") - et.SubElement(inertial, 'mass', value=f'{piece.mass:.2E}') + inertial = et.SubElement(link, "inertial") + et.SubElement(inertial, "origin", xyz="0 0 0", rpy="0 0 0") + et.SubElement(inertial, "mass", value=f"{piece.mass:.2E}") et.SubElement( inertial, - 'inertia', + "inertia", ixx=I[0][0], ixy=I[0][1], ixz=I[0][2], iyy=I[1][1], iyz=I[1][2], - izz=I[2][2]) + izz=I[2][2], + ) # Visual Information - visual = et.SubElement(link, 'visual') - et.SubElement(visual, 'origin', xyz="0 0 0", rpy="0 0 0") - geometry = et.SubElement(visual, 'geometry') - et.SubElement(geometry, 'mesh', filename=geom_name, - scale=f"{scale:.4E} {scale:.4E} {scale:.4E}") - material = et.SubElement(visual, 'material', name='') + visual = et.SubElement(link, "visual") + et.SubElement(visual, "origin", xyz="0 0 0", rpy="0 0 0") + geometry = et.SubElement(visual, "geometry") + et.SubElement( + geometry, + "mesh", + filename=geom_name, + scale=f"{scale:.4E} {scale:.4E} {scale:.4E}", + ) + material = et.SubElement(visual, "material", name="") if color is not None: - et.SubElement(material, - 'color', - rgba=f"{color[0]:.2E} {color[1]:.2E} {color[2]:.2E} 1") + et.SubElement( + material, "color", rgba=f"{color[0]:.2E} {color[1]:.2E} {color[2]:.2E} 1" + ) # Collision Information - collision = et.SubElement(link, 'collision') - et.SubElement(collision, 'origin', 
xyz="0 0 0", rpy="0 0 0") - geometry = et.SubElement(collision, 'geometry') - et.SubElement(geometry, 'mesh', filename=geom_name, - scale=f"{scale:.4E} {scale:.4E} {scale:.4E}") + collision = et.SubElement(link, "collision") + et.SubElement(collision, "origin", xyz="0 0 0", rpy="0 0 0") + geometry = et.SubElement(collision, "geometry") + et.SubElement( + geometry, + "mesh", + filename=geom_name, + scale=f"{scale:.4E} {scale:.4E} {scale:.4E}", + ) # Create rigid joint to previous link if prev_link_name is not None: - joint_name = f'{link_name}_joint' - joint = et.SubElement(root, - 'joint', - name=joint_name, - type='fixed') - et.SubElement(joint, 'origin', xyz="0 0 0", rpy="0 0 0") - et.SubElement(joint, 'parent', link=prev_link_name) - et.SubElement(joint, 'child', link=link_name) + joint_name = f"{link_name}_joint" + joint = et.SubElement(root, "joint", name=joint_name, type="fixed") + et.SubElement(joint, "origin", xyz="0 0 0", rpy="0 0 0") + et.SubElement(joint, "parent", link=prev_link_name) + et.SubElement(joint, "child", link=link_name) prev_link_name = link_name # Write URDF file tree = et.ElementTree(root) - urdf_filename = f'{name}.urdf' - tree.write(os.path.join(fullpath, urdf_filename), - pretty_print=True) + urdf_filename = f"{name}.urdf" + tree.write(os.path.join(fullpath, urdf_filename), pretty_print=True) # Write Gazebo config file - root = et.Element('model') - model = et.SubElement(root, 'name') + root = et.Element("model") + model = et.SubElement(root, "name") model.text = name - version = et.SubElement(root, 'version') - version.text = '1.0' - sdf = et.SubElement(root, 'sdf', version='1.4') - sdf.text = f'{name}.urdf' + version = et.SubElement(root, "version") + version.text = "1.0" + sdf = et.SubElement(root, "sdf", version="1.4") + sdf.text = f"{name}.urdf" - author = et.SubElement(root, 'author') - et.SubElement(author, 'name').text = f'trimesh {__version__}' - et.SubElement(author, 'email').text = 'blank@blank.blank' + author = et.SubElement(root, "author") + et.SubElement(author, "name").text = f"trimesh {__version__}" + et.SubElement(author, "email").text = "blank@blank.blank" - description = et.SubElement(root, 'description') + description = et.SubElement(root, "description") description.text = name tree = et.ElementTree(root) if tol.strict: # todo : we don't pass the URDF schema validation - schema = et.XMLSchema(file=get( - 'schema/urdf.xsd', as_stream=True)) + schema = et.XMLSchema(file=get("schema/urdf.xsd", as_stream=True)) if not schema.validate(tree): # actual error isn't raised by validate log.debug(schema.error_log) - tree.write(os.path.join(fullpath, 'model.config')) + tree.write(os.path.join(fullpath, "model.config")) return np.sum(convex_pieces) diff --git a/trimesh/exchange/xaml.py b/trimesh/exchange/xaml.py index be7e21633..c609e00e8 100644 --- a/trimesh/exchange/xaml.py +++ b/trimesh/exchange/xaml.py @@ -26,18 +26,21 @@ def load_XAML(file_obj, *args, **kwargs): result : dict Kwargs for a Trimesh constructor. 
""" + def element_to_color(element): """ Turn an XML element into a (4,) np.uint8 RGBA color """ if element is None: return visual.DEFAULT_COLOR - hexcolor = int(element.attrib['Color'].replace('#', ''), 16) - opacity = float(element.attrib['Opacity']) - rgba = [(hexcolor >> 16) & 0xFF, - (hexcolor >> 8) & 0xFF, - (hexcolor & 0xFF), - opacity * 0xFF] + hexcolor = int(element.attrib["Color"].replace("#", ""), 16) + opacity = float(element.attrib["Opacity"]) + rgba = [ + (hexcolor >> 16) & 0xFF, + (hexcolor >> 8) & 0xFF, + (hexcolor & 0xFF), + opacity * 0xFF, + ] rgba = np.array(rgba, dtype=np.uint8) return rgba @@ -47,10 +50,8 @@ def element_to_transform(element): transformation matrix. """ try: - matrix = next(element.iter( - tag=ns + 'MatrixTransform3D')).attrib['Matrix'] - matrix = np.array(matrix.split(), - dtype=np.float64).reshape((4, 4)).T + matrix = next(element.iter(tag=ns + "MatrixTransform3D")).attrib["Matrix"] + matrix = np.array(matrix.split(), dtype=np.float64).reshape((4, 4)).T return matrix except StopIteration: # this will be raised if the MatrixTransform3D isn't in the passed @@ -62,7 +63,7 @@ def element_to_transform(element): root = etree.XML(file_data) # the XML namespace - ns = root.tag.split('}')[0] + '}' + ns = root.tag.split("}")[0] + "}" # the linked lists our results are going in vertices = [] @@ -72,14 +73,11 @@ def element_to_transform(element): # iterate through the element tree # the GeometryModel3D tag contains a material and geometry - for geometry in root.iter(tag=ns + 'GeometryModel3D'): - + for geometry in root.iter(tag=ns + "GeometryModel3D"): # get the diffuse and specular colors specified in the material - color_search = './/{ns}{color}Material/*/{ns}SolidColorBrush' - diffuse = geometry.find(color_search.format(ns=ns, - color='Diffuse')) - specular = geometry.find(color_search.format(ns=ns, - color='Specular')) + color_search = ".//{ns}{color}Material/*/{ns}SolidColorBrush" + diffuse = geometry.find(color_search.format(ns=ns, color="Diffuse")) + specular = geometry.find(color_search.format(ns=ns, color="Specular")) # convert the element into a (4,) np.uint8 RGBA color diffuse = element_to_color(diffuse) @@ -94,7 +92,7 @@ def element_to_transform(element): # element.find will only return elements that are direct children # of the current element as opposed to element.iter, # which will return any depth of child - transform_element = current.find(ns + 'ModelVisual3D.Transform') + transform_element = current.find(ns + "ModelVisual3D.Transform") if transform_element is not None: # we are traversing the tree backwards, so append new # transforms to the left of the deque @@ -115,21 +113,20 @@ def element_to_transform(element): transform = util.multi_dot(transforms) # iterate through the contained mesh geometry elements - for g in geometry.iter(tag=ns + 'MeshGeometry3D'): - c_normals = np.array(g.attrib['Normals'].replace(',', ' ').split(), - dtype=np.float64).reshape((-1, 3)) + for g in geometry.iter(tag=ns + "MeshGeometry3D"): + c_normals = np.array( + g.attrib["Normals"].replace(",", " ").split(), dtype=np.float64 + ).reshape((-1, 3)) c_vertices = np.array( - g.attrib['Positions'].replace( - ',', ' ').split(), dtype=np.float64).reshape( - (-1, 3)) + g.attrib["Positions"].replace(",", " ").split(), dtype=np.float64 + ).reshape((-1, 3)) # bake in the transform as we're saving c_vertices = tf.transform_points(c_vertices, transform) c_faces = np.array( - g.attrib['TriangleIndices'].replace( - ',', ' ').split(), dtype=np.int64).reshape( - (-1, 3)) + 
g.attrib["TriangleIndices"].replace(",", " ").split(), dtype=np.int64 + ).reshape((-1, 3)) # save data to a sequence vertices.append(c_vertices) @@ -139,19 +136,20 @@ def element_to_transform(element): # compile the results into clean numpy arrays result = {} - result['vertices'], result['faces'] = util.append_faces(vertices, - faces) - result['face_colors'] = np.vstack(colors) - result['vertex_normals'] = np.vstack(normals) + result["vertices"], result["faces"] = util.append_faces(vertices, faces) + result["face_colors"] = np.vstack(colors) + result["vertex_normals"] = np.vstack(normals) return result try: from lxml import etree - _xaml_loaders = {'xaml': load_XAML} + + _xaml_loaders = {"xaml": load_XAML} except BaseException as E: # create a dummy module which will raise the ImportError # or other exception only when someone tries to use networkx from ..exceptions import ExceptionWrapper - _xaml_loaders = {'xaml': ExceptionWrapper(E)} + + _xaml_loaders = {"xaml": ExceptionWrapper(E)} diff --git a/trimesh/exchange/xyz.py b/trimesh/exchange/xyz.py index ac255cbf5..b9dc0cd8e 100644 --- a/trimesh/exchange/xyz.py +++ b/trimesh/exchange/xyz.py @@ -4,9 +4,7 @@ from ..points import PointCloud -def load_xyz(file_obj, - delimiter=None, - **kwargs): +def load_xyz(file_obj, delimiter=None, **kwargs): """ Load an XYZ file into a PointCloud. @@ -26,18 +24,18 @@ def load_xyz(file_obj, # read the whole file into memory as a string raw = util.decode_text(file_obj.read()).strip() # get the first line to look at - first = raw[:raw.find('\n')].strip() + first = raw[: raw.find("\n")].strip() # guess the column count by looking at the first line columns = len(first.split()) if columns < 3: raise ValueError("not enough columns in xyz file!") - if delimiter is None and ',' in first: + if delimiter is None and "," in first: # if no delimiter passed and file has commas - delimiter = ',' + delimiter = "," if delimiter is not None: # replace delimiter with whitespace so split works - raw = raw.replace(delimiter, ' ') + raw = raw.replace(delimiter, " ") # use string splitting to get array array = np.array(raw.split(), dtype=np.float64) @@ -53,15 +51,14 @@ def load_xyz(file_obj, if columns == 6: # RGB colors colors = np.array(data[:, 3:], dtype=np.uint8) - colors = np.concatenate(( - colors, - np.ones((len(data), 1), dtype=np.uint8) * 255), axis=1) + colors = np.concatenate( + (colors, np.ones((len(data), 1), dtype=np.uint8) * 255), axis=1 + ) elif columns >= 7: # extract RGBA colors colors = np.array(data[:, 3:8], dtype=np.uint8) # add extracted colors and vertices to kwargs - kwargs.update({'vertices': vertices, - 'colors': colors}) + kwargs.update({"vertices": vertices, "colors": colors}) return kwargs @@ -85,24 +82,22 @@ def export_xyz(cloud, write_colors=True, delimiter=None): Pointcloud in XYZ format """ if not isinstance(cloud, PointCloud): - raise ValueError('object must be PointCloud') + raise ValueError("object must be PointCloud") # compile data into a blob data = cloud.vertices - if (write_colors and - hasattr(cloud, 'colors') and - cloud.colors is not None): + if write_colors and hasattr(cloud, "colors") and cloud.colors is not None: # stack colors and vertices data = np.hstack((data, cloud.colors)) # if delimiter not passed use whitepace if delimiter is None: - delimiter = ' ' + delimiter = " " # stack blob into XYZ format export = util.array_to_string(data, col_delim=delimiter) return export -_xyz_loaders = {'xyz': load_xyz} -_xyz_exporters = {'xyz': export_xyz} +_xyz_loaders = {"xyz": load_xyz} 
+_xyz_exporters = {"xyz": export_xyz} diff --git a/trimesh/geometry.py b/trimesh/geometry.py index 7a28d9982..c44d3f73c 100644 --- a/trimesh/geometry.py +++ b/trimesh/geometry.py @@ -7,6 +7,7 @@ import scipy.sparse except BaseException as E: from . import exceptions + # raise E again if anyone tries to use sparse scipy = exceptions.ExceptionWrapper(E) @@ -30,8 +31,7 @@ def plane_transform(origin, normal): """ transform = align_vectors(normal, [0, 0, 1]) if origin is not None: - transform[:3, 3] = -np.dot( - transform, np.append(origin, 1))[:3] + transform[:3, 3] = -np.dot(transform, np.append(origin, 1))[:3] return transform @@ -60,7 +60,7 @@ def align_vectors(a, b, return_angle=False): a = np.array(a, dtype=np.float64) b = np.array(b, dtype=np.float64) if a.shape != (3,) or b.shape != (3,): - raise ValueError('vectors must be (3,)!') + raise ValueError("vectors must be (3,)!") # find the SVD of the two vectors au = np.linalg.svd(a.reshape((-1, 1)))[0] @@ -109,8 +109,7 @@ def faces_to_edges(faces, return_index=False): if return_index: # edges are in order of faces due to reshape - face_index = np.tile(np.arange(len(faces)), - (3, 1)).T.reshape(-1) + face_index = np.tile(np.arange(len(faces)), (3, 1)).T.reshape(-1) return edges, face_index return edges @@ -135,7 +134,7 @@ def vector_angle(pairs): elif util.is_shape(pairs, (2, 3)): pairs = pairs.reshape((-1, 2, 3)) elif not util.is_shape(pairs, (-1, 2, (2, 3))): - raise ValueError('pairs must be (n,2,(2|3))!') + raise ValueError("pairs must be (n,2,(2|3))!") # do the dot product between vectors dots = util.diagonal_dot(pairs[:, 0], pairs[:, 1]) @@ -177,8 +176,7 @@ def triangulate_quads(quads, dtype=np.int64): if len(quads.shape) == 2 and quads.shape[1] == 4: # if they are just quads stack and return - return np.vstack((quads[:, [0, 1, 2]], - quads[:, [2, 3, 0]])).astype(dtype) + return np.vstack((quads[:, [0, 1, 2]], quads[:, [2, 3, 0]])).astype(dtype) except ValueError: # new numpy raises an error for sequences pass @@ -190,25 +188,23 @@ def triangulate_quads(quads, dtype=np.int64): # triangulate arbitrary polygons as triangle fans # this isn't guaranteed to be sane if the polygons # aren't convex but that would require a real maniac - poly = [[[f[0], f[i + 1], f[i + 2]] - for i in range(len(f) - 2)] - for f in quads if len(f) > 4] + poly = [ + [[f[0], f[i + 1], f[i + 2]] for i in range(len(f) - 2)] + for f in quads + if len(f) > 4 + ] if len(quad) == 0 and len(poly) == 0: return tri.astype(dtype) if len(poly) > 0: poly = np.vstack(poly) if len(quad) > 0: - quad = np.vstack((quad[:, [0, 1, 2]], - quad[:, [2, 3, 0]])) + quad = np.vstack((quad[:, [0, 1, 2]], quad[:, [2, 3, 0]])) # combine triangulated quads with triangles - return util.vstack_empty([ - tri, quad, poly]).astype(dtype) + return util.vstack_empty([tri, quad, poly]).astype(dtype) -def vertex_face_indices(vertex_count, - faces, - faces_sparse): +def vertex_face_indices(vertex_count, faces, faces_sparse): """ Find vertex face indices from the faces array of vertices @@ -231,11 +227,10 @@ def vertex_face_indices(vertex_count, # Create 2D array with row for each vertex and # length of max number of faces for a vertex try: - counts = np.bincount( - faces.flatten(), minlength=vertex_count) + counts = np.bincount(faces.flatten(), minlength=vertex_count) except TypeError: # casting failed on 32 bit Windows - log.warning('casting failed, falling back!') + log.warning("casting failed, falling back!") # fall back to np.unique (usually ~35x slower than bincount) counts = 
np.unique(faces.flatten(), return_counts=True)[1] assert len(counts) == vertex_count @@ -259,24 +254,21 @@ def vertex_face_indices(vertex_count, padded[padded == 0] = sorted_faces except BaseException: # fall back to a slow loop - log.warning('vertex_faces falling back to slow loop! ' + - 'mesh probably has degenerate faces', - exc_info=True) + log.warning( + "vertex_faces falling back to slow loop! " + + "mesh probably has degenerate faces", + exc_info=True, + ) sort = np.zeros(faces.size, dtype=np.int64) flat = faces.flatten() for v in range(vertex_count): # assign the data in order - sort[starts[v]:starts[v] + counts[v] - ] = (np.where(flat == v)[0] // 3)[::-1] + sort[starts[v] : starts[v] + counts[v]] = (np.where(flat == v)[0] // 3)[::-1] padded[padded == 0] = sort return padded -def mean_vertex_normals(vertex_count, - faces, - face_normals, - sparse=None, - **kwargs): +def mean_vertex_normals(vertex_count, faces, face_normals, sparse=None, **kwargs): """ Find vertex normals from the mean of the faces that contain that vertex. @@ -296,6 +288,7 @@ def mean_vertex_normals(vertex_count, Normals for every vertex Vertices unreferenced by faces will be zero. """ + def summed_sparse(): # use a sparse matrix of which face contains each vertex to # figure out the summed normal at each vertex @@ -318,9 +311,7 @@ def summed_loop(): try: summed = summed_sparse() except BaseException: - log.warning( - 'unable to use sparse matrix, falling back!', - exc_info=True) + log.warning("unable to use sparse matrix, falling back!", exc_info=True) summed = summed_loop() # invalid normals will be returned as zero @@ -329,11 +320,9 @@ def summed_loop(): return vertex_normals -def weighted_vertex_normals(vertex_count, - faces, - face_normals, - face_angles, - use_loop=False): +def weighted_vertex_normals( + vertex_count, faces, face_normals, face_angles, use_loop=False +): """ Compute vertex normals from the faces that contain that vertex. The contibution of a face's normal to a vertex normal is the @@ -361,6 +350,7 @@ def weighted_vertex_normals(vertex_count, Normals for every vertex Vertices unreferenced by faces will be zero. """ + def summed_sparse(): # use a sparse matrix of which face contains each vertex to # figure out the summed normal at each vertex @@ -378,14 +368,13 @@ def summed_loop(): face_idxs, inface_idxs = np.where(faces == vertex_idx) surrounding_angles = face_angles[face_idxs, inface_idxs] summed[vertex_idx] = np.dot( - surrounding_angles / - surrounding_angles.sum(), - face_normals[face_idxs]) + surrounding_angles / surrounding_angles.sum(), face_normals[face_idxs] + ) return summed # normals should be unit vectors - face_ok = (face_normals ** 2).sum(axis=1) > 0.5 + face_ok = (face_normals**2).sum(axis=1) > 0.5 # don't consider faces with invalid normals faces = faces[face_ok] face_normals = face_normals[face_ok] @@ -395,83 +384,80 @@ def summed_loop(): try: return util.unitize(summed_sparse()) except BaseException: - log.warning( - 'unable to use sparse matrix, falling back!', - exc_info=True) + log.warning("unable to use sparse matrix, falling back!", exc_info=True) # we either crashed or were asked to loop return util.unitize(summed_loop()) def index_sparse(columns, indices, data=None, dtype=None): """ - Return a sparse matrix for which vertices are contained in which faces. 
- A data vector can be passed which is then used instead of booleans - - Parameters - ------------ - columns : int - Number of columns, usually number of vertices - indices : (m, d) int - Usually mesh.faces - - Returns - --------- - sparse: scipy.sparse.coo_matrix of shape (columns, len(faces)) - dtype is boolean - - Examples - ---------- - In [1]: sparse = faces_sparse(len(mesh.vertices), mesh.faces) - - In [2]: sparse.shape - Out[2]: (12, 20) - - In [3]: mesh.faces.shape - Out[3]: (20, 3) -co - In [4]: mesh.vertices.shape - Out[4]: (12, 3) - - In [5]: dense = sparse.toarray().astype(int) - - In [6]: dense - Out[6]: - array([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0], - [0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0], - [0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1], - [1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0], - [0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0], - [0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1], - [0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1]]) - - In [7]: dense.sum(axis=0) - Out[7]: array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]) + Return a sparse matrix for which vertices are contained in which faces. + A data vector can be passed which is then used instead of booleans + + Parameters + ------------ + columns : int + Number of columns, usually number of vertices + indices : (m, d) int + Usually mesh.faces + + Returns + --------- + sparse: scipy.sparse.coo_matrix of shape (columns, len(faces)) + dtype is boolean + + Examples + ---------- + In [1]: sparse = faces_sparse(len(mesh.vertices), mesh.faces) + + In [2]: sparse.shape + Out[2]: (12, 20) + + In [3]: mesh.faces.shape + Out[3]: (20, 3) + co + In [4]: mesh.vertices.shape + Out[4]: (12, 3) + + In [5]: dense = sparse.toarray().astype(int) + + In [6]: dense + Out[6]: + array([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0], + [0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1], + [1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0], + [0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1]]) + + In [7]: dense.sum(axis=0) + Out[7]: array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]) """ indices = np.asanyarray(indices) columns = int(columns) # flattened list row = indices.reshape(-1) - col = np.tile(np.arange(len(indices)).reshape( - (-1, 1)), (1, indices.shape[1])).reshape(-1) + col = np.tile( + np.arange(len(indices)).reshape((-1, 1)), (1, indices.shape[1]) + ).reshape(-1) shape = (columns, len(indices)) if data is None: data = np.ones(len(col), dtype=bool) elif len(data) != len(col): - raise ValueError('data incorrect length') + 
raise ValueError("data incorrect length") if dtype is not None: data = data.astype(dtype) # assemble into sparse matrix - matrix = scipy.sparse.coo_matrix((data, (row, col)), - shape=shape, - dtype=data.dtype) + matrix = scipy.sparse.coo_matrix((data, (row, col)), shape=shape, dtype=data.dtype) return matrix diff --git a/trimesh/grouping.py b/trimesh/grouping.py index d9d21b91f..44a6c12c1 100644 --- a/trimesh/grouping.py +++ b/trimesh/grouping.py @@ -16,15 +16,18 @@ # wrapping just ImportError fails in some cases # will raise the error when someone tries to use KDtree from . import exceptions + cKDTree = exceptions.ExceptionWrapper(E) -def merge_vertices(mesh, - merge_tex=None, - merge_norm=None, - digits_vertex=None, - digits_norm=None, - digits_uv=None): +def merge_vertices( + mesh, + merge_tex=None, + merge_norm=None, + digits_vertex=None, + digits_norm=None, + digits_uv=None, +): """ Removes duplicate vertices, grouped by position and optionally texture coordinate and normal. @@ -59,12 +62,11 @@ def merge_vertices(mesh, digits_uv = 4 if digits_vertex is None: # use tol.merge if digit precision not passed - digits_vertex = util.decimal_to_digits( - tol.merge) + digits_vertex = util.decimal_to_digits(tol.merge) # if we have a ton of unreferenced vertices it will # make the unique_rows call super slow so cull first - if hasattr(mesh, 'faces') and len(mesh.faces) > 0: + if hasattr(mesh, "faces") and len(mesh.faces) > 0: referenced = np.zeros(len(mesh.vertices), dtype=bool) referenced[mesh.faces] = True else: @@ -72,23 +74,25 @@ def merge_vertices(mesh, referenced = np.ones(len(mesh.vertices), dtype=bool) # collect vertex attributes into sequence we can stack - stacked = [mesh.vertices * (10 ** digits_vertex)] + stacked = [mesh.vertices * (10**digits_vertex)] # UV texture visuals require us to update the # vertices and normals differently - if (not merge_tex and - mesh.visual.defined and - mesh.visual.kind == 'texture' and - mesh.visual.uv is not None and - len(mesh.visual.uv) == len(mesh.vertices)): + if ( + not merge_tex + and mesh.visual.defined + and mesh.visual.kind == "texture" + and mesh.visual.uv is not None + and len(mesh.visual.uv) == len(mesh.vertices) + ): # get an array with vertices and UV coordinates # converted to integers at requested precision - stacked.append(mesh.visual.uv * (10 ** digits_uv)) + stacked.append(mesh.visual.uv * (10**digits_uv)) # check to see if we have vertex normals - normals = mesh._cache['vertex_normals'] + normals = mesh._cache["vertex_normals"] if not merge_norm and np.shape(normals) == mesh.vertices.shape: - stacked.append(normals * (10 ** digits_norm)) + stacked.append(normals * (10**digits_norm)) # stack collected vertex properties and round to integer stacked = np.column_stack(stacked).round().astype(np.int64) @@ -133,7 +137,7 @@ def group(values, min_len=0, max_len=np.inf): values = original[order] # find the indexes which are duplicates - if values.dtype.kind == 'f': + if values.dtype.kind == "f": # for floats in a sorted array, neighbors are not duplicates # if the difference between them is greater than approximate zero nondupe = np.greater(np.abs(np.diff(values)), tol.zero) @@ -145,11 +149,10 @@ def group(values, min_len=0, max_len=np.inf): dupe_idx = np.append(0, np.nonzero(nondupe)[0] + 1) dupe_len = np.diff(np.concatenate((dupe_idx, [len(values)]))) - dupe_ok = np.logical_and(np.greater_equal(dupe_len, min_len), - np.less_equal(dupe_len, max_len)) - groups = [order[i:(i + j)] - for i, j in zip(dupe_idx[dupe_ok], - dupe_len[dupe_ok])] + 
dupe_ok = np.logical_and( + np.greater_equal(dupe_len, min_len), np.less_equal(dupe_len, max_len) + ) + groups = [order[i : (i + j)] for i, j in zip(dupe_idx[dupe_ok], dupe_len[dupe_ok])] return groups @@ -190,16 +193,14 @@ def hashable_rows(data, digits=None): # can we pack the whole row into a single 64 bit integer precision = int(np.floor(64 / as_int.shape[1])) # if the max value is less than precision we can do this - if np.abs(as_int).max() < 2**(precision - 1): + if np.abs(as_int).max() < 2 ** (precision - 1): # the resulting package hashable = np.zeros(len(as_int), dtype=np.int64) # loop through each column and bitwise xor to combine # make sure as_int is int64 otherwise bit offset won't work for offset, column in enumerate(as_int.astype(np.int64).T): # will modify hashable in place - np.bitwise_xor(hashable, - column << (offset * precision), - out=hashable) + np.bitwise_xor(hashable, column << (offset * precision), out=hashable) return hashable # reshape array into magical data type that is weird but hashable @@ -232,9 +233,9 @@ def float_to_int(data, digits=None, dtype=np.int32): # if data is already an integer or boolean we're done # if the data is empty we are also done - if data.dtype.kind in 'ib' or data.size == 0: + if data.dtype.kind in "ib" or data.size == 0: return data.astype(dtype) - elif data.dtype.kind != 'f': + elif data.dtype.kind != "f": data = data.astype(np.float64) # populate digits from kwargs @@ -243,8 +244,8 @@ def float_to_int(data, digits=None, dtype=np.int32): elif isinstance(digits, float) or isinstance(digits, np.float64): digits = util.decimal_to_digits(digits) elif not (isinstance(digits, int) or isinstance(digits, np.integer)): - log.warning('Digits were passed as %s!', digits.__class__.__name__) - raise ValueError('Digits must be None, int, or float!') + log.warning("Digits were passed as %s!", digits.__class__.__name__) + raise ValueError("Digits must be None, int, or float!") # data is float so convert to large integers data_max = np.abs(data).max() * 10**digits @@ -253,7 +254,7 @@ def float_to_int(data, digits=None, dtype=np.int32): # multiply by requested power of ten # then subtract small epsilon to avoid "go either way" rounding # then do the rounding and convert to integer - as_int = np.round((data * 10 ** digits) - 1e-6).astype(dtype) + as_int = np.round((data * 10**digits) - 1e-6).astype(dtype) return as_int @@ -278,8 +279,7 @@ def unique_ordered(data, return_index=False, return_inverse=False): # i.e. `data[index] == unique` # inverse is how to re-construct `data` from `unique` # i.e. `unique[inverse] == data` - unique, index, inverse = np.unique( - data, return_index=True, return_inverse=True) + unique, index, inverse = np.unique(data, return_index=True, return_inverse=True) # we want to maintain the original index order order = index.argsort() @@ -301,10 +301,7 @@ def unique_ordered(data, return_index=False, return_inverse=False): return result -def unique_bincount(values, - minlength=0, - return_inverse=False, - return_counts=False): +def unique_bincount(values, minlength=0, return_inverse=False, return_counts=False): """ For arrays of integers find unique values using bin counting. 
Roughly 10x faster for correct input than np.unique @@ -333,19 +330,19 @@ def unique_bincount(values, Only returned if return_counts is True """ values = np.asanyarray(values) - if len(values.shape) != 1 or values.dtype.kind != 'i': - raise ValueError('input must be 1D integers!') + if len(values.shape) != 1 or values.dtype.kind != "i": + raise ValueError("input must be 1D integers!") try: # count the number of occurrences of each value counts = np.bincount(values, minlength=minlength) except TypeError: # casting failed on 32 bit windows - log.warning('casting failed, falling back!') + log.warning("casting failed, falling back!") # fall back to numpy unique - return np.unique(values, - return_inverse=return_inverse, - return_counts=return_counts) + return np.unique( + values, return_inverse=return_inverse, return_counts=return_counts + ) # which bins are occupied at all # counts are integers so this works @@ -403,10 +400,7 @@ def merge_runs(data, digits=None): return data[mask] -def unique_float(data, - return_index=False, - return_inverse=False, - digits=None): +def unique_float(data, return_index=False, return_inverse=False, digits=None): """ Identical to the numpy.unique command, except evaluates floating point numbers, using a specified number of digits. @@ -415,9 +409,7 @@ def unique_float(data, """ data = np.asanyarray(data) as_int = float_to_int(data, digits) - _junk, unique, inverse = np.unique(as_int, - return_index=True, - return_inverse=True) + _junk, unique, inverse = np.unique(as_int, return_index=True, return_inverse=True) if (not return_index) and (not return_inverse): return data[unique] @@ -459,8 +451,7 @@ def unique_rows(data, digits=None, keep_order=False): # garbage row-hash and only returning index and inverse if keep_order: # keeps order of original occurrence - return unique_ordered( - rows, return_index=True, return_inverse=True)[1:] + return unique_ordered(rows, return_index=True, return_inverse=True)[1:] # returns values sorted by row-hash but since our row-hash # were pretty much garbage the sort order isn't meaningful return np.unique(rows, return_index=True, return_inverse=True)[1:] @@ -583,10 +574,10 @@ def group_slice(): # if you wanted to use this one function to deal with non- regular groups # you could use: np.array_split(dupe_idx) # this is roughly 3x slower than using the group_dict method above. - start_ok = np.diff( - np.concatenate((dupe_idx, [len(hashable)]))) == require_count - groups = np.tile(dupe_idx[start_ok].reshape((-1, 1)), - require_count) + np.arange(require_count) + start_ok = np.diff(np.concatenate((dupe_idx, [len(hashable)]))) == require_count + groups = np.tile(dupe_idx[start_ok].reshape((-1, 1)), require_count) + np.arange( + require_count + ) groups_idx = order[groups] if require_count == 1: return groups_idx.reshape(-1) @@ -620,16 +611,14 @@ def boolean_rows(a, b, operation=np.intersect1d): a = np.asanyarray(a, dtype=np.int64) b = np.asanyarray(b, dtype=np.int64) - av = a.view([('', a.dtype)] * a.shape[1]).ravel() - bv = b.view([('', b.dtype)] * b.shape[1]).ravel() + av = a.view([("", a.dtype)] * a.shape[1]).ravel() + bv = b.view([("", b.dtype)] * b.shape[1]).ravel() shared = operation(av, bv).view(a.dtype).reshape(-1, a.shape[1]) return shared -def group_vectors(vectors, - angle=1e-4, - include_negative=False): +def group_vectors(vectors, angle=1e-4, include_negative=False): """ Group vectors based on an angle tolerance, with the option to include negative vectors. 
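The bin-counting trick that `unique_bincount` leans on is easy to see in isolation: for bounded non-negative integers `np.bincount` builds a histogram in one pass, and the occupied bins are exactly the unique values, already sorted. A minimal standalone sketch with illustrative values (not part of this patch):

    import numpy as np

    values = np.array([0, 3, 3, 1, 0, 5])
    # one bin per possible integer value
    counts = np.bincount(values, minlength=6)
    # occupied bins are the unique values, already sorted
    unique = np.nonzero(counts)[0]
    assert (unique == [0, 1, 3, 5]).all()
    # counts[unique] is the same data np.unique returns for return_counts
    assert (counts[unique] == [2, 1, 2, 1]).all()
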
@@ -684,11 +673,9 @@ def group_distance(values, distance): Indexes of points that make up a group """ - values = np.asanyarray(values, - dtype=np.float64) + values = np.asanyarray(values, dtype=np.float64) - consumed = np.zeros(len(values), - dtype=bool) + consumed = np.zeros(len(values), dtype=bool) tree = cKDTree(values) # (n, d) set of values that are unique @@ -699,8 +686,7 @@ def group_distance(values, distance): for index, value in enumerate(values): if consumed[index]: continue - group = np.array(tree.query_ball_point(value, distance), - dtype=np.int64) + group = np.array(tree.query_ball_point(value, distance), dtype=np.int64) consumed[group] = True unique.append(np.median(values[group], axis=0)) groups.append(group) @@ -725,22 +711,18 @@ def clusters(points, radius): """ from . import graph + tree = cKDTree(points) # some versions return pairs as a set of tuples - pairs = tree.query_pairs(r=radius, output_type='ndarray') + pairs = tree.query_pairs(r=radius, output_type="ndarray") # group connected components groups = graph.connected_components(pairs) return groups -def blocks(data, - min_len=2, - max_len=np.inf, - wrap=False, - digits=None, - only_nonzero=False): +def blocks(data, min_len=2, max_len=np.inf, wrap=False, digits=None, only_nonzero=False): """ Find the indices in an array of contiguous blocks of equal values. @@ -769,7 +751,7 @@ def blocks(data, # keep an integer range around so we can slice arange = np.arange(len(data)) - arange.flags['WRITEABLE'] = False + arange.flags["WRITEABLE"] = False nonzero = arange[1:][data[1:] != data[:-1]] infl = np.zeros(len(nonzero) + 2, dtype=int) @@ -780,18 +762,15 @@ def blocks(data, infl_len = infl[1:] - infl[:-1] # check the length of each group - infl_ok = np.logical_and(infl_len >= min_len, - infl_len <= max_len) + infl_ok = np.logical_and(infl_len >= min_len, infl_len <= max_len) if only_nonzero: # check to make sure the values of each contiguous block # are True by checking the first value of each block - infl_ok = np.logical_and( - infl_ok, data[infl[:-1]]) + infl_ok = np.logical_and(infl_ok, data[infl[:-1]]) # inflate start/end indexes into full ranges of values - blocks = [arange[infl[i]:infl[i + 1]] - for i, ok in enumerate(infl_ok) if ok] + blocks = [arange[infl[i] : infl[i + 1]] for i, ok in enumerate(infl_ok) if ok] if wrap: # wrap only matters if first and last points are the same @@ -823,8 +802,9 @@ def blocks(data, if combined < min_len or combined > max_len: return blocks # new block combines both ends - new_block = np.append(np.arange(infl[-2], infl[-1]), - np.arange(infl[0], infl[1])) + new_block = np.append( + np.arange(infl[-2], infl[-1]), np.arange(infl[0], infl[1]) + ) # we are in a first OR last situation now if first: # first was already in a block so replace it with combined @@ -863,7 +843,7 @@ def group_min(groups, data): groups = groups[order] # this is only needed if groups is unsorted data = data[order] # construct an index which marks borders between groups - index = np.empty(len(groups), 'bool') + index = np.empty(len(groups), "bool") index[0] = True index[1:] = groups[1:] != groups[:-1] return data[index] diff --git a/trimesh/inertia.py b/trimesh/inertia.py index 29ef128df..66ca18c58 100644 --- a/trimesh/inertia.py +++ b/trimesh/inertia.py @@ -33,10 +33,14 @@ def cylinder_inertia(mass, radius, height, transform=None): inertia : (3, 3) float Inertia tensor """ - h2, r2 = height ** 2, radius ** 2 - diagonal = np.array([((mass * h2) / 12) + ((mass * r2) / 4), - ((mass * h2) / 12) + ((mass * r2) / 4), - 
(mass * r2) / 2])
+ h2, r2 = height**2, radius**2
+ diagonal = np.array(
+ [
+ ((mass * h2) / 12) + ((mass * r2) / 4),
+ ((mass * h2) / 12) + ((mass * r2) / 4),
+ (mass * r2) / 2,
+ ]
+ )
 inertia = diagonal * np.eye(3)

 if transform is not None:
@@ -61,7 +65,7 @@ def sphere_inertia(mass, radius):
 inertia : (3, 3) float
 Inertia tensor
 """
- inertia = (2.0 / 5.0) * (radius ** 2) * mass * np.eye(3)
+ inertia = (2.0 / 5.0) * (radius**2) * mass * np.eye(3)
 return inertia


@@ -85,7 +89,7 @@ def principal_axis(inertia):
 """
 inertia = np.asanyarray(inertia, dtype=np.float64)
 if inertia.shape != (3, 3):
- raise ValueError('inertia tensor must be (3, 3)!')
+ raise ValueError("inertia tensor must be (3, 3)!")

 # you could use any of the following to calculate this:
 # np.linalg.svd, np.linalg.eig, np.linalg.eigh
@@ -99,45 +103,42 @@ def principal_axis(inertia):
 return components, vectors


-def transform_inertia(transform,
- inertia_tensor,
- parallel_axis=False,
- mass=None):
+def transform_inertia(transform, inertia_tensor, parallel_axis=False, mass=None):
 """
- Transform an inertia tensor to a new frame.
-
- Note that in trimesh `mesh.moment_inertia` is *axis aligned*
- and at `mesh.center_mass`.
-
- So to transform to a new frame and get the moment of inertia at
- the center of mass the translation should be ignored and only
- rotation applied.
-
- If parallel axis is enabled it will compute the inertia
- about a new location.
-
- More details in the MIT OpenCourseWare PDF:
- `MIT16_07F09_Lec26.pdf`
-
-
- Parameters
- ------------
- transform : (3, 3) or (4, 4) float
- Transformation matrix
- inertia_tensor : (3, 3) float
- Inertia tensor.
- parallel_axis : bool
- Apply the parallel axis theorem or not.
- If the passed inertia tensor is at the center of mass
- and you want the new post-transform tensor also at the
- center of mass you DON'T want this enabled as you *only*
- want to apply the rotation. Use this to get moment of
- inertia at an arbitrary frame that isn't the center of mass.
-
- Returns
- ------------
- transformed : (3, 3) float
- Inertia tensor in new frame.
+ Transform an inertia tensor to a new frame.
+
+ Note that in trimesh `mesh.moment_inertia` is *axis aligned*
+ and at `mesh.center_mass`.
+
+ So to transform to a new frame and get the moment of inertia at
+ the center of mass the translation should be ignored and only
+ rotation applied.
+
+ If parallel axis is enabled it will compute the inertia
+ about a new location.
+
+ More details in the MIT OpenCourseWare PDF:
+ `MIT16_07F09_Lec26.pdf`
+
+
+ Parameters
+ ------------
+ transform : (3, 3) or (4, 4) float
+ Transformation matrix
+ inertia_tensor : (3, 3) float
+ Inertia tensor.
+ parallel_axis : bool
+ Apply the parallel axis theorem or not.
+ If the passed inertia tensor is at the center of mass
+ and you want the new post-transform tensor also at the
+ center of mass you DON'T want this enabled as you *only*
+ want to apply the rotation. Use this to get moment of
+ inertia at an arbitrary frame that isn't the center of mass.
+
+ Returns
+ ------------
+ transformed : (3, 3) float
+ Inertia tensor in new frame.
""" # check inputs and extract rotation transform = np.asanyarray(transform, dtype=np.float64) @@ -146,11 +147,11 @@ def transform_inertia(transform, elif transform.shape == (3, 3): rotation = transform else: - raise ValueError('transform must be (3, 3) or (4, 4)!') + raise ValueError("transform must be (3, 3) or (4, 4)!") inertia_tensor = np.asanyarray(inertia_tensor, dtype=np.float64) if inertia_tensor.shape != (3, 3): - raise ValueError('inertia_tensor must be (3, 3)!') + raise ValueError("inertia_tensor must be (3, 3)!") if parallel_axis: if transform.shape == (3, 3): @@ -162,18 +163,18 @@ def transform_inertia(transform, # First the changed origin of the new transform is taken into # account. To calculate the inertia tensor # the parallel axis theorem is used - M = np.array([[a[1]**2 + a[2]**2, -a[0] * a[1], -a[0] * a[2]], - [-a[0] * a[1], a[0]**2 + a[2]**2, -a[1] * a[2]], - [-a[0] * a[2], -a[1] * a[2], a[0]**2 + a[1]**2]]) + M = np.array( + [ + [a[1] ** 2 + a[2] ** 2, -a[0] * a[1], -a[0] * a[2]], + [-a[0] * a[1], a[0] ** 2 + a[2] ** 2, -a[1] * a[2]], + [-a[0] * a[2], -a[1] * a[2], a[0] ** 2 + a[1] ** 2], + ] + ) aligned_inertia = inertia_tensor + mass * M - return util.multi_dot([rotation.T, - aligned_inertia, - rotation]) + return util.multi_dot([rotation.T, aligned_inertia, rotation]) - return util.multi_dot([rotation, - inertia_tensor, - rotation.T]) + return util.multi_dot([rotation, inertia_tensor, rotation.T]) def radial_symmetry(mesh): @@ -220,7 +221,7 @@ def radial_symmetry(mesh): axis = vector[0] section = vector[1:] - return 'spherical', axis, section + return "spherical", axis, section elif diff_zero.any(): # this is the case for 2/3 PCI are identical @@ -237,15 +238,14 @@ def radial_symmetry(mesh): # since two vectors are the same, we know the middle # one is one of those two - section_index = order[ - np.array([[0, 1], [1, -1]])[diff_zero]].flatten() + section_index = order[np.array([[0, 1], [1, -1]])[diff_zero]].flatten() section = vector[section_index] # we know the rotation axis is the sole unique value # and is either first or last of the sorted values axis_index = order[np.array([-1, 0])[diff_zero]][0] axis = vector[axis_index] - return 'radial', axis, section + return "radial", axis, section return None, None, None @@ -268,9 +268,13 @@ def scene_inertia(scene, transform): # get the matrix ang geometry name for nodes = [graph[n] for n in graph.nodes_geometry] # get the moment of inertia with the mesh moved to a location - moments = np.array([geoms[g].moment_inertia_frame( - np.dot(np.linalg.inv(mat), transform)) for mat, g in nodes - if hasattr(geoms[g], 'moment_inertia_frame')], - dtype=np.float64) + moments = np.array( + [ + geoms[g].moment_inertia_frame(np.dot(np.linalg.inv(mat), transform)) + for mat, g in nodes + if hasattr(geoms[g], "moment_inertia_frame") + ], + dtype=np.float64, + ) return moments.sum(axis=0) diff --git a/trimesh/intersections.py b/trimesh/intersections.py index b3bb5f24c..0343519d7 100644 --- a/trimesh/intersections.py +++ b/trimesh/intersections.py @@ -13,12 +13,14 @@ from .triangles import points_to_barycentric -def mesh_plane(mesh, - plane_normal, - plane_origin, - return_faces=False, - local_faces=None, - cached_dots=None): +def mesh_plane( + mesh, + plane_normal, + plane_origin, + return_faces=False, + local_faces=None, + cached_dots=None, +): """ Find a the intersections between a mesh and a plane, returning a set of line segments on that plane. 
@@ -116,13 +118,11 @@ def handle_on_vertex(signs, faces, vertices): vertex_plane = faces[signs == 0] edge_thru = faces[signs != 0].reshape((-1, 2)) point_intersect, valid = plane_lines( - plane_origin, - plane_normal, - vertices[edge_thru.T], - line_segments=False) - lines = np.column_stack(( - vertices[vertex_plane[valid]], - point_intersect)).reshape((-1, 2, 3)) + plane_origin, plane_normal, vertices[edge_thru.T], line_segments=False + ) + lines = np.column_stack((vertices[vertex_plane[valid]], point_intersect)).reshape( + (-1, 2, 3) + ) return lines def handle_on_edge(signs, faces, vertices): @@ -133,18 +133,18 @@ def handle_on_edge(signs, faces, vertices): def handle_basic(signs, faces, vertices): # case where one vertex is on one side and two are on the other - unique_element = grouping.unique_value_in_row( - signs, unique=[-1, 1]) + unique_element = grouping.unique_value_in_row(signs, unique=[-1, 1]) edges = np.column_stack( - (faces[unique_element], - faces[np.roll(unique_element, 1, axis=1)], - faces[unique_element], - faces[np.roll(unique_element, 2, axis=1)])).reshape( - (-1, 2)) - intersections, valid = plane_lines(plane_origin, - plane_normal, - vertices[edges.T], - line_segments=False) + ( + faces[unique_element], + faces[np.roll(unique_element, 1, axis=1)], + faces[unique_element], + faces[np.roll(unique_element, 2, axis=1)], + ) + ).reshape((-1, 2)) + intersections, valid = plane_lines( + plane_origin, plane_normal, vertices[edges.T], line_segments=False + ) # since the data has been pre- culled, any invalid intersections at all # means the culling was done incorrectly and thus things are broken assert valid.all() @@ -154,14 +154,13 @@ def handle_basic(signs, faces, vertices): plane_normal = np.asanyarray(plane_normal, dtype=np.float64) plane_origin = np.asanyarray(plane_origin, dtype=np.float64) if plane_origin.shape != (3,) or plane_normal.shape != (3,): - raise ValueError('Plane origin and normal must be (3,)!') + raise ValueError("Plane origin and normal must be (3,)!") if local_faces is None: # do a cross section against all faces faces = mesh.faces else: - local_faces = np.asanyarray( - local_faces, dtype=np.int64) + local_faces = np.asanyarray(local_faces, dtype=np.int64) # only take the subset of faces if passed faces = mesh.faces[local_faces] @@ -184,20 +183,17 @@ def handle_basic(signs, faces, vertices): # and which of the three intersection cases they are in cases = triangle_cases(signs) # handlers for each case - handlers = (handle_basic, - handle_on_vertex, - handle_on_edge) + handlers = (handle_basic, handle_on_vertex, handle_on_edge) # the (m, 2, 3) line segments - lines = np.vstack([h(signs[c], - faces[c], - mesh.vertices) - for c, h in zip(cases, handlers)]) + lines = np.vstack( + [h(signs[c], faces[c], mesh.vertices) for c, h in zip(cases, handlers)] + ) if return_faces: # everything that hit something index = np.hstack([np.nonzero(c)[0] for c in cases]) - assert index.dtype.kind == 'i' + assert index.dtype.kind == "i" if local_faces is None: return lines, index # we are considering a subset of faces @@ -206,11 +202,7 @@ def handle_basic(signs, faces, vertices): return lines -def mesh_multiplane( - mesh, - plane_origin, - plane_normal, - heights): +def mesh_multiplane(mesh, plane_origin, plane_normal, heights): """ A utility function for slicing a mesh by multiple parallel planes which caches the dot product operation. 
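The handler dispatch above is driven entirely by per-vertex signs against the plane. A standalone sketch of that classification on a single illustrative triangle (hypothetical values, mirroring the tolerance logic in `mesh_plane`):

    import numpy as np

    plane_origin = np.zeros(3)
    plane_normal = np.array([0.0, 0.0, 1.0])
    # one vertex below, one above, one exactly on the plane
    vertices = np.array([[0.0, 0.0, -1.0], [1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])
    dots = np.dot(vertices - plane_origin, plane_normal)
    signs = np.zeros(len(vertices), dtype=np.int8)
    signs[dots > 1e-12] = 1
    signs[dots < -1e-12] = -1
    # the sign pattern {-1, 0, 1} selects the "on vertex" handler
    assert (np.sort(signs) == [-1, 0, 1]).all()
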
@@ -238,19 +230,14 @@ def mesh_multiplane( """ # check input plane plane_normal = util.unitize(plane_normal) - plane_origin = np.asanyarray(plane_origin, - dtype=np.float64) + plane_origin = np.asanyarray(plane_origin, dtype=np.float64) heights = np.asanyarray(heights, dtype=np.float64) # dot product of every vertex with plane - vertex_dots = np.dot( - plane_normal, - (mesh.vertices - plane_origin).T) + vertex_dots = np.dot(plane_normal, (mesh.vertices - plane_origin).T) # reconstruct transforms for each 2D section - base_transform = geometry.plane_transform( - origin=plane_origin, - normal=plane_normal) + base_transform = geometry.plane_transform(origin=plane_origin, normal=plane_normal) base_transform = np.linalg.inv(base_transform) # alter translation Z inside loop @@ -273,7 +260,8 @@ def mesh_multiplane( plane_origin=new_origin, plane_normal=plane_normal, return_faces=True, - cached_dots=new_dots) + cached_dots=new_dots, + ) # get the transforms to 3D space and back translation[2, 3] = height @@ -282,8 +270,7 @@ def mesh_multiplane( transforms.append(to_3D) # transform points to 2D frame - lines_2D = tf.transform_points( - lines.reshape((-1, 3)), to_2D) + lines_2D = tf.transform_points(lines.reshape((-1, 3)), to_2D) # if we didn't screw up the transform all # of the Z values should be zero @@ -302,10 +289,7 @@ def mesh_multiplane( return segments, transforms, face_index -def plane_lines(plane_origin, - plane_normal, - endpoints, - line_segments=True): +def plane_lines(plane_origin, plane_normal, endpoints, line_segments=True): """ Calculate plane-line intersections @@ -343,11 +327,9 @@ def plane_lines(plane_origin, # We discard on-plane vectors by checking that the dot product is nonzero valid = np.abs(b) > tol.zero if line_segments: - test = np.dot(plane_normal, - np.transpose(plane_origin - endpoints[1])) + test = np.dot(plane_normal, np.transpose(plane_origin - endpoints[1])) different_sides = np.sign(t) != np.sign(test) - nonzero = np.logical_or(np.abs(t) > tol.zero, - np.abs(test) > tol.zero) + nonzero = np.logical_or(np.abs(t) > tol.zero, np.abs(test) > tol.zero) valid = np.logical_and(valid, different_sides) valid = np.logical_and(valid, nonzero) @@ -358,12 +340,14 @@ def plane_lines(plane_origin, return intersection, valid -def planes_lines(plane_origins, - plane_normals, - line_origins, - line_directions, - return_distance=False, - return_denom=False): +def planes_lines( + plane_origins, + plane_normals, + line_origins, + line_directions, + return_distance=False, + return_denom=False, +): """ Given one line per plane find the intersection points. 
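A rough usage sketch for the multiplane slicer above, using the signature shown in this hunk (the box mesh and the heights are illustrative):

    import numpy as np
    import trimesh

    mesh = trimesh.creation.box(extents=[1, 1, 1])
    heights = np.linspace(-0.4, 0.4, 5)
    segments, transforms, faces = trimesh.intersections.mesh_multiplane(
        mesh=mesh,
        plane_origin=[0, 0, 0],
        plane_normal=[0, 0, 1],
        heights=heights)
    # one batch of 2D segments and one 3D transform per requested height
    assert len(segments) == len(transforms) == len(heights)
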
@@ -408,8 +392,7 @@ def planes_lines(plane_origins, valid = np.abs(projection_dir) > 1e-5 - distance = np.divide(projection_ori[valid], - projection_dir[valid]) + distance = np.divide(projection_ori[valid], projection_dir[valid]) on_plane = line_directions[valid] * distance.reshape((-1, 1)) on_plane += line_origins[valid] @@ -424,13 +407,15 @@ def planes_lines(plane_origins, return result -def slice_faces_plane(vertices, - faces, - plane_normal, - plane_origin, - uv=None, - face_index=None, - cached_dots=None): +def slice_faces_plane( + vertices, + faces, + plane_normal, + plane_origin, + uv=None, + face_index=None, + cached_dots=None, +): """ Slice a mesh (given as a set of faces and vertices) with a plane, returning a new mesh (again as a set of faces and vertices) that is the @@ -501,9 +486,7 @@ def slice_faces_plane(vertices, # (0,0,0), (-1,0,0), (-1,-1,0), (-1,-1,-1) <- inside # (1,0,0), (1,1,0), (1,1,1) <- outside # (1,0,-1), (1,-1,-1), (1,1,-1) <- onedge - onedge = np.logical_and( - signs_asum >= 2, - np.abs(signs_sum) <= 1) + onedge = np.logical_and(signs_asum >= 2, np.abs(signs_sum) <= 1) inside = signs_sum == -signs_asum @@ -540,19 +523,20 @@ def slice_faces_plane(vertices, # If no faces to cut, the surface is not in contact with this plane. # Thus, return a mesh with only the inside faces if len(cut_faces_quad) + len(cut_faces_tri) == 0: - if len(new_faces) == 0: # if no new faces at all return empty arrays - empty = (np.zeros((0, 3), dtype=np.float64), - np.zeros((0, 3), dtype=np.int64), - np.zeros((0, 2), dtype=np.float64) if have_uv else None) + empty = ( + np.zeros((0, 3), dtype=np.float64), + np.zeros((0, 3), dtype=np.int64), + np.zeros((0, 2), dtype=np.float64) if have_uv else None, + ) return empty # find the unique indices in the new faces # using an integer-only unique function - unique, inverse = grouping.unique_bincount(new_faces.reshape(-1), - minlength=len(vertices), - return_inverse=True) + unique, inverse = grouping.unique_bincount( + new_faces.reshape(-1), minlength=len(vertices), return_inverse=True + ) # use the unique indices for our final vertices and faces final_vert = vertices[unique] @@ -562,14 +546,14 @@ def slice_faces_plane(vertices, return final_vert, final_face, final_uv # Extract the intersections of each triangle's edges with the plane - o = cut_triangles # origins - d = np.roll(o, -1, axis=1) - o # directions - num = (plane_origin - o).dot(plane_normal) # compute num/denom + o = cut_triangles # origins + d = np.roll(o, -1, axis=1) - o # directions + num = (plane_origin - o).dot(plane_normal) # compute num/denom denom = np.dot(d, plane_normal) - denom[denom == 0.0] = 1e-12 # prevent division by zero + denom[denom == 0.0] = 1e-12 # prevent division by zero dist = np.divide(num, denom) # intersection points for each segment - int_points = np.einsum('ij,ijk->ijk', dist, d) + o + int_points = np.einsum("ij,ijk->ijk", dist, d) + o # Initialize the array of new vertices with the current vertices new_vertices = vertices @@ -586,27 +570,30 @@ def slice_faces_plane(vertices, quad_int_inds = np.where(cut_signs_quad == 1)[1] quad_int_verts = cut_faces_quad[ np.stack((range(num_quads), range(num_quads)), axis=1), - np.stack(((quad_int_inds + 1) % 3, (quad_int_inds + 2) % 3), axis=1)] + np.stack(((quad_int_inds + 1) % 3, (quad_int_inds + 2) % 3), axis=1), + ] # Fill out new quad faces with the intersection points as vertices new_quad_faces = np.append( quad_int_verts, - np.arange(len(new_vertices), - len(new_vertices) + - 2 * num_quads).reshape(num_quads, 2), 
axis=1) + np.arange(len(new_vertices), len(new_vertices) + 2 * num_quads).reshape( + num_quads, 2 + ), + axis=1, + ) # Extract correct intersection points from int_points and order them in # the same way as they were added to faces new_quad_vertices = quad_int_points[ np.stack((range(num_quads), range(num_quads)), axis=1), - np.stack((((quad_int_inds + 2) % 3).T, quad_int_inds.T), - axis=1), :].reshape(2 * num_quads, 3) + np.stack((((quad_int_inds + 2) % 3).T, quad_int_inds.T), axis=1), + :, + ].reshape(2 * num_quads, 3) # Add new vertices to existing vertices, triangulate quads, and add the # resulting triangles to the new faces new_vertices = np.append(new_vertices, new_quad_vertices, axis=0) - new_tri_faces_from_quads = geometry.triangulate_quads( - new_quad_faces) + new_tri_faces_from_quads = geometry.triangulate_quads(new_quad_faces) new_faces = np.append(new_faces, new_tri_faces_from_quads, axis=0) # Handle the case where a new triangle is formed by the intersection @@ -617,24 +604,24 @@ def slice_faces_plane(vertices, # Extract the single vertex for each triangle inside the plane and get the # inside vertices (CCW order) tri_int_inds = np.where(cut_signs_tri == -1)[1] - tri_int_verts = cut_faces_tri[range( - num_tris), tri_int_inds].reshape(num_tris, 1) + tri_int_verts = cut_faces_tri[range(num_tris), tri_int_inds].reshape(num_tris, 1) # Fill out new triangles with the intersection points as vertices new_tri_faces = np.append( tri_int_verts, - np.arange(len(new_vertices), - len(new_vertices) + - 2 * num_tris).reshape(num_tris, 2), - axis=1) + np.arange(len(new_vertices), len(new_vertices) + 2 * num_tris).reshape( + num_tris, 2 + ), + axis=1, + ) # Extract correct intersection points and order them in the same way as # the vertices were added to the faces new_tri_vertices = tri_int_points[ np.stack((range(num_tris), range(num_tris)), axis=1), - np.stack((tri_int_inds.T, ((tri_int_inds + 2) % 3).T), - axis=1), - :].reshape(2 * num_tris, 3) + np.stack((tri_int_inds.T, ((tri_int_inds + 2) % 3).T), axis=1), + :, + ].reshape(2 * num_tris, 3) # Append new vertices and new faces new_vertices = np.append(new_vertices, new_tri_vertices, axis=0) @@ -642,9 +629,9 @@ def slice_faces_plane(vertices, # find the unique indices in the new faces # using an integer-only unique function - unique, inverse = grouping.unique_bincount(new_faces.reshape(-1), - minlength=len(new_vertices), - return_inverse=True) + unique, inverse = grouping.unique_bincount( + new_faces.reshape(-1), minlength=len(new_vertices), return_inverse=True + ) # use the unique indexes for our final vertex and faces final_vert = new_vertices[unique] @@ -654,29 +641,31 @@ def slice_faces_plane(vertices, if have_uv: # Generate barycentric coordinates for intersection vertices quad_barycentrics = points_to_barycentric( - np.repeat(vertices[cut_faces_quad], 2, axis=0), - new_quad_vertices) + np.repeat(vertices[cut_faces_quad], 2, axis=0), new_quad_vertices + ) tri_barycentrics = points_to_barycentric( - np.repeat(vertices[cut_faces_tri], 2, axis=0), - new_tri_vertices) + np.repeat(vertices[cut_faces_tri], 2, axis=0), new_tri_vertices + ) all_barycentrics = np.concatenate([quad_barycentrics, tri_barycentrics]) # Interpolate UVs cut_uv = np.concatenate([uv[cut_faces_quad], uv[cut_faces_tri]]) - new_uv = np.einsum('ijk,ij->ik', np.repeat(cut_uv, 2, axis=0), all_barycentrics) + new_uv = np.einsum("ijk,ij->ik", np.repeat(cut_uv, 2, axis=0), all_barycentrics) final_uv = np.concatenate([uv, new_uv])[unique] return final_vert, final_face, final_uv 
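A rough usage sketch of the array-level slicer above: cut a unit box through the origin and keep one side. `trimesh.creation.box` is used purely to produce input data; the `(vertices, faces, uv)` return signature is the one constructed in this hunk:

    import numpy as np
    import trimesh

    box = trimesh.creation.box(extents=[1, 1, 1])
    vertices, faces, uv = trimesh.intersections.slice_faces_plane(
        vertices=box.vertices,
        faces=box.faces,
        plane_normal=np.array([1.0, 0.0, 0.0]),
        plane_origin=np.zeros(3))
    # the kept half spans only half of the original X extent
    assert np.isclose(np.ptp(vertices[:, 0]), 0.5)
    # no UV coordinates were passed in, so none come back
    assert uv is None
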
-def slice_mesh_plane(mesh, - plane_normal, - plane_origin, - face_index=None, - cap=False, - cached_dots=None, - engine=None, - **kwargs): +def slice_mesh_plane( + mesh, + plane_normal, + plane_origin, + face_index=None, + cap=False, + cached_dots=None, + engine=None, + **kwargs, +): """ Slice a mesh with a plane returning a new mesh that is the portion of the original mesh to the positive normal side @@ -721,35 +710,35 @@ def slice_mesh_plane(mesh, from .visual import TextureVisuals # check input plane - plane_normal = np.asanyarray( - plane_normal, dtype=np.float64) - plane_origin = np.asanyarray( - plane_origin, dtype=np.float64) + plane_normal = np.asanyarray(plane_normal, dtype=np.float64) + plane_origin = np.asanyarray(plane_origin, dtype=np.float64) # check to make sure origins and normals have acceptable shape - shape_ok = ((plane_origin.shape == (3,) or - util.is_shape(plane_origin, (-1, 3))) and - (plane_normal.shape == (3,) or - util.is_shape(plane_normal, (-1, 3))) and - plane_origin.shape == plane_normal.shape) + shape_ok = ( + (plane_origin.shape == (3,) or util.is_shape(plane_origin, (-1, 3))) + and (plane_normal.shape == (3,) or util.is_shape(plane_normal, (-1, 3))) + and plane_origin.shape == plane_normal.shape + ) if not shape_ok: - raise ValueError('plane origins and normals must be (n, 3)!') + raise ValueError("plane origins and normals must be (n, 3)!") # start with copy of original mesh, faces, and vertices vertices = mesh.vertices.copy() faces = mesh.faces.copy() # We copy the UV coordinates if available - has_uv = (hasattr(mesh.visual, 'uv') and np.shape( - mesh.visual.uv) == (len(mesh.vertices), 2)) and not cap + has_uv = ( + hasattr(mesh.visual, "uv") and np.shape(mesh.visual.uv) == (len(mesh.vertices), 2) + ) and not cap uv = mesh.visual.uv.copy() if has_uv else None - if 'process' not in kwargs: - kwargs['process'] = False + if "process" not in kwargs: + kwargs["process"] = False # slice away specified planes - for origin, normal in zip(plane_origin.reshape((-1, 3)), - plane_normal.reshape((-1, 3))): + for origin, normal in zip( + plane_origin.reshape((-1, 3)), plane_normal.reshape((-1, 3)) + ): # save the new vertices and faces vertices, faces, uv = slice_faces_plane( vertices=vertices, @@ -757,13 +746,13 @@ def slice_mesh_plane(mesh, uv=uv, plane_normal=normal, plane_origin=origin, - face_index=face_index) + face_index=face_index, + ) # check if cap arg specified if cap: if face_index: # This hasn't been implemented yet. 
- raise NotImplementedError( - "face_index and cap can't be used together") + raise NotImplementedError("face_index and cap can't be used together") # start by deduplicating vertices again unique, inverse = grouping.unique_rows(vertices) @@ -774,9 +763,7 @@ def slice_mesh_plane(mesh, # that each face has three unique indices f = f[(f[:, :1] != f[:, 1:]).all(axis=1)] # transform to the cap plane - to_2D = geometry.plane_transform( - origin=origin, - normal=-normal) + to_2D = geometry.plane_transform(origin=origin, normal=-normal) to_3D = np.linalg.inv(to_2D) vertices_2D = tf.transform_points(vertices, to_2D) @@ -787,28 +774,27 @@ def slice_mesh_plane(mesh, edges = edges[on_plane[edges].all(axis=1)] edges = edges[edges[:, 0] != edges[:, 1]] - unique_edge = grouping.group_rows( - edges, require_count=1) + unique_edge = grouping.group_rows(edges, require_count=1) if len(unique) < 3: continue tree = cKDTree(vertices) # collect new faces faces = [f] - for p in polygons.edges_to_polygons( - edges[unique_edge], vertices_2D[:, :2]): + for p in polygons.edges_to_polygons(edges[unique_edge], vertices_2D[:, :2]): vn, fn = triangulate_polygon(p, engine=engine) # collect the original index for the new vertices vn3 = tf.transform_points(util.stack_3D(vn), to_3D) distance, vid = tree.query(vn3) if distance.max() > 1e-8: - util.log.debug('triangulate may have inserted vertex!') + util.log.debug("triangulate may have inserted vertex!") # triangulation should not have inserted vertices faces.append(vid[fn]) faces = np.vstack(faces) - visual = TextureVisuals( - uv=uv, material=mesh.visual.material.copy()) if has_uv else None + visual = ( + TextureVisuals(uv=uv, material=mesh.visual.material.copy()) if has_uv else None + ) # return the sliced mesh return Trimesh(vertices=vertices, faces=faces, visual=visual, **kwargs) diff --git a/trimesh/interval.py b/trimesh/interval.py index a68a25c6e..34fa856e3 100644 --- a/trimesh/interval.py +++ b/trimesh/interval.py @@ -40,7 +40,7 @@ def check(a, b, digits): b = np.array(b, dtype=np.float64) if a.shape != b.shape or a.shape[-1] != 2: - raise ValueError('ranges must be identical and (2,)!') + raise ValueError("ranges must be identical and (2,)!") # if input was single interval reshape it here is_1D = False @@ -89,32 +89,28 @@ def intersection(a, b, digits=8): overlap = np.zeros(a.shape, dtype=np.float64) # A fully overlaps B - current = np.logical_and(a_int[:, 0] <= b_int[:, 0], - a_int[:, 1] >= b_int[:, 1]) + current = np.logical_and(a_int[:, 0] <= b_int[:, 0], a_int[:, 1] >= b_int[:, 1]) overlap[current] = b[current] # B fully overlaps A - current = np.logical_and(a_int[:, 0] >= b_int[:, 0], - a_int[:, 1] <= b_int[:, 1]) + current = np.logical_and(a_int[:, 0] >= b_int[:, 0], a_int[:, 1] <= b_int[:, 1]) overlap[current] = a[current] # A starts B ends # A:, 0 B:, 0 A:, 1 B:, 1 current = np.logical_and( - np.logical_and(a_int[:, 0] <= b_int[:, 0], - b_int[:, 0] < a_int[:, 1]), - a_int[:, 1] < b_int[:, 1]) - overlap[current] = np.column_stack([b[current][:, 0], - a[current][:, 1]]) + np.logical_and(a_int[:, 0] <= b_int[:, 0], b_int[:, 0] < a_int[:, 1]), + a_int[:, 1] < b_int[:, 1], + ) + overlap[current] = np.column_stack([b[current][:, 0], a[current][:, 1]]) # B starts A ends # B:, 0 A:, 0 B:, 1 A:, 1 current = np.logical_and( - np.logical_and(b_int[:, 0] <= a_int[:, 0], - a_int[:, 0] < b_int[:, 1]), - b_int[:, 1] < a_int[:, 1]) - overlap[current] = np.column_stack([a[current][:, 0], - b[current][:, 1]]) + np.logical_and(b_int[:, 0] <= a_int[:, 0], a_int[:, 0] < b_int[:, 
1]), + b_int[:, 1] < a_int[:, 1], + ) + overlap[current] = np.column_stack([a[current][:, 0], b[current][:, 1]]) # is range overlapping at all intersects = overlap.ptp(axis=1) > 10**-digits diff --git a/trimesh/nsphere.py b/trimesh/nsphere.py index 1a760a6c3..40bbb21ca 100644 --- a/trimesh/nsphere.py +++ b/trimesh/nsphere.py @@ -17,6 +17,7 @@ except BaseException as E: # raise the exception when someone tries to use it from . import exceptions + leastsq = exceptions.ExceptionWrapper(E) spatial = exceptions.ExceptionWrapper(E) @@ -26,7 +27,9 @@ def _MAX_MEMORY(): # if we have psutil check actual free memory when called return psutil.virtual_memory().free / 2.0 + except BaseException: + def _MAX_MEMORY(): # use a hardcoded best guess estimate return 1e9 @@ -71,7 +74,7 @@ def minimum_nsphere(obj): # bothering to compute the voronoi diagram fit_C, fit_R, fit_E = fit_nsphere(points) # return fit radius and center to global scale - fit_R = (((points - fit_C)**2).sum(axis=1).max() ** .5) * points_scale + fit_R = (((points - fit_C) ** 2).sum(axis=1).max() ** 0.5) * points_scale fit_C = (fit_C * points_scale) + points_origin if fit_E < 1e-6: @@ -97,22 +100,22 @@ def minimum_nsphere(obj): if memory_estimate > _MAX_MEMORY(): raise MemoryError radii_2 = spatial.distance.cdist( - voronoi.vertices, points, - metric='sqeuclidean').max(axis=1) + voronoi.vertices, points, metric="sqeuclidean" + ).max(axis=1) except MemoryError: # log the MemoryError - log.warning('MemoryError: falling back to slower check!') + log.warning("MemoryError: falling back to slower check!") # fall back to a potentially very slow list comprehension - radii_2 = np.array([((points - v) ** 2).sum(axis=1).max() - for v in voronoi.vertices]) + radii_2 = np.array( + [((points - v) ** 2).sum(axis=1).max() for v in voronoi.vertices] + ) # we want the smallest sphere so take the min of the radii radii_idx = radii_2.argmin() # return voronoi radius and center to global scale radius_v = np.sqrt(radii_2[radii_idx]) * points_scale - center_v = (voronoi.vertices[radii_idx] * - points_scale) + points_origin + center_v = (voronoi.vertices[radii_idx] * points_scale) + points_origin if radius_v > fit_R: return fit_C, fit_R @@ -158,12 +161,10 @@ def residuals(center): else: guess = np.asanyarray(prior) - center_result, return_code = leastsq(residuals, - guess, - xtol=1e-8) + center_result, return_code = leastsq(residuals, guess, xtol=1e-8) if return_code not in [1, 2, 3, 4]: - raise ValueError('Least square fit failed!') + raise ValueError("Least square fit failed!") radii = util.row_norm(points - center_result) radius = radii.mean() diff --git a/trimesh/parent.py b/trimesh/parent.py index b0b0b4c40..7df64f547 100644 --- a/trimesh/parent.py +++ b/trimesh/parent.py @@ -8,8 +8,9 @@ import numpy as np -from . import caching +from . import bounds, caching from . import transformations as tf +from .constants import tol from .util import ABC @@ -77,21 +78,19 @@ def __repr__(self): Human readable quick look at the geometry. 
""" elements = [] - if hasattr(self, 'vertices'): + if hasattr(self, "vertices"): # for Trimesh and PointCloud - elements.append(f'vertices.shape={self.vertices.shape}') - if hasattr(self, 'faces'): + elements.append(f"vertices.shape={self.vertices.shape}") + if hasattr(self, "faces"): # for Trimesh - elements.append(f'faces.shape={self.faces.shape}') - if hasattr(self, 'geometry') and isinstance( - self.geometry, dict): + elements.append(f"faces.shape={self.faces.shape}") + if hasattr(self, "geometry") and isinstance(self.geometry, dict): # for Scene - elements.append(f'len(geometry)={len(self.geometry)}') - if 'Voxel' in type(self).__name__: + elements.append(f"len(geometry)={len(self.geometry)}") + if "Voxel" in type(self).__name__: # for VoxelGrid objects elements.append(str(self.shape)[1:-1]) - return ''.format( - type(self).__name__, ', '.join(elements)) + return "".format(type(self).__name__, ", ".join(elements)) def apply_translation(self, translation): """ @@ -105,10 +104,9 @@ def apply_translation(self, translation): translation = np.asanyarray(translation, dtype=np.float64) if translation.shape == (2,): # create a planar matrix if we were passed a 2D offset - return self.apply_transform( - tf.planar_matrix(offset=translation)) + return self.apply_transform(tf.planar_matrix(offset=translation)) elif translation.shape != (3,): - raise ValueError('Translation must be (3,) or (2,)!') + raise ValueError("Translation must be (3,) or (2,)!") # manually create a translation matrix matrix = np.eye(4) @@ -177,9 +175,7 @@ def bounding_box(self): # translate to center of axis aligned bounds transform[:3, 3] = self.bounds.mean(axis=0) - aabb = primitives.Box(transform=transform, - extents=self.extents, - mutable=False) + aabb = primitives.Box(transform=transform, extents=self.extents, mutable=False) return aabb @caching.cache_decorator @@ -195,10 +191,11 @@ def bounding_box_oriented(self): bounding box of the mesh """ from . import bounds, primitives + to_origin, extents = bounds.oriented_bounds(self) - obb = primitives.Box(transform=np.linalg.inv(to_origin), - extents=extents, - mutable=False) + obb = primitives.Box( + transform=np.linalg.inv(to_origin), extents=extents, mutable=False + ) return obb @caching.cache_decorator @@ -218,10 +215,9 @@ def bounding_sphere(self): Sphere primitive containing current mesh """ from . import nsphere, primitives + center, radius = nsphere.minimum_nsphere(self) - minball = primitives.Sphere(center=center, - radius=radius, - mutable=False) + minball = primitives.Sphere(center=center, radius=radius, mutable=False) return minball @caching.cache_decorator @@ -235,6 +231,7 @@ def bounding_cylinder(self): Cylinder primitive containing current mesh """ from . import bounds, primitives + kwargs = bounds.minimum_cylinder(self) mincyl = primitives.Cylinder(mutable=False, **kwargs) return mincyl @@ -253,27 +250,40 @@ def bounding_primitive(self): trimesh.primitives.Box trimesh.primitives.Cylinder """ - options = [self.bounding_box_oriented, - self.bounding_sphere, - self.bounding_cylinder] + options = [ + self.bounding_box_oriented, + self.bounding_sphere, + self.bounding_cylinder, + ] volume_min = np.argmin([i.volume for i in options]) bounding_primitive = options[volume_min] return bounding_primitive - def apply_obb(self): + def apply_obb(self, **kwargs): """ Apply the oriented bounding box transform to the current mesh. This will result in a mesh with an AABB centered at the origin and the same dimensions as the OBB. 
+ Parameters
+ ------------
+ kwargs
+ Passed through to `bounds.oriented_bounds`
+
 Returns
 ----------
 matrix : (4, 4) float
 Transformation matrix that was applied
 to mesh to move it into OBB frame
 """
- matrix = self.bounding_box_oriented.primitive.transform
- matrix = np.linalg.inv(matrix)
- self.apply_transform(matrix)
+ # `oriented_bounds` returns `(to_origin, extents)`
+ matrix, _extents = bounds.oriented_bounds(self, **kwargs)
+ if tol.strict:
+ # in strict mode make sure the rigid transform
+ # applied here did not change the volume
+ check = self.volume
+ self.apply_transform(matrix)
+ assert np.isclose(check, self.volume)
+ else:
+ # apply the oriented bounding box transform
+ self.apply_transform(matrix)
+
 return matrix
diff --git a/trimesh/permutate.py b/trimesh/permutate.py
index 24ae58aa5..01d19fd5f 100644
--- a/trimesh/permutate.py
+++ b/trimesh/permutate.py
@@ -28,8 +28,7 @@ def transform(mesh, translation_scale=1000.0):
 and rigidly transformed in space.
 """
 # rotate and translate randomly
- matrix = transformations.random_rotation_matrix(
- translate=translation_scale)
+ matrix = transformations.random_rotation_matrix(translate=translation_scale)

 # randomly re-order triangles
 triangles = np.random.permutation(mesh.triangles).reshape((-1, 3))
@@ -37,10 +36,9 @@ def transform(mesh, translation_scale=1000.0):
 triangles = transformations.transform_points(triangles, matrix)

 # extract the class from the input object
- mesh_type = util.type_named(mesh, 'Trimesh')
+ mesh_type = util.type_named(mesh, "Trimesh")
 # generate a new mesh from the permutated data
- permutated = mesh_type(
- **triangles_module.to_kwargs(triangles.reshape((-1, 3, 3))))
+ permutated = mesh_type(**triangles_module.to_kwargs(triangles.reshape((-1, 3, 3))))

 return permutated

@@ -66,13 +64,13 @@ def noise(mesh, magnitude=None):
 if magnitude is None:
 magnitude = mesh.scale / 100.0

- random = (np.random.random(mesh.vertices.shape) - .5) * magnitude
+ random = (np.random.random(mesh.vertices.shape) - 0.5) * magnitude
 vertices_noise = mesh.vertices.copy() + random

 # make sure we've re-ordered faces randomly
 triangles = np.random.permutation(vertices_noise[mesh.faces])

- mesh_type = util.type_named(mesh, 'Trimesh')
+ mesh_type = util.type_named(mesh, "Trimesh")
 permutated = mesh_type(**triangles_module.to_kwargs(triangles))

 return permutated

@@ -98,32 +96,32 @@ def tessellation(mesh):
 """
 # create random barycentric coordinates for each face
 # pad all coordinates by a small amount to bias new vertex towards center
- barycentric = np.random.random(mesh.faces.shape) + .05
+ barycentric = np.random.random(mesh.faces.shape) + 0.05
 barycentric /= barycentric.sum(axis=1).reshape((-1, 1))

 # create one new vertex somewhere in a face
- vertex_face = (barycentric.reshape((-1, 3, 1))
- * mesh.triangles).sum(axis=1)
+ vertex_face = (barycentric.reshape((-1, 3, 1)) * mesh.triangles).sum(axis=1)
 vertex_face_id = np.arange(len(vertex_face)) + len(mesh.vertices)

 # new vertices are the old vertices stacked on the vertices in the faces
 vertices = np.vstack((mesh.vertices, vertex_face))
 # there are three new faces per old face, and we maintain correct winding
- faces = np.vstack((np.column_stack((mesh.faces[:, [0, 1]], vertex_face_id)),
- np.column_stack(
- (mesh.faces[:, [1, 2]], vertex_face_id)),
- np.column_stack((mesh.faces[:, [2, 0]], vertex_face_id))))
+ faces = np.vstack(
+ (
+ np.column_stack((mesh.faces[:, [0, 1]], vertex_face_id)),
+ np.column_stack((mesh.faces[:, [1, 2]], vertex_face_id)),
+ np.column_stack((mesh.faces[:, [2, 0]], vertex_face_id)),
+ )
+ )
 # make sure the order of the faces is permutated
 faces = np.random.permutation(faces)
- 
mesh_type = util.type_named(mesh, 'Trimesh') - permutated = mesh_type(vertices=vertices, - faces=faces) + mesh_type = util.type_named(mesh, "Trimesh") + permutated = mesh_type(vertices=vertices, faces=faces) return permutated class Permutator: - def __init__(self, mesh): """ A convenience object to get permutated versions of a mesh. @@ -131,8 +129,7 @@ def __init__(self, mesh): self._mesh = mesh def transform(self, translation_scale=1000): - return transform( - self._mesh, translation_scale=translation_scale) + return transform(self._mesh, translation_scale=translation_scale) def noise(self, magnitude=None): return noise(self._mesh, magnitude) diff --git a/trimesh/poses.py b/trimesh/poses.py index ebe260a4a..41f1bb5f4 100644 --- a/trimesh/poses.py +++ b/trimesh/poses.py @@ -14,14 +14,11 @@ # create a dummy module which will raise the ImportError # or other exception only when someone tries to use networkx from .exceptions import ExceptionWrapper + nx = ExceptionWrapper(E) -def compute_stable_poses(mesh, - center_mass=None, - sigma=0.0, - n_samples=1, - threshold=0.0): +def compute_stable_poses(mesh, center_mass=None, sigma=0.0, n_samples=1, threshold=0.0): """ Computes stable orientations of a mesh and their quasi-static probabilities. @@ -78,13 +75,9 @@ def compute_stable_poses(mesh, sample_coms = [] while len(sample_coms) < n_samples: remaining = n_samples - len(sample_coms) - coms = np.random.multivariate_normal(center_mass, - sigma * np.eye(3), - remaining) + coms = np.random.multivariate_normal(center_mass, sigma * np.eye(3), remaining) for c in coms: - dots = np.einsum('ij,ij->i', - c - cvh.triangles_center, - cvh.face_normals) + dots = np.einsum("ij,ij->i", c - cvh.triangles_center, cvh.face_normals) if np.all(dots < 0): sample_coms.append(c) @@ -92,7 +85,6 @@ def compute_stable_poses(mesh, # For each sample, compute the stable poses for sample_com in sample_coms: - # Create toppling digraph dg = _create_topple_graph(cvh, sample_com) @@ -105,24 +97,24 @@ def compute_stable_poses(mesh, if dg.out_degree(node) == 0: continue successor = next(iter(dg.successors(node))) - dg.nodes[successor]['prob'] += dg.nodes[node]['prob'] - dg.nodes[node]['prob'] = 0.0 + dg.nodes[successor]["prob"] += dg.nodes[node]["prob"] + dg.nodes[node]["prob"] = 0.0 new_nodes.append(successor) nodes = new_nodes n_iters += 1 # Collect stable poses for node in dg.nodes(): - if dg.nodes[node]['prob'] > 0.0: + if dg.nodes[node]["prob"] > 0.0: normal = cvh.face_normals[node] - prob = dg.nodes[node]['prob'] + prob = dg.nodes[node]["prob"] key = tuple(np.around(normal, decimals=3)) if key in norms_to_probs: - norms_to_probs[key]['prob'] += 1.0 / n_samples * prob + norms_to_probs[key]["prob"] += 1.0 / n_samples * prob else: norms_to_probs[key] = { - 'prob': 1.0 / n_samples * prob, - 'normal': normal + "prob": 1.0 / n_samples * prob, + "normal": normal, } transforms = [] @@ -130,12 +122,12 @@ def compute_stable_poses(mesh, # Filter stable poses for key in norms_to_probs: - prob = norms_to_probs[key]['prob'] + prob = norms_to_probs[key]["prob"] if prob > threshold: tf = np.eye(4) # Compute a rotation matrix for this stable pose - z = -1.0 * norms_to_probs[key]['normal'] + z = -1.0 * norms_to_probs[key]["normal"] x = np.array([-z[1], z[0], 0]) if np.linalg.norm(x) == 0.0: x = np.array([1, 0, 0]) @@ -189,9 +181,11 @@ def _orient3dfast(plane, pd): bdz = pb[2] - pd[2] cdz = pc[2] - pd[2] - return (adx * (bdy * cdz - bdz * cdy) - + bdx * (cdy * adz - cdz * ady) - + cdx * (ady * bdz - adz * bdy)) + return ( + adx * (bdy * cdz - 
bdz * cdy)
+ + bdx * (cdy * adz - cdz * ady)
+ + cdx * (ady * bdz - adz * bdy)
+ )


 def _compute_static_prob(tri, com):
@@ -219,12 +213,32 @@ def _compute_static_prob(tri, com):

 # Prevents weirdness with arctan
 try:
- return 1.0 / np.pi * np.arctan(np.sqrt(np.tan(s / 2) * np.tan(
- (s - a) / 2) * np.tan((s - b) / 2) * np.tan((s - c) / 2)))
+ return (
+ 1.0
+ / np.pi
+ * np.arctan(
+ np.sqrt(
+ np.tan(s / 2)
+ * np.tan((s - a) / 2)
+ * np.tan((s - b) / 2)
+ * np.tan((s - c) / 2)
+ )
+ )
+ )
 except BaseException:
 s = s + 1e-8
- return 1.0 / np.pi * np.arctan(np.sqrt(np.tan(s / 2) * np.tan(
- (s - a) / 2) * np.tan((s - b) / 2) * np.tan((s - c) / 2)))
+ return (
+ 1.0
+ / np.pi
+ * np.arctan(
+ np.sqrt(
+ np.tan(s / 2)
+ * np.tan((s - a) / 2)
+ * np.tan((s - b) / 2)
+ * np.tan((s - c) / 2)
+ )
+ )
+ )


 def _create_topple_graph(cvh_mesh, com):
@@ -264,7 +278,7 @@ def _create_topple_graph(cvh_mesh, com):
 graph_edges = []
 for fp, e in zip(face_pairs, edges):
 verts = cvh_mesh.vertices[e]
- graph_edges.append([fp[0], fp[1], {'verts': verts}])
+ graph_edges.append([fp[0], fp[1], {"verts": verts}])

 adj_graph.add_edges_from(graph_edges)

@@ -274,9 +288,10 @@ def _create_topple_graph(cvh_mesh, com):
 topple_graph.add_node(i, prob=prob)

 # Compute COM projections onto planes of each triangle in cvh_mesh
- proj_dists = np.einsum('ij,ij->i', cvh_mesh.face_normals,
- com - cvh_mesh.triangles[:, 0])
- proj_coms = com - np.einsum('i,ij->ij', proj_dists, cvh_mesh.face_normals)
+ proj_dists = np.einsum(
+ "ij,ij->i", cvh_mesh.face_normals, com - cvh_mesh.triangles[:, 0]
+ )
+ proj_coms = com - np.einsum("i,ij->ij", proj_dists, cvh_mesh.face_normals)
 barys = points_to_barycentric(cvh_mesh.triangles, proj_coms)
 unstable_face_indices = np.where(np.any(barys < 0, axis=1))[0]

@@ -287,15 +302,17 @@ def _create_topple_graph(cvh_mesh, com):
 norm = cvh_mesh.face_normals[fi]

 for tfi in adj_graph[fi]:
- v1, v2 = adj_graph[fi][tfi]['verts']
+ v1, v2 = adj_graph[fi][tfi]["verts"]
 if np.dot(np.cross(v1 - centroid, v2 - centroid), norm) < 0:
 tmp = v2
 v2 = v1
 v1 = tmp
 plane1 = [centroid, v1, v1 + norm]
 plane2 = [centroid, v2 + norm, v2]
- if _orient3dfast(plane1, proj_com) >= 0 and _orient3dfast(
- plane2, proj_com) >= 0:
+ if (
+ _orient3dfast(plane1, proj_com) >= 0
+ and _orient3dfast(plane2, proj_com) >= 0
+ ):
 break

 topple_graph.add_edge(fi, tfi)
diff --git a/trimesh/primitives.py b/trimesh/primitives.py
index 70828b9d9..4ed968bfe 100644
--- a/trimesh/primitives.py
+++ b/trimesh/primitives.py
@@ -44,39 +44,39 @@ def __init__(self):
 self._cache.force_immutable = True

 def __repr__(self):
- return f'<trimesh.primitives.{type(self).__name__}>'
+ return f"<trimesh.primitives.{type(self).__name__}>"

 @property
 def faces(self):
- stored = self._cache['faces']
+ stored = self._cache["faces"]
 if util.is_shape(stored, (-1, 3)):
 return stored
 self._create_mesh()
- return self._cache['faces']
+ return self._cache["faces"]

 @faces.setter
 def faces(self, values):
- log.warning('primitive faces are immutable: not setting!')
+ log.warning("primitive faces are immutable: not setting!")

 @property
 def vertices(self):
- stored = self._cache['vertices']
+ stored = self._cache["vertices"]
 if util.is_shape(stored, (-1, 3)):
 return stored
 self._create_mesh()
- return self._cache['vertices']
+ return self._cache["vertices"]

 @vertices.setter
 def vertices(self, values):
 if values is not None:
- log.warning('primitive vertices are immutable: not setting!')
+ log.warning("primitive vertices are immutable: not setting!")

 @property
 def face_normals(self):
 # we need to avoid the logic in the superclass that
 # is specific to the data model
prioritizing faces - stored = self._cache['face_normals'] + stored = self._cache["face_normals"] if util.is_shape(stored, (-1, 3)): return stored # just calculate if not stored @@ -84,13 +84,13 @@ def face_normals(self): normals = np.zeros((len(valid), 3)) normals[valid] = unit # store and return - self._cache['face_normals'] = normals + self._cache["face_normals"] = normals return normals @face_normals.setter def face_normals(self, values): if values is not None: - log.warning('Primitive face normals are immutable!') + log.warning("Primitive face normals are immutable!") @property def transform(self): @@ -123,7 +123,7 @@ def copy(self, **kwargs): # get the constructor arguments kwargs.update(self.to_dict()) # remove the type indicator, i.e. `Cylinder` - kwargs.pop('kind') + kwargs.pop("kind") # create a new object with kwargs primitive_copy = type(self)(**kwargs) # copy metadata @@ -153,8 +153,9 @@ def to_mesh(self, **kwargs): vertices=self.vertices.copy(), faces=self.faces.copy(), face_normals=self.face_normals.copy(), - process=kwargs.pop('process', False), - **kwargs) + process=kwargs.pop("process", False), + **kwargs, + ) return result def apply_transform(self, matrix): @@ -170,10 +171,9 @@ def apply_transform(self, matrix): matrix: (4, 4) float Homogeneous transformation """ - matrix = np.asanyarray( - matrix, order='C', dtype=np.float64) + matrix = np.asanyarray(matrix, order="C", dtype=np.float64) if matrix.shape != (4, 4): - raise ValueError('matrix must be `(4, 4)`!') + raise ValueError("matrix must be `(4, 4)`!") if util.allclose(matrix, _IDENTITY, 1e-8): # identity matrix is a no-op return self @@ -189,19 +189,16 @@ def apply_transform(self, matrix): kinds = (Box, Cylinder, Capsule, Sphere) if isinstance(self, kinds) and abs(scale - 1.0) > 1e-8: # scale the primitive attributes - if hasattr(prim, 'height'): + if hasattr(prim, "height"): prim.height *= scale - if hasattr(prim, 'radius'): + if hasattr(prim, "radius"): prim.radius *= scale - if hasattr(prim, 'extents'): + if hasattr(prim, "extents"): prim.extents *= scale # scale the translation of the current matrix current[:3, 3] *= scale # apply new matrix, rescale, translate, current - updated = util.multi_dot([ - matrix, - tf.scale_matrix(1.0 / scale), - current]) + updated = util.multi_dot([matrix, tf.scale_matrix(1.0 / scale), current]) else: # without scaling just multiply updated = np.dot(matrix, current) @@ -216,7 +213,7 @@ def apply_transform(self, matrix): return self def _create_mesh(self): - raise ValueError('Primitive doesn\'t define mesh creation!') + raise ValueError("Primitive doesn't define mesh creation!") class PrimitiveAttributes: @@ -264,68 +261,59 @@ def __doc__(self): # operation can be surprisingly slow and most # people never call it import pprint + doc = ( - 'Store the attributes of a {name} object.\n\n' + - 'When these values are changed, the mesh geometry will \n' + - 'automatically be updated to reflect the new values.\n\n' + - 'Available properties and their default values are:\n {defaults}' + - '\n\nExample\n---------------\n' + - 'p = trimesh.primitives.{name}()\n' + - 'p.primitive.radius = 10\n' + - '\n').format( + "Store the attributes of a {name} object.\n\n" + + "When these values are changed, the mesh geometry will \n" + + "automatically be updated to reflect the new values.\n\n" + + "Available properties and their default values are:\n {defaults}" + + "\n\nExample\n---------------\n" + + "p = trimesh.primitives.{name}()\n" + + "p.primitive.radius = 10\n" + + "\n" + ).format( 
name=self._parent.__class__.__name__, - defaults=pprint.pformat( - self._defaults, - width=-1)[1:-1]) + defaults=pprint.pformat(self._defaults, width=-1)[1:-1], + ) return doc def __getattr__(self, key): - if key.startswith('_'): + if key.startswith("_"): return super().__getattr__(key) - elif key == 'center': + elif key == "center": # this whole __getattr__ is a little hacky - return self._data['transform'][:3, 3] + return self._data["transform"][:3, 3] elif key in self._defaults: - return util.convert_like(self._data[key], - self._defaults[key]) - raise AttributeError( - f"primitive object has no attribute '{key}' ") + return util.convert_like(self._data[key], self._defaults[key]) + raise AttributeError(f"primitive object has no attribute '{key}' ") def __setattr__(self, key, value): - if key.startswith('_'): + if key.startswith("_"): return super().__setattr__(key, value) - elif key == 'center': + elif key == "center": value = np.array(value, dtype=np.float64) transform = np.eye(4) transform[:3, 3] = value - self._data['transform'] = transform + self._data["transform"] = transform return elif key in self._defaults: if self._mutable: - self._data[key] = util.convert_like( - value, self._defaults[key]) + self._data[key] = util.convert_like(value, self._defaults[key]) else: raise ValueError( - 'Primitive is configured as immutable! Cannot set attribute!') + "Primitive is configured as immutable! Cannot set attribute!" + ) else: keys = list(self._defaults.keys()) - raise ValueError( - f'Only default attributes {keys} can be set!') + raise ValueError(f"Only default attributes {keys} can be set!") def __dir__(self): - result = sorted(dir(type(self)) + - list(self._defaults.keys())) + result = sorted(dir(type(self)) + list(self._defaults.keys())) return result class Cylinder(Primitive): - - def __init__(self, - radius=1.0, - height=1.0, - transform=None, - sections=32, - mutable=True): + def __init__(self, radius=1.0, height=1.0, transform=None, sections=32, mutable=True): """ Create a Cylinder Primitive, a subclass of Trimesh. 
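A short usage sketch of the attribute plumbing above, following the example embedded in the generated `__doc__`: setting a value through `primitive` is routed via `PrimitiveAttributes.__setattr__` into `_data`, which invalidates the cached mesh so it is rebuilt lazily with the new value.

    import trimesh

    p = trimesh.primitives.Cylinder(radius=1.0, height=10.0)
    before = p.volume
    # routed through PrimitiveAttributes.__setattr__
    p.primitive.radius = 2.0
    # pi * r**2 * h: doubling the radius quadruples the volume
    assert abs(p.volume / before - 4.0) < 1e-6
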
@@ -344,18 +332,18 @@ def __init__(self, """ super().__init__() - defaults = {'height': 10.0, - 'radius': 1.0, - 'transform': np.eye(4), - 'sections': 32} + defaults = {"height": 10.0, "radius": 1.0, "transform": np.eye(4), "sections": 32} self.primitive = PrimitiveAttributes( self, defaults=defaults, - kwargs={'height': height, - 'radius': radius, - 'transform': transform, - 'sections': sections}, - mutable=mutable) + kwargs={ + "height": height, + "radius": radius, + "transform": transform, + "sections": sections, + }, + mutable=mutable, + ) @caching.cache_decorator def volume(self): @@ -367,8 +355,7 @@ def volume(self): volume : float Volume of the cylinder """ - volume = ((np.pi * self.primitive.radius ** 2) * - self.primitive.height) + volume = (np.pi * self.primitive.radius**2) * self.primitive.height return volume @caching.cache_decorator @@ -386,7 +373,8 @@ def moment_inertia(self): mass=self.volume, radius=self.primitive.radius, height=self.primitive.height, - transform=self.primitive.transform) + transform=self.primitive.transform, + ) return tensor @caching.cache_decorator @@ -416,9 +404,8 @@ def segment(self): half = self.primitive.height / 2.0 # apply the transform to the Z- aligned segment points = np.dot( - self.primitive.transform, - np.transpose([[0, 0, -half, 1], - [0, 0, half, 1]])).T[:, :3] + self.primitive.transform, np.transpose([[0, 0, -half, 1], [0, 0, half, 1]]) + ).T[:, :3] return points def to_dict(self): @@ -432,10 +419,12 @@ def to_dict(self): as_dict : dict Serializable data for this primitive. """ - return {'kind': 'cylinder', - 'transform': self.primitive.transform.tolist(), - 'radius': float(self.primitive.radius), - 'height': float(self.primitive.height), } + return { + "kind": "cylinder", + "transform": self.primitive.transform.tolist(), + "radius": float(self.primitive.radius), + "height": float(self.primitive.height), + } def buffer(self, distance): """ @@ -457,29 +446,28 @@ def buffer(self, distance): buffered = Cylinder( height=self.primitive.height + distance * 2, radius=self.primitive.radius + distance, - transform=self.primitive.transform.copy()) + transform=self.primitive.transform.copy(), + ) return buffered def _create_mesh(self): - log.debug('creating mesh for Cylinder primitive') - mesh = creation.cylinder(radius=self.primitive.radius, - height=self.primitive.height, - sections=self.primitive.sections, - transform=self.primitive.transform) + log.debug("creating mesh for Cylinder primitive") + mesh = creation.cylinder( + radius=self.primitive.radius, + height=self.primitive.height, + sections=self.primitive.sections, + transform=self.primitive.transform, + ) - self._cache['vertices'] = mesh.vertices - self._cache['faces'] = mesh.faces - self._cache['face_normals'] = mesh.face_normals + self._cache["vertices"] = mesh.vertices + self._cache["faces"] = mesh.faces + self._cache["face_normals"] = mesh.face_normals class Capsule(Primitive): - - def __init__(self, - radius=1.0, - height=10.0, - transform=None, - sections=32, - mutable=True): + def __init__( + self, radius=1.0, height=10.0, transform=None, sections=32, mutable=True + ): """ Create a Capsule Primitive, a subclass of Trimesh. 
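Two of the `Cylinder` conveniences above in use: the cached `volume` is the closed form pi * r**2 * h, and `buffer` grows the radius by `distance` and the height by `2 * distance` (one `distance` per end cap). A minimal check:

    import numpy as np
    import trimesh

    c = trimesh.primitives.Cylinder(radius=1.0, height=10.0)
    assert np.isclose(c.volume, np.pi * 1.0**2 * 10.0)

    b = c.buffer(0.5)
    assert np.isclose(b.primitive.radius, 1.5)
    assert np.isclose(b.primitive.height, 11.0)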
@@ -498,18 +486,18 @@ def __init__(self, """ super().__init__() - defaults = {'height': 1.0, - 'radius': 1.0, - 'transform': np.eye(4), - 'sections': 32} + defaults = {"height": 1.0, "radius": 1.0, "transform": np.eye(4), "sections": 32} self.primitive = PrimitiveAttributes( self, defaults=defaults, - kwargs={'height': height, - 'radius': radius, - 'transform': transform, - 'sections': sections}, - mutable=mutable) + kwargs={ + "height": height, + "radius": radius, + "transform": transform, + "sections": sections, + }, + mutable=mutable, + ) @property def transform(self): @@ -526,10 +514,12 @@ def to_dict(self): as_dict : dict Serializable data for this primitive. """ - return {'kind': 'capsule', - 'transform': self.primitive.transform.tolist(), - 'height': float(self.primitive.height), - 'radius': float(self.primitive.radius)} + return { + "kind": "capsule", + "transform": self.primitive.transform.tolist(), + "height": float(self.primitive.height), + "radius": float(self.primitive.radius), + } @caching.cache_decorator def direction(self): @@ -541,30 +531,26 @@ def direction(self): axis : (3,) float Vector along the cylinder axis """ - axis = np.dot(self.primitive.transform, - [0, 0, 1, 0])[:3] + axis = np.dot(self.primitive.transform, [0, 0, 1, 0])[:3] return axis def _create_mesh(self): - log.debug('creating mesh for `Capsule` primitive') + log.debug("creating mesh for `Capsule` primitive") - mesh = creation.capsule(radius=self.primitive.radius, - height=self.primitive.height) + mesh = creation.capsule( + radius=self.primitive.radius, height=self.primitive.height + ) mesh.apply_transform(self.primitive.transform) - self._cache['vertices'] = mesh.vertices - self._cache['faces'] = mesh.faces - self._cache['face_normals'] = mesh.face_normals + self._cache["vertices"] = mesh.vertices + self._cache["faces"] = mesh.faces + self._cache["face_normals"] = mesh.face_normals class Sphere(Primitive): - - def __init__(self, - radius=1.0, - center=None, - transform=None, - subdivisions=3, - mutable=True): + def __init__( + self, radius=1.0, center=None, transform=None, subdivisions=3, mutable=True + ): """ Create a Sphere Primitive, a subclass of Trimesh. @@ -584,26 +570,23 @@ def __init__(self, super().__init__() - defaults = {'radius': 1.0, - 'transform': np.eye(4), - 'subdivisions': 3} - constructor = {'radius': float(radius), - 'subdivisions': int(subdivisions)} + defaults = {"radius": 1.0, "transform": np.eye(4), "subdivisions": 3} + constructor = {"radius": float(radius), "subdivisions": int(subdivisions)} # center is a helper method for "transform" # since a sphere is rotationally symmetric if center is not None: if transform is not None: - raise ValueError( - 'only one of `center` and `transform` may be passed!') + raise ValueError("only one of `center` and `transform` may be passed!") translate = np.eye(4) translate[:3, 3] = center - constructor['transform'] = translate + constructor["transform"] = translate elif transform is not None: - constructor['transform'] = transform + constructor["transform"] = transform # create the attributes object self.primitive = PrimitiveAttributes( - self, defaults=defaults, kwargs=constructor, mutable=mutable) + self, defaults=defaults, kwargs=constructor, mutable=mutable + ) @property def center(self): @@ -624,17 +607,23 @@ def to_dict(self): as_dict : dict Serializable data for this primitive. 
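As the `Sphere` constructor above enforces, `center` is just a convenience for a pure translation and is mutually exclusive with `transform`:

    import numpy as np
    import trimesh

    s = trimesh.primitives.Sphere(radius=2.0, center=[1, 2, 3])
    assert np.allclose(s.primitive.transform[:3, 3], [1, 2, 3])

    try:
        trimesh.primitives.Sphere(center=[1, 2, 3], transform=np.eye(4))
    except ValueError:
        pass  # only one of `center` and `transform` may be passed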
""" - return {'kind': 'sphere', - 'transform': self.primitive.transform.tolist(), - 'radius': float(self.primitive.radius)} + return { + "kind": "sphere", + "transform": self.primitive.transform.tolist(), + "radius": float(self.primitive.radius), + } @property def bounds(self): # no docstring so will inherit Trimesh docstring # return exact bounds from primitive center and radius (rather than faces) # self.extents will also use this information - bounds = np.array([self.primitive.center - self.primitive.radius, - self.primitive.center + self.primitive.radius]) + bounds = np.array( + [ + self.primitive.center - self.primitive.radius, + self.primitive.center + self.primitive.radius, + ] + ) return bounds @property @@ -655,7 +644,7 @@ def area(self): area: float, surface area of the sphere Primitive """ - area = 4.0 * np.pi * (self.primitive.radius ** 2) + area = 4.0 * np.pi * (self.primitive.radius**2) return area @caching.cache_decorator @@ -668,7 +657,7 @@ def volume(self): volume: float, volume of the sphere Primitive """ - volume = (4.0 * np.pi * (self.primitive.radius ** 3)) / 3.0 + volume = (4.0 * np.pi * (self.primitive.radius**3)) / 3.0 return volume @caching.cache_decorator @@ -681,28 +670,22 @@ def moment_inertia(self): tensor: (3, 3) float 3D inertia tensor. """ - return inertia.sphere_inertia( - mass=self.volume, - radius=self.primitive.radius) + return inertia.sphere_inertia(mass=self.volume, radius=self.primitive.radius) def _create_mesh(self): - log.debug('creating mesh for Sphere primitive') + log.debug("creating mesh for Sphere primitive") unit = creation.icosphere( - subdivisions=self.primitive.subdivisions, - radius=self.primitive.radius) + subdivisions=self.primitive.subdivisions, radius=self.primitive.radius + ) # apply the center offset here - self._cache['vertices'] = unit.vertices + self.primitive.center - self._cache['faces'] = unit.faces - self._cache['face_normals'] = unit.face_normals + self._cache["vertices"] = unit.vertices + self.primitive.center + self._cache["faces"] = unit.faces + self._cache["face_normals"] = unit.face_normals class Box(Primitive): - def __init__(self, - extents=None, - transform=None, - bounds=None, - mutable=True): + def __init__(self, extents=None, transform=None, bounds=None, mutable=True): """ Create a Box Primitive as a subclass of Trimesh @@ -719,17 +702,17 @@ def __init__(self, Are extents and transform mutable after creation. """ super().__init__() - defaults = {'transform': np.eye(4), - 'extents': np.ones(3)} + defaults = {"transform": np.eye(4), "extents": np.ones(3)} if bounds is not None: # validate the multiple forms of input available here if extents is not None or transform is not None: raise ValueError( - 'if `bounds` is passed `extents` and `transform` must not be!') + "if `bounds` is passed `extents` and `transform` must not be!" + ) bounds = np.array(bounds, dtype=np.float64) if bounds.shape != (2, 3): - raise ValueError('`bounds` must be (2, 3) float') + raise ValueError("`bounds` must be (2, 3) float") # create extents from AABB extents = bounds.ptp(axis=0) # translate to the center of the box @@ -739,9 +722,9 @@ def __init__(self, self.primitive = PrimitiveAttributes( self, defaults=defaults, - kwargs={'extents': extents, - 'transform': transform}, - mutable=mutable) + kwargs={"extents": extents, "transform": transform}, + mutable=mutable, + ) def to_dict(self): """ @@ -754,9 +737,11 @@ def to_dict(self): as_dict : dict Serializable data for this primitive. 
""" - return {'kind': 'box', - 'transform': self.primitive.transform.tolist(), - 'extents': self.primitive.extents.tolist()} + return { + "kind": "box", + "transform": self.primitive.transform.tolist(), + "extents": self.primitive.extents.tolist(), + } @property def transform(self): @@ -779,7 +764,8 @@ def sample_volume(self, count): samples = sample.volume_rectangular( extents=self.primitive.extents, count=count, - transform=self.primitive.transform) + transform=self.primitive.transform, + ) return samples def sample_grid(self, count=None, step=None): @@ -801,23 +787,20 @@ def sample_grid(self, count=None, step=None): Points inside the box """ - if (count is not None and - step is not None): - raise ValueError('only step OR count can be specified!') + if count is not None and step is not None: + raise ValueError("only step OR count can be specified!") # create pre- transform bounds from extents - bounds = np.array([-self.primitive.extents, - self.primitive.extents]) * .5 + bounds = np.array([-self.primitive.extents, self.primitive.extents]) * 0.5 if step is not None: grid = util.grid_arange(bounds, step=step) elif count is not None: grid = util.grid_linspace(bounds, count=count) else: - raise ValueError('either count or step must be specified!') + raise ValueError("either count or step must be specified!") - transformed = tf.transform_points( - grid, matrix=self.primitive.transform) + transformed = tf.transform_points(grid, matrix=self.primitive.transform) return transformed @property @@ -826,8 +809,7 @@ def is_oriented(self): Returns whether or not the current box is rotated at all. """ if util.is_shape(self.primitive.transform, (4, 4)): - return not np.allclose(self.primitive.transform[ - 0:3, 0:3], np.eye(3)) + return not np.allclose(self.primitive.transform[0:3, 0:3], np.eye(3)) else: return False @@ -845,14 +827,15 @@ def volume(self): return volume def _create_mesh(self): - log.debug('creating mesh for Box primitive') - box = creation.box(extents=self.primitive.extents, - transform=self.primitive.transform) + log.debug("creating mesh for Box primitive") + box = creation.box( + extents=self.primitive.extents, transform=self.primitive.transform + ) self._cache.cache.update(box._cache.cache) - self._cache['vertices'] = box.vertices - self._cache['faces'] = box.faces - self._cache['face_normals'] = box.face_normals + self._cache["vertices"] = box.vertices + self._cache["faces"] = box.faces + self._cache["face_normals"] = box.face_normals def as_outline(self): """ @@ -865,18 +848,15 @@ def as_outline(self): """ # do the import in function to keep soft dependency from .path.creation import box_outline + # return outline with same size as primitive return box_outline( - extents=self.primitive.extents, - transform=self.primitive.transform) + extents=self.primitive.extents, transform=self.primitive.transform + ) class Extrusion(Primitive): - def __init__(self, - polygon=None, - transform=None, - height=1.0, - mutable=True): + def __init__(self, polygon=None, transform=None, height=1.0, mutable=True): """ Create an Extrusion primitive, which is a subclass of Trimesh. 
@@ -898,17 +878,18 @@ def __init__(self, # run the Trimesh init super().__init__() # set default values - defaults = {'polygon': Point([0, 0]).buffer(1.0), - 'transform': np.eye(4), - 'height': 1.0} + defaults = { + "polygon": Point([0, 0]).buffer(1.0), + "transform": np.eye(4), + "height": 1.0, + } self.primitive = PrimitiveAttributes( self, defaults=defaults, - kwargs={'transform': transform, - 'polygon': polygon, - 'height': height}, - mutable=mutable) + kwargs={"transform": transform, "polygon": polygon, "height": height}, + mutable=mutable, + ) @caching.cache_decorator def area(self): @@ -923,8 +904,7 @@ def area(self): Surface area of 3D extrusion """ # area of the sides of the extrusion - area = abs(self.primitive.height * - self.primitive.polygon.length) + area = abs(self.primitive.height * self.primitive.polygon.length) # area of the two caps of the extrusion area += self.primitive.polygon.area * 2 return area @@ -941,8 +921,7 @@ def volume(self): Volume of 3D extrusion """ # height may be negative - volume = abs(self.primitive.polygon.area * - self.primitive.height) + volume = abs(self.primitive.polygon.area * self.primitive.height) return volume @caching.cache_decorator @@ -958,8 +937,8 @@ def direction(self): """ # only consider rotation and signed height direction = np.dot( - self.primitive.transform[:3, :3], - [0.0, 0.0, np.sign(self.primitive.height)]) + self.primitive.transform[:3, :3], [0.0, 0.0, np.sign(self.primitive.height)] + ) return direction @property @@ -984,9 +963,9 @@ def bounding_box_oriented(self): # no docstring for inheritance # calculate OBB using 2D polygon and known axis from . import bounds + # find the 2D bounding box using the polygon - to_origin, box = bounds.oriented_bounds_2D( - self.primitive.polygon.exterior.coords) + to_origin, box = bounds.oriented_bounds_2D(self.primitive.polygon.exterior.coords) # 3D extents extents = np.append(box, abs(self.primitive.height)) # calculate to_3D transform from 2D obb @@ -994,9 +973,7 @@ def bounding_box_oriented(self): rotation_Z[2, 3] = self.primitive.height / 2.0 # combine the 2D OBB transformation with the 2D projection transform to_3D = np.dot(self.primitive.transform, rotation_Z) - obb = Box(transform=to_3D, - extents=extents, - mutable=False) + obb = Box(transform=to_3D, extents=extents, mutable=False) return obb def slide(self, distance): @@ -1012,8 +989,7 @@ def slide(self, distance): distance = float(distance) translation = np.eye(4) translation[2, 3] = distance - new_transform = np.dot(self.primitive.transform.copy(), - translation.copy()) + new_transform = np.dot(self.primitive.transform.copy(), translation.copy()) self.primitive.transform = new_transform def buffer(self, distance, distance_height=None, **kwargs): @@ -1051,7 +1027,8 @@ def buffer(self, distance, distance_height=None, **kwargs): transform=self.primitive.transform.copy(), polygon=self.primitive.polygon.buffer(distance), height=height, - **kwargs) + **kwargs, + ) # slide the stock along the axis buffered.slide(-np.sign(height) * distance_height) @@ -1069,23 +1046,26 @@ def to_dict(self): as_dict : dict Serializable data for this primitive. 
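The cached `area` and `volume` above are closed form: the side area is `|height| * polygon.length` (the perimeter) plus both caps, and the volume is `|polygon.area * height|`. For a unit square extruded by 3:

    import numpy as np
    import trimesh
    from shapely.geometry import Polygon

    square = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    e = trimesh.primitives.Extrusion(polygon=square, height=3.0)
    assert np.isclose(e.volume, 3.0)                    # 1.0 * 3.0
    assert np.isclose(e.area, 3.0 * 4.0 + 2.0 * 1.0)    # sides + two caps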
""" - return {'kind': 'extrusion', - 'polygon': self.primitive.polygon.wkt, - 'transform': self.primitive.transform.tolist(), - 'height': float(self.primitive.height)} + return { + "kind": "extrusion", + "polygon": self.primitive.polygon.wkt, + "transform": self.primitive.transform.tolist(), + "height": float(self.primitive.height), + } def _create_mesh(self): - log.debug('creating mesh for Extrusion primitive') + log.debug("creating mesh for Extrusion primitive") # extrude the polygon along Z mesh = creation.extrude_polygon( polygon=self.primitive.polygon, height=self.primitive.height, - transform=self.primitive.transform) + transform=self.primitive.transform, + ) # check volume here in unit tests if tol.strict and mesh.volume < 0.0: - raise ValueError('matrix inverted mesh!') + raise ValueError("matrix inverted mesh!") # cache mesh geometry in the primitive - self._cache['vertices'] = mesh.vertices - self._cache['faces'] = mesh.faces + self._cache["vertices"] = mesh.vertices + self._cache["faces"] = mesh.faces diff --git a/trimesh/proximity.py b/trimesh/proximity.py index e3f19a3e4..044e3e586 100644 --- a/trimesh/proximity.py +++ b/trimesh/proximity.py @@ -16,6 +16,7 @@ from scipy.spatial import cKDTree except BaseException as E: from .exceptions import ExceptionWrapper + cKDTree = ExceptionWrapper(E) @@ -44,7 +45,7 @@ def nearby_faces(mesh, points): """ points = np.asanyarray(points, dtype=np.float64) if not util.is_shape(points, (-1, 3)): - raise ValueError('points must be (n,3)!') + raise ValueError("points must be (n,3)!") # an r-tree containing the axis aligned bounding box for every triangle rtree = mesh.triangles_tree @@ -56,8 +57,7 @@ def nearby_faces(mesh, points): distance_vertex += tol.merge # axis aligned bounds - bounds = np.column_stack((points - distance_vertex, - points + distance_vertex)) + bounds = np.column_stack((points - distance_vertex, points + distance_vertex)) # faces that intersect axis aligned bounding box candidates = [list(rtree.intersection(b)) for b in bounds] @@ -94,24 +94,24 @@ def closest_point_naive(mesh, points): # establish that input points are sane points = np.asanyarray(points, dtype=np.float64) if not util.is_shape(triangles, (-1, 3, 3)): - raise ValueError('triangles shape incorrect') + raise ValueError("triangles shape incorrect") if not util.is_shape(points, (-1, 3)): - raise ValueError('points must be (n,3)') + raise ValueError("points must be (n,3)") # create a giant tiled array of each point tiled len(triangles) times points_tiled = np.tile(points, (1, len(triangles))) - on_triangle = np.array([_corresponding( - triangles, i.reshape((-1, 3))) for i in points_tiled]) + on_triangle = np.array( + [_corresponding(triangles, i.reshape((-1, 3))) for i in points_tiled] + ) # distance squared - distance_2 = [((i - q)**2).sum(axis=1) - for i, q in zip(on_triangle, points)] + distance_2 = [((i - q) ** 2).sum(axis=1) for i, q in zip(on_triangle, points)] triangle_id = np.array([i.argmin() for i in distance_2]) # closest cartesian point closest = np.array([g[i] for i, g in zip(triangle_id, on_triangle)]) - distance = np.array([g[i] for i, g in zip(triangle_id, distance_2)]) ** .5 + distance = np.array([g[i] for i, g in zip(triangle_id, distance_2)]) ** 0.5 return closest, distance, triangle_id @@ -139,7 +139,7 @@ def closest_point(mesh, points): """ points = np.asanyarray(points, dtype=np.float64) if not util.is_shape(points, (-1, 3)): - raise ValueError('points must be (n,3)!') + raise ValueError("points must be (n,3)!") # do a tree- based query for 
faces near each point candidates = nearby_faces(mesh, points) @@ -168,8 +168,7 @@ def closest_point(mesh, points): # get best two candidate indices by arg-sorting the per-query_distances qds = np.array_split(query_distance, query_group) - idxs = np.int32([qd.argsort()[:2] if len(qd) > 1 else [0, 0] - for qd in qds]) + idxs = np.int32([qd.argsort()[:2] if len(qd) > 1 else [0, 0] for qd in qds]) idxs[1:] += query_group.reshape(-1, 1) # points, distances and triangle ids for best two candidates @@ -193,8 +192,7 @@ def closest_point(mesh, points): # get two face normals for the candidate points normals = mesh.face_normals[two_candidates[c_mask]] # compute normalized surface-point to query-point vectors - vectors = (query_vector[idxs[c_mask]] / - two_dists[c_mask].reshape(-1, 2, 1) ** 0.5) + vectors = query_vector[idxs[c_mask]] / two_dists[c_mask].reshape(-1, 2, 1) ** 0.5 # compare enclosed angle for both face normals dots = (normals * vectors).sum(axis=2) @@ -210,7 +208,7 @@ def closest_point(mesh, points): # we were comparing the distance squared so # now take the square root in one vectorized operation - result_distance **= .5 + result_distance **= 0.5 return result_close, result_distance, result_tid @@ -251,26 +249,29 @@ def signed_distance(mesh, points): # triangle normal Project each point in to the closest triangle plane nonzero = np.where(nonzero)[0] normals = mesh.face_normals[triangle_id] - projection = (points[nonzero] - - (normals[nonzero].T * np.einsum( - "ij,ij->i", - points[nonzero] - closest[nonzero], - normals[nonzero])).T) + projection = ( + points[nonzero] + - ( + normals[nonzero].T + * np.einsum("ij,ij->i", points[nonzero] - closest[nonzero], normals[nonzero]) + ).T + ) # Determine if the projection lies within the closest triangle - barycentric = points_to_barycentric( - mesh.triangles[triangle_id[nonzero]], - projection) - ontriangle = ~(( - (barycentric < -tol.merge) | (barycentric > 1 + tol.merge) - ).any(axis=1)) + barycentric = points_to_barycentric(mesh.triangles[triangle_id[nonzero]], projection) + ontriangle = ~( + ((barycentric < -tol.merge) | (barycentric > 1 + tol.merge)).any(axis=1) + ) # Where projection does lie in the triangle, compare vector to projection to the # triangle normal to compute sign - sign = np.sign(np.einsum( - "ij,ij->i", - normals[nonzero[ontriangle]], - points[nonzero[ontriangle]] - projection[ontriangle])) + sign = np.sign( + np.einsum( + "ij,ij->i", + normals[nonzero[ontriangle]], + points[nonzero[ontriangle]] - projection[ontriangle], + ) + ) distance[nonzero[ontriangle]] *= -1.0 * sign # For all other triangles, resort to raycasting against the entire mesh @@ -328,8 +329,7 @@ def on_surface(self, points): triangle_id : (m,) int Index of closest triangle for each point. 
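`closest_point` above narrows the candidates to the best two per query and breaks ties using the face normals, which avoids misassignment when a point is nearly equidistant to adjacent faces. Typical use, either directly or through the proximity query wrapper:

    import trimesh

    mesh = trimesh.creation.box()
    closest, distance, triangle_id = trimesh.proximity.closest_point(
        mesh, [[0.0, 0.0, 1.0]]
    )
    # the same query through the cached accessor
    closest, distance, triangle_id = mesh.nearest.on_surface([[0.0, 0.0, 1.0]])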
""" - return closest_point(mesh=self._mesh, - points=points) + return closest_point(mesh=self._mesh, points=points) def vertex(self, points): """ @@ -390,21 +390,20 @@ def longest_ray(mesh, points, directions): """ points = np.asanyarray(points, dtype=np.float64) if not util.is_shape(points, (-1, 3)): - raise ValueError('points must be (n,3)!') + raise ValueError("points must be (n,3)!") directions = np.asanyarray(directions, dtype=np.float64) if not util.is_shape(directions, (-1, 3)): - raise ValueError('directions must be (n,3)!') + raise ValueError("directions must be (n,3)!") if len(points) != len(directions): - raise ValueError('number of points must equal number of directions!') + raise ValueError("number of points must equal number of directions!") - faces, rays, locations = mesh.ray.intersects_id(points, directions, - return_locations=True, - multiple_hits=True) + faces, rays, locations = mesh.ray.intersects_id( + points, directions, return_locations=True, multiple_hits=True + ) if len(rays) > 0: - distances = np.linalg.norm(locations - points[rays], - axis=1) + distances = np.linalg.norm(locations - points[rays], axis=1) else: distances = np.array([]) @@ -415,18 +414,13 @@ def longest_ray(mesh, points, directions): # Add infinite length for those with no valid intersection no_intersections = np.setdiff1d(np.arange(len(points)), rays) rays = np.concatenate((rays, no_intersections)) - distances = np.concatenate((distances, - np.repeat(np.inf, - len(no_intersections)))) + distances = np.concatenate((distances, np.repeat(np.inf, len(no_intersections)))) return group_min(rays, distances) -def max_tangent_sphere(mesh, - points, - inwards=True, - normals=None, - threshold=1e-6, - max_iter=100): +def max_tangent_sphere( + mesh, points, inwards=True, normals=None, threshold=1e-6, max_iter=100 +): """ Find the center and radius of the sphere which is tangent to the mesh at the given point and at least one more point with no @@ -457,15 +451,15 @@ def max_tangent_sphere(mesh, """ points = np.asanyarray(points, dtype=np.float64) if not util.is_shape(points, (-1, 3)): - raise ValueError('points must be (n,3)!') + raise ValueError("points must be (n,3)!") if normals is not None: normals = np.asanyarray(normals, dtype=np.float64) if not util.is_shape(normals, (-1, 3)): - raise ValueError('normals must be (n,3)!') + raise ValueError("normals must be (n,3)!") if len(points) != len(normals): - raise ValueError('number of points must equal number of normals!') + raise ValueError("number of points must equal number of normals!") else: normals = mesh.face_normals[closest_point(mesh, points)[2]] @@ -493,8 +487,9 @@ def max_tangent_sphere(mesh, not_converged[i] = False else: vertex = mesh.vertices[projections.argmax()] - radii[i] = (np.dot(vertex - points[i], vertex - points[i]) / - (2 * np.dot(vertex - points[i], normals[i]))) + radii[i] = np.dot(vertex - points[i], vertex - points[i]) / ( + 2 * np.dot(vertex - points[i], normals[i]) + ) # Compute centers centers = points + normals * np.nan_to_num(radii.reshape(-1, 1)) @@ -507,17 +502,17 @@ def max_tangent_sphere(mesh, n_iter = 0 while not_converged.sum() > 0 and n_iter < max_iter: n_iter += 1 - n_points, n_dists, n_faces = mesh.nearest.on_surface( - centers[not_converged]) + n_points, n_dists, n_faces = mesh.nearest.on_surface(centers[not_converged]) # If the distance to the nearest point is the same as the distance # to the start point then we are done. 
- done = np.abs( - n_dists - - np.linalg.norm( - centers[not_converged] - - points[not_converged], - axis=1)) < tol.planar + done = ( + np.abs( + n_dists + - np.linalg.norm(centers[not_converged] - points[not_converged], axis=1) + ) + < tol.planar + ) not_converged[np.where(not_converged)[0][done]] = False # Otherwise find the radius and center of the sphere tangent to the mesh @@ -525,14 +520,12 @@ def max_tangent_sphere(mesh, diff = n_points[~done] - points[not_converged] old_radii = radii[not_converged].copy() # np.einsum produces element wise dot product - radii[not_converged] = (np.einsum('ij, ij->i', - diff, - diff) / - (2 * np.einsum('ij, ij->i', - diff, - normals[not_converged]))) - centers[not_converged] = points[not_converged] + \ - normals[not_converged] * radii[not_converged].reshape(-1, 1) + radii[not_converged] = np.einsum("ij, ij->i", diff, diff) / ( + 2 * np.einsum("ij, ij->i", diff, normals[not_converged]) + ) + centers[not_converged] = points[not_converged] + normals[not_converged] * radii[ + not_converged + ].reshape(-1, 1) # If change in radius is less than threshold we have converged cvged = old_radii - radii[not_converged] < convergence_threshold @@ -541,11 +534,7 @@ def max_tangent_sphere(mesh, return centers, radii -def thickness(mesh, - points, - exterior=False, - normals=None, - method='max_sphere'): +def thickness(mesh, points, exterior=False, normals=None, method="max_sphere"): """ Find the thickness of the mesh at the given points. @@ -569,27 +558,26 @@ def thickness(mesh, """ points = np.asanyarray(points, dtype=np.float64) if not util.is_shape(points, (-1, 3)): - raise ValueError('points must be (n,3)!') + raise ValueError("points must be (n,3)!") if normals is not None: normals = np.asanyarray(normals, dtype=np.float64) if not util.is_shape(normals, (-1, 3)): - raise ValueError('normals must be (n,3)!') + raise ValueError("normals must be (n,3)!") if len(points) != len(normals): - raise ValueError('number of points must equal number of normals!') + raise ValueError("number of points must equal number of normals!") else: normals = mesh.face_normals[closest_point(mesh, points)[2]] - if method == 'max_sphere': - centers, radius = max_tangent_sphere(mesh=mesh, - points=points, - inwards=not exterior, - normals=normals) + if method == "max_sphere": + centers, radius = max_tangent_sphere( + mesh=mesh, points=points, inwards=not exterior, normals=normals + ) thickness = radius * 2 return thickness - elif method == 'ray': + elif method == "ray": if exterior: return longest_ray(mesh, points, normals) else: diff --git a/trimesh/ray/__init__.py b/trimesh/ray/__init__.py index a71eba216..373b4ef0e 100644 --- a/trimesh/ray/__init__.py +++ b/trimesh/ray/__init__.py @@ -3,11 +3,13 @@ # optionally load an interface to the embree raytracer try: from . import ray_pyembree + has_embree = True except BaseException as E: from .. 
import exceptions + ray_pyembree = exceptions.ExceptionWrapper(E) has_embree = False # add to __all__ as per pep8 -__all__ = ['ray_triangle', 'ray_pyembree'] +__all__ = ["ray_triangle", "ray_pyembree"] diff --git a/trimesh/ray/ray_pyembree.py b/trimesh/ray/ray_pyembree.py index a8975fa1b..0a65a0552 100644 --- a/trimesh/ray/ray_pyembree.py +++ b/trimesh/ray/ray_pyembree.py @@ -21,6 +21,7 @@ # try the preferred wrapper which installs from wheels from embreex import rtcore_scene from embreex.mesh_construction import TriangleMesh + # pass embree floats as 32 bit _embree_dtype = np.float32 except BaseException as E: @@ -28,8 +29,9 @@ # this will be deprecated at some point hopefully soon from pyembree import __version__, rtcore_scene from pyembree.mesh_construction import TriangleMesh + # see if we're using a newer version of the pyembree wrapper - _embree_new = tuple([int(i) for i in __version__.split('.')]) >= (0, 1, 4) + _embree_new = tuple([int(i) for i in __version__.split(".")]) >= (0, 1, 4) # both old and new versions require exact but different type _embree_dtype = [np.float64, np.float32][int(_embree_new)] except BaseException: @@ -38,10 +40,7 @@ class RayMeshIntersector: - - def __init__(self, - geometry, - scale_to_box=True): + def __init__(self, geometry, scale_to_box=True): """ Do ray- mesh queries. @@ -56,8 +55,7 @@ def __init__(self, """ self.mesh = geometry self._scale_to_box = scale_to_box - self._cache = caching.Cache( - id_function=self.mesh.__hash__) + self._cache = caching.Cache(id_function=self.mesh.__hash__) @property def _scale(self): @@ -77,14 +75,11 @@ def _scene(self): """ A cached version of the embreex scene. """ - return _EmbreeWrap(vertices=self.mesh.vertices, - faces=self.mesh.faces, - scale=self._scale) - - def intersects_location(self, - ray_origins, - ray_directions, - multiple_hits=True): + return _EmbreeWrap( + vertices=self.mesh.vertices, faces=self.mesh.faces, scale=self._scale + ) + + def intersects_location(self, ray_origins, ray_directions, multiple_hits=True): """ Return the location of where a ray hits a surface. @@ -104,23 +99,24 @@ def intersects_location(self, index_tri : (m,) int Indexes of mesh.faces """ - (index_tri, - index_ray, - locations) = self.intersects_id( - ray_origins=ray_origins, - ray_directions=ray_directions, - multiple_hits=multiple_hits, - return_locations=True) + (index_tri, index_ray, locations) = self.intersects_id( + ray_origins=ray_origins, + ray_directions=ray_directions, + multiple_hits=multiple_hits, + return_locations=True, + ) return locations, index_ray, index_tri @log_time - def intersects_id(self, - ray_origins, - ray_directions, - multiple_hits=True, - max_hits=20, - return_locations=False): + def intersects_id( + self, + ray_origins, + ray_directions, + multiple_hits=True, + max_hits=20, + return_locations=False, + ): """ Find the triangles hit by a list of rays, including optionally multiple hits along a single ray. 
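The optional-import pattern above means the embree-accelerated intersector is used whenever `embreex` (or legacy `pyembree`) imports cleanly, falling back to the pure-numpy triangle implementation otherwise; `ExceptionWrapper` defers the original import error until the module is actually touched. A sketch of selecting a backend by hand, noting that `mesh.ray` normally does this automatically and that the `ray_triangle` class is assumed to share the `RayMeshIntersector` name:

    import trimesh

    mesh = trimesh.creation.icosphere()
    if trimesh.ray.has_embree:
        from trimesh.ray.ray_pyembree import RayMeshIntersector
    else:
        from trimesh.ray.ray_triangle import RayMeshIntersector
    intersector = RayMeshIntersector(mesh)
    hit = intersector.intersects_any(
        ray_origins=[[0, 0, -2.0]], ray_directions=[[0, 0, 1.0]]
    )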
@@ -150,13 +146,10 @@ def intersects_id(self, Intersection points, only returned if return_locations """ # make sure input is _dtype for embree - ray_origins = np.array( - deepcopy(ray_origins), - dtype=np.float64) - ray_directions = np.asanyarray(ray_directions, - dtype=np.float64) + ray_origins = np.array(deepcopy(ray_origins), dtype=np.float64) + ray_directions = np.asanyarray(ray_directions, dtype=np.float64) if ray_origins.shape != ray_directions.shape: - raise ValueError('Ray origin and direction don\'t match!') + raise ValueError("Ray origin and direction don't match!") ray_directions = util.unitize(ray_directions) # since we are constructing all hits, save them to a deque then @@ -170,9 +163,9 @@ def intersects_id(self, if multiple_hits or return_locations: # how much to offset ray to transport to the other side of face - distance = np.clip(_ray_offset_factor * self._scale, - _ray_offset_floor, - np.inf) + distance = np.clip( + _ray_offset_factor * self._scale, _ray_offset_floor, np.inf + ) ray_offsets = ray_directions * distance # grab the planes from triangles @@ -187,9 +180,7 @@ def intersects_id(self, # if you set output=1 it will calculate distance along # ray, which is bizzarely slower than our calculation - query = self._scene.run( - ray_origins[current], - ray_directions[current]) + query = self._scene.run(ray_origins[current], ray_directions[current]) # basically we need to reduce the rays to the ones that hit # something hit = query != -1 @@ -207,9 +198,7 @@ def intersects_id(self, result_ray_idx.append(current_index_hit) # if we don't need all of the hits, return the first one - if ((not multiple_hits and - not return_locations) or - not hit.any()): + if (not multiple_hits and not return_locations) or not hit.any(): break # find the location of where the ray hit the triangle plane @@ -217,7 +206,8 @@ def intersects_id(self, plane_origins=plane_origins[hit_triangle], plane_normals=plane_normals[hit_triangle], line_origins=ray_origins[current], - line_directions=ray_directions[current]) + line_directions=ray_directions[current], + ) if not valid.all(): # since a plane intersection was invalid we have to go back and @@ -247,16 +237,16 @@ def intersects_id(self, if return_locations: locations = ( - np.zeros((0, 3), float) if len(result_locations) == 0 - else np.array(result_locations)) + np.zeros((0, 3), float) + if len(result_locations) == 0 + else np.array(result_locations) + ) return index_tri, index_ray, locations return index_tri, index_ray @log_time - def intersects_first(self, - ray_origins, - ray_directions): + def intersects_first(self, ray_origins, ray_directions): """ Find the index of the first triangle a ray hits. @@ -277,13 +267,10 @@ def intersects_first(self, ray_origins = np.asanyarray(deepcopy(ray_origins)) ray_directions = np.asanyarray(ray_directions) - triangle_index = self._scene.run(ray_origins, - ray_directions) + triangle_index = self._scene.run(ray_origins, ray_directions) return triangle_index - def intersects_any(self, - ray_origins, - ray_directions): + def intersects_any(self, ray_origins, ray_directions): """ Check if a list of rays hits the surface. 
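`intersects_id` above collects multiple hits by nudging each surviving ray just past the face it hit (the `ray_offsets` built from `_ray_offset_factor * self._scale`) and re-querying, up to `max_hits`. For a closed surface this yields an entry and exit hit per ray:

    import trimesh

    mesh = trimesh.creation.icosphere()
    index_tri, index_ray, locations = mesh.ray.intersects_id(
        ray_origins=[[0, 0, -2.0]],
        ray_directions=[[0, 0, 1.0]],
        multiple_hits=True,
        return_locations=True,
    )
    # a ray through a closed sphere should report two hits for ray 0
    assert (index_ray == 0).sum() == 2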
@@ -301,8 +288,9 @@ def intersects_any(self, Did each ray hit the surface """ - first = self.intersects_first(ray_origins=ray_origins, - ray_directions=ray_directions) + first = self.intersects_first( + ray_origins=ray_origins, ray_directions=ray_directions + ) hit = first != -1 return hit @@ -332,8 +320,7 @@ class _EmbreeWrap: """ def __init__(self, vertices, faces, scale): - scaled = np.array(vertices, - dtype=np.float64) + scaled = np.array(vertices, dtype=np.float64) self.origin = scaled.min(axis=0) self.scale = float(scale) scaled = (scaled - self.origin) * self.scale @@ -343,12 +330,12 @@ def __init__(self, vertices, faces, scale): TriangleMesh( scene=self.scene, vertices=scaled.astype(_embree_dtype), - indices=faces.view(np.ndarray).astype(np.int32)) + indices=faces.view(np.ndarray).astype(np.int32), + ) def run(self, origins, normals, **kwargs): - scaled = (np.array(origins, - dtype=np.float64) - self.origin) * self.scale + scaled = (np.array(origins, dtype=np.float64) - self.origin) * self.scale - return self.scene.run(scaled.astype(_embree_dtype), - normals.astype(_embree_dtype), - **kwargs) + return self.scene.run( + scaled.astype(_embree_dtype), normals.astype(_embree_dtype), **kwargs + ) diff --git a/trimesh/ray/ray_triangle.py b/trimesh/ray/ray_triangle.py index 1f0cc58de..8aa3bbe4a 100644 --- a/trimesh/ray/ray_triangle.py +++ b/trimesh/ray/ray_triangle.py @@ -19,12 +19,14 @@ def __init__(self, mesh): self.mesh = mesh self._cache = caching.Cache(self.mesh.__hash__) - def intersects_id(self, - ray_origins, - ray_directions, - return_locations=False, - multiple_hits=True, - **kwargs): + def intersects_id( + self, + ray_origins, + ray_directions, + return_locations=False, + multiple_hits=True, + **kwargs, + ): """ Find the intersections between the current mesh and an array of rays. @@ -49,27 +51,22 @@ def intersects_id(self, locations : (h, 3) float [optional] Position of intersection in space """ - (index_tri, - index_ray, - locations) = ray_triangle_id( - triangles=self.mesh.triangles, - ray_origins=ray_origins, - ray_directions=ray_directions, - tree=self.mesh.triangles_tree, - multiple_hits=multiple_hits, - triangles_normal=self.mesh.face_normals) + (index_tri, index_ray, locations) = ray_triangle_id( + triangles=self.mesh.triangles, + ray_origins=ray_origins, + ray_directions=ray_directions, + tree=self.mesh.triangles_tree, + multiple_hits=multiple_hits, + triangles_normal=self.mesh.face_normals, + ) if return_locations: if len(index_tri) == 0: return index_tri, index_ray, locations - unique = grouping.unique_rows( - np.column_stack((locations, index_ray)))[0] + unique = grouping.unique_rows(np.column_stack((locations, index_ray)))[0] return index_tri[unique], index_ray[unique], locations[unique] return index_tri, index_ray - def intersects_location(self, - ray_origins, - ray_directions, - **kwargs): + def intersects_location(self, ray_origins, ray_directions, **kwargs): """ Return unique cartesian locations where rays hit the mesh. 
If you are counting the number of hits a ray had, this method @@ -92,20 +89,15 @@ def intersects_location(self, index_tri: (n,) int Array of triangle (face) indexes """ - (index_tri, - index_ray, - locations) = self.intersects_id( - ray_origins=ray_origins, - ray_directions=ray_directions, - return_locations=True, - **kwargs) + (index_tri, index_ray, locations) = self.intersects_id( + ray_origins=ray_origins, + ray_directions=ray_directions, + return_locations=True, + **kwargs, + ) return locations, index_ray, index_tri - def intersects_first( - self, - ray_origins, - ray_directions, - **kwargs): + def intersects_first(self, ray_origins, ray_directions, **kwargs): """ Find the index of the first triangle a ray hits. @@ -123,13 +115,13 @@ def intersects_first( Index of triangle ray hit, or -1 if not hit """ - (index_tri, - index_ray) = self.intersects_id( - ray_origins=ray_origins, - ray_directions=ray_directions, - return_locations=False, - multiple_hits=False, - **kwargs) + (index_tri, index_ray) = self.intersects_id( + ray_origins=ray_origins, + ray_directions=ray_directions, + return_locations=False, + multiple_hits=False, + **kwargs, + ) # put the result into the form of "one triangle index per ray" result = np.ones(len(ray_origins), dtype=np.int64) * -1 @@ -137,10 +129,7 @@ def intersects_first( return result - def intersects_any(self, - ray_origins, - ray_directions, - **kwargs): + def intersects_any(self, ray_origins, ray_directions, **kwargs): """ Find out if each ray hit any triangle on the mesh. @@ -156,8 +145,7 @@ def intersects_any(self, hit : (m,) bool Whether any ray hit any triangle on the mesh """ - index_tri, index_ray = self.intersects_id( - ray_origins, ray_directions) + index_tri, index_ray = self.intersects_id(ray_origins, ray_directions) hit_any = np.zeros(len(ray_origins), dtype=bool) hit_idx = np.unique(index_ray) if len(hit_idx) > 0: @@ -186,12 +174,13 @@ def contains_points(self, points): def ray_triangle_id( - triangles, - ray_origins, - ray_directions, - triangles_normal=None, - tree=None, - multiple_hits=True): + triangles, + ray_origins, + ray_directions, + triangles_normal=None, + tree=None, + multiple_hits=True, +): """ Find the intersections between a group of triangles and rays @@ -229,9 +218,8 @@ def ray_triangle_id( # find the list of likely triangles and which ray they # correspond with, via rtree queries ray_candidates, ray_id = ray_triangle_candidates( - ray_origins=ray_origins, - ray_directions=ray_directions, - tree=tree) + ray_origins=ray_origins, ray_directions=ray_directions, tree=tree + ) # get subsets which are corresponding rays and triangles # (c,3,3) triangle candidates @@ -243,10 +231,9 @@ def ray_triangle_id( # get the plane origins and normals from the triangle candidates plane_origins = triangle_candidates[:, 0, :] if triangles_normal is None: - plane_normals, triangle_ok = triangles_mod.normals( - triangle_candidates) + plane_normals, triangle_ok = triangles_mod.normals(triangle_candidates) if not triangle_ok.all(): - raise ValueError('Invalid triangles!') + raise ValueError("Invalid triangles!") else: plane_normals = triangles_normal[ray_candidates] @@ -255,25 +242,28 @@ def ray_triangle_id( plane_origins=plane_origins, plane_normals=plane_normals, line_origins=line_origins, - line_directions=line_directions) + line_directions=line_directions, + ) - if (len(triangle_candidates) == 0 or - not valid.any()): + if len(triangle_candidates) == 0 or not valid.any(): # we got no hits so return early with empty array - return (np.array([], 
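As the `result = np.ones(...) * -1` fill above shows, `intersects_first` reports one triangle index per input ray and uses -1 as the "missed everything" sentinel:

    import trimesh

    mesh = trimesh.creation.box()
    first = mesh.ray.intersects_first(
        ray_origins=[[0, 0, -5.0], [10.0, 10.0, -5.0]],
        ray_directions=[[0, 0, 1.0], [0, 0, 1.0]],
    )
    hit_mask = first != -1   # the second ray misses the unit box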
dtype=np.int64), - np.array([], dtype=np.int64), - np.array([], dtype=np.float64)) + return ( + np.array([], dtype=np.int64), + np.array([], dtype=np.int64), + np.array([], dtype=np.float64), + ) # find the barycentric coordinates of each plane intersection on the # triangle candidates barycentric = triangles_mod.points_to_barycentric( - triangle_candidates[valid], location) + triangle_candidates[valid], location + ) # the plane intersection is inside the triangle if all barycentric # coordinates are between 0.0 and 1.0 hit = np.logical_and( - (barycentric > -tol.zero).all(axis=1), - (barycentric < (1 + tol.zero)).all(axis=1)) + (barycentric > -tol.zero).all(axis=1), (barycentric < (1 + tol.zero)).all(axis=1) + ) # the result index of the triangle is a candidate with a valid # plane intersection and a triangle which contains the plane @@ -287,8 +277,7 @@ def ray_triangle_id( # only return points that are forward from the origin vector = location - ray_origins[index_ray] - distance = util.diagonal_dot( - vector, ray_directions[index_ray]) + distance = util.diagonal_dot(vector, ray_directions[index_ray]) forward = distance > -1e-6 index_tri = index_tri[forward] @@ -305,16 +294,12 @@ def ray_triangle_id( return index_tri, index_ray, location # find the first hit - first = np.array( - [g[distance[g].argmin()] for g in - grouping.group(index_ray)]) + first = np.array([g[distance[g].argmin()] for g in grouping.group(index_ray)]) return index_tri[first], index_ray[first], location[first] -def ray_triangle_candidates(ray_origins, - ray_directions, - tree): +def ray_triangle_candidates(ray_origins, ray_directions, tree): """ Do broad- phase search for triangles that the rays may intersect. @@ -338,9 +323,9 @@ def ray_triangle_candidates(ray_origins, ray_id : (n,) int Corresponding ray index for a triangle candidate """ - bounding = ray_bounds(ray_origins=ray_origins, - ray_directions=ray_directions, - bounds=tree.bounds) + bounding = ray_bounds( + ray_origins=ray_origins, ray_directions=ray_directions, bounds=tree.bounds + ) index = [] candidates = [] @@ -351,10 +336,7 @@ def ray_triangle_candidates(ray_origins, return np.array(candidates, dtype=np.int64), np.array(index, dtype=np.int64) -def ray_bounds(ray_origins, - ray_directions, - bounds, - buffer_dist=1e-5): +def ray_bounds(ray_origins, ray_directions, bounds, buffer_dist=1e-5): """ Given a set of rays and a bounding box for the volume of interest where the rays will be passing through, find the bounding boxes @@ -381,10 +363,10 @@ def ray_bounds(ray_origins, # find the primary axis of the vector axis = np.abs(ray_directions).argmax(axis=1) axis_bound = bounds.reshape((2, -1)).T[axis] - axis_ori = np.array([ray_origins[i][a] - for i, a in enumerate(axis)]).reshape((-1, 1)) - axis_dir = np.array([ray_directions[i][a] - for i, a in enumerate(axis)]).reshape((-1, 1)) + axis_ori = np.array([ray_origins[i][a] for i, a in enumerate(axis)]).reshape((-1, 1)) + axis_dir = np.array([ray_directions[i][a] for i, a in enumerate(axis)]).reshape( + (-1, 1) + ) # parametric equation of a line # point = direction*t + origin @@ -407,12 +389,9 @@ def ray_bounds(ray_origins, on_a = (ray_directions * t_a) + ray_origins on_b = (ray_directions * t_b) + ray_origins - on_plane = np.column_stack( - (on_a, on_b)).reshape( - (-1, 2, ray_directions.shape[1])) + on_plane = np.column_stack((on_a, on_b)).reshape((-1, 2, ray_directions.shape[1])) - ray_bounding = np.hstack((on_plane.min(axis=1), - on_plane.max(axis=1))) + ray_bounding = np.hstack((on_plane.min(axis=1), 
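The hit test above relies on the fact that a point in a triangle's plane lies inside the triangle exactly when all three barycentric coordinates fall in [0, 1] (they sum to one by construction). A standalone numpy sketch of that check with an explicit tolerance:

    import numpy as np

    def barycentric_inside(barycentric, tol=1e-12):
        # True where every barycentric coordinate is within [0, 1] +/- tol
        b = np.asanyarray(barycentric, dtype=np.float64)
        return np.logical_and(
            (b > -tol).all(axis=1), (b < 1 + tol).all(axis=1)
        )

    print(barycentric_inside([[0.2, 0.3, 0.5]]))    # [ True]
    print(barycentric_inside([[1.2, -0.1, -0.1]]))  # [False]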
on_plane.max(axis=1))) # pad the bounding box by TOL_BUFFER # not sure if this is necessary, but if the ray is axis aligned # this function will otherwise return zero volume bounding boxes diff --git a/trimesh/ray/ray_util.py b/trimesh/ray/ray_util.py index 68276213b..292c7c1a9 100644 --- a/trimesh/ray/ray_util.py +++ b/trimesh/ray/ray_util.py @@ -4,9 +4,7 @@ @constants.log_time -def contains_points(intersector, - points, - check_direction=None): +def contains_points(intersector, points, check_direction=None): """ Check if a mesh contains a set of points, using ray tests. @@ -26,15 +24,14 @@ def contains_points(intersector, # convert points to float and make sure they are 3D points = np.asanyarray(points, dtype=np.float64) if not util.is_shape(points, (-1, 3)): - raise ValueError('points must be (n,3)') + raise ValueError("points must be (n,3)") # placeholder result with no hits we'll fill in later contains = np.zeros(len(points), dtype=bool) # cull points outside of the axis aligned bounding box # this avoids running ray tests unless points are close - inside_aabb = bounds.contains(intersector.mesh.bounds, - points) + inside_aabb = bounds.contains(intersector.mesh.bounds, points) # if everything is outside the AABB, exit early if not inside_aabb.any(): @@ -42,28 +39,22 @@ def contains_points(intersector, # default ray direction is random, but we are not generating # uniquely each time so the behavior of this function is easier to debug - default_direction = np.array([0.4395064455, - 0.617598629942, - 0.652231566745]) + default_direction = np.array([0.4395064455, 0.617598629942, 0.652231566745]) if check_direction is None: # if no check direction is specified use the default # stack it only for points inside the AABB - ray_directions = np.tile(default_direction, - (inside_aabb.sum(), 1)) + ray_directions = np.tile(default_direction, (inside_aabb.sum(), 1)) else: # if a direction is passed use it ray_directions = np.tile( - np.array(check_direction).reshape(3), - (inside_aabb.sum(), 1)) + np.array(check_direction).reshape(3), (inside_aabb.sum(), 1) + ) # cast a ray both forwards and backwards location, index_ray, c = intersector.intersects_location( - np.vstack( - (points[inside_aabb], - points[inside_aabb])), - np.vstack( - (ray_directions, - -ray_directions))) + np.vstack((points[inside_aabb], points[inside_aabb])), + np.vstack((ray_directions, -ray_directions)), + ) # if we hit nothing in either direction just return with no hits if len(index_ray) == 0: @@ -71,9 +62,7 @@ def contains_points(intersector, # reshape so bi_hits[0] is the result in the forward direction and # bi_hits[1] is the result in the backwards directions - bi_hits = np.bincount( - index_ray, - minlength=len(ray_directions) * 2).reshape((2, -1)) + bi_hits = np.bincount(index_ray, minlength=len(ray_directions) * 2).reshape((2, -1)) # a point is probably inside if it hits a surface an odd number of times bi_contains = np.mod(bi_hits, 2) == 1 @@ -101,8 +90,7 @@ def contains_points(intersector, # rays where they don't agree and one isn't in free space # are deemed to be broken - broken = np.logical_and(np.logical_not(agree), - np.logical_not(one_freespace)) + broken = np.logical_and(np.logical_not(agree), np.logical_not(one_freespace)) # if all rays agree return if not broken.any(): @@ -113,18 +101,17 @@ def contains_points(intersector, # to avoid infinite recursion if check_direction is None: # we're going to run the check again in a random direction - new_direction = util.unitize(np.random.random(3) - .5) + new_direction 
= util.unitize(np.random.random(3) - 0.5) # do the mask trick again to be able to assign results mask = inside_aabb.copy() mask[mask] = broken contains[mask] = contains_points( - intersector, - points[inside_aabb][broken], - check_direction=new_direction) + intersector, points[inside_aabb][broken], check_direction=new_direction + ) constants.log.debug( - 'detected %d broken contains test, attempted to fix', - broken.sum()) + "detected %d broken contains test, attempted to fix", broken.sum() + ) return contains diff --git a/trimesh/registration.py b/trimesh/registration.py index 997d7759a..21e476f7a 100644 --- a/trimesh/registration.py +++ b/trimesh/registration.py @@ -20,17 +20,14 @@ # wrapping just ImportError fails in some cases # will raise the error when someone tries to use KDtree from . import exceptions + cKDTree = exceptions.ExceptionWrapper(E) sparse = exceptions.ExceptionWrapper(E) -def mesh_other(mesh, - other, - samples=500, - scale=False, - icp_first=10, - icp_final=50, - **kwargs): +def mesh_other( + mesh, other, samples=500, scale=False, icp_first=10, icp_final=50, **kwargs +): """ Align a mesh with another mesh or a PointCloud using the principal axes of inertia as a starting point which @@ -70,19 +67,17 @@ def key_points(m, count): to registration. """ if len(m.vertices) < (count / 2): - return np.vstack(( - m.vertices, - m.sample(count - len(m.vertices)))) + return np.vstack((m.vertices, m.sample(count - len(m.vertices)))) else: return m.sample(count) - if not util.is_instance_named(mesh, 'Trimesh'): - raise ValueError('mesh must be Trimesh object!') + if not util.is_instance_named(mesh, "Trimesh"): + raise ValueError("mesh must be Trimesh object!") inverse = True search = mesh # if both are meshes use the smaller one for searching - if util.is_instance_named(other, 'Trimesh'): + if util.is_instance_named(other, "Trimesh"): if len(mesh.vertices) > len(other.vertices): # do the expensive tree construction on the # smaller mesh and query the others points @@ -104,7 +99,7 @@ def key_points(m, count): points = other points_PIT = bounds.oriented_bounds(points)[0] else: - raise ValueError('other must be mesh or (n, 3) points!') + raise ValueError("other must be mesh or (n, 3) points!") # get the transform that aligns the search mesh principal # axes of inertia with the XYZ axis at the origin @@ -116,22 +111,27 @@ def key_points(m, count): # transform that moves the principal axes of inertia # of the search mesh to be aligned with the best- guess # principal axes of the points - search_to_points = np.dot(np.linalg.inv(points_PIT), - search_PIT) + search_to_points = np.dot(np.linalg.inv(points_PIT), search_PIT) # permutations of cube rotations # the principal inertia transform has arbitrary sign # along the 3 major axis so try all combinations of # 180 degree rotations with a quick first ICP pass - cubes = np.array([np.eye(4) * np.append(diag, 1) - for diag in [[1, 1, 1], - [1, 1, -1], - [1, -1, 1], - [-1, 1, 1], - [-1, -1, 1], - [-1, 1, -1], - [1, -1, -1], - [-1, -1, -1]]]) + cubes = np.array( + [ + np.eye(4) * np.append(diag, 1) + for diag in [ + [1, 1, 1], + [1, 1, -1], + [1, -1, 1], + [-1, 1, 1], + [-1, -1, 1], + [-1, 1, -1], + [1, -1, -1], + [-1, -1, -1], + ] + ] + ) # loop through permutations and run iterative closest point costs = np.ones(len(cubes)) * np.inf @@ -143,25 +143,26 @@ def key_points(m, count): # flipped around the centroid of search a_to_b = np.dot( transformations.transform_around(flip, centroid), - np.linalg.inv(search_to_points)) + 
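`contains_points` above votes with ray parity: each candidate point casts a ray forward and backward, an odd crossing count means "inside", and only rays where the two directions disagree (and neither landed in free space) are retried along a fresh random direction. The usual public entry point is `mesh.contains`, which is meaningful for watertight meshes:

    import trimesh

    mesh = trimesh.creation.icosphere()
    inside = mesh.contains([[0, 0, 0], [0, 0, 2.0]])
    # -> [ True, False] for the closed unit sphere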
np.linalg.inv(search_to_points), + ) # run first pass ICP - matrix, junk, cost = icp(a=points, - b=search, - initial=a_to_b, - max_iterations=int(icp_first), - scale=scale) + matrix, junk, cost = icp( + a=points, b=search, initial=a_to_b, max_iterations=int(icp_first), scale=scale + ) # save transform and costs from ICP transforms[i] = matrix costs[i] = cost # run a final ICP refinement step - matrix, junk, cost = icp(a=points, - b=search, - initial=transforms[np.argmin(costs)], - max_iterations=int(icp_final), - scale=scale) + matrix, junk, cost = icp( + a=points, + b=search, + initial=transforms[np.argmin(costs)], + max_iterations=int(icp_final), + scale=scale, + ) # convert to per- point distance average cost /= len(points) @@ -177,13 +178,9 @@ def key_points(m, count): return mesh_to_other, cost -def procrustes(a, - b, - weights=None, - reflection=True, - translation=True, - scale=True, - return_cost=True): +def procrustes( + a, b, weights=None, reflection=True, translation=True, scale=True, return_cost=True +): """ Perform Procrustes' analysis subject to constraints. Finds the transformation T mapping a to b which minimizes the square sum @@ -223,9 +220,9 @@ def procrustes(a, a = np.asanyarray(a, dtype=np.float64) b = np.asanyarray(b, dtype=np.float64) if not util.is_shape(a, (-1, 3)) or not util.is_shape(b, (-1, 3)): - raise ValueError('points must be (n,3)!') + raise ValueError("points must be (n,3)!") if len(a) != len(b): - raise ValueError('a and b must contain same number of points!') + raise ValueError("a and b must contain same number of points!") if weights is not None: w = np.asanyarray(weights, dtype=np.float64) if len(w) != len(a): @@ -247,14 +244,14 @@ def procrustes(a, # Remove scale component if scale: if weights is None: - ascale = np.sqrt(((a - acenter)**2).sum() / len(a)) + ascale = np.sqrt(((a - acenter) ** 2).sum() / len(a)) # ascale is the square root of weighted average of the # squared difference # between each point and acenter. else: - ascale = np.sqrt((((a - acenter)**2) * w_norm).sum()) + ascale = np.sqrt((((a - acenter) ** 2) * w_norm).sum()) - bscale = np.sqrt(((b - bcenter)**2).sum() / len(b)) + bscale = np.sqrt(((b - bcenter) ** 2).sum() / len(b)) else: ascale = 1 bscale = 1 @@ -265,12 +262,11 @@ def procrustes(a, # can be weighted differently. if weights is None: - target = np.dot(((b - bcenter) / bscale).T, - ((a - acenter) / ascale)) + target = np.dot(((b - bcenter) / bscale).T, ((a - acenter) / ascale)) else: target = np.dot( - ((b - bcenter) / bscale).T, - ((a - acenter) / ascale) * w.reshape((-1, 1))) + ((b - bcenter) / bscale).T, ((a - acenter) / ascale) * w.reshape((-1, 1)) + ) u, s, vh = np.linalg.svd(target) @@ -278,31 +274,24 @@ def procrustes(a, R = np.dot(u, vh) else: # no reflection allowed, so determinant must be 1.0 - R = np.dot(np.dot(u, np.diag( - [1, 1, np.linalg.det(np.dot(u, vh))])), vh) + R = np.dot(np.dot(u, np.diag([1, 1, np.linalg.det(np.dot(u, vh))])), vh) # Compute our 4D transformation matrix encoding # a -> (R @ (a - acenter)/ascale) * bscale + bcenter # = (bscale/ascale)R @ a + (bcenter - (bscale/ascale)R @ acenter) translation = bcenter - (bscale / ascale) * np.dot(R, acenter) matrix = np.hstack((bscale / ascale * R, translation.reshape(-1, 1))) - matrix = np.vstack( - (matrix, np.array([0.] 
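`mesh_other` above seeds ICP from the principal inertia axes, tries all eight 180-degree sign permutations (the `cubes` array) with a short `icp_first` pass, then refines the winner for `icp_final` iterations and reports cost as mean per-point distance. Sketch:

    import trimesh

    a = trimesh.creation.icosphere()
    b = a.copy()
    b.apply_translation([1.0, 2.0, 3.0])

    mesh_to_other, cost = trimesh.registration.mesh_other(a, b, samples=500)
    a.apply_transform(mesh_to_other)   # a is now roughly aligned with b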
* (a.shape[1]) + [1.]).reshape(1, -1))) + matrix = np.vstack((matrix, np.array([0.0] * (a.shape[1]) + [1.0]).reshape(1, -1))) if return_cost: transformed = transform_points(a, matrix) - cost = ((b - transformed)**2).mean() + cost = ((b - transformed) ** 2).mean() return matrix, transformed, cost else: return matrix -def icp(a, - b, - initial=None, - threshold=1e-5, - max_iterations=20, - **kwargs): +def icp(a, b, initial=None, threshold=1e-5, max_iterations=20, **kwargs): """ Apply the iterative closest point algorithm to align a point cloud with another point cloud or mesh. Will only produce reasonable results if the @@ -337,16 +326,16 @@ def icp(a, a = np.asanyarray(a, dtype=np.float64) if not util.is_shape(a, (-1, 3)): - raise ValueError('points must be (n,3)!') + raise ValueError("points must be (n,3)!") if initial is None: initial = np.eye(4) - is_mesh = util.is_instance_named(b, 'Trimesh') + is_mesh = util.is_instance_named(b, "Trimesh") if not is_mesh: b = np.asanyarray(b, dtype=np.float64) if not util.is_shape(b, (-1, 3)): - raise ValueError('points must be (n,3)!') + raise ValueError("points must be (n,3)!") btree = cKDTree(b) # transform a under initial_transformation @@ -366,9 +355,7 @@ def icp(a, closest = b[ix] # align a with closest points - matrix, transformed, cost = procrustes(a=a, - b=closest, - **kwargs) + matrix, transformed, cost = procrustes(a=a, b=closest, **kwargs) # update a with our new transformed points a = transformed @@ -385,33 +372,28 @@ def icp(a, def _normalize_by_source(source_mesh, target_geometry, target_positions): # Utility function to put the source mesh in [-1, 1]^3 and transform # target geometry accordingly - if not util.is_instance_named(target_geometry, 'Trimesh') and \ - not isinstance(target_geometry, PointCloud): + if not util.is_instance_named(target_geometry, "Trimesh") and not isinstance( + target_geometry, PointCloud + ): vertices = np.asanyarray(target_geometry) target_geometry = PointCloud(vertices) centroid, scale = source_mesh.centroid, source_mesh.scale source_mesh.vertices = (source_mesh.vertices - centroid[None, :]) / scale # Dont forget to also transform the target positions - target_geometry.vertices = ( - target_geometry.vertices - centroid[None, :]) / scale + target_geometry.vertices = (target_geometry.vertices - centroid[None, :]) / scale if target_positions is not None: target_positions = (target_positions - centroid[None, :]) / scale return target_geometry, target_positions, centroid, scale def _denormalize_by_source( - source_mesh, - target_geometry, - target_positions, - result, - centroid, - scale): + source_mesh, target_geometry, target_positions, result, centroid, scale +): # Utility function to transform source mesh from # [-1, 1]^3 to its original transform # and transform target geometry accordingly source_mesh.vertices = scale * source_mesh.vertices + centroid[None, :] - target_geometry.vertices = scale * \ - target_geometry.vertices + centroid[None, :] + target_geometry.vertices = scale * target_geometry.vertices + centroid[None, :] if target_positions is not None: target_positions = scale * target_positions + centroid[None, :] if isinstance(result, list): @@ -421,18 +403,20 @@ def _denormalize_by_source( return result -def nricp_amberg(source_mesh, - target_geometry, - source_landmarks=None, - target_positions=None, - steps=None, - eps=0.0001, - gamma=1, - distance_threshold=0.1, - return_records=False, - use_faces=True, - use_vertex_normals=True, - neighbors_count=8): +def nricp_amberg( + source_mesh, + 
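Both `procrustes` above and the `icp` wrapper that follows it return `(matrix, transformed, cost)` by default; with exact correspondences the Procrustes residual should be near machine precision. Sketch:

    import numpy as np
    import trimesh

    a = np.random.random((100, 3))
    move = trimesh.transformations.rotation_matrix(0.5, [0, 0, 1])
    b = trimesh.transformations.transform_points(a, move)

    matrix, transformed, cost = trimesh.registration.procrustes(a, b)
    assert cost < 1e-8   # exact correspondence, near-zero residual

    # icp performs the same alignment without known correspondences,
    # re-matching closest points on every iteration
    matrix, transformed, cost = trimesh.registration.icp(a, b, max_iterations=20)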
target_geometry, + source_landmarks=None, + target_positions=None, + steps=None, + eps=0.0001, + gamma=1, + distance_threshold=0.1, + return_records=False, + use_faces=True, + use_vertex_normals=True, + neighbors_count=8, +): """ Non Rigid Iterative Closest Points @@ -499,8 +483,7 @@ def nricp_amberg(source_mesh, iteration. """ - def _solve_system(M_kron_G, D, vertices_weight, - nearest, ws, nE, nV, Dl, Ul, wl): + def _solve_system(M_kron_G, D, vertices_weight, nearest, ws, nE, nV, Dl, Ul, wl): # Solve for Eq. 12 U = nearest * vertices_weight[:, None] use_landmarks = Dl is not None and Ul is not None @@ -511,9 +494,9 @@ def _solve_system(M_kron_G, D, vertices_weight, B_shape = (4 * nE + nV + Ul.shape[0], 3) A = sparse.csr_matrix(sparse.vstack(A_stack)) B = sparse.lil_matrix(B_shape, dtype=np.float32) - B[4 * nE: (4 * nE + nV), :] = U + B[4 * nE : (4 * nE + nV), :] = U if use_landmarks: - B[4 * nE + nV: (4 * nE + nV + Ul.shape[0]), :] = Ul * wl + B[4 * nE + nV : (4 * nE + nV + Ul.shape[0]), :] = Ul * wl X = sparse.linalg.spsolve(A.T * A, A.T * B).toarray() return X @@ -526,8 +509,9 @@ def _node_arc_incidence(mesh, do_weight): data = np.ones(2 * nE, np.float32) data[1::2] = -1 if do_weight: - edge_lengths = np.linalg.norm(mesh.vertices[mesh.edges[:, 0]] - - mesh.vertices[mesh.edges[:, 1]], axis=-1) + edge_lengths = np.linalg.norm( + mesh.vertices[mesh.edges[:, 0]] - mesh.vertices[mesh.edges[:, 1]], axis=-1 + ) data *= np.repeat(1 / edge_lengths, 2) return sparse.coo_matrix((data, (rows, cols)), shape=(nE, nV)) @@ -536,9 +520,7 @@ def _create_D(vertex_3d_data): nV = len(vertex_3d_data) rows = np.repeat(np.arange(nV), 4) cols = np.arange(4 * nV) - data = np.concatenate( - (vertex_3d_data, np.ones( - (nV, 1))), axis=-1).flatten() + data = np.concatenate((vertex_3d_data, np.ones((nV, 1))), axis=-1).flatten() return sparse.csr_matrix((data, (rows, cols)), shape=(nV, 4 * nV)) def _create_X(nV): @@ -546,10 +528,7 @@ def _create_X(nV): X_ = np.concatenate((np.eye(3), np.array([[0, 0, 0]])), axis=0) return np.tile(X_, (nV, 1)) - def _create_Dl_Ul(D, - source_mesh, - source_landmarks, - target_positions): + def _create_Dl_Ul(D, source_mesh, source_landmarks, target_positions): # Create landmark terms (Eq. 
11) Dl, Ul = None, None @@ -566,12 +545,21 @@ def _create_Dl_Ul(D, x0 = source_mesh.vertices[source_tri_vids[:, 0]] x1 = source_mesh.vertices[source_tri_vids[:, 1]] x2 = source_mesh.vertices[source_tri_vids[:, 2]] - Ul0 = target_positions - x1 * source_barys[:, 1, None] \ + Ul0 = ( + target_positions + - x1 * source_barys[:, 1, None] - x2 * source_barys[:, 2, None] - Ul1 = target_positions - x0 * source_barys[:, 0, None] \ + ) + Ul1 = ( + target_positions + - x0 * source_barys[:, 0, None] - x2 * source_barys[:, 2, None] - Ul2 = target_positions - x0 * source_barys[:, 0, None] \ + ) + Ul2 = ( + target_positions + - x0 * source_barys[:, 0, None] - x1 * source_barys[:, 1, None] + ) Ul = np.zeros((Ul0.shape[0] * 3, 3)) Ul[0::3] = Ul0 # y - v * x2 + w * x3 Ul[1::3] = Ul1 # y - u * x1 + w * x3 @@ -581,8 +569,9 @@ def _create_Dl_Ul(D, Ul = target_positions return Dl, Ul - target_geometry, target_positions, centroid, scale = \ - _normalize_by_source(source_mesh, target_geometry, target_positions) + target_geometry, target_positions, centroid, scale = _normalize_by_source( + source_mesh, target_geometry, target_positions + ) # Number of edges and vertices in source mesh nE = len(source_mesh.edges) @@ -619,7 +608,6 @@ def _create_Dl_Ul(D, # Main loop for ws, wl, wn, max_iter in steps: - # If normals are estimated from points and if there are less # than 3 points per query, avoid normal estimation if not use_faces and neighbors_count < 3: @@ -630,39 +618,38 @@ def _create_Dl_Ul(D, cpt_iter = 0 # Current step iterations loop - while last_error - \ - error > eps and (max_iter is None or cpt_iter < max_iter): - + while last_error - error > eps and (max_iter is None or cpt_iter < max_iter): qres = _from_mesh( target_geometry, transformed_vertices, from_vertices_only=not use_faces, return_normals=wn > 0, return_interpolated_normals=wn > 0 and use_vertex_normals, - neighbors_count=neighbors_count) + neighbors_count=neighbors_count, + ) # Data weighting vertices_weight = np.ones(nV) - vertices_weight[qres['distances'] > distance_threshold] = 0 + vertices_weight[qres["distances"] > distance_threshold] = 0 - if wn > 0 and 'normals' in qres: - target_normals = qres['normals'] - if use_vertex_normals and 'interpolated_normals' in qres: - target_normals = qres['interpolated_normals'] + if wn > 0 and "normals" in qres: + target_normals = qres["normals"] + if use_vertex_normals and "interpolated_normals" in qres: + target_normals = qres["interpolated_normals"] # Normal weighting = multiplying weights by cosines^wn source_normals = DN * X dot = util.diagonal_dot(source_normals, target_normals) # Normal orientation is only known for meshes as target dot = np.clip(dot, 0, 1) if use_faces else np.abs(dot) - vertices_weight = vertices_weight * dot ** wn + vertices_weight = vertices_weight * dot**wn # Actual system solve - X = _solve_system(M_kron_G, D, vertices_weight, qres['nearest'], - ws, nE, nV, Dl, Ul, wl) + X = _solve_system( + M_kron_G, D, vertices_weight, qres["nearest"], ws, nE, nV, Dl, Ul, wl + ) transformed_vertices = D * X last_error = error - error_vec = np.linalg.norm( - qres['nearest'] - transformed_vertices, axis=-1) + error_vec = np.linalg.norm(qres["nearest"] - transformed_vertices, axis=-1) error = (error_vec * vertices_weight).mean() if return_records: records.append(transformed_vertices) @@ -673,19 +660,22 @@ def _create_Dl_Ul(D, else: result = transformed_vertices - result = _denormalize_by_source(source_mesh, target_geometry, target_positions, - result, centroid, scale) + result = 
_denormalize_by_source( + source_mesh, target_geometry, target_positions, result, centroid, scale + ) return result -def _from_mesh(mesh, - input_points, - from_vertices_only=False, - return_barycentric_coordinates=False, - return_normals=False, - return_interpolated_normals=False, - neighbors_count=10, - **kwargs): +def _from_mesh( + mesh, + input_points, + from_vertices_only=False, + return_barycentric_coordinates=False, + return_normals=False, + return_interpolated_normals=False, + neighbors_count=10, + **kwargs, +): """ Find the the closest points and associated attributes from a Trimesh. @@ -725,37 +715,44 @@ def _from_mesh(mesh, if from_vertices_only or len(mesh.faces) == 0: # Consider only the vertices return _from_points( - mesh.vertices, input_points, mesh.kdtree, + mesh.vertices, + input_points, + mesh.kdtree, return_normals=return_normals, - neighbors_count=neighbors_count) + neighbors_count=neighbors_count, + ) # Else if we consider faces, use proximity.closest_point qres = {} from .proximity import closest_point from .triangles import points_to_barycentric - qres['nearest'], qres['distances'], qres['tids'] = closest_point( - mesh, input_points) + + qres["nearest"], qres["distances"], qres["tids"] = closest_point(mesh, input_points) if return_normals: - qres['normals'] = mesh.face_normals[qres['tids']] + qres["normals"] = mesh.face_normals[qres["tids"]] if return_barycentric_coordinates or return_interpolated_normals: - qres['barycentric_coordinates'] = points_to_barycentric( - mesh.vertices[mesh.faces[qres['tids']]], qres['nearest']) + qres["barycentric_coordinates"] = points_to_barycentric( + mesh.vertices[mesh.faces[qres["tids"]]], qres["nearest"] + ) if return_interpolated_normals: # Interpolation from barycentric coordinates - qres['interpolated_normals'] = \ - np.einsum('ij,ijk->ik', - qres['barycentric_coordinates'], - mesh.vertex_normals[mesh.faces[qres['tids']]]) + qres["interpolated_normals"] = np.einsum( + "ij,ijk->ik", + qres["barycentric_coordinates"], + mesh.vertex_normals[mesh.faces[qres["tids"]]], + ) return qres -def _from_points(target_points, - input_points, - kdtree=None, - return_normals=False, - neighbors_count=10, - **kwargs): +def _from_points( + target_points, + input_points, + kdtree=None, + return_normals=False, + neighbors_count=10, + **kwargs, +): """ Find the the closest points and associated attributes from a set of 3D points. 
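
Since the registration hunks above are formatting-only, a quick way to confirm behavior is preserved is to exercise the public entry points whose signatures appear in the diff; a minimal sketch using random data:

    import numpy as np
    from trimesh.registration import icp, procrustes
    from trimesh.transformations import random_rotation_matrix, transform_points

    # a point cloud and a rigidly transformed copy of it
    a = np.random.random((100, 3))
    b = transform_points(a, random_rotation_matrix())

    # procrustes solves for the aligning transform in closed form
    matrix, transformed, cost = procrustes(a, b)
    assert cost < 1e-8

    # icp alternates nearest-point matching with a procrustes solve
    matrix, transformed, cost = icp(a, b, initial=np.eye(4), max_iterations=20)
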
@@ -794,32 +791,32 @@ def _from_points(target_points, if return_normals: assert neighbors_count >= 3 - distances, indices = kdtree.query( - input_points, k=neighbors_count) + distances, indices = kdtree.query(input_points, k=neighbors_count) nearest = target_points[indices, :] - qres['normals'] = plane_fit(nearest)[1] - qres['nearest'] = nearest[:, 0] - qres['distances'] = distances[:, 0] - qres['vertex_indices'] = indices[:, 0] + qres["normals"] = plane_fit(nearest)[1] + qres["nearest"] = nearest[:, 0] + qres["distances"] = distances[:, 0] + qres["vertex_indices"] = indices[:, 0] else: - qres['distances'], qres['vertex_indices'] = kdtree.query( - input_points) - qres['nearest'] = target_points[qres['vertex_indices'], :] + qres["distances"], qres["vertex_indices"] = kdtree.query(input_points) + qres["nearest"] = target_points[qres["vertex_indices"], :] return qres -def nricp_sumner(source_mesh, - target_geometry, - source_landmarks=None, - target_positions=None, - steps=None, - distance_threshold=0.1, - return_records=False, - use_faces=True, - use_vertex_normals=True, - neighbors_count=8, - face_pairs_type='vertex'): +def nricp_sumner( + source_mesh, + target_geometry, + source_landmarks=None, + target_positions=None, + steps=None, + distance_threshold=0.1, + return_records=False, + use_faces=True, + use_vertex_normals=True, + neighbors_count=8, + face_pairs_type="vertex", +): """ Non Rigid Iterative Closest Points @@ -888,14 +885,14 @@ def _construct_transform_matrix(faces, Vinv, size): # Utility function for constructing the per-frame transforms _construct_transform_matrix._row = np.array([0, 1, 2] * 4) nV = len(Vinv) - rows = np.tile(_construct_transform_matrix._row, nV) \ - + 3 * np.repeat(np.arange(nV), 12) + rows = np.tile(_construct_transform_matrix._row, nV) + 3 * np.repeat( + np.arange(nV), 12 + ) cols = np.repeat(faces.flat, 3) minus_inv_sum = -Vinv.sum(axis=1) Vinv_flat = Vinv.reshape(nV, 9) data = np.concatenate((minus_inv_sum, Vinv_flat), axis=-1).flatten() - return sparse.coo_matrix((data, (rows, cols)), - shape=(3 * nV, size), dtype=float) + return sparse.coo_matrix((data, (rows, cols)), shape=(3 * nV, size), dtype=float) def _build_tetrahedrons(mesh): # UUtility function for constructing the frames @@ -908,25 +905,29 @@ def _build_tetrahedrons(mesh): nV, nT = len(mesh.vertices), len(mesh.faces) v4_indices = np.arange(nV, nV + nT)[:, None] tetrahedrons = np.concatenate((mesh.faces, v4_indices), axis=-1) - frames = np.concatenate(((v2 - v1)[..., None], - (v3 - v1)[..., None], - v4_vec[..., None]), axis=-1) + frames = np.concatenate( + ((v2 - v1)[..., None], (v3 - v1)[..., None], v4_vec[..., None]), axis=-1 + ) return vertices, tetrahedrons, frames def _construct_identity_cost(vtet, tet, Vinv): # Utility function for constructing the identity cost - AEi = _construct_transform_matrix(tet, Vinv, len(vtet),).tocsr() + AEi = _construct_transform_matrix( + tet, + Vinv, + len(vtet), + ).tocsr() Bi = np.tile(np.identity(3, dtype=float), (len(tet), 1)) return AEi, Bi def _construct_smoothness_cost(vtet, tet, Vinv, face_pairs): # Utility function for constructing the smoothness (stiffness) cost - AEs_r = _construct_transform_matrix(tet[face_pairs[:, 0]], - Vinv[face_pairs[:, 0]], - len(vtet)).tocsr() - AEs_l = _construct_transform_matrix(tet[face_pairs[:, 1]], - Vinv[face_pairs[:, 1]], - len(vtet)).tocsr() + AEs_r = _construct_transform_matrix( + tet[face_pairs[:, 0]], Vinv[face_pairs[:, 0]], len(vtet) + ).tocsr() + AEs_l = _construct_transform_matrix( + tet[face_pairs[:, 1]], 
Vinv[face_pairs[:, 1]], len(vtet) + ).tocsr() AEs = (AEs_r - AEs_l).tocsc() AEs.eliminate_zeros() Bs = np.zeros((len(face_pairs) * 3, 3)) @@ -947,9 +948,9 @@ def _construct_landmark_cost(vtet, source_mesh, source_landmarks): data = source_landmarks_barys.flat AEl = sparse.coo_matrix((data, (rows, cols)), shape=(nL, nVT)) - marker_vids = \ - source_landmarks_vids[source_landmarks_barys > np.finfo( - np.float16).eps] + marker_vids = source_landmarks_vids[ + source_landmarks_barys > np.finfo(np.float16).eps + ] non_markers_mask = np.ones(len(source_mesh.vertices), dtype=bool) non_markers_mask[marker_vids] = False else: @@ -965,9 +966,7 @@ def _construct_landmark_cost(vtet, source_mesh, source_landmarks): def _construct_correspondence_cost(points, non_markers_mask, size): # Utility function for constructing the correspondence cost - AEc = sparse.identity( - size, dtype=float, format="csc")[ - :len(non_markers_mask)] + AEc = sparse.identity(size, dtype=float, format="csc")[: len(non_markers_mask)] AEc = AEc[non_markers_mask] Bc = points[non_markers_mask] return AEc, Bc @@ -977,22 +976,21 @@ def _compute_vertex_normals(vertices, faces): mesh_triangles = vertices[faces] mesh_triangles_cross = cross(mesh_triangles) mesh_face_normals = normals( - triangles=mesh_triangles, - crosses=mesh_triangles_cross)[0] + triangles=mesh_triangles, crosses=mesh_triangles_cross + )[0] mesh_face_angles = angles(mesh_triangles) mesh_normals = weighted_vertex_normals( vertex_count=nV, faces=faces, face_normals=mesh_face_normals, - face_angles=mesh_face_angles) + face_angles=mesh_face_angles, + ) return mesh_normals # First, normalize the source and target to [-1, 1]^3 - (target_geometry, - target_positions, - centroid, - scale) = _normalize_by_source( - source_mesh, target_geometry, target_positions) + (target_geometry, target_positions, centroid, scale) = _normalize_by_source( + source_mesh, target_geometry, target_positions + ) nV = len(source_mesh.vertices) use_landmarks = source_landmarks is not None and target_positions is not None @@ -1009,7 +1007,7 @@ def _compute_vertex_normals(vertices, faces): Vinv = np.linalg.inv(V) # List of (n, 2) faces index which share a vertex - if face_pairs_type == 'vertex': + if face_pairs_type == "vertex": face_pairs = source_mesh.face_neighborhood else: face_pairs = source_mesh.face_adjacency @@ -1018,11 +1016,11 @@ def _compute_vertex_normals(vertices, faces): # Identity cost (Eq. 12) AEi, Bi = _construct_identity_cost(source_vtet, source_tet, Vinv) # Smoothness cost (Eq. 11) - AEs, Bs = _construct_smoothness_cost(source_vtet, source_tet, - Vinv, face_pairs) + AEs, Bs = _construct_smoothness_cost(source_vtet, source_tet, Vinv, face_pairs) # Landmark cost (Eq. 13) - AEl, non_markers_mask = _construct_landmark_cost(source_vtet, source_mesh, - source_landmarks) + AEl, non_markers_mask = _construct_landmark_cost( + source_vtet, source_mesh, source_landmarks + ) transformed_vertices = source_vtet.copy() if return_records: @@ -1030,7 +1028,6 @@ def _compute_vertex_normals(vertices, faces): # Main loop for i, (wc, wi, ws, wl, wn) in enumerate(steps): - Astack = [AEi * wi, AEs * ws] Bstack = [Bi * wi, Bs * ws] @@ -1045,28 +1042,28 @@ def _compute_vertex_normals(vertices, faces): transformed_vertices[:nV], from_vertices_only=not use_faces, return_normals=wn > 0, - return_interpolated_normals=( - use_vertex_normals and wn > 0), - neighbors_count=neighbors_count) + return_interpolated_normals=(use_vertex_normals and wn > 0), + neighbors_count=neighbors_count, + ) # Correspondence cost (Eq. 
13) AEc, Bc = _construct_correspondence_cost( - qres['nearest'], - non_markers_mask, - len(source_vtet)) + qres["nearest"], non_markers_mask, len(source_vtet) + ) vertices_weight = np.ones(nV) - vertices_weight[qres['distances'] > distance_threshold] = 0 - if wn > 0 or 'normals' in qres: - target_normals = qres['normals'] - if use_vertex_normals and 'interpolated_normals' in qres: - target_normals = qres['interpolated_normals'] + vertices_weight[qres["distances"] > distance_threshold] = 0 + if wn > 0 or "normals" in qres: + target_normals = qres["normals"] + if use_vertex_normals and "interpolated_normals" in qres: + target_normals = qres["interpolated_normals"] # Normal weighting : multiplying weights by cosines^wn - source_normals = _compute_vertex_normals(transformed_vertices, - source_mesh.faces) + source_normals = _compute_vertex_normals( + transformed_vertices, source_mesh.faces + ) dot = util.diagonal_dot(source_normals, target_normals) # Normal orientation is only known for meshes as target dot = np.clip(dot, 0, 1) if use_faces else np.abs(dot) - vertices_weight = vertices_weight * dot ** wn + vertices_weight = vertices_weight * dot**wn # Account for vertices' weight AEc.data *= vertices_weight[non_markers_mask][AEc.indices] @@ -1093,10 +1090,6 @@ def _compute_vertex_normals(vertices, faces): result = transformed_vertices[:nV] result = _denormalize_by_source( - source_mesh, - target_geometry, - target_positions, - result, - centroid, - scale) + source_mesh, target_geometry, target_positions, result, centroid, scale + ) return result diff --git a/trimesh/remesh.py b/trimesh/remesh.py index 3b2cde28a..3c3632b28 100644 --- a/trimesh/remesh.py +++ b/trimesh/remesh.py @@ -11,11 +11,9 @@ from .geometry import faces_to_edges -def subdivide(vertices, - faces, - face_index=None, - vertex_attributes=None, - return_index=False): +def subdivide( + vertices, faces, face_index=None, vertex_attributes=None, return_index=False +): """ Subdivide a mesh into smaller triangles. 
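
The `subdivide` hunks only reflow arguments, so a short smoke test is enough to confirm the documented contract still holds; a minimal sketch:

    import trimesh
    from trimesh.remesh import subdivide

    box = trimesh.creation.box()
    vertices, faces = subdivide(box.vertices, box.faces)

    # with face_index=None every triangle is split into four
    assert len(faces) == 4 * len(box.faces)
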
@@ -64,18 +62,22 @@ def subdivide(vertices, mid_idx = inverse.reshape((-1, 3)) + len(vertices) # the new faces_subset with correct winding - f = np.column_stack([faces_subset[:, 0], - mid_idx[:, 0], - mid_idx[:, 2], - mid_idx[:, 0], - faces_subset[:, 1], - mid_idx[:, 1], - mid_idx[:, 2], - mid_idx[:, 1], - faces_subset[:, 2], - mid_idx[:, 0], - mid_idx[:, 1], - mid_idx[:, 2]]).reshape((-1, 3)) + f = np.column_stack( + [ + faces_subset[:, 0], + mid_idx[:, 0], + mid_idx[:, 2], + mid_idx[:, 0], + faces_subset[:, 1], + mid_idx[:, 1], + mid_idx[:, 2], + mid_idx[:, 1], + faces_subset[:, 2], + mid_idx[:, 0], + mid_idx[:, 1], + mid_idx[:, 2], + ] + ).reshape((-1, 3)) # add the 3 new faces_subset per old face all on the end # by putting all the new faces after all the old faces @@ -88,14 +90,11 @@ def subdivide(vertices, new_attributes = {} for key, values in vertex_attributes.items(): attr_tris = values[faces_subset] - attr_mid = np.vstack([ - attr_tris[:, g, :].mean(axis=1) - for g in [[0, 1], - [1, 2], - [2, 0]]]) + attr_mid = np.vstack( + [attr_tris[:, g, :].mean(axis=1) for g in [[0, 1], [1, 2], [2, 0]]] + ) attr_mid = attr_mid[unique] - new_attributes[key] = np.vstack(( - values, attr_mid)) + new_attributes[key] = np.vstack((values, attr_mid)) return new_vertices, new_faces, new_attributes if return_index: @@ -105,8 +104,7 @@ def subdivide(vertices, # but we've removed all the faces in face_mask start = len(faces) - len(nonzero) # indexes are just offset from start - stack = np.arange( - start, start + len(f) * 4).reshape((-1, 4)) + stack = np.arange(start, start + len(f) * 4).reshape((-1, 4)) # reformat into a slightly silly dict for some reason index_dict = dict(zip(nonzero, stack)) @@ -115,11 +113,7 @@ def subdivide(vertices, return new_vertices, new_faces -def subdivide_to_size(vertices, - faces, - max_edge, - max_iter=10, - return_index=False): +def subdivide_to_size(vertices, faces, max_edge, max_iter=10, return_index=False): """ Subdivide a mesh until every edge is shorter than a specified length. 
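
For `subdivide_to_size` the property worth spot-checking after the reflow is the max-edge guarantee itself; a minimal sketch (the small tolerance is an assumption to absorb float noise):

    import numpy as np
    import trimesh
    from trimesh.remesh import subdivide_to_size

    box = trimesh.creation.box(extents=(1, 1, 1))
    vertices, faces = subdivide_to_size(box.vertices, box.faces, max_edge=0.25)

    # length of every edge of every resulting triangle
    lengths = np.linalg.norm(
        np.diff(vertices[faces[:, [0, 1, 2, 0]]], axis=1), axis=2)
    assert (lengths < 0.25 + 1e-8).all()
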
@@ -155,10 +149,8 @@ def subdivide_to_size(vertices, done_idx = [] # copy inputs and make sure dtype is correct - current_faces = np.array( - faces, dtype=np.int64, copy=True) - current_vertices = np.array( - vertices, dtype=np.float64, copy=True) + current_faces = np.array(faces, dtype=np.int64, copy=True) + current_vertices = np.array(vertices, dtype=np.float64, copy=True) # store a map to the original face index current_index = np.arange(len(faces)) @@ -166,9 +158,9 @@ def subdivide_to_size(vertices, # loop through iteration cap for i in range(max_iter + 1): # compute the length of every triangle edge - edge_length = (np.diff( - current_vertices[current_faces[:, [0, 1, 2, 0]], :3], - axis=1) ** 2).sum(axis=2) ** 0.5 + edge_length = ( + np.diff(current_vertices[current_faces[:, [0, 1, 2, 0]], :3], axis=1) ** 2 + ).sum(axis=2) ** 0.5 # check edge length against maximum too_long = (edge_length > max_edge).any(axis=1) # faces that are OK @@ -177,8 +169,8 @@ def subdivide_to_size(vertices, # clean up the faces a little bit so we don't # store a ton of unused vertices unique, inverse = grouping.unique_bincount( - current_faces[face_ok].flatten(), - return_inverse=True) + current_faces[face_ok].flatten(), return_inverse=True + ) # store vertices and faces meeting criteria done_vert.append(current_vertices[unique]) @@ -186,8 +178,7 @@ def subdivide_to_size(vertices, if return_index: done_idx.append(current_index[face_ok]) - current_index = np.tile(current_index[too_long], - (4, 1)).T.ravel() + current_index = np.tile(current_index[too_long], (4, 1)).T.ravel() # met our goals so exit if not too_long.any(): @@ -195,16 +186,15 @@ def subdivide_to_size(vertices, # check max_iter before subdividing again if i >= max_iter: - raise ValueError('max_iter exceeded!') + raise ValueError("max_iter exceeded!") # run subdivision again - (current_vertices, - current_faces) = subdivide(current_vertices, - current_faces[too_long]) + (current_vertices, current_faces) = subdivide( + current_vertices, current_faces[too_long] + ) # stack sequence into nice (n, 3) arrays - final_vertices, final_faces = util.append_faces( - done_vert, done_face) + final_vertices, final_faces = util.append_faces(done_vert, done_face) if return_index: final_index = np.concatenate(done_idx) @@ -214,9 +204,7 @@ def subdivide_to_size(vertices, return final_vertices, final_faces -def subdivide_loop(vertices, - faces, - iterations=None): +def subdivide_loop(vertices, faces, iterations=None): """ Subdivide a mesh by dividing each triangle into four triangles and approximating their smoothed surface (loop subdivision). @@ -274,18 +262,13 @@ def subdivide_loop(vertices, def _subdivide(vertices, faces): # find the unique edges of our faces - edges, edges_face = faces_to_edges( - faces, return_index=True) + edges, edges_face = faces_to_edges(faces, return_index=True) edges.sort(axis=1) unique, inverse = grouping.unique_rows(edges) # set interior edges if there are two edges and boundary if there is # one. - edge_inter = np.sort( - grouping.group_rows( - edges, - require_count=2), - axis=1) + edge_inter = np.sort(grouping.group_rows(edges, require_count=2), axis=1) edge_bound = grouping.group_rows(edges, require_count=1) # make sure that one edge is shared by only one or two faces. 
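        # `edge_inter` holds index pairs of edges shared by exactly two
        # faces and `edge_bound` holds edges that appear only once; on a
        # manifold triangle mesh every edge lands in exactly one of the
        # two groups, which is what the count below verifies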
if not len(edge_inter) * 2 + len(edge_bound) == len(edges): @@ -293,11 +276,10 @@ def _subdivide(vertices, faces): # edges shared by 2 faces are "connected" # so this connected components operation is # essentially identical to `face_adjacency` - faces_group = graph.connected_components( - edges_face[edge_inter]) + faces_group = graph.connected_components(edges_face[edge_inter]) if len(faces_group) == 1: - raise ValueError('Some edges are shared by more than 2 faces') + raise ValueError("Some edges are shared by more than 2 faces") # collect a subdivided copy of each body seq_verts = [] @@ -312,12 +294,13 @@ def _subdivide(vertices, faces): # want to pass forward the referenced vertices # for this particular group of connected faces unique, inverse = grouping.unique_bincount( - faces[f].reshape(-1), return_inverse=True) + faces[f].reshape(-1), return_inverse=True + ) # subdivide this subset of faces cur_verts, cur_faces = _subdivide( - vertices=vertices[unique], - faces=inverse.reshape((-1, 3))) + vertices=vertices[unique], faces=inverse.reshape((-1, 3)) + ) # increment the face references to match # the vertices when we stack them later @@ -360,13 +343,10 @@ def _subdivide(vertices, faces): # simplified from: # # 3 / 8 * (e_v0 + e_v1) + 1 / 8 * (e_v2 + e_v3) - odd[edge_inter_mask] = 0.375 * e_v0 + \ - 0.375 * e_v1 + e_v2 / 8.0 + e_v3 / 8.0 + odd[edge_inter_mask] = 0.375 * e_v0 + 0.375 * e_v1 + e_v2 / 8.0 + e_v3 / 8.0 # find vertex neighbors of each vertex - neighbors = graph.neighbors( - edges=edges[unique], - max_index=len(vertices)) + neighbors = graph.neighbors(edges=edges[unique], max_index=len(vertices)) # convert list type of array into a fixed-shaped numpy array (set -1 to # empties) neighbors = np.array(list(zip_longest(*neighbors, fillvalue=-1))).T @@ -382,9 +362,11 @@ def _subdivide(vertices, faces): # beta = 1 / k * (5 / 8 - (3 / 8 + 1 / 4 * np.cos(2 * np.pi / k)) ** 2) # simplified with sympy.parse_expr('...').simplify() - beta = (40.0 - (2.0 * np.cos(2 * np.pi / k) + 3)**2) / (64 * k) - even = beta[:, None] * vertices_[neighbors].sum(1) \ + beta = (40.0 - (2.0 * np.cos(2 * np.pi / k) + 3) ** 2) / (64 * k) + even = ( + beta[:, None] * vertices_[neighbors].sum(1) + (1 - k[:, None] * beta[:, None]) * vertices + ) # calculate even vertices for the boundary case if edge_bound_mask.any(): @@ -396,24 +378,29 @@ def _subdivide(vertices, faces): boundary_neighbors = neighbors[vrt_bound_mask] boundary_neighbors[~vrt_bound_mask[neighbors[vrt_bound_mask]]] = -1 - even[vrt_bound_mask] = (vertices_[boundary_neighbors].sum(axis=1) / 8.0 + - (3.0 / 4.0) * vertices[vrt_bound_mask]) + even[vrt_bound_mask] = ( + vertices_[boundary_neighbors].sum(axis=1) / 8.0 + + (3.0 / 4.0) * vertices[vrt_bound_mask] + ) # the new faces with odd vertices odd_idx = inverse.reshape((-1, 3)) + len(vertices) - new_faces = np.column_stack([ - faces[:, 0], - odd_idx[:, 0], - odd_idx[:, 2], - odd_idx[:, 0], - faces[:, 1], - odd_idx[:, 1], - odd_idx[:, 2], - odd_idx[:, 1], - faces[:, 2], - odd_idx[:, 0], - odd_idx[:, 1], - odd_idx[:, 2]]).reshape((-1, 3)) + new_faces = np.column_stack( + [ + faces[:, 0], + odd_idx[:, 0], + odd_idx[:, 2], + odd_idx[:, 0], + faces[:, 1], + odd_idx[:, 1], + odd_idx[:, 2], + odd_idx[:, 1], + faces[:, 2], + odd_idx[:, 0], + odd_idx[:, 1], + odd_idx[:, 2], + ] + ).reshape((-1, 3)) # stack the new even vertices and odd vertices new_vertices = np.vstack((even, odd)) diff --git a/trimesh/rendering.py b/trimesh/rendering.py index 155a7b49c..8aea52f97 100644 --- a/trimesh/rendering.py +++ 
b/trimesh/rendering.py @@ -30,33 +30,26 @@ def convert_to_vertexlist(geometry, **kwargs): Args to be passed to pyglet indexed vertex list constructor. """ - if util.is_instance_named(geometry, 'Trimesh'): + if util.is_instance_named(geometry, "Trimesh"): return mesh_to_vertexlist(geometry, **kwargs) - elif util.is_instance_named(geometry, 'Path'): + elif util.is_instance_named(geometry, "Path"): # works for Path3D and Path2D # both of which inherit from Path - return path_to_vertexlist( - geometry, **kwargs) - elif util.is_instance_named(geometry, 'PointCloud'): + return path_to_vertexlist(geometry, **kwargs) + elif util.is_instance_named(geometry, "PointCloud"): # pointcloud objects contain colors - return points_to_vertexlist(geometry.vertices, - colors=geometry.colors, - **kwargs) - elif util.is_instance_named(geometry, 'ndarray'): + return points_to_vertexlist(geometry.vertices, colors=geometry.colors, **kwargs) + elif util.is_instance_named(geometry, "ndarray"): # (n,2) or (n,3) points return points_to_vertexlist(geometry, **kwargs) - elif util.is_instance_named(geometry, 'VoxelGrid'): + elif util.is_instance_named(geometry, "VoxelGrid"): # for voxels view them as a bunch of boxes - return mesh_to_vertexlist(geometry.as_boxes(**kwargs), - **kwargs) + return mesh_to_vertexlist(geometry.as_boxes(**kwargs), **kwargs) else: - raise ValueError('Geometry passed is not a viewable type!') + raise ValueError("Geometry passed is not a viewable type!") -def mesh_to_vertexlist(mesh, - group=None, - smooth=True, - smooth_threshold=60000): +def mesh_to_vertexlist(mesh, group=None, smooth=True, smooth_threshold=60000): """ Convert a Trimesh object to arguments for an indexed vertex list constructor. @@ -78,7 +71,7 @@ def mesh_to_vertexlist(mesh, Args for vertex list constructor """ - if hasattr(mesh.visual, 'uv'): + if hasattr(mesh.visual, "uv"): # if the mesh has texture defined pass it to pyglet vertex_count = len(mesh.vertices) normals = mesh.vertex_normals.reshape(-1).tolist() @@ -90,10 +83,10 @@ def mesh_to_vertexlist(mesh, # shortcut for the material material = mesh.visual.material - if hasattr(material, 'image'): + if hasattr(material, "image"): # does the material actually have an image specified no_image = material.image is None - elif hasattr(material, 'baseColorTexture'): + elif hasattr(material, "baseColorTexture"): no_image = material.baseColorTexture is None else: no_image = True @@ -102,15 +95,13 @@ def mesh_to_vertexlist(mesh, if uv is None or no_image or len(uv) != vertex_count: # if no UV coordinates on material, just set face colors # to the diffuse color of the material - color_gl = colors_to_gl( - material.main_color, vertex_count) + color_gl = colors_to_gl(material.main_color, vertex_count) else: # if someone passed (n, 3) UVR cut it off here if uv.shape[1] > 2: uv = uv[:, :2] # texcoord as (2,) float - color_gl = ('t2f/static', - uv.astype(np.float64).reshape(-1).tolist()) + color_gl = ("t2f/static", uv.astype(np.float64).reshape(-1).tolist()) elif smooth and len(mesh.faces) < smooth_threshold: # if we have a small number of faces and colors defined @@ -121,29 +112,28 @@ def mesh_to_vertexlist(mesh, normals = mesh.vertex_normals.reshape(-1).tolist() faces = mesh.faces.reshape(-1).tolist() vertices = mesh.vertices.reshape(-1).tolist() - color_gl = colors_to_gl(mesh.visual.vertex_colors, - vertex_count) + color_gl = colors_to_gl(mesh.visual.vertex_colors, vertex_count) else: # we don't have textures or want to smooth so # send a polygon soup of disconnected triangles to opengl 
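        # every face gets three dedicated vertices in this path, so
        # normals and colors are tiled per-corner rather than averaged
        # at shared vertices, producing a faceted flat-shaded result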
vertex_count = len(mesh.triangles) * 3 - normals = np.tile(mesh.face_normals, - (1, 3)).reshape(-1).tolist() + normals = np.tile(mesh.face_normals, (1, 3)).reshape(-1).tolist() vertices = mesh.triangles.reshape(-1).tolist() faces = np.arange(vertex_count).tolist() - colors = np.tile(mesh.visual.face_colors, - (1, 3)).reshape((-1, 4)) + colors = np.tile(mesh.visual.face_colors, (1, 3)).reshape((-1, 4)) color_gl = colors_to_gl(colors, vertex_count) # create the ordered tuple for pyglet, use like: # `batch.add_indexed(*args)` - args = (vertex_count, # number of vertices - GL_TRIANGLES, # mode - group, # group - faces, # indices - ('v3f/static', vertices), - ('n3f/static', normals), - color_gl) + args = ( + vertex_count, # number of vertices + GL_TRIANGLES, # mode + group, # group + faces, # indices + ("v3f/static", vertices), + ("n3f/static", normals), + color_gl, + ) return args @@ -168,8 +158,7 @@ def path_to_vertexlist(path, group=None, **kwargs): vertices = path.vertices # get (n, 2, (2|3)) lines - stacked = [util.stack_lines(e.discrete(vertices)) - for e in path.entities] + stacked = [util.stack_lines(e.discrete(vertices)) for e in path.entities] lines = util.vstack_empty(stacked) count = len(lines) @@ -184,25 +173,27 @@ def path_to_vertexlist(path, group=None, **kwargs): colors = path.colors if colors is not None: colors = np.vstack( - [(np.ones((len(s), 4)) * c).astype(np.uint8) - for s, c in zip(stacked, path.colors)]) + [ + (np.ones((len(s), 4)) * c).astype(np.uint8) + for s, c in zip(stacked, path.colors) + ] + ) # convert to gl-friendly colors gl_colors = colors_to_gl(colors, count=count) # collect args for vertexlist constructor - args = (count, # number of lines - GL_LINES, # mode - group, # group - index, # indices - ('v3f/static', lines.reshape(-1)), - gl_colors) + args = ( + count, # number of lines + GL_LINES, # mode + group, # group + index, # indices + ("v3f/static", lines.reshape(-1)), + gl_colors, + ) return args -def points_to_vertexlist(points, - colors=None, - group=None, - **kwargs): +def points_to_vertexlist(points, colors=None, group=None, **kwargs): """ Convert a numpy array of 3D points to args for a vertex list constructor. 
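
These helpers build argument tuples for pyglet's legacy batched-geometry API, as the `'v3f/static'` format strings suggest; a minimal sketch of the intended call pattern, assuming pyglet 1.x is installed:

    import pyglet
    import trimesh
    from trimesh.rendering import mesh_to_vertexlist

    batch = pyglet.graphics.Batch()
    args = mesh_to_vertexlist(trimesh.creation.box())

    # consume the ordered tuple the way the module comment suggests
    vertex_list = batch.add_indexed(*args)
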
@@ -226,16 +217,18 @@ def points_to_vertexlist(points, if util.is_shape(points, (-1, 2)): points = np.column_stack((points, np.zeros(len(points)))) elif not util.is_shape(points, (-1, 3)): - raise ValueError('Pointcloud must be (n,3)!') + raise ValueError("Pointcloud must be (n,3)!") index = np.arange(len(points)).tolist() - args = (len(points), # number of vertices - GL_POINTS, # mode - group, # group - index, # indices - ('v3f/static', points.reshape(-1)), - colors_to_gl(colors, len(points))) + args = ( + len(points), # number of vertices + GL_POINTS, # mode + group, # group + index, # indices + ("v3f/static", points.reshape(-1)), + colors_to_gl(colors, len(points)), + ) return args @@ -260,9 +253,7 @@ def colors_to_gl(colors, count): colors = np.asanyarray(colors) count = int(count) # get the GL kind of color we have - colors_dtypes = {'f': 'f', - 'i': 'B', - 'u': 'B'} + colors_dtypes = {"f": "f", "i": "B", "u": "B"} if colors.dtype.kind in colors_dtypes: dtype = colors_dtypes[colors.dtype.kind] @@ -271,22 +262,24 @@ def colors_to_gl(colors, count): if dtype is not None and util.is_shape(colors, (count, (3, 4))): # save the shape and dtype for opengl color string - colors_type = f'c{colors.shape[1]}{dtype}/static' + colors_type = f"c{colors.shape[1]}{dtype}/static" # reshape the 2D array into a 1D one and then convert to a python list gl_colors = colors.reshape(-1).tolist() elif dtype is not None and colors.shape in [(3,), (4,)]: # we've been passed a single color so tile them - gl_colors = (np.ones((count, colors.size), - dtype=colors.dtype) * colors).reshape(-1).tolist() + gl_colors = ( + (np.ones((count, colors.size), dtype=colors.dtype) * colors) + .reshape(-1) + .tolist() + ) # we know we're tiling - colors_type = f'c{colors.size}{dtype}/static' + colors_type = f"c{colors.size}{dtype}/static" else: # case where colors are wrong shape # use black as the default color - gl_colors = np.tile([0.0, 0.0, 0.0], - (count, 1)).reshape(-1).tolist() + gl_colors = np.tile([0.0, 0.0, 0.0], (count, 1)).reshape(-1).tolist() # we're returning RGB float colors - colors_type = 'c3f/static' + colors_type = "c3f/static" return colors_type, gl_colors @@ -312,9 +305,9 @@ def material_to_texture(material, upsize=True): import pyglet # try to extract a PIL image from material - if hasattr(material, 'image'): + if hasattr(material, "image"): img = material.image - elif hasattr(material, 'baseColorTexture'): + elif hasattr(material, "baseColorTexture"): img = material.baseColorTexture else: return None @@ -326,16 +319,17 @@ def material_to_texture(material, upsize=True): # if we're not powers of two upsize if upsize: from .visual.texture import power_resize + img = power_resize(img) # use a PNG export to exchange into pyglet # probably a way to do this with a PIL converter with util.BytesIO() as f: # export PIL image as PNG - img.save(f, format='png') + img.save(f, format="png") f.seek(0) # filename used for format guess - gl_image = pyglet.image.load(filename='.png', file=f) + gl_image = pyglet.image.load(filename=".png", file=f) # turn image into pyglet texture texture = gl_image.get_texture() @@ -361,8 +355,7 @@ def matrix_to_gl(matrix): from pyglet import gl # convert to GLfloat, switch to column major and flatten to (16,) - return (gl.GLfloat * 16)(*np.array( - matrix, dtype=np.float32).T.ravel()) + return (gl.GLfloat * 16)(*np.array(matrix, dtype=np.float32).T.ravel()) def vector_to_gl(array, *args): @@ -409,8 +402,10 @@ def light_to_gl(light, transform, lightN): gl_position = 
vector_to_gl(transform[:3, 3]) # create the different position and color arguments - args = [(lightN, gl.GL_POSITION, gl_position), - (lightN, gl.GL_SPECULAR, gl_color), - (lightN, gl.GL_DIFFUSE, gl_color), - (lightN, gl.GL_AMBIENT, gl_color)] + args = [ + (lightN, gl.GL_POSITION, gl_position), + (lightN, gl.GL_SPECULAR, gl_color), + (lightN, gl.GL_DIFFUSE, gl_color), + (lightN, gl.GL_AMBIENT, gl_color), + ] return args diff --git a/trimesh/repair.py b/trimesh/repair.py index a9fd7f70e..d1c8af4c8 100644 --- a/trimesh/repair.py +++ b/trimesh/repair.py @@ -18,12 +18,14 @@ # create a dummy module which will raise the ImportError # or other exception only when someone tries to use networkx from .exceptions import ExceptionWrapper + nx = ExceptionWrapper(E) try: from .path.exchange.misc import faces_to_path except BaseException as E: from .exceptions import ExceptionWrapper + faces_to_path = ExceptionWrapper(E) @@ -68,8 +70,7 @@ def fix_winding(mesh): pair = faces[face_pair] # (6, 2) int edges = faces_to_edges(pair) - overlap = group_rows(np.sort(edges, axis=1), - require_count=2) + overlap = group_rows(np.sort(edges, axis=1), require_count=2) if len(overlap) == 0: # only happens on non-watertight meshes continue @@ -82,7 +83,7 @@ def fix_winding(mesh): if flipped > 0: mesh.faces = faces - log.debug('flipped %d/%d edges', flipped, len(mesh.faces) * 3) + log.debug("flipped %d/%d edges", flipped, len(mesh.faces) * 3) def fix_inversion(mesh, multibody=False): @@ -97,8 +98,7 @@ def fix_inversion(mesh, multibody=False): If True will try to fix normals on every body """ if multibody: - groups = graph.connected_components( - mesh.face_adjacency) + groups = graph.connected_components(mesh.face_adjacency) # escape early for single body if len(groups) == 1: if mesh.volume < 0.0: @@ -113,9 +113,8 @@ def fix_inversion(mesh, multibody=False): for faces in groups: # calculate the volume of the submesh faces volume = triangles.mass_properties( - tri[faces], - crosses=cross[faces], - skip_inertia=True)['volume'] + tri[faces], crosses=cross[faces], skip_inertia=True + )["volume"] # if that volume is negative it is either # inverted or just total garbage if volume < 0.0: @@ -123,7 +122,7 @@ def fix_inversion(mesh, multibody=False): # one or more faces needs flipping if flip.any(): # flip normals of necessary faces - if 'face_normals' in mesh._cache: + if "face_normals" in mesh._cache: normals = mesh.face_normals.copy() normals[flip] *= -1.0 else: @@ -182,8 +181,7 @@ def broken_faces(mesh, color=None): Indexes of mesh.faces """ adjacency = nx.from_edgelist(mesh.face_adjacency) - broken = [k for k, v in dict(adjacency.degree()).items() - if v != 3] + broken = [k for k, v in dict(adjacency.degree()).items() if v != 3] broken = np.array(broken) if color is not None and broken.size != 0: # if someone passed a broken color @@ -244,8 +242,7 @@ def hole_to_faces(hole): # we know that in a watertight mesh every edge will be included twice # thus every edge which appears only once is part of a hole boundary - boundary_groups = group_rows( - mesh.edges_sorted, require_count=1) + boundary_groups = group_rows(mesh.edges_sorted, require_count=1) # mesh is not watertight and we have too few edges # edges to do a repair @@ -254,12 +251,10 @@ def hole_to_faces(hole): return False boundary_edges = mesh.edges[boundary_groups] - index_as_dict = [{'index': i} for i in boundary_groups] + index_as_dict = [{"index": i} for i in boundary_groups] # we create a graph of the boundary edges, and find cycles. 
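    # each graph edge carries the index of the boundary edge it came
    # from, so cycles found by networkx trace complete hole perimeters
    # and can be mapped back to `mesh.edges` for the winding check below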
- g = nx.from_edgelist( - np.column_stack((boundary_edges, - index_as_dict))) + g = nx.from_edgelist(np.column_stack((boundary_edges, index_as_dict))) new_faces = [] new_vertex = [] for hole in nx.cycle_basis(g): @@ -287,7 +282,7 @@ def hole_to_faces(hole): # we compare the edge from the new face with # the boundary edge from the source mesh edge_test = face[:2] - edge_boundary = mesh.edges[g.get_edge_data(*edge_test)['index']] + edge_boundary = mesh.edges[g.get_edge_data(*edge_test)["index"]] # in a well constructed mesh, the winding is such that adjacent triangles # have reversed edges to each other. Here we check to make sure the @@ -303,8 +298,8 @@ def hole_to_faces(hole): new_vertices = mesh.vertices # try to save face normals if we can - if 'face_normals' in mesh._cache.cache: - cached_normals = mesh._cache.cache['face_normals'] + if "face_normals" in mesh._cache.cache: + cached_normals = mesh._cache.cache["face_normals"] else: cached_normals = None @@ -318,13 +313,13 @@ def hole_to_faces(hole): # over tol.merge apart, but the normal calculation is screwed up # these could be fixed by merging the vertices in question here: # if not valid.all(): - if mesh.visual.defined and mesh.visual.kind == 'face': + if mesh.visual.defined and mesh.visual.kind == "face": color = mesh.visual.face_colors else: color = None # apply the new faces and vertices - mesh.faces = np.vstack((mesh._data['faces'], new_faces[valid])) + mesh.faces = np.vstack((mesh._data["faces"], new_faces[valid])) mesh.vertices = new_vertices # dump the cache and set id to the new hash @@ -332,8 +327,7 @@ def hole_to_faces(hole): # save us a normals recompute if we can if cached_normals is not None: - mesh.face_normals = np.vstack((cached_normals, - new_normals)) + mesh.face_normals = np.vstack((cached_normals, new_normals)) # this is usually the case where two vertices of a triangle are just # over tol.merge apart, but the normal calculation is screwed up @@ -346,11 +340,10 @@ def hole_to_faces(hole): color_shape = np.shape(color) if len(color_shape) == 2: new_colors = np.tile(color[-1], (np.sum(valid), 1)) - new_colors = np.vstack((color, - new_colors)) + new_colors = np.vstack((color, new_colors)) mesh.visual.face_colors = new_colors - log.debug('Filled in mesh with %i triangles', np.sum(valid)) + log.debug("Filled in mesh with %i triangles", np.sum(valid)) return mesh.is_watertight @@ -382,10 +375,11 @@ def stitch(mesh, faces=None, insert_vertices=False): # get a sequence of vertex indices representing the # boundary of the specified faces # will be referencing the same indexes of `mesh.vertices` - points = [e.points for e in - faces_to_path(mesh, faces)['entities'] - if len(e.points) > 3 and - e.points[0] == e.points[-1]] + points = [ + e.points + for e in faces_to_path(mesh, faces)["entities"] + if len(e.points) > 3 and e.points[0] == e.points[-1] + ] # get properties to avoid querying in loop vertices = mesh.vertices @@ -397,27 +391,24 @@ def stitch(mesh, faces=None, insert_vertices=False): if insert_vertices: # create one new vertex per curve at the centroid - centroids = np.array([vertices[p].mean(axis=0) - for p in points]) + centroids = np.array([vertices[p].mean(axis=0) for p in points]) # save the original length of the vertices count = len(vertices) # for the normal check stack our local vertices vertices = np.vstack((vertices, centroids)) # create a triangle between our new centroid vertex # and each one of the boundary curves - fan = [np.column_stack(( - np.ones(len(p) - 1, dtype=int) * (count + i), - p[:-1], - 
p[1:])) - for i, p in enumerate(points)] + fan = [ + np.column_stack((np.ones(len(p) - 1, dtype=int) * (count + i), p[:-1], p[1:])) + for i, p in enumerate(points) + ] else: # since we're not allowed to insert new vertices # create a triangle fan for each boundary curve - fan = [np.column_stack(( - np.ones(len(p) - 3, dtype=int) * p[0], - p[1:-2], - p[2:-1])) - for p in points] + fan = [ + np.column_stack((np.ones(len(p) - 3, dtype=int) * p[0], p[1:-2], p[2:-1])) + for p in points + ] # now we do a normal check against an adjacent face # to see if each region needs to be flipped diff --git a/trimesh/resolvers.py b/trimesh/resolvers.py index 982469a59..e2395f3dd 100644 --- a/trimesh/resolvers.py +++ b/trimesh/resolvers.py @@ -29,7 +29,7 @@ class Resolver(util.ABC): @abc.abstractmethod def __init__(self, *args, **kwargs): - raise NotImplementedError('Use a resolver subclass!') + raise NotImplementedError("Use a resolver subclass!") @abc.abstractmethod def get(self, key): @@ -37,11 +37,11 @@ def get(self, key): @abc.abstractmethod def write(self, name, data): - raise NotImplementedError('`write` not implemented!') + raise NotImplementedError("`write` not implemented!") @abc.abstractmethod def namespaced(self, namespace): - raise NotImplementedError('`namespaced` not implemented!') + raise NotImplementedError("`namespaced` not implemented!") def __getitem__(self, key): return self.get(key) @@ -68,8 +68,7 @@ def __init__(self, source): File path where mesh was loaded from """ # remove everything other than absolute path - clean = os.path.expanduser( - os.path.abspath(str(source))) + clean = os.path.expanduser(os.path.abspath(str(source))) self.clean = clean if os.path.isdir(clean): @@ -82,8 +81,7 @@ def __init__(self, source): # exit if directory doesn't exist if not os.path.isdir(self.parent): - raise ValueError( - f'path `{self.parent} `not a directory!') + raise ValueError(f"path `{self.parent} `not a directory!") def keys(self): """ @@ -98,7 +96,7 @@ def keys(self): for path, _, names in os.walk(self.parent): # strip any leading parent key if path.startswith(parent): - path = path[len(parent):] + path = path[len(parent) :] # yield each name for name in names: yield os.path.join(path, name) @@ -118,8 +116,7 @@ def namespaced(self, namespace): resolver : FilePathResolver Resolver with root directory changed. 
""" - return FilePathResolver(os.path.join( - self.parent, namespace)) + return FilePathResolver(os.path.join(self.parent, namespace)) def get(self, name): """ @@ -138,9 +135,8 @@ def get(self, name): # load the file by path name path = os.path.join(self.parent, name.strip()) if not os.path.exists(path): - path = os.path.join( - self.parent, os.path.split(name)[-1]) - with open(path, 'rb') as f: + path = os.path.join(self.parent, os.path.split(name)[-1]) + with open(path, "rb") as f: data = f.read() return data @@ -156,7 +152,7 @@ def write(self, name, data): Data to write to the file """ # write files to path name - with open(os.path.join(self.parent, name.strip()), 'wb') as f: + with open(os.path.join(self.parent, name.strip()), "wb") as f: # handle encodings correctly for str/bytes util.write_encoded(file_obj=f, stuff=data) @@ -182,7 +178,7 @@ def __init__(self, archive=None, namespace=None): """ self.archive = archive if isinstance(namespace, str): - self.namespace = namespace.strip().rstrip('/') + '/' + self.namespace = namespace.strip().rstrip("/") + "/" else: self.namespace = None @@ -201,9 +197,11 @@ def keys(self): # only return keys that start with the namespace # and strip off the namespace from the returned # keys. - return [k[length:] for k in self.archive.keys() - if k.startswith(namespace) - and len(k) > length] + return [ + k[length:] + for k in self.archive.keys() + if k.startswith(namespace) and len(k) > length + ] return self.archive.keys() def write(self, key, value): @@ -239,8 +237,8 @@ def get(self, name): if name is None: return # make sure name is a string - if hasattr(name, 'decode'): - name = name.decode('utf-8') + if hasattr(name, "decode"): + name = name.decode("utf-8") # store reference to archive inside this function archive = self.archive # requested name not identical in @@ -283,8 +281,7 @@ def namespaced(self, namespace): resolver : Resolver Namespaced resolver. """ - return ZipResolver(archive=self.archive, - namespace=namespace) + return ZipResolver(archive=self.archive, namespace=namespace) def export(self): """ @@ -314,35 +311,38 @@ def __init__(self, url): Location where a mesh was stored or directory where mesh was stored """ - if hasattr(url, 'decode'): - url = url.decode('utf-8') + if hasattr(url, "decode"): + url = url.decode("utf-8") # parse string into namedtuple parsed = urlparse(url) # we want a base url - split = [i for i in parsed.path.split('/') - if len(i) > 0] + split = [i for i in parsed.path.split("/") if len(i) > 0] # if the last item in the url path is a filename # move up a "directory" for the base path if len(split) == 0: - path = '' - elif '.' in split[-1]: + path = "" + elif "." 
in split[-1]: # clip off last item - path = '/'.join(split[:-1]) + path = "/".join(split[:-1]) else: # recombine into string ignoring any double slashes - path = '/'.join(split) - self.base_url = '/'.join(i for i in [ - parsed.scheme + ':/', - parsed.netloc.strip('/'), - path.strip('/')] if len(i) > 0) + '/' + path = "/".join(split) + self.base_url = ( + "/".join( + i + for i in [parsed.scheme + ":/", parsed.netloc.strip("/"), path.strip("/")] + if len(i) > 0 + ) + + "/" + ) # our string handling should have never inserted double slashes - assert '//' not in self.base_url[len(parsed.scheme) + 3:] + assert "//" not in self.base_url[len(parsed.scheme) + 3 :] # we should always have ended with a single slash - assert self.base_url.endswith('/') + assert self.base_url.endswith("/") def get(self, name): """ @@ -367,11 +367,11 @@ def get(self, name): if response.status_code != 200: # try to strip off filesystem crap - if name.startswith('./'): + if name.startswith("./"): name = name[2:] response = requests.get(self.base_url + name) - if response.status_code == '404': + if response.status_code == "404": raise ValueError(response.content) # return the bytes of the response @@ -399,11 +399,7 @@ def write(self, key, value): class GithubResolver(Resolver): - def __init__(self, - repo, - branch=None, - commit=None, - save=None): + def __init__(self, repo, branch=None, commit=None, save=None): """ Get files from a remote Github repository by downloading a zip file with the entire branch @@ -423,14 +419,13 @@ def __init__(self, # the github URL for the latest commit of a branch. if commit is None: self.url = ( - 'https://github.com/{repo}/archive/' + - 'refs/heads/{branch}.zip').format( - repo=repo, branch=branch) + "https://github.com/{repo}/archive/" + "refs/heads/{branch}.zip" + ).format(repo=repo, branch=branch) else: # get a commit URL - self.url = ('https://github.com/{repo}/archive/' + - '{commit}.zip').format( - repo=repo, commit=commit) + self.url = ("https://github.com/{repo}/archive/" + "{commit}.zip").format( + repo=repo, commit=commit + ) if save is not None: self.cache = caching.DiskCache(save) @@ -449,7 +444,7 @@ def keys(self): return self.zipped.keys() def write(self, name, data): - raise NotImplementedError('`write` not implemented!') + raise NotImplementedError("`write` not implemented!") @property def zipped(self): @@ -459,23 +454,26 @@ def zipped(self): - locally saved zip file - retrieve zip file and saved """ + def fetch(): """ Fetch the remote zip file. """ import requests + response = requests.get(self.url) if not response.ok: raise ValueError(response.content) return response.content - if hasattr(self, '_zip'): + if hasattr(self, "_zip"): return self._zip # download the archive or get from disc raw = self.cache.get(self.url, fetch) # create a zip resolver for the archive - self._zip = ZipResolver(util.decompress( - util.wrap_as_stream(raw), file_type='zip')) + self._zip = ZipResolver( + util.decompress(util.wrap_as_stream(raw), file_type="zip") + ) return self._zip @@ -516,22 +514,25 @@ def nearby_names(name, namespace=None): Name that is a lightly permutated version of the initial name. 
""" + # the various operations that *might* result in a correct key def trim(prefix, item): if item.startswith(prefix): - return item[len(prefix):] + return item[len(prefix) :] return item - cleaners = [lambda x: x, - lambda x: x.strip(), - lambda x: trim('./', x), - lambda x: trim('.\\', x), - lambda x: trim('\\', x), - lambda x: os.path.split(x)[-1], - lambda x: x.replace('%20', ' ')] + cleaners = [ + lambda x: x, + lambda x: x.strip(), + lambda x: trim("./", x), + lambda x: trim(".\\", x), + lambda x: trim("\\", x), + lambda x: os.path.split(x)[-1], + lambda x: x.replace("%20", " "), + ] if namespace is None: - namespace = '' + namespace = "" # make sure we don't return repeat values hit = set() @@ -558,8 +559,8 @@ def trim(prefix, item): hit.add(current) yield namespace + current - if '..' in name and namespace is not None: + if ".." in name and namespace is not None: # if someone specified relative paths give it one attempt - strip = namespace.strip('/').split('/')[:-name.count('..')] - strip.extend(name.split('..')[-1].strip('/').split('/')) - yield '/'.join(strip) + strip = namespace.strip("/").split("/")[: -name.count("..")] + strip.extend(name.split("..")[-1].strip("/").split("/")) + yield "/".join(strip) diff --git a/trimesh/resources/__init__.py b/trimesh/resources/__init__.py index d01a0a091..1d99c9073 100644 --- a/trimesh/resources/__init__.py +++ b/trimesh/resources/__init__.py @@ -4,8 +4,7 @@ from ..util import decode_text, wrap_as_stream # find the current absolute path to this directory -_pwd = os.path.expanduser(os.path.abspath( - os.path.dirname(__file__))) +_pwd = os.path.expanduser(os.path.abspath(os.path.dirname(__file__))) # once resources are loaded cache them _cache = {} @@ -32,18 +31,15 @@ def get(name, decode=True, decode_json=False, as_stream=False): File data """ # key by name and decode - cache_key = (name, - bool(decode), - bool(decode_json), - bool(as_stream)) + cache_key = (name, bool(decode), bool(decode_json), bool(as_stream)) cached = _cache.get(cache_key) - if hasattr(cached, 'seek'): + if hasattr(cached, "seek"): cached.seek(0) if cached is not None: return cached # get the resource using relative names - with open(os.path.join(_pwd, name), 'rb') as f: + with open(os.path.join(_pwd, name), "rb") as f: resource = f.read() # make sure we return it as a string if asked @@ -78,11 +74,9 @@ def get_schema(name): """ from ..resolvers import FilePathResolver from ..schemas import resolve + # get a resolver for our base path - resolver = FilePathResolver( - os.path.join(_pwd, 'schema', name)) + resolver = FilePathResolver(os.path.join(_pwd, "schema", name)) # recursively load $ref keys - schema = resolve( - json.loads(decode_text(resolver.get(name))), - resolver=resolver) + schema = resolve(json.loads(decode_text(resolver.get(name))), resolver=resolver) return schema diff --git a/trimesh/resources/javascript/compile.py b/trimesh/resources/javascript/compile.py index b5fc690a1..6cc34e554 100644 --- a/trimesh/resources/javascript/compile.py +++ b/trimesh/resources/javascript/compile.py @@ -23,16 +23,15 @@ def minify(path): path: str, path of resource """ - if path.startswith('http'): - data = requests.get(path).content.decode( - 'ascii', errors='ignore') - print('downloaded', path, len(data)) + if path.startswith("http"): + data = requests.get(path).content.decode("ascii", errors="ignore") + print("downloaded", path, len(data)) else: - with open(path, 'rb') as f: + with open(path, "rb") as f: # some upstream JS uses unicode spaces -_- - data = 
f.read().decode('ascii', errors='ignore') + data = f.read().decode("ascii", errors="ignore") # don't re-minify - if '.min.' in path: + if ".min." in path: return data try: @@ -41,51 +40,52 @@ def minify(path): return data -if __name__ == '__main__': +if __name__ == "__main__": # we're going to embed every non-CDN'd file - h = html.parse('viewer.html') + h = html.parse("viewer.html") collection = [] # find all scripts in the document - for s in h.findall('//script'): - if 'src' in s.attrib: - if 'http' in s.attrib['src']: + for s in h.findall("//script"): + if "src" in s.attrib: + if "http" in s.attrib["src"]: # pass # download CDN files and embed continue # leave any remote files alone # get a blob of file - path = s.attrib['src'].strip() - print('minifying:', path) + path = s.attrib["src"].strip() + print("minifying:", path) mini = minify(path) # replace test data in our file - if path == 'load_base64.js': + if path == "load_base64.js": print('replacing test data with "$B64GLTF"') - start = mini.find('base64_data') - end = mini.find(';', start) + start = mini.find("base64_data") + end = mini.find(";", start) # replace test data with a string we can replace # keep in quotes to avoid being minified - mini = mini.replace(mini[start:end], - 'base64_data="$B64GLTF";') + mini = mini.replace(mini[start:end], 'base64_data="$B64GLTF";') collection.append(mini) # remove the script reference s.getparent().remove(s) # a new script element with everything blobbed together - ns = html.Element('script') - ns.text = ''.join(collection) + ns = html.Element("script") + ns.text = "".join(collection) # append the new script element - body = h.find('body') + body = h.find("body") body.append(ns) - result = html.tostring(h, pretty_print=False).decode('utf-8') + result = html.tostring(h, pretty_print=False).decode("utf-8") # result = result.replace('', '').replace('', '') - with open('../viewer.html.template', 'w') as f: + with open("../viewer.html.template", "w") as f: f.write(result) import subprocess - subprocess.check_call(['zip', '-9', '-j', '../viewer.template.zip', - '../viewer.html.template']) - os.remove('../viewer.html.template') + + subprocess.check_call( + ["zip", "-9", "-j", "../viewer.template.zip", "../viewer.html.template"] + ) + os.remove("../viewer.html.template") diff --git a/trimesh/sample.py b/trimesh/sample.py index ca1e1b28c..9a446b74e 100644 --- a/trimesh/sample.py +++ b/trimesh/sample.py @@ -10,7 +10,7 @@ from . 
import transformations, util from .visual import uv_to_interpolated_color -if hasattr(np.random, 'default_rng'): +if hasattr(np.random, "default_rng"): # newer versions of Numpy default_rng = np.random.default_rng else: @@ -80,7 +80,7 @@ def sample_surface(mesh, count, face_weight=None, sample_color=False, seed=None) tri_origins = tri_origins[face_index] tri_vectors = tri_vectors[face_index] - if sample_color and hasattr(mesh.visual, 'uv'): + if sample_color and hasattr(mesh.visual, "uv"): uv_origins = mesh.visual.uv[mesh.faces[:, 0]] uv_vectors = mesh.visual.uv[mesh.faces[:, 1:]].copy() uv_origins_tile = np.tile(uv_origins, (1, 2)).reshape((-1, 2, 2)) @@ -107,7 +107,7 @@ def sample_surface(mesh, count, face_weight=None, sample_color=False, seed=None) samples = sample_vector + tri_origins if sample_color: - if hasattr(mesh.visual, 'uv'): + if hasattr(mesh.visual, "uv"): sample_uv_vector = (uv_vectors * random_lengths).sum(axis=1) uv_samples = sample_uv_vector + uv_origins texture = mesh.visual.material.image @@ -144,9 +144,7 @@ def volume_mesh(mesh, count): return samples -def volume_rectangular(extents, - count, - transform=None): +def volume_rectangular(extents, count, transform=None): """ Return random samples inside a rectangular volume, useful for sampling inside oriented bounding boxes. @@ -165,11 +163,10 @@ def volume_rectangular(extents, samples : (count, 3) float Points in requested volume """ - samples = np.random.random((count, 3)) - .5 + samples = np.random.random((count, 3)) - 0.5 samples *= extents if transform is not None: - samples = transformations.transform_points(samples, - transform) + samples = transformations.transform_points(samples, transform) return samples @@ -218,7 +215,7 @@ def sample_surface_even(mesh, count, radius=None, seed=None): return points[:count], index[mask][:count] # warn if we didn't get all the samples we expect - util.log.warning(f'only got {len(points)}/{count} samples!') + util.log.warning(f"only got {len(points)}/{count} samples!") return points, index[mask] @@ -246,6 +243,5 @@ def sample_surface_sphere(count): theta = np.pi * 2 * u phi = np.arccos((2 * v) - 1) # convert spherical coordinates to cartesian - points = util.spherical_to_vector( - np.column_stack((theta, phi))) + points = util.spherical_to_vector(np.column_stack((theta, phi))) return points diff --git a/trimesh/scene/__init__.py b/trimesh/scene/__init__.py index 1610837b9..2ab1a162a 100644 --- a/trimesh/scene/__init__.py +++ b/trimesh/scene/__init__.py @@ -2,4 +2,4 @@ from .scene import Scene, split_scene # add to __all__ as per pep8 -__all__ = ['Camera', 'Scene', 'split_scene'] +__all__ = ["Camera", "Scene", "split_scene"] diff --git a/trimesh/scene/cameras.py b/trimesh/scene/cameras.py index 5fcd00bf8..d4ecc94b3 100644 --- a/trimesh/scene/cameras.py +++ b/trimesh/scene/cameras.py @@ -6,15 +6,9 @@ class Camera: - def __init__( - self, - name=None, - resolution=None, - focal=None, - fov=None, - z_near=0.01, - z_far=1000.0): + self, name=None, resolution=None, focal=None, fov=None, z_near=0.01, z_far=1000.0 + ): """ Create a new Camera object that stores camera intrinsic and extrinsic parameters. 
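
The `Camera` hunks preserve the constructor contract of a resolution plus either `focal` or `fov`; a minimal sketch of the lazy intrinsics exercised by the reformatted properties:

    from trimesh.scene.cameras import Camera, camera_to_rays

    cam = Camera(resolution=(640, 480), fov=(60.0, 45.0))

    # focal length is derived from FOV and resolution on first access
    print(cam.focal)
    print(cam.K)  # (3, 3) intrinsic matrix

    # one unit ray per pixel plus matching integer pixel coordinates
    vectors, pixels = camera_to_rays(cam)
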
@@ -39,13 +33,13 @@ def __init__( if name is None: # if name is not passed, make it something unique - self.name = f'camera_{util.unique_id(6).upper()}' + self.name = f"camera_{util.unique_id(6).upper()}" else: # otherwise assign it self.name = name if fov is None and focal is None: - raise ValueError('either focal length or FOV required!') + raise ValueError("either focal length or FOV required!") # store whether or not we computed the focal length self._focal_computed = False @@ -74,7 +68,8 @@ def copy(self): name=copy.deepcopy(self.name), resolution=copy.deepcopy(self.resolution), focal=copy.deepcopy(self.focal), - fov=copy.deepcopy(self.fov)) + fov=copy.deepcopy(self.fov), + ) @property def resolution(self): @@ -100,7 +95,7 @@ def resolution(self, values): """ values = np.asanyarray(values, dtype=np.int64) if values.shape != (2,): - raise ValueError('resolution must be (2,) float') + raise ValueError("resolution must be (2,) float") values.flags.writeable = False self._resolution = values # unset computed value that depends on the other plus resolution @@ -122,8 +117,7 @@ def focal(self): """ if self._focal is None: # calculate focal length from FOV - focal = ( - self._resolution / (2.0 * np.tan(np.radians(self._fov / 2.0)))) + focal = self._resolution / (2.0 * np.tan(np.radians(self._fov / 2.0))) focal.flags.writeable = False self._focal = focal @@ -148,7 +142,7 @@ def focal(self, values): self._focal_computed = False values = np.asanyarray(values, dtype=np.float64) if values.shape != (2,): - raise ValueError('focal length must be (2,) float') + raise ValueError("focal length must be (2,) float") values.flags.writeable = False # assign passed values to focal length self._focal = values @@ -177,12 +171,10 @@ def K(self, values): return values = np.asanyarray(values, dtype=np.float64) if values.shape != (3, 3): - raise ValueError('matrix must be (3,3)!') + raise ValueError("matrix must be (3,3)!") - if not np.allclose(values.flatten()[[1, 3, 6, 7, 8]], - [0, 0, 0, 0, 1]): - raise ValueError( - 'matrix should only have focal length and resolution!') + if not np.allclose(values.flatten()[[1, 3, 6, 7, 8]], [0, 0, 0, 0, 1]): + raise ValueError("matrix should only have focal length and resolution!") # set focal length from matrix self.focal = [values[0, 0], values[1, 1]] @@ -200,8 +192,7 @@ def fov(self): XY field of view in degrees """ if self._fov is None: - fov = 2.0 * np.degrees( - np.arctan((self._resolution / 2.0) / self._focal)) + fov = 2.0 * np.degrees(np.arctan((self._resolution / 2.0) / self._focal)) fov.flags.writeable = False self._fov = fov return self._fov @@ -225,7 +216,7 @@ def fov(self, values): self._focal_computed = True values = np.asanyarray(values, dtype=np.float64) if values.shape != (2,): - raise ValueError('fov length must be (2,) int') + raise ValueError("fov length must be (2,) int") values.flags.writeable = False # assign passed values to FOV self._fov = values @@ -278,12 +269,10 @@ def look_at(self, points, **kwargs): transform : (4, 4) float Transformation matrix from world to camera """ - return look_at(points, - fov=self.fov, - **kwargs) + return look_at(points, fov=self.fov, **kwargs) def __repr__(self): - return f' FOV: {self.fov} Resolution: {self.resolution}' + return f" FOV: {self.fov} Resolution: {self.resolution}" def look_at(points, fov, rotation=None, distance=None, center=None, pad=None): @@ -342,8 +331,7 @@ def look_at(points, fov, rotation=None, distance=None, center=None, pad=None): tfov = np.tan(np.radians(fov) / 2.0) if distance is None: - 
distance = np.max(np.abs(points_c[:, :2]) / - tfov + points_c[:, 2][:, np.newaxis]) + distance = np.max(np.abs(points_c[:, :2]) / tfov + points_c[:, 2][:, np.newaxis]) if pad is not None: distance *= pad @@ -401,13 +389,13 @@ def ray_pixel_coords(camera): # create a grid of vectors xy = util.grid_linspace( - bounds=[[left, top], [right, bottom]], - count=camera.resolution) + bounds=[[left, top], [right, bottom]], count=camera.resolution + ) # create a matching array of pixel indexes for the rays pixels = util.grid_linspace( - bounds=[[0, res[1] - 1], [res[0] - 1, 0]], - count=res).astype(np.int64) + bounds=[[0, res[1] - 1], [res[0] - 1, 0]], count=res + ).astype(np.int64) assert xy.shape == pixels.shape return xy, pixels @@ -431,6 +419,5 @@ def camera_to_rays(camera): # get the on-plane coordinates xy, pixels = ray_pixel_coords(camera) # convert vectors to 3D unit vectors - vectors = util.unitize( - np.column_stack((xy, -np.ones_like(xy[:, :1])))) + vectors = util.unitize(np.column_stack((xy, -np.ones_like(xy[:, :1])))) return vectors, pixels diff --git a/trimesh/scene/lighting.py b/trimesh/scene/lighting.py index 27f36367e..90b27352c 100644 --- a/trimesh/scene/lighting.py +++ b/trimesh/scene/lighting.py @@ -38,15 +38,10 @@ class Light(util.ABC): If None, the radius is assumed to be infinite. """ - def __init__(self, - name=None, - color=None, - intensity=None, - radius=None): - + def __init__(self, name=None, color=None, intensity=None, radius=None): if name is None: # if name is not passed, make it something unique - self.name = f'light_{util.unique_id(6).upper()}' + self.name = f"light_{util.unique_id(6).upper()}" else: # otherwise assign it self.name = name @@ -122,17 +117,8 @@ class DirectionalLight(Light): If None, the radius is assumed to be infinite. """ - def __init__(self, - name=None, - color=None, - intensity=None, - radius=None): - super().__init__( - name=name, - color=color, - intensity=intensity, - radius=radius - ) + def __init__(self, name=None, color=None, intensity=None, radius=None): + super().__init__(name=name, color=color, intensity=intensity, radius=radius) class PointLight(Light): @@ -161,17 +147,8 @@ class PointLight(Light): If None, the radius is assumed to be infinite. """ - def __init__(self, - name=None, - color=None, - intensity=None, - radius=None): - super().__init__( - name=name, - color=color, - intensity=intensity, - radius=radius - ) + def __init__(self, name=None, color=None, intensity=None, radius=None): + super().__init__(name=name, color=color, intensity=intensity, radius=radius) class SpotLight(Light): @@ -211,19 +188,16 @@ class SpotLight(Light): Must be greater than `innerConeAngle` and less than or equal to `PI / 2.0`. 
""" - def __init__(self, - name=None, - color=None, - intensity=None, - radius=None, - innerConeAngle=0.0, - outerConeAngle=np.pi / 4.0): - super().__init__( - name=name, - color=color, - intensity=intensity, - radius=radius - ) + def __init__( + self, + name=None, + color=None, + intensity=None, + radius=None, + innerConeAngle=0.0, + outerConeAngle=np.pi / 4.0, + ): + super().__init__(name=name, color=color, intensity=intensity, radius=radius) self.outerConeAngle = outerConeAngle self.innerConeAngle = innerConeAngle @@ -234,7 +208,7 @@ def innerConeAngle(self): @innerConeAngle.setter def innerConeAngle(self, value): if value < 0.0 or value > self.outerConeAngle: - raise ValueError('Invalid value for inner cone angle') + raise ValueError("Invalid value for inner cone angle") self._innerConeAngle = float(value) @property @@ -244,7 +218,7 @@ def outerConeAngle(self): @outerConeAngle.setter def outerConeAngle(self, value): if value < 0.0 or value > np.pi / 2.0 + 1e-9: - raise ValueError('Invalid value for outer cone angle') + raise ValueError("Invalid value for outer cone angle") self._outerConeAngle = float(value) @@ -269,7 +243,6 @@ def autolight(scene): lights = [PointLight(), PointLight()] # create two translation matrices for bounds corners - transforms = [transformations.translation_matrix(b) - for b in scene.bounds] + transforms = [transformations.translation_matrix(b) for b in scene.bounds] return lights, transforms diff --git a/trimesh/scene/scene.py b/trimesh/scene/scene.py index 28ca909cb..d9326763e 100644 --- a/trimesh/scene/scene.py +++ b/trimesh/scene/scene.py @@ -19,14 +19,16 @@ class Scene(Geometry3D): moved by updating transform in the transform tree. """ - def __init__(self, - geometry=None, - base_frame='world', - metadata=None, - graph=None, - camera=None, - lights=None, - camera_transform=None): + def __init__( + self, + geometry=None, + base_frame="world", + metadata=None, + graph=None, + camera=None, + lights=None, + camera_transform=None, + ): """ Create a new Scene object. @@ -87,18 +89,18 @@ def apply_transform(self, transform): base = self.graph.base_frame for child in self.graph.transforms.children[base]: combined = np.dot(transform, self.graph[child][0]) - self.graph.update(frame_from=base, - frame_to=child, - matrix=combined) + self.graph.update(frame_from=base, frame_to=child, matrix=combined) return self - def add_geometry(self, - geometry, - node_name=None, - geom_name=None, - parent_node_name=None, - transform=None, - metadata=None): + def add_geometry( + self, + geometry, + node_name=None, + geom_name=None, + parent_node_name=None, + transform=None, + metadata=None, + ): """ Add a geometry to the scene. 
@@ -133,19 +135,23 @@ def add_geometry(self, # PointCloud objects will look like a sequence elif util.is_sequence(geometry): # if passed a sequence add all elements - return [self.add_geometry( - geometry=value, - node_name=node_name, - geom_name=geom_name, - parent_node_name=parent_node_name, - transform=transform, - metadata=metadata) for value in geometry] + return [ + self.add_geometry( + geometry=value, + node_name=node_name, + geom_name=geom_name, + parent_node_name=parent_node_name, + transform=transform, + metadata=metadata, + ) + for value in geometry + ] elif isinstance(geometry, dict): # if someone passed us a dict of geometry - return {k: self.add_geometry( - geometry=v, - geom_name=k, - metadata=metadata) for k, v in geometry.items()} + return { + k: self.add_geometry(geometry=v, geom_name=k, metadata=metadata) + for k, v in geometry.items() + } elif isinstance(geometry, Scene): # concatenate current scene with passed scene @@ -157,22 +163,22 @@ def add_geometry(self, self.graph.transforms = concat.graph.transforms return - if not hasattr(geometry, 'vertices'): - util.log.debug(f'unknown type ({type(geometry).__name__}) added to scene!') + if not hasattr(geometry, "vertices"): + util.log.debug(f"unknown type ({type(geometry).__name__}) added to scene!") return # get or create a name to reference the geometry by if geom_name is not None: # if name is passed use it name = geom_name - elif 'name' in geometry.metadata: + elif "name" in geometry.metadata: # if name is in metadata use it - name = geometry.metadata['name'] - elif 'file_name' in geometry.metadata: - name = geometry.metadata['file_name'] + name = geometry.metadata["name"] + elif "file_name" in geometry.metadata: + name = geometry.metadata["file_name"] else: # try to create a simple name - name = 'geometry_' + str(len(self.geometry)) + name = "geometry_" + str(len(self.geometry)) # if its already taken use our unique name logic name = unique_name(start=name, contains=self.geometry.keys()) @@ -193,12 +199,14 @@ def add_geometry(self, # create an identity transform from parent_node transform = np.eye(4) - self.graph.update(frame_to=node_name, - frame_from=parent_node_name, - matrix=transform, - geometry=name, - geometry_flags={'visible': True}, - metadata=metadata) + self.graph.update( + frame_to=node_name, + frame_from=parent_node_name, + matrix=transform, + geometry=name, + geometry_flags={"visible": True}, + metadata=metadata, + ) return node_name @@ -228,8 +236,9 @@ def strip_visuals(self): and set them to an empty `ColorVisuals`. 
""" from ..visual.color import ColorVisuals + for geometry in self.geometry.values(): - if util.is_instance_named(geometry, 'Trimesh'): + if util.is_instance_named(geometry, "Trimesh"): geometry.visual = ColorVisuals(mesh=geometry) def __hash__(self): @@ -246,10 +255,8 @@ def __hash__(self): # start with the last modified time of the scene graph hashable = [hex(self.graph.transforms.__hash__())] # take the re-hex string of the hash - hashable.extend(hex(geometry[k].__hash__()) for k in - geometry.keys()) - return caching.hash_fast( - ''.join(hashable).encode('utf-8')) + hashable.extend(hex(geometry[k].__hash__()) for k in geometry.keys()) + return caching.hash_fast("".join(hashable).encode("utf-8")) @property def is_empty(self): @@ -278,8 +285,7 @@ def is_valid(self): return True try: - referenced = {self.graph[i][1] - for i in self.graph.nodes_geometry} + referenced = {self.graph[i][1] for i in self.graph.nodes_geometry} except BaseException: # if connectivity to world frame is broken return false return False @@ -304,14 +310,19 @@ def bounds_corners(self): # collect AABB for each geometry corners = {} # collect vertices for every mesh - vertices = {k: m.vertices for k, m in - self.geometry.items() - if hasattr(m, 'vertices') and - len(m.vertices) > 0} + vertices = { + k: m.vertices + for k, m in self.geometry.items() + if hasattr(m, "vertices") and len(m.vertices) > 0 + } # handle 2D geometries vertices.update( - {k: np.column_stack((v, np.zeros(len(v)))) - for k, v in vertices.items() if v.shape[1] == 2}) + { + k: np.column_stack((v, np.zeros(len(v)))) + for k, v in vertices.items() + if v.shape[1] == 2 + } + ) # loop through every node with geometry for node_name in self.graph.nodes_geometry: @@ -326,8 +337,8 @@ def bounds_corners(self): dot = np.dot(transform[:3, :3], points.T) # append the AABB with translation applied after corners[node_name] = np.array( - [dot.min(axis=1) + transform[:3, 3], - dot.max(axis=1) + transform[:3, 3]]) + [dot.min(axis=1) + transform[:3, 3], dot.max(axis=1) + transform[:3, 3]] + ) return corners @caching.cache_decorator @@ -346,9 +357,7 @@ def bounds(self): return None # combine each geometry node AABB into a larger list corners = np.vstack(list(self.bounds_corners.values())) - return np.array([corners.min(axis=0), - corners.max(axis=0)], - dtype=np.float64) + return np.array([corners.min(axis=0), corners.max(axis=0)], dtype=np.float64) @caching.cache_decorator def extents(self): @@ -372,7 +381,7 @@ def scale(self): scale : float The mean of the bounding box edge lengths """ - scale = (self.extents ** 2).sum() ** .5 + scale = (self.extents**2).sum() ** 0.5 return scale @caching.cache_decorator @@ -399,10 +408,12 @@ def center_mass(self): The center of mass of the scene """ # get the center of mass and volume for each geometry - center_mass = {k: m.center_mass for k, m in self.geometry.items() - if hasattr(m, 'center_mass')} - mass = {k: m.mass for k, m in self.geometry.items() - if hasattr(m, 'mass')} + center_mass = { + k: m.center_mass + for k, m in self.geometry.items() + if hasattr(m, "center_mass") + } + mass = {k: m.mass for k, m in self.geometry.items() if hasattr(m, "mass")} # get the geometry name and transform for each instance graph = self.graph @@ -410,12 +421,15 @@ def center_mass(self): # get the transformed center of mass for each instance transformed = np.array( - [np.dot(mat, np.append(center_mass[g], 1))[:3] - for mat, g in instance - if g in center_mass], dtype=np.float64) + [ + np.dot(mat, np.append(center_mass[g], 1))[:3] + for mat, g 
in instance + if g in center_mass + ], + dtype=np.float64, + ) # weight the center of mass locations by volume - weights = np.array( - [mass[g] for _, g in instance], dtype=np.float64) + weights = np.array([mass[g] for _, g in instance], dtype=np.float64) weights /= weights.sum() return (transformed * weights.reshape((-1, 1))).sum(axis=0) @@ -431,8 +445,8 @@ def moment_inertia(self): Inertia with respect to cartesian axis at `scene.center_mass` """ return inertia.scene_inertia( - scene=self, - transform=transformations.translation_matrix(self.center_mass)) + scene=self, transform=transformations.translation_matrix(self.center_mass) + ) def moment_inertia_frame(self, transform): """ @@ -462,11 +476,11 @@ def area(self): Summed area of every instanced geometry """ # get the area of every geometry that has an area property - areas = {n: g.area for n, g in self.geometry.items() - if hasattr(g, 'area')} + areas = {n: g.area for n, g in self.geometry.items() if hasattr(g, "area")} # sum the area including instancing - return sum((areas.get(self.graph[n][1], 0.0) for n in - self.graph.nodes_geometry), 0.0) + return sum( + (areas.get(self.graph[n][1], 0.0) for n in self.graph.nodes_geometry), 0.0 + ) @caching.cache_decorator def volume(self): @@ -480,11 +494,11 @@ def volume(self): Summed area of every instanced geometry """ # get the area of every geometry that has a volume attribute - volume = {n: g.volume for n, g in self.geometry.items() - if hasattr(g, 'area')} + volume = {n: g.volume for n, g in self.geometry.items() if hasattr(g, "area")} # sum the area including instancing - return sum((volume.get(self.graph[n][1], 0.0) for n in - self.graph.nodes_geometry), 0.0) + return sum( + (volume.get(self.graph[n][1], 0.0) for n in self.graph.nodes_geometry), 0.0 + ) @caching.cache_decorator def triangles(self): @@ -506,19 +520,18 @@ def triangles(self): # get the actual potential mesh instance geometry = self.geometry[geometry_name] - if not hasattr(geometry, 'triangles'): + if not hasattr(geometry, "triangles"): continue # append the (n, 3, 3) triangles to a sequence triangles.append( transformations.transform_points( - geometry.triangles.copy().reshape((-1, 3)), - matrix=transform)) + geometry.triangles.copy().reshape((-1, 3)), matrix=transform + ) + ) # save the node names for each triangle - triangles_node.append( - np.tile(node_name, - len(geometry.triangles))) + triangles_node.append(np.tile(node_name, len(geometry.triangles))) # save the resulting nodes to the cache - self._cache['triangles_node'] = np.hstack(triangles_node) + self._cache["triangles_node"] = np.hstack(triangles_node) triangles = np.vstack(triangles).reshape((-1, 3, 3)) return triangles @@ -533,7 +546,7 @@ def triangles_node(self): Node name for each triangle """ populate = self.triangles # NOQA - return self._cache['triangles_node'] + return self._cache["triangles_node"] @caching.cache_decorator def geometry_identifiers(self): @@ -545,8 +558,7 @@ def geometry_identifiers(self): identifiers : dict {Identifier hash: key in self.geometry} """ - identifiers = {mesh.identifier_hash: name - for name, mesh in self.geometry.items()} + identifiers = {mesh.identifier_hash: name for name, mesh in self.geometry.items()} return identifiers @caching.cache_decorator @@ -567,24 +579,27 @@ def duplicate_nodes(self): return [] # geometry name : hash of mesh - hashes = {k: int(m.identifier_hash, 16) - for k, m in self.geometry.items() - if hasattr(m, 'identifier_hash')} + hashes = { + k: int(m.identifier_hash, 16) + for k, m in 
self.geometry.items() + if hasattr(m, "identifier_hash") + } # bring into local scope for loop graph = self.graph # get a hash for each node name # scene.graph node name : hashed geometry - node_hash = {node: hashes.get( - graph[node][1]) for - node in graph.nodes_geometry} + node_hash = {node: hashes.get(graph[node][1]) for node in graph.nodes_geometry} # collect node names for each hash key duplicates = collections.defaultdict(list) # use a slightly off-label list comprehension # for debatable function call overhead avoidance - [duplicates[hashed].append(node) for node, hashed - in node_hash.items() if hashed is not None] + [ + duplicates[hashed].append(node) + for node, hashed in node_hash.items() + if hashed is not None + ] # we only care about the values keys are garbage return list(duplicates.values()) @@ -610,12 +625,9 @@ def deduplicated(self): return Scene(geometry) - def set_camera(self, - angles=None, - distance=None, - center=None, - resolution=None, - fov=None): + def set_camera( + self, angles=None, distance=None, center=None, resolution=None, fov=None + ): """ Create a camera object for self.camera, and add a transform to self.graph for it. @@ -649,13 +661,10 @@ def set_camera(self, rotation = transformations.euler_matrix(*angles) transform = cameras.look_at( - self.bounds, - fov=fov, - rotation=rotation, - distance=distance, - center=center) + self.bounds, fov=fov, rotation=rotation, distance=distance, center=center + ) - if hasattr(self, '_camera') and self._camera is not None: + if hasattr(self, "_camera") and self._camera is not None: self._camera.fov = fov if resolution is not None: self._camera.resolution = resolution @@ -711,13 +720,11 @@ def camera_rays(self): # find our scene's transform for the camera transform = self.camera_transform # apply the rotation to the unit ray direction vectors - vectors = transformations.transform_points( - vectors, - transform, - translate=False) + vectors = transformations.transform_points(vectors, transform, translate=False) # camera origin is single point so extract from - origins = (np.ones_like(vectors) * - transformations.translation_from_matrix(transform)) + origins = np.ones_like(vectors) * transformations.translation_from_matrix( + transform + ) return origins, vectors, pixels @property @@ -755,7 +762,7 @@ def camera(self, camera): @property def has_camera(self): - return hasattr(self, '_camera') and self._camera is not None + return hasattr(self, "_camera") and self._camera is not None @property def lights(self): @@ -768,7 +775,7 @@ def lights(self): lights : [trimesh.scene.lighting.Light] Lights in the scene. 
""" - if not hasattr(self, '_lights') or self._lights is None: + if not hasattr(self, "_lights") or self._lights is None: # do some automatic lighting lights, transforms = lighting.autolight(self) # assign the transforms to the scene graph @@ -807,10 +814,10 @@ def rezero(self): matrix[:3, 3] = -self.centroid # we are going to change the base frame - new_base = str(self.graph.base_frame) + '_I' - self.graph.update(frame_from=new_base, - frame_to=self.graph.base_frame, - matrix=matrix) + new_base = str(self.graph.base_frame) + "_I" + self.graph.update( + frame_from=new_base, frame_to=self.graph.base_frame, matrix=matrix + ) self.graph.base_frame = new_base def dump(self, concatenate=False): @@ -836,7 +843,7 @@ def dump(self, concatenate=False): current = self.geometry[geometry_name].copy() # if the geometry is 2D see if we have to upgrade to 3D - if hasattr(current, 'to_3D'): + if hasattr(current, "to_3D"): # check to see if the scene is transforming the path out of plane check = util.isclose(transform, util._IDENTITY, atol=1e-8) check[:2, :3] = True @@ -849,8 +856,8 @@ def dump(self, concatenate=False): # move the geometry vertices into the requested frame current.apply_transform(transform) - current.metadata['name'] = geometry_name - current.metadata['node'] = node_name + current.metadata["name"] = geometry_name + current.metadata["node"] = node_name # save to our list of meshes result.append(current) @@ -880,15 +887,13 @@ def subscene(self, node): graph = self.graph nodes = graph.transforms.successors(node) # get every edge that has an included node - edges = [e for e in graph.to_edgelist() - if e[0] in nodes] + edges = [e for e in graph.to_edgelist() if e[0] in nodes] # create a scene graph when graph = SceneGraph(base_frame=node) graph.from_edgelist(edges) - geometry_names = {e[2]['geometry'] for e in edges - if 'geometry' in e[2]} + geometry_names = {e[2]["geometry"] for e in edges if "geometry" in e[2]} geometry = {k: self.geometry[k] for k in geometry_names} result = Scene(geometry=geometry, graph=graph) return result @@ -902,16 +907,11 @@ def convex_hull(self): --------- hull: Trimesh object, convex hull of all meshes in scene """ - points = util.vstack_empty( - [m.vertices - for m in self.dump()]) + points = util.vstack_empty([m.vertices for m in self.dump()]) hull = convex.convex_hull(points) return hull - def export(self, - file_obj=None, - file_type=None, - **kwargs): + def export(self, file_obj=None, file_type=None, **kwargs): """ Export a snapshot of the current scene. 
@@ -929,10 +929,8 @@ def export(self, Only returned if file_obj is None """ return export.export_scene( - scene=self, - file_obj=file_obj, - file_type=file_type, - **kwargs) + scene=self, file_obj=file_obj, file_type=file_type, **kwargs + ) def save_image(self, resolution=None, **kwargs): """ @@ -951,8 +949,8 @@ def save_image(self, resolution=None, **kwargs): Render of scene as a PNG """ from ..viewer.windowed import render_scene - png = render_scene( - scene=self, resolution=resolution, **kwargs) + + png = render_scene(scene=self, resolution=resolution, **kwargs) return png @property @@ -971,7 +969,7 @@ def units(self): if any(existing[0] != e for e in existing): # if all of our geometry doesn't have the same units already # this function will only do some hot nonsense - raise ValueError('models in scene have inconsistent units!') + raise ValueError("models in scene have inconsistent units!") return existing[0] @@ -1020,9 +1018,7 @@ def convert_units(self, desired, guess=False): current = units.units_from_metadata(self, guess=guess) # find the float conversion - scale = units.unit_conversion( - current=current, - desired=desired) + scale = units.unit_conversion(current=current, desired=desired) # exit early if our current units are the same as desired units if np.isclose(scale, 1.0): @@ -1058,8 +1054,7 @@ def explode(self, vector=None, origin=None): transform, geometry_name = self.graph[node_name] centroid = self.geometry[geometry_name].centroid # transform centroid into nodes location - centroid = np.dot(transform, - np.append(centroid, 1))[:3] + centroid = np.dot(transform, np.append(centroid, 1))[:3] if vector.shape == (): # case where our vector is a single number @@ -1068,7 +1063,7 @@ def explode(self, vector=None, origin=None): projected = np.dot(vector, (centroid - origin)) offset = vector * projected else: - raise ValueError('explode vector wrong shape!') + raise ValueError("explode vector wrong shape!") # original transform is read-only T_new = transform.copy() @@ -1091,8 +1086,7 @@ def scaled(self, scale): A copy of the current scene but scaled """ # convert 2D geometries to 3D for 3D scaling factors - scale_is_3D = isinstance( - scale, (list, tuple, np.ndarray)) and len(scale) == 3 + scale_is_3D = isinstance(scale, (list, tuple, np.ndarray)) and len(scale) == 3 if scale_is_3D and np.all(np.asarray(scale) == scale[0]): # scale is uniform @@ -1121,10 +1115,13 @@ def scaled(self, scale): geom_name=geom_name, node_name=n, parent_node_name=p, - transform=result.graph.transforms.edge_data[( - p, n)].get('matrix', None), - metadata=result.graph.transforms.edge_data[( - p, n)].get('metadata', None)) + transform=result.graph.transforms.edge_data[(p, n)].get( + "matrix", None + ), + metadata=result.graph.transforms.edge_data[(p, n)].get( + "metadata", None + ), + ) result.delete_geometry(geom_name) # Convert all 2D paths to 3D paths @@ -1141,18 +1138,18 @@ def scaled(self, scale): # Get geometry transform w.r.t. 
base frame result.geometry[geom_name].apply_transform(T).apply_scale( - scale).apply_transform(np.linalg.inv(T)) + scale + ).apply_transform(np.linalg.inv(T)) # Scale all transformations in the scene graph edge_data = result.graph.transforms.edge_data for uv in edge_data: - if 'matrix' in edge_data[uv]: + if "matrix" in edge_data[uv]: props = edge_data[uv] - T = edge_data[uv]['matrix'].copy() + T = edge_data[uv]["matrix"].copy() T[:3, 3] *= scale - props['matrix'] = T - result.graph.update( - frame_from=uv[0], frame_to=uv[1], **props) + props["matrix"] = T + result.graph.update(frame_from=uv[0], frame_to=uv[1], **props) # Clear cache result.graph.transforms._cache = {} result.graph.transforms._modified = str(uuid.uuid4()) @@ -1190,17 +1187,15 @@ def scaled(self, scale): # otherwise apply the full transform result.geometry[geometry].apply_transform(new_geom) - for node, T in zip(nodes[group], - transforms[group]): + for node, T in zip(nodes[group], transforms[group]): # generate the new transforms - transform = util.multi_dot( - [scale_3D, T, np.linalg.inv(new_geom)]) + transform = util.multi_dot([scale_3D, T, np.linalg.inv(new_geom)]) # apply scale to translation transform[:3, 3] *= scale # update scene with new transforms - result.graph.update(frame_to=node, - matrix=transform, - geometry=geometry) + result.graph.update( + frame_to=node, matrix=transform, geometry=geometry + ) return result def copy(self): @@ -1216,17 +1211,19 @@ def copy(self): # allow them to handle references to unpickle-able objects geometry = {n: g.copy() for n, g in self.geometry.items()} - if not hasattr(self, '_camera') or self._camera is None: + if not hasattr(self, "_camera") or self._camera is None: # if no camera set don't include it camera = None else: # otherwise get a copy of the camera camera = self.camera.copy() # create a new scene with copied geometry and graph - copied = Scene(geometry=geometry, - graph=self.graph.copy(), - metadata=self.metadata.copy(), - camera=camera) + copied = Scene( + geometry=geometry, + graph=self.graph.copy(), + metadata=self.metadata.copy(), + camera=camera, + ) return copied def show(self, viewer=None, **kwargs): @@ -1247,18 +1244,21 @@ def show(self, viewer=None, **kwargs): if viewer is None: # check to see if we are in a notebook or not from ..viewer import in_notebook + if in_notebook(): - viewer = 'notebook' + viewer = "notebook" else: - viewer = 'gl' + viewer = "gl" - if viewer == 'gl': + if viewer == "gl": # this imports pyglet, and will raise an ImportError # if pyglet is not available from ..viewer import SceneViewer + return SceneViewer(self, **kwargs) - elif viewer == 'notebook': + elif viewer == "notebook": from ..viewer import scene_to_notebook + return scene_to_notebook(self, **kwargs) elif callable(viewer): # if a callable method like a custom class @@ -1281,9 +1281,7 @@ def __add__(self, other): appended : trimesh.Scene Scene with geometry from both scenes """ - result = append_scenes( - [self, other], - common=[self.graph.base_frame]) + result = append_scenes([self, other], common=[self.graph.base_frame]) return result @@ -1301,7 +1299,7 @@ def split_scene(geometry, **kwargs): scene: trimesh.Scene """ # already a scene, so return it - if util.is_instance_named(geometry, 'Scene'): + if util.is_instance_named(geometry, "Scene"): return geometry # a list of things @@ -1312,8 +1310,7 @@ def split_scene(geometry, **kwargs): metadata.update(g.metadata) except BaseException: continue - return Scene(geometry, - metadata=metadata) + return Scene(geometry, 
metadata=metadata) # a single geometry so we are going to split split = [] @@ -1324,15 +1321,15 @@ def split_scene(geometry, **kwargs): # if there is only one geometry in the mesh # name it from the file name - if len(split) == 1 and 'file_name' in metadata: - split = {metadata['file_name']: split[0]} + if len(split) == 1 and "file_name" in metadata: + split = {metadata["file_name"]: split[0]} scene = Scene(split, metadata=metadata) return scene -def append_scenes(iterable, common=None, base_frame='world'): +def append_scenes(iterable, common=None, base_frame="world"): """ Concatenate multiple scene objects into one scene. @@ -1401,11 +1398,11 @@ def node_remap(node): # loop through every geometry for s in iterable: # allow Trimesh/Path2D geometry to be passed - if hasattr(s, 'scene'): + if hasattr(s, "scene"): s = s.scene() # if we don't have a scene raise an exception if not isinstance(s, Scene): - raise ValueError(f'{type(s).__name__} is not a scene!') + raise ValueError(f"{type(s).__name__} is not a scene!") # remap geometries if they have been consumed map_geom = {} @@ -1430,8 +1427,8 @@ def node_remap(node): # referred to geometry that doesn't exist # rather than crash here we ignore it as the user # possibly intended to add in geometries back later - if 'geometry' in attr and attr['geometry'] in map_geom: - attr['geometry'] = map_geom[attr['geometry']] + if "geometry" in attr and attr["geometry"] in map_geom: + attr["geometry"] = map_geom[attr["geometry"]] # save the new edge edges.append((a, b, attr)) # mark nodes from current scene as consumed diff --git a/trimesh/scene/transforms.py b/trimesh/scene/transforms.py index 09ffce6d4..9580391f0 100644 --- a/trimesh/scene/transforms.py +++ b/trimesh/scene/transforms.py @@ -9,7 +9,7 @@ # we compare to identity a lot _identity = np.eye(4) -_identity.flags['WRITEABLE'] = False +_identity.flags["WRITEABLE"] = False class SceneGraph: @@ -21,7 +21,7 @@ class SceneGraph: nodes. """ - def __init__(self, base_frame='world', repair_rigid=1e-5): + def __init__(self, base_frame="world", repair_rigid=1e-5): """ Create a scene graph, holding homogeneous transformation matrices and instance information about geometry. 
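
Since the hunks below reformat the core `update`/`get` pair of `SceneGraph`,
a minimal round-trip sketch may help review; the node and geometry names are
made up for illustration:

    import numpy as np

    from trimesh.scene.transforms import SceneGraph

    graph = SceneGraph(base_frame="world")
    # store an edge holding a homogeneous matrix and a geometry reference
    matrix = np.eye(4)
    matrix[:3, 3] = [0.0, 0.0, 1.0]
    graph.update(frame_to="node_a", matrix=matrix, geometry="mesh_0")
    # returns the (4, 4) transform and geometry name for world -> node_a
    transform, geometry = graph.get(frame_to="node_a")
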
@@ -79,19 +79,17 @@ def update(self, frame_to, frame_from=None, **kwargs):
             frame_from = self.base_frame
 
         # pass through
-        attr = {k: v for k, v in kwargs.items()
-                if k in {'geometry', 'metadata'}}
+        attr = {k: v for k, v in kwargs.items() if k in {"geometry", "metadata"}}
         # convert various kwargs to a single matrix
-        attr['matrix'] = kwargs_to_matrix(**kwargs)
+        attr["matrix"] = kwargs_to_matrix(**kwargs)
 
         # add the edges for the transforms
         # will return if it changed anything
         self.transforms.add_edge(frame_from, frame_to, **attr)
 
         # set the node attribute with the geometry information
-        if 'geometry' in kwargs:
-            self.transforms.node_data[
-                frame_to]['geometry'] = kwargs['geometry']
+        if "geometry" in kwargs:
+            self.transforms.node_data[frame_to]["geometry"] = kwargs["geometry"]
 
     def get(self, frame_to, frame_from=None):
         """
@@ -126,8 +124,7 @@ def get(self, frame_to, frame_from=None):
             return self._cache[key]
 
         # get the geometry at the final node if any
-        geometry = self.transforms.node_data[
-            frame_to].get('geometry')
+        geometry = self.transforms.node_data[frame_to].get("geometry")
         # get a local reference to edge data
         data = self.transforms.edge_data
 
@@ -137,13 +134,12 @@ def get(self, frame_to, frame_from=None):
             matrix = _identity
         elif key in data:
             # if the path is just an edge return early
-            matrix = data[key]['matrix']
+            matrix = data[key]["matrix"]
         else:
             # we have a 3+ node path
             # get the path from the forest always going from
             # parent -> child -> child
-            path = self.transforms.shortest_path(
-                frame_from, frame_to)
+            path = self.transforms.shortest_path(frame_from, frame_to)
             # the path should always start with `frame_from`
             assert path[0] == frame_from
             # and end with the `frame_to` node
@@ -154,21 +150,19 @@ def get(self, frame_to, frame_from=None):
             for u, v in zip(path[:-1], path[1:]):
                 forward = data.get((u, v))
                 if forward is not None:
-                    if 'matrix' in forward:
+                    if "matrix" in forward:
                         # append the matrix from u to v
-                        matrices.append(forward['matrix'])
+                        matrices.append(forward["matrix"])
                     continue
                 # since forward didn't exist, backward must
                 # exist, otherwise this is a disconnected path
                 # and we should raise an error anyway
                 backward = data[(v, u)]
-                if 'matrix' in backward:
+                if "matrix" in backward:
                     # append the inverted backwards matrix
-                    matrices.append(
-                        np.linalg.inv(backward['matrix']))
+                    matrices.append(np.linalg.inv(backward["matrix"]))
             # filter out any identity matrices
-            matrices = [m for m in matrices if
-                        np.abs(m - _identity).max() > 1e-8]
+            matrices = [m for m in matrices if np.abs(m - _identity).max() > 1e-8]
             if len(matrices) == 0:
                 matrix = _identity
             elif len(matrices) == 1:
@@ -182,7 +176,7 @@ def get(self, frame_to, frame_from=None):
             matrix = fix_rigid(matrix, max_deviance=self.repair_rigid)
 
         # matrix being edited in-place leads to subtle bugs
-        matrix.flags['WRITEABLE'] = False
+        matrix.flags["WRITEABLE"] = False
 
         # store the result
         self._cache[key] = (matrix, geometry)
@@ -223,11 +217,9 @@ def to_flattened(self):
             if node == base_frame:
                 continue
             # get the matrix and geometry name
-            matrix, geometry = self.get(
-                frame_to=node, frame_from=base_frame)
+            matrix, geometry = self.get(frame_to=node, frame_from=base_frame)
             # store matrix as list rather than numpy array
-            flat[node] = {'transform': matrix.tolist(),
-                          'geometry': geometry}
+            flat[node] = {"transform": matrix.tolist(), "geometry": geometry}
 
         return flat
 
@@ -252,8 +244,7 @@ def to_gltf(self, scene, mesh_index=None):
         if mesh_index is None:
            # geometry is an OrderedDict
            # map mesh name to index: {geometry key : 
index}
-            mesh_index = {name: i for i, name
-                          in enumerate(scene.geometry.keys())}
+            mesh_index = {name: i for i, name in enumerate(scene.geometry.keys())}
 
         # get graph information into local scope before loop
         graph = self.transforms
@@ -264,7 +255,7 @@ def to_gltf(self, scene, mesh_index=None):
 
         # list of dict, in gltf format
         # start with base frame as first node index
-        result = [{'name': base_frame}]
+        result = [{"name": base_frame}]
         # {node name : node index in gltf}
         lookup = {base_frame: 0}
 
@@ -275,7 +266,7 @@ def to_gltf(self, scene, mesh_index=None):
             # assign the index to the node-name lookup
             lookup[node] = len(result)
             # populate a result at the correct index
-            result.append({'name': node})
+            result.append({"name": node})
 
         # get generated properties outside of loop
         # does the scene have a defined camera to export
@@ -287,53 +278,52 @@ def to_gltf(self, scene, mesh_index=None):
         # then iterate through to collect data
         for info in result:
             # name of the scene node
-            node = info['name']
+            node = info["name"]
 
             # get the original node names for children
             childs = children.get(node, [])
             if len(childs) > 0:
-                info['children'] = [lookup[k] for k in childs]
+                info["children"] = [lookup[k] for k in childs]
 
             # if we have a mesh store by index
-            if 'geometry' in node_data[node]:
-                mesh_key = node_data[node]['geometry']
+            if "geometry" in node_data[node]:
+                mesh_key = node_data[node]["geometry"]
                 if mesh_key in mesh_index:
-                    info['mesh'] = mesh_index[mesh_key]
+                    info["mesh"] = mesh_index[mesh_key]
 
             # check to see if we have a camera node
             if has_camera and node == scene.camera.name:
-                info['camera'] = 0
+                info["camera"] = 0
 
             if node != base_frame:
                 parent = graph.parents[node]
 
                 node_edge = edge_data[(parent, node)]
                 # get the matrix from this edge
-                matrix = node_edge['matrix']
+                matrix = node_edge["matrix"]
                 # only include if it's not an identity matrix
                 if not util.allclose(matrix, _identity):
-                    info['matrix'] = matrix.T.reshape(-1).tolist()
+                    info["matrix"] = matrix.T.reshape(-1).tolist()
 
                 # if an extra was stored on this edge
-                extras = node_edge.get('metadata')
+                extras = node_edge.get("metadata")
                 if extras:
                     extras = extras.copy()
                     # if extensions were stored on this edge
-                    extensions = extras.pop('gltf_extensions', None)
+                    extensions = extras.pop("gltf_extensions", None)
                     if isinstance(extensions, dict):
-                        info['extensions'] = extensions
-                        extensions_used = extensions_used.union(
-                            set(extensions.keys()))
+                        info["extensions"] = extensions
+                        extensions_used = extensions_used.union(set(extensions.keys()))
                     # convert any numpy arrays to lists
                     extras.update(
-                        {k: v.tolist() for k, v in extras.items()
-                         if hasattr(v, 'tolist')})
-                    info['extras'] = extras
+                        {k: v.tolist() for k, v in extras.items() if hasattr(v, "tolist")}
+                    )
+                    info["extras"] = extras
 
-        gltf = {'nodes': result}
+        gltf = {"nodes": result}
         if len(extensions_used) > 0:
-            gltf['extensionsUsed'] = list(extensions_used)
+            gltf["extensionsUsed"] = list(extensions_used)
         return gltf
 
     def to_edgelist(self):
@@ -361,12 +351,12 @@ def to_edgelist(self):
             # make sure we're not stomping on original
             attr_new = attr.copy()
             # apply node geometry to edge attributes
-            if 'geometry' in b_attr:
-                attr_new['geometry'] = b_attr['geometry']
+            if "geometry" in b_attr:
+                attr_new["geometry"] = b_attr["geometry"]
             # convert any numpy arrays to regular lists
             attr_new.update(
-                {k: v.tolist() for k, v in attr_new.items()
-                 if hasattr(v, 'tolist')})
+                {k: v.tolist() for k, v in attr_new.items() if hasattr(v, "tolist")}
+            )
             export.append([a, b, attr_new])
         return export
 
@@ -394,8 
+384,7 @@ def from_edgelist(self, edges, strict=True): self.update(edge[1], edge[0]) # edge is broken elif strict: - raise ValueError( - 'edge incorrect shape: %s', str(edge)) + raise ValueError("edge incorrect shape: %s", str(edge)) def to_networkx(self): """ @@ -407,9 +396,8 @@ def to_networkx(self): Directed graph. """ import networkx - return networkx.from_edgelist( - self.to_edgelist(), - create_using=networkx.DiGraph) + + return networkx.from_edgelist(self.to_edgelist(), create_using=networkx.DiGraph) def show(self, **kwargs): """ @@ -423,14 +411,12 @@ def show(self, **kwargs): """ import matplotlib.pyplot as plt import networkx + # default kwargs will only be set if not # passed explicitly to the show command - defaults = {'with_labels': True} - kwargs.update(**{k: v for k, v in defaults.items() - if k not in kwargs}) - networkx.draw_networkx( - G=self.to_networkx(), - **kwargs) + defaults = {"with_labels": True} + kwargs.update(**{k: v for k, v in defaults.items() if k not in kwargs}) + networkx.draw_networkx(G=self.to_networkx(), **kwargs) plt.show() @@ -468,9 +454,7 @@ def nodes_geometry(self): nodes_geometry : (m,) array Node names which have geometry associated """ - return [n for n, attr in - self.transforms.node_data.items() - if 'geometry' in attr] + return [n for n, attr in self.transforms.node_data.items() if "geometry" in attr] @caching.cache_decorator def geometry_nodes(self): @@ -485,8 +469,8 @@ def geometry_nodes(self): """ res = collections.defaultdict(list) for node, attr in self.transforms.node_data.items(): - if 'geometry' in attr: - res[attr['geometry']].append(node) + if "geometry" in attr: + res[attr["geometry"]].append(node) return res def remove_geometries(self, geometries): @@ -507,13 +491,13 @@ def remove_geometries(self, geometries): # remove the geometry reference from the node without deleting nodes # this lets us keep our cached paths, and will not screw up children for attrib in self.transforms.node_data.values(): - if 'geometry' in attrib and attrib['geometry'] in geometries: - attrib.pop('geometry') + if "geometry" in attrib and attrib["geometry"] in geometries: + attrib.pop("geometry") # it would be safer to just run _cache.clear # but the only property using the geometry should be # nodes_geometry: if this becomes not true change this to clear! - self._cache.cache.pop('nodes_geometry', None) + self._cache.cache.pop("nodes_geometry", None) def __contains__(self, key): return key in self.transforms.node_data @@ -524,7 +508,7 @@ def __getitem__(self, key): def __setitem__(self, key, value): value = np.asanyarray(value) if value.shape != (4, 4): - raise ValueError('Matrix must be specified!') + raise ValueError("Matrix must be specified!") return self.update(key, matrix=value) def clear(self): @@ -575,7 +559,7 @@ def add_edge(self, u, v, **kwargs): -------- changed : bool Return if this operation changed anything. 
- """ + """ self._hash = None # topology has changed so clear cache @@ -584,11 +568,9 @@ def add_edge(self, u, v, **kwargs): else: # check to see if matrix and geometry are identical edge = self.edge_data[(u, v)] - if (util.allclose(kwargs.get('matrix', _identity), - edge.get('matrix', _identity), - 1e-8) - and (edge.get('geometry') == - kwargs.get('geometry'))): + if util.allclose( + kwargs.get("matrix", _identity), edge.get("matrix", _identity), 1e-8 + ) and (edge.get("geometry") == kwargs.get("geometry")): return False # store a parent reference for traversal @@ -597,9 +579,8 @@ def add_edge(self, u, v, **kwargs): self.edge_data[(u, v)] = kwargs # set empty node data self.node_data[u].update({}) - if 'geometry' in kwargs: - self.node_data[v].update( - {'geometry': kwargs['geometry']}) + if "geometry" in kwargs: + self.node_data[v].update({"geometry": kwargs["geometry"]}) else: self.node_data[v].update({}) @@ -628,10 +609,7 @@ def remove_node(self, u): self._hash = None # delete all children's references and parent reference - children = [ - child for ( - child, - parent) in self.parents.items() if parent == u] + children = [child for (child, parent) in self.parents.items() if parent == u] for c in children: del self.parents[c] if u in self.parents: @@ -701,10 +679,9 @@ def shortest_path(self, u, v): # we have a either a common node between both # traversal directions or we have consumed the whole # tree in both directions so try to find the common node - common = set(backward).intersection( - forward).difference({None}) + common = set(backward).intersection(forward).difference({None}) if len(common) == 0: - raise ValueError(f'No path from {u}->{v}!') + raise ValueError(f"No path from {u}->{v}!") elif len(common) > 1: # get the first occurring common element in "forward" link = next(f for f in forward if f in common) @@ -714,8 +691,8 @@ def shortest_path(self, u, v): link = next(iter(common)) # combine the forward and backwards traversals - a = forward[:forward.index(link) + 1] - b = backward[:backward.index(link)] + a = forward[: forward.index(link) + 1] + b = backward[: backward.index(link)] path = a + b[::-1] # verify we didn't screw up the order @@ -726,7 +703,7 @@ def shortest_path(self, u, v): return path - raise ValueError('Iteration limit exceeded!') + raise ValueError("Iteration limit exceeded!") @property def nodes(self): @@ -750,17 +727,16 @@ def children(self): children : dict Keyed {node : [child, child, ...]} """ - if 'children' in self._cache: - return self._cache['children'] + if "children" in self._cache: + return self._cache["children"] child = collections.defaultdict(list) # append children to parent references # skip self-references to avoid a node loop - [child[v].append(u) for u, v in - self.parents.items() if u != v] + [child[v].append(u) for u, v in self.parents.items() if u != v] # cache and return as a vanilla dict - self._cache['children'] = dict(child) - return self._cache['children'] + self._cache["children"] = dict(child) + return self._cache["children"] def successors(self, node): """ @@ -815,29 +791,31 @@ def __hash__(self): # to try eliminating because it is very likely that # someone somewhere is modifying the data without # setting `self._hash = None` - hashed = getattr(self, '_hash', None) + hashed = getattr(self, "_hash", None) if hashed is not None: return hashed hashed = hash_fast( - (''.join(str(hash(k)) + v.get('geometry', '') - for k, v in self.edge_data.items()) + - ''.join(str(k) + v.get('geometry', '') - for k, v in 
self.node_data.items())).encode('utf-8') + - b''.join(v['matrix'].tobytes() - for v in self.edge_data.values() - if 'matrix' in v)) + ( + "".join( + str(hash(k)) + v.get("geometry", "") + for k, v in self.edge_data.items() + ) + + "".join( + str(k) + v.get("geometry", "") for k, v in self.node_data.items() + ) + ).encode("utf-8") + + b"".join( + v["matrix"].tobytes() for v in self.edge_data.values() if "matrix" in v + ) + ) self._hash = hashed return hashed def kwargs_to_matrix( - matrix=None, - quaternion=None, - translation=None, - axis=None, - angle=None, - **kwargs): + matrix=None, quaternion=None, translation=None, axis=None, angle=None, **kwargs +): """ Take multiple keyword arguments and parse them into a homogeneous transformation matrix. diff --git a/trimesh/schemas.py b/trimesh/schemas.py index b05fa817c..3c1543e40 100644 --- a/trimesh/schemas.py +++ b/trimesh/schemas.py @@ -31,10 +31,10 @@ def resolve(item, resolver): # run the resolver on every list item return [resolve(i, resolver) for i in item] elif isinstance(item, dict): - if '$ref' in item: + if "$ref" in item: # if we have a reference to a file pop the key # and update the dict with the reference in-place - raw = decode_text(resolver.get(item.pop('$ref'))) + raw = decode_text(resolver.get(item.pop("$ref"))) item.update(json.loads(raw)) # run the resolver on the dict again resolve(item, resolver) diff --git a/trimesh/smoothing.py b/trimesh/smoothing.py index 3830c6e2d..d5aa0fdc2 100644 --- a/trimesh/smoothing.py +++ b/trimesh/smoothing.py @@ -12,12 +12,14 @@ from .util import unitize -def filter_laplacian(mesh, - lamb=0.5, - iterations=10, - implicit_time_integration=False, - volume_constraint=True, - laplacian_operator=None): +def filter_laplacian( + mesh, + lamb=0.5, + iterations=10, + implicit_time_integration=False, + volume_constraint=True, + laplacian_operator=None, +): """ Smooth a mesh in-place using laplacian smoothing. Articles @@ -77,21 +79,18 @@ def filter_laplacian(mesh, # volume constraint if volume_constraint: # find the volume with new vertex positions - vol_new = triangles.mass_properties( - vertices[faces], skip_inertia=True)["volume"] + vol_new = triangles.mass_properties(vertices[faces], skip_inertia=True)[ + "volume" + ] # scale by volume ratio - vertices *= ((vol_ini / vol_new) ** (1.0 / 3.0)) + vertices *= (vol_ini / vol_new) ** (1.0 / 3.0) # assign modified vertices back to mesh mesh.vertices = vertices return mesh -def filter_humphrey(mesh, - alpha=0.1, - beta=0.5, - iterations=10, - laplacian_operator=None): +def filter_humphrey(mesh, alpha=0.1, beta=0.5, iterations=10, laplacian_operator=None): """ Smooth a mesh in-place using laplacian smoothing and Humphrey filtering. @@ -130,19 +129,14 @@ def filter_humphrey(mesh, vert_q = vertices.copy() vertices = laplacian_operator.dot(vertices) vert_b = vertices - (alpha * original + (1.0 - alpha) * vert_q) - vertices -= (beta * vert_b + (1.0 - beta) * - laplacian_operator.dot(vert_b)) + vertices -= beta * vert_b + (1.0 - beta) * laplacian_operator.dot(vert_b) # assign modified vertices back to mesh mesh.vertices = vertices return mesh -def filter_taubin(mesh, - lamb=0.5, - nu=0.5, - iterations=10, - laplacian_operator=None): +def filter_taubin(mesh, lamb=0.5, nu=0.5, iterations=10, laplacian_operator=None): """ Smooth a mesh in-place using laplacian smoothing and taubin filtering. 
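
A short usage sketch of the smoothing filters whose signatures are collapsed
in this file; `trimesh.creation.icosphere` is only assumed as a convenient
test mesh and is not part of this patch:

    import trimesh

    from trimesh.smoothing import filter_laplacian, filter_taubin

    mesh = trimesh.creation.icosphere(subdivisions=3)
    # laplacian smoothing modifies the mesh in-place and also returns it
    filter_laplacian(mesh, lamb=0.5, iterations=10, volume_constraint=True)
    # taubin filtering alternates a positive (lamb) and a negative (nu) step
    filter_taubin(mesh, lamb=0.5, nu=0.5, iterations=10)
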
@@ -186,11 +180,9 @@ def filter_taubin(mesh,
     return mesh
 
 
-def filter_mut_dif_laplacian(mesh,
-                             lamb=0.5,
-                             iterations=10,
-                             volume_constraint=True,
-                             laplacian_operator=None):
+def filter_mut_dif_laplacian(
+    mesh, lamb=0.5, iterations=10, volume_constraint=True, laplacian_operator=None
+):
     """
     Smooth a mesh in-place using laplacian smoothing
     with a mutable diffusion laplacian.
@@ -228,19 +220,17 @@ def filter_mut_dif_laplacian(mesh,
     # get mesh vertices as vanilla numpy array
     vertices = mesh.vertices.copy().view(np.ndarray)
     faces = mesh.faces.copy().view(np.ndarray)
-    eps = 0.01 * (np.max(mesh.area_faces)**0.5)
+    eps = 0.01 * (np.max(mesh.area_faces) ** 0.5)
 
     # Number of passes
     for _index in range(iterations):
-
         # Mutable diffusion
         normals = get_vertices_normals(mesh)
         qi = laplacian_operator.dot(vertices)
         pi_qi = vertices - qi
         adil = np.abs((normals * pi_qi).dot(np.ones((3, 1))))
         adil = 1.0 / np.maximum(1e-12, adil)
-        lamber = np.maximum(
-            0.2 * lamb, np.minimum(1.0, lamb * adil / np.mean(adil)))
+        lamber = np.maximum(0.2 * lamb, np.minimum(1.0, lamb * adil / np.mean(adil)))
 
         # Filter
         dot = laplacian_operator.dot(vertices) - vertices
@@ -288,28 +278,26 @@ def laplacian_calculation(mesh, equal_weight=True, pinned_vertices=None):
 
     # stack neighbors to 1D arrays
     col = np.concatenate(neighbors)
-    row = np.concatenate([[i] * len(n)
-                          for i, n in enumerate(neighbors)])
+    row = np.concatenate([[i] * len(n) for i, n in enumerate(neighbors)])
 
     if equal_weight:
         # equal weights for each neighbor
-        data = np.concatenate([[1.0 / len(n)] * len(n)
-                               for n in neighbors])
+        data = np.concatenate([[1.0 / len(n)] * len(n) for n in neighbors])
     else:
         # umbrella weights, distance-weighted
         # use dot product of ones to replace array.sum(axis=1)
         ones = np.ones(3)
         # the distance from vertices to neighbors
         norms = [
-            1.0 / np.maximum(1e-6, np.sqrt(np.dot(
-                (vertices[i] - vertices[n]) ** 2, ones)))
-            for i, n in enumerate(neighbors)]
+            1.0
+            / np.maximum(1e-6, np.sqrt(np.dot((vertices[i] - vertices[n]) ** 2, ones)))
+            for i, n in enumerate(neighbors)
+        ]
         # normalize group and stack into single array
         data = np.concatenate([i / i.sum() for i in norms])
 
     # create the sparse matrix
-    matrix = coo_matrix((data, (row, col)),
-                        shape=[len(vertices)] * 2)
+    matrix = coo_matrix((data, (row, col)), shape=[len(vertices)] * 2)
 
     return matrix
 
diff --git a/trimesh/transformations.py b/trimesh/transformations.py
index bc2861b93..384f9aa5f 100644
--- a/trimesh/transformations.py
+++ b/trimesh/transformations.py
@@ -851,9 +851,7 @@ def decompose_matrix(matrix):
     return scale, shear, angles, translate, perspective
 
 
-def compose_matrix(
-    scale=None, shear=None, angles=None, translate=None, perspective=None
-):
+def compose_matrix(scale=None, shear=None, angles=None, translate=None, perspective=None):
     """Return transformation matrix from sequence of transformations.
 
     This is the inverse of the decompose_matrix function.
diff --git a/trimesh/triangles.py b/trimesh/triangles.py
index a5835ebf3..68ca76935 100644
--- a/trimesh/triangles.py
+++ b/trimesh/triangles.py
@@ -4,11 +4,14 @@
 Functions for dealing with triangle soups in (n, 3, 3) float form.
 """
 
+from dataclasses import dataclass
+
 import numpy as np
 
 from . 
import util from .constants import tol from .points import point_plane_distance +from .typed import NDArray, Optional, float64 from .util import diagonal_dot, unitize @@ -51,7 +54,7 @@ def area(triangles=None, crosses=None, sum=False): """ if crosses is None: crosses = cross(triangles) - areas = np.sqrt((crosses ** 2).sum(axis=1)) / 2.0 + areas = np.sqrt((crosses**2).sum(axis=1)) / 2.0 if sum: return areas.sum() return areas @@ -136,13 +139,15 @@ def all_coplanar(triangles): """ triangles = np.asanyarray(triangles, dtype=np.float64) if not util.is_shape(triangles, (-1, 3, 3)): - raise ValueError('Triangles must be (n, 3, 3)!') + raise ValueError("Triangles must be (n, 3, 3)!") test_normal = normals(triangles)[0] test_vertex = triangles[0][0] - distances = point_plane_distance(points=triangles[1:].reshape((-1, 3)), - plane_normal=test_normal, - plane_origin=test_vertex) + distances = point_plane_distance( + points=triangles[1:].reshape((-1, 3)), + plane_normal=test_normal, + plane_origin=test_vertex, + ) all_coplanar = np.all(np.abs(distances) < tol.zero) return all_coplanar @@ -155,23 +160,45 @@ def any_coplanar(triangles): """ triangles = np.asanyarray(triangles, dtype=np.float64) if not util.is_shape(triangles, (-1, 3, 3)): - raise ValueError('Triangles must be (n, 3, 3)!') + raise ValueError("Triangles must be (n, 3, 3)!") test_normal = normals(triangles)[0] test_vertex = triangles[0][0] - distances = point_plane_distance(points=triangles[1:].reshape((-1, 3)), - plane_normal=test_normal, - plane_origin=test_vertex) - any_coplanar = np.any( - np.all(np.abs(distances.reshape((-1, 3)) < tol.zero), axis=1)) + distances = point_plane_distance( + points=triangles[1:].reshape((-1, 3)), + plane_normal=test_normal, + plane_origin=test_vertex, + ) + any_coplanar = np.any(np.all(np.abs(distances.reshape((-1, 3)) < tol.zero), axis=1)) return any_coplanar -def mass_properties(triangles, - crosses=None, - density=None, - center_mass=None, - skip_inertia=False): +@dataclass +class MassProperties: + # the density value these mass properties were calculated with + # this alters `mass` and `inertia` + density: float + + # the volume multiplied by the density + mass: float + + # the volume produced + volume: float + + # the (3,) center of mass + center_mass: NDArray[float64] + + # the (3, 3) inertia tensor + inertia: Optional[NDArray[float64]] = None + + def __getitem__(self, item): + # add for backwards compatibility + return getattr(self, item) + + +def mass_properties( + triangles, crosses=None, density=None, center_mass=None, skip_inertia=False +) -> MassProperties: """ Calculate the mass properties of a group of triangles. @@ -198,7 +225,7 @@ def mass_properties(triangles, """ triangles = np.asanyarray(triangles, dtype=np.float64) if not util.is_shape(triangles, (-1, 3, 3)): - raise ValueError('Triangles must be (n, 3, 3)!') + raise ValueError("Triangles must be (n, 3, 3)!") if crosses is None: crosses = cross(triangles) @@ -214,18 +241,22 @@ def mass_properties(triangles, # for the x coordinates of every triangle # triangles[:,:,0] will give rows like [[x0, x1, x2], ...] 
- f2 = (triangles[:, 0, :]**2 + - triangles[:, 1, :]**2 + - triangles[:, 0, :] * triangles[:, 1, :] + - triangles[:, 2, :] * f1) - f3 = ((triangles[:, 0, :]**3) + - (triangles[:, 0, :]**2) * (triangles[:, 1, :]) + - (triangles[:, 0, :]) * (triangles[:, 1, :]**2) + - (triangles[:, 1, :]**3) + - (triangles[:, 2, :] * f2)) - g0 = (f2 + (triangles[:, 0, :] + f1) * triangles[:, 0, :]) - g1 = (f2 + (triangles[:, 1, :] + f1) * triangles[:, 1, :]) - g2 = (f2 + (triangles[:, 2, :] + f1) * triangles[:, 2, :]) + f2 = ( + triangles[:, 0, :] ** 2 + + triangles[:, 1, :] ** 2 + + triangles[:, 0, :] * triangles[:, 1, :] + + triangles[:, 2, :] * f1 + ) + f3 = ( + (triangles[:, 0, :] ** 3) + + (triangles[:, 0, :] ** 2) * (triangles[:, 1, :]) + + (triangles[:, 0, :]) * (triangles[:, 1, :] ** 2) + + (triangles[:, 1, :] ** 3) + + (triangles[:, 2, :] * f2) + ) + g0 = f2 + (triangles[:, 0, :] + f1) * triangles[:, 0, :] + g1 = f2 + (triangles[:, 1, :] + f1) * triangles[:, 1, :] + g2 = f2 + (triangles[:, 2, :] + f1) * triangles[:, 2, :] integral = np.zeros((10, len(f1))) integral[0] = crosses[:, 0] * f1[:, 0] integral[1:4] = (crosses * f2).T @@ -233,13 +264,14 @@ def mass_properties(triangles, for i in range(3): triangle_i = np.mod(i + 1, 3) integral[i + 7] = crosses[:, i] * ( - (triangles[:, 0, triangle_i] * g0[:, i]) + - (triangles[:, 1, triangle_i] * g1[:, i]) + - (triangles[:, 2, triangle_i] * g2[:, i])) + (triangles[:, 0, triangle_i] * g0[:, i]) + + (triangles[:, 1, triangle_i] * g1[:, i]) + + (triangles[:, 2, triangle_i] * g2[:, i]) + ) coefficients = 1.0 / np.array( - [6, 24, 24, 24, 60, 60, 60, 120, 120, 120], - dtype=np.float64) + [6, 24, 24, 24, 60, 60, 60, 120, 120, 120], dtype=np.float64 + ) integrated = integral.sum(axis=1) * coefficients volume = integrated[0] @@ -252,32 +284,33 @@ def mass_properties(triangles, # otherwise get it from the integration center_mass = integrated[1:4] / volume - result = {'density': density, - 'mass': density * volume, - 'volume': volume, - 'center_mass': center_mass} + result = MassProperties( + density=density, + mass=density * volume, + volume=volume, + center_mass=center_mass, + ) if skip_inertia: return result inertia = np.zeros((3, 3)) - inertia[0, 0] = integrated[5] + integrated[6] - \ - (volume * (center_mass[[1, 2]]**2).sum()) - inertia[1, 1] = integrated[4] + integrated[6] - \ - (volume * (center_mass[[0, 2]]**2).sum()) - inertia[2, 2] = integrated[4] + integrated[5] - \ - (volume * (center_mass[[0, 1]]**2).sum()) - inertia[0, 1] = - ( - integrated[7] - (volume * np.prod(center_mass[[0, 1]]))) - inertia[1, 2] = - ( - integrated[8] - (volume * np.prod(center_mass[[1, 2]]))) - inertia[0, 2] = - ( - integrated[9] - (volume * np.prod(center_mass[[0, 2]]))) + inertia[0, 0] = ( + integrated[5] + integrated[6] - (volume * (center_mass[[1, 2]] ** 2).sum()) + ) + inertia[1, 1] = ( + integrated[4] + integrated[6] - (volume * (center_mass[[0, 2]] ** 2).sum()) + ) + inertia[2, 2] = ( + integrated[4] + integrated[5] - (volume * (center_mass[[0, 1]] ** 2).sum()) + ) + inertia[0, 1] = -(integrated[7] - (volume * np.prod(center_mass[[0, 1]]))) + inertia[1, 2] = -(integrated[8] - (volume * np.prod(center_mass[[1, 2]]))) + inertia[0, 2] = -(integrated[9] - (volume * np.prod(center_mass[[0, 2]]))) inertia[2, 0] = inertia[0, 2] inertia[2, 1] = inertia[1, 2] inertia[1, 0] = inertia[0, 1] - inertia *= density - result['inertia'] = inertia + result.inertia = inertia * density return result @@ -302,7 +335,8 @@ def windings_aligned(triangles, normals_compare): triangles = 
np.asanyarray(triangles, dtype=np.float64) if not util.is_shape(triangles, (-1, 3, 3), allow_zeros=True): raise ValueError( - 'triangles must have shape (n, 3, 3), got %s' % str(triangles.shape)) + "triangles must have shape (n, 3, 3), got %s" % str(triangles.shape) + ) normals_compare = np.asanyarray(normals_compare, dtype=np.float64) calculated, valid = normals(triangles) @@ -311,8 +345,7 @@ def windings_aligned(triangles, normals_compare): difference = np.dot(calculated, normals_compare) else: # multiple comparison case - difference = diagonal_dot( - calculated, normals_compare[valid]) + difference = diagonal_dot(calculated, normals_compare[valid]) aligned = np.zeros(len(triangles), dtype=bool) aligned[valid] = difference > 0.0 @@ -337,11 +370,10 @@ def bounds_tree(triangles): """ triangles = np.asanyarray(triangles, dtype=np.float64) if not util.is_shape(triangles, (-1, 3, 3)): - raise ValueError('Triangles must be (n, 3, 3)!') + raise ValueError("Triangles must be (n, 3, 3)!") # the (n,6) interleaved bounding box for every triangle - triangle_bounds = np.column_stack((triangles.min(axis=1), - triangles.max(axis=1))) + triangle_bounds = np.column_stack((triangles.min(axis=1), triangles.max(axis=1))) tree = util.bounds_tree(triangle_bounds) return tree @@ -370,15 +402,14 @@ def nondegenerate(triangles, areas=None, height=None): """ triangles = np.asanyarray(triangles, dtype=np.float64) if not util.is_shape(triangles, (-1, 3, 3)): - raise ValueError('Triangles must be (n, 3, 3)!') + raise ValueError("Triangles must be (n, 3, 3)!") if height is None: height = tol.merge # if both edges of the triangles OBB are longer than tol.merge # we declare them to be nondegenerate - ok = (extents(triangles=triangles, - areas=areas) > height).all(axis=1) + ok = (extents(triangles=triangles, areas=areas) > height).all(axis=1) return ok @@ -401,19 +432,18 @@ def extents(triangles, areas=None): """ triangles = np.asanyarray(triangles, dtype=np.float64) if not util.is_shape(triangles, (-1, 3, 3)): - raise ValueError('Triangles must be (n, 3, 3)!') + raise ValueError("Triangles must be (n, 3, 3)!") if areas is None: - areas = area(triangles=triangles, - sum=False) + areas = area(triangles=triangles, sum=False) # the edge vectors which define the triangle a = triangles[:, 1] - triangles[:, 0] b = triangles[:, 2] - triangles[:, 0] # length of the edge vectors - length_a = (a**2).sum(axis=1)**.5 - length_b = (b**2).sum(axis=1)**.5 + length_a = (a**2).sum(axis=1) ** 0.5 + length_b = (b**2).sum(axis=1) ** 0.5 # which edges are acceptable length nonzero_a = length_a > tol.merge @@ -450,15 +480,13 @@ def barycentric_to_points(triangles, barycentric): triangles = np.asanyarray(triangles, dtype=np.float64) if not util.is_shape(triangles, (-1, 3, 3)): - raise ValueError('Triangles must be (n, 3, 3)!') + raise ValueError("Triangles must be (n, 3, 3)!") if barycentric.shape == (2,): - barycentric = np.ones((len(triangles), 2), - dtype=np.float64) * barycentric + barycentric = np.ones((len(triangles), 2), dtype=np.float64) * barycentric if util.is_shape(barycentric, (len(triangles), 2)): - barycentric = np.column_stack((barycentric, - 1.0 - barycentric.sum(axis=1))) + barycentric = np.column_stack((barycentric, 1.0 - barycentric.sum(axis=1))) elif not util.is_shape(barycentric, (len(triangles), 3)): - raise ValueError('Barycentric shape incorrect!') + raise ValueError("Barycentric shape incorrect!") barycentric /= barycentric.sum(axis=1).reshape((-1, 1)) points = (triangles * barycentric.reshape((-1, 3, 1))).sum(axis=1) 
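# A minimal round-trip sketch of `barycentric_to_points` above and its
# inverse `points_to_barycentric` (next hunk); the single triangle and
# the equal weights are made-up example values, not from the patch.
# Equal weights must land on the triangle centroid, and the inverse
# should recover the weights.
import numpy as np

import trimesh

tri = np.array([[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]])
bary = np.full((1, 3), 1.0 / 3.0)
points = trimesh.triangles.barycentric_to_points(tri, bary)
assert np.allclose(points, tri[0].mean(axis=0))
assert np.allclose(trimesh.triangles.points_to_barycentric(tri, points), bary)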
@@ -466,9 +494,7 @@ def barycentric_to_points(triangles, barycentric): return points -def points_to_barycentric(triangles, - points, - method='cramer'): +def points_to_barycentric(triangles, points, method="cramer"): """ Find the barycentric coordinates of points relative to triangles. @@ -502,10 +528,8 @@ def method_cross(): denominator = diagonal_dot(n, n) barycentric = np.zeros((len(triangles), 3), dtype=np.float64) - barycentric[:, 2] = diagonal_dot( - np.cross(edge_vectors[:, 0], w), n) / denominator - barycentric[:, 1] = diagonal_dot( - np.cross(w, edge_vectors[:, 1]), n) / denominator + barycentric[:, 2] = diagonal_dot(np.cross(edge_vectors[:, 0], w), n) / denominator + barycentric[:, 1] = diagonal_dot(np.cross(w, edge_vectors[:, 1]), n) / denominator barycentric[:, 0] = 1 - barycentric[:, 1] - barycentric[:, 2] return barycentric @@ -519,10 +543,8 @@ def method_cramer(): inverse_denominator = 1.0 / (dot00 * dot11 - dot01 * dot01) barycentric = np.zeros((len(triangles), 3), dtype=np.float64) - barycentric[:, 2] = (dot00 * dot12 - dot01 * - dot02) * inverse_denominator - barycentric[:, 1] = (dot11 * dot02 - dot01 * - dot12) * inverse_denominator + barycentric[:, 2] = (dot00 * dot12 - dot01 * dot02) * inverse_denominator + barycentric[:, 1] = (dot11 * dot02 - dot01 * dot12) * inverse_denominator barycentric[:, 0] = 1 - barycentric[:, 1] - barycentric[:, 2] return barycentric @@ -530,14 +552,14 @@ def method_cramer(): triangles = np.asanyarray(triangles, dtype=np.float64) points = np.asanyarray(points, dtype=np.float64) if not util.is_shape(triangles, (-1, 3, 3)): - raise ValueError('triangles shape incorrect') + raise ValueError("triangles shape incorrect") if not util.is_shape(points, (len(triangles), 3)): - raise ValueError('triangles and points must correspond') + raise ValueError("triangles and points must correspond") edge_vectors = triangles[:, 1:] - triangles[:, :1] w = points - triangles[:, 0].reshape((-1, 3)) - if method == 'cross': + if method == "cross": return method_cross() return method_cramer() @@ -569,9 +591,9 @@ def closest_point(triangles, points): triangles = np.asanyarray(triangles, dtype=np.float64) points = np.asanyarray(points, dtype=np.float64) if not util.is_shape(triangles, (-1, 3, 3)): - raise ValueError('triangles shape incorrect') + raise ValueError("triangles shape incorrect") if not util.is_shape(points, (len(triangles), 3)): - raise ValueError('need same number of triangles and points!') + raise ValueError("need same number of triangles and points!") # store the location of the closest point result = np.zeros_like(points) @@ -616,9 +638,7 @@ def closest_point(triangles, points): # check if P in edge region of AB, if so return projection of P onto A vc = (d1 * d4) - (d3 * d2) - is_ab = ((vc < tol.zero) & - (d1 > -tol.zero) & - (d3 < tol.zero) & remain) + is_ab = (vc < tol.zero) & (d1 > -tol.zero) & (d3 < tol.zero) & remain if any(is_ab): v = (d1[is_ab] / (d1[is_ab] - d3[is_ab])).reshape((-1, 1)) result[is_ab] = a[is_ab] + (v * ab[is_ab]) @@ -643,9 +663,7 @@ def closest_point(triangles, points): # check if P in edge region of BC, if so return projection of P onto BC va = (d3 * d6) - (d5 * d4) - is_bc = ((va < tol.zero) & - ((d4 - d3) > - tol.zero) & - ((d5 - d6) > -tol.zero) & remain) + is_bc = (va < tol.zero) & ((d4 - d3) > -tol.zero) & ((d5 - d6) > -tol.zero) & remain if any(is_bc): d43 = d4[is_bc] - d3[is_bc] w = (d43 / (d43 + (d5[is_bc] - d6[is_bc]))).reshape((-1, 1)) @@ -686,11 +704,10 @@ def to_kwargs(triangles): """ triangles = 
np.asanyarray(triangles, dtype=np.float64) if not util.is_shape(triangles, (-1, 3, 3)): - raise ValueError('Triangles must be (n, 3, 3)!') + raise ValueError("Triangles must be (n, 3, 3)!") vertices = triangles.reshape((-1, 3)) faces = np.arange(len(vertices)).reshape((-1, 3)) - kwargs = {'vertices': vertices, - 'faces': faces} + kwargs = {"vertices": vertices, "faces": faces} return kwargs diff --git a/trimesh/units.py b/trimesh/units.py index ea09ee188..0f8936872 100644 --- a/trimesh/units.py +++ b/trimesh/units.py @@ -27,8 +27,7 @@ def unit_conversion(current, desired): Number to multiply by to put values into desired units """ # scaling factors from various unit systems to inches - to_inch = resources.get( - 'units_to_inches.json', decode_json=True) + to_inch = resources.get("units_to_inches.json", decode_json=True) current = str(current).strip().lower() desired = str(desired).strip().lower() @@ -54,43 +53,40 @@ def units_from_metadata(obj, guess=True): units: str A guess of what the units might be """ - to_inch = resources.get( - 'units_to_inches.json', decode_json=True) + to_inch = resources.get("units_to_inches.json", decode_json=True) # try to guess from metadata - for key in ['file_name', 'name']: + for key in ["file_name", "name"]: if key not in obj.metadata: continue # get the string which might contain unit hints hints = obj.metadata[key].lower() - if 'unit' in hints: + if "unit" in hints: # replace all delimiter options with white space - for delim in '_-.': - hints = hints.replace(delim, ' ') + for delim in "_-.": + hints = hints.replace(delim, " ") # loop through each hint for hint in hints.strip().split(): # key word is "unit" or "units" - if 'unit' not in hint: + if "unit" not in hint: continue # get rid of keyword and whitespace - hint = hint.replace( - 'units', '').replace( - 'unit', '').strip() + hint = hint.replace("units", "").replace("unit", "").strip() # if the hint is a valid unit return it if hint in to_inch: return hint if not guess: - raise ValueError('no units and not allowed to guess') + raise ValueError("no units and not allowed to guess") # we made it to the wild ass guess section # if the scale is larger than 100 mystery units # declare the model to be millimeters, otherwise inches - log.debug('no units: guessing from scale') + log.debug("no units: guessing from scale") if float(obj.scale) > 100.0: - return 'millimeters' + return "millimeters" else: - return 'inches' + return "inches" def _convert_units(obj, desired, guess=False): @@ -114,7 +110,7 @@ def _convert_units(obj, desired, guess=False): # to guess will raise a ValueError obj.units = units_from_metadata(obj, guess=guess) - log.debug('converting units from %s to %s', obj.units, desired) + log.debug("converting units from %s to %s", obj.units, desired) # float, conversion factor conversion = unit_conversion(obj.units, desired) diff --git a/trimesh/viewer/__init__.py b/trimesh/viewer/__init__.py index d235d200f..170c85c38 100644 --- a/trimesh/viewer/__init__.py +++ b/trimesh/viewer/__init__.py @@ -30,9 +30,11 @@ # explicitly list imports in __all__ # as otherwise flake8 gets mad -__all__ = ['SceneWidget', - 'SceneViewer', - 'render_scene', - 'in_notebook', - 'scene_to_notebook', - 'scene_to_html'] +__all__ = [ + "SceneWidget", + "SceneViewer", + "render_scene", + "in_notebook", + "scene_to_notebook", + "scene_to_html", +] diff --git a/trimesh/viewer/notebook.py b/trimesh/viewer/notebook.py index ddda0e69a..4820d5e40 100644 --- a/trimesh/viewer/notebook.py +++ b/trimesh/viewer/notebook.py @@ -29,17 
+29,21 @@ def scene_to_html(scene):
     """
     # fetch HTML template from ZIP archive
     # it is bundling all of three.js so compression is nice
-    base = util.decompress(
-        resources.get('templates/viewer.zip', decode=False),
-        file_type='zip')['viewer.html.template'].read().decode('utf-8')
+    base = (
+        util.decompress(
+            resources.get("templates/viewer.zip", decode=False), file_type="zip"
+        )["viewer.html.template"]
+        .read()
+        .decode("utf-8")
+    )
     # make sure scene has camera populated before export
     _ = scene.camera
     # get export as bytes
-    data = scene.export(file_type='glb')
+    data = scene.export(file_type="glb")
     # encode as base64 string
-    encoded = base64.b64encode(data).decode('utf-8')
+    encoded = base64.b64encode(data).decode("utf-8")
     # replace keyword with our scene data
-    result = base.replace('$B64GLTF', encoded)
+    result = base.replace("$B64GLTF", encoded)
     return result
 
 
@@ -67,17 +71,20 @@ def scene_to_notebook(scene, height=500, **kwargs):
     as_html = scene_to_html(scene=scene)
 
     # escape the quotes in the HTML
-    srcdoc = as_html.replace('"', '&quot;')
+    srcdoc = as_html.replace('"', "&quot;")
 
     # embed this puppy as the srcdoc attr of an IFframe
     # I tried this a dozen ways and this is the only one that works
     # display.IFrame/display.Javascript really, really don't work
     # div is to avoid IPython's pointless hardcoded warning
-    embedded = display.HTML(' '.join([
-        '<div><iframe srcdoc="{srcdoc}"',
-        'width="100%" height="{height}px"',
-        'style="border:none;"></iframe></div>']).format(
-        srcdoc=srcdoc,
-        height=height))
+    embedded = display.HTML(
+        " ".join(
+            [
+                '<div><iframe srcdoc="{srcdoc}"',
+                'width="100%" height="{height}px"',
+                'style="border:none;"></iframe></div>',
+            ]
+        ).format(srcdoc=srcdoc, height=height)
+    )
 
     return embedded
 
@@ -96,10 +103,10 @@ def in_notebook():
     # we only want to render rich output in notebooks
     # in terminals we definitely do not want to output HTML
     name = str(ipy.__class__).lower()
-    terminal = 'terminal' in name
+    terminal = "terminal" in name
 
     # spyder uses ZMQshell, and can appear to be a notebook
-    spyder = '_' in os.environ and 'spyder' in os.environ['_']
+    spyder = "_" in os.environ and "spyder" in os.environ["_"]
 
     # assume we are in a notebook if we are not in
     # a terminal and we haven't been run by spyder
diff --git a/trimesh/viewer/trackball.py b/trimesh/viewer/trackball.py
index cf32cdefb..f48d4e8ec 100644
--- a/trimesh/viewer/trackball.py
+++ b/trimesh/viewer/trackball.py
@@ -31,8 +31,8 @@
 
 
 class Trackball:
-    """A trackball class for creating camera transforms from mouse movements.
-    """
+    """A trackball class for creating camera transforms from mouse movements."""
+
     STATE_ROTATE = 0
     STATE_PAN = 1
     STATE_ROLL = 2
@@ -76,8 +76,7 @@ def __init__(self, pose, size, scale, target=None):
 
     @property
     def pose(self):
-        """autolab_core.RigidTransform : The current camera-to-world pose.
-        """
+        """autolab_core.RigidTransform : The current camera-to-world pose."""
         return self._n_pose
 
     def set_state(self, state):
@@ -127,7 +126,7 @@ def drag(self, point):
         point = np.array(point, dtype=np.float32)
         # get the "down" point defaulting to current point making
         # this a no-op if the "down" event didn't trigger for some reason
-        dx, dy = point - getattr(self, '_pdown', point)
+        dx, dy = point - getattr(self, "_pdown", point)
         mindim = 0.3 * np.min(self._size)
 
         target = self._target
@@ -139,14 +138,10 @@ def drag(self, point):
         # Interpret drag as a rotation
         if self._state == Trackball.STATE_ROTATE:
             x_angle = -dx / mindim
-            x_rot_mat = transformations.rotation_matrix(
-                x_angle, y_axis, target
-            )
+            x_rot_mat = transformations.rotation_matrix(x_angle, y_axis, target)
 
             y_angle = dy / mindim
-            y_rot_mat = transformations.rotation_matrix(
-                y_angle, x_axis, target
-            )
+            y_rot_mat = transformations.rotation_matrix(y_angle, x_axis, target)
 
             self._n_pose = y_rot_mat.dot(x_rot_mat.dot(self._pose))
 
@@ -158,8 +153,7 @@ def drag(self, point):
             v_init = v_init / np.linalg.norm(v_init)
             v_curr = v_curr / np.linalg.norm(v_curr)
 
-            theta = (-np.arctan2(v_curr[1], v_curr[0]) +
-                     np.arctan2(v_init[1], v_init[0]))
+            theta = -np.arctan2(v_curr[1], v_curr[0]) + np.arctan2(v_init[1], v_init[0])
 
             rot_mat = transformations.rotation_matrix(theta, z_axis, target)
 
@@ -205,7 +199,7 @@ def scroll(self, clicks):
         if clicks > 0:
             mult = ratio**clicks
         elif clicks < 0:
-            mult = (1.0 / ratio)**abs(clicks)
+            mult = (1.0 / ratio) ** abs(clicks)
 
         z_axis = self._n_pose[:3, 2].flatten()
         eye = self._n_pose[:3, 3].flatten()
diff --git a/trimesh/viewer/widget.py b/trimesh/viewer/widget.py
index 464a90f4f..c75dd287a 100644
--- a/trimesh/viewer/widget.py
+++ b/trimesh/viewer/widget.py
@@ -17,7 +17,6 @@
 
 
 class SceneGroup(pyglet.graphics.Group):
-
     def __init__(
         self,
         rect,
@@ -31,7 +30,7 @@ def __init__(
         self.scene = scene
 
         if background is None:
-            background = [.99, .99, .99, 1.0]
+            background = [0.99, 0.99, 0.99, 1.0]
         self._background = background
         self._pixel_per_point = pixel_per_point
 
@@ -56,7 +55,7 @@ def _set_view(self):
         gl.glPushMatrix()
         gl.glLoadIdentity()
         near = 0.01
-        far = 1000.
+ far = 1000.0 gl.gluPerspective(self.scene.camera.fov[1], width / height, near, far) gl.glMatrixMode(gl.GL_MODELVIEW) @@ -88,7 +87,8 @@ def set_state(self): gl.glPushMatrix() gl.glLoadIdentity() gl.glMultMatrixf( - rendering.matrix_to_gl(np.linalg.inv(self.scene.camera_transform))) + rendering.matrix_to_gl(np.linalg.inv(self.scene.camera_transform)) + ) def unset_state(self): gl.glPopMatrix() @@ -99,7 +99,6 @@ def unset_state(self): class MeshGroup(pyglet.graphics.Group): - def __init__(self, transform=None, texture=None, parent=None): super().__init__(parent) if transform is None: @@ -123,7 +122,6 @@ def unset_state(self): class SceneWidget(glooey.Widget): - def __init__(self, scene, **kwargs): super().__init__() self.scene = scene @@ -140,16 +138,17 @@ def __init__(self, scene, **kwargs): self._initial_camera_transform = self.scene.camera_transform.copy() self.reset_view() - self._background = kwargs.pop('background', None) - self._smooth = kwargs.pop('smooth', True) + self._background = kwargs.pop("background", None) + self._smooth = kwargs.pop("smooth", True) if kwargs: - raise TypeError(f'unexpected kwargs: {kwargs}') + raise TypeError(f"unexpected kwargs: {kwargs}") @property def scene_group(self): if self._scene_group is None: - pixel_per_point = (np.array(self.window.get_viewport_size()) / - np.array(self.window.get_size())) + pixel_per_point = np.array(self.window.get_viewport_size()) / np.array( + self.window.get_size() + ) self._scene_group = SceneGroup( rect=self.rect, scene=self.scene, @@ -170,12 +169,14 @@ def clear(self): def reset_view(self): self.view = { - 'ball': Trackball( + "ball": Trackball( pose=self._initial_camera_transform, size=self.scene.camera.resolution, scale=self.scene.scale, - target=self.scene.centroid)} - self.scene.camera_transform = self.view['ball'].pose + target=self.scene.centroid, + ) + } + self.scene.camera_transform = self.view["ball"].pose def do_claim(self): return 0, 0 @@ -197,13 +198,10 @@ def do_regroup(self): mesh_group = MeshGroup( transform=transform, texture=self.textures.get(geometry_name), - parent=self.scene_group) + parent=self.scene_group, + ) self.mesh_group[node_name] = mesh_group - self.batch.migrate( - vertex_list, - gl.GL_TRIANGLES, - mesh_group, - self.batch) + self.batch.migrate(vertex_list, gl.GL_TRIANGLES, mesh_group, self.batch) def do_draw(self): resolution = (self.rect.width, self.rect.height) @@ -237,9 +235,10 @@ def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers): y_prev = y - dy left, bottom = self.rect.left, self.rect.bottom width, height = self.rect.width, self.rect.height - if not (left < x_prev <= left + width) or \ - not (bottom < y_prev <= bottom + height): - self.view['ball'].down(np.array([x, y])) + if not (left < x_prev <= left + width) or not ( + bottom < y_prev <= bottom + height + ): + self.view["ball"].down(np.array([x, y])) SceneViewer.on_mouse_drag(self, x, y, dx, dy, buttons, modifiers) self._draw() @@ -252,8 +251,7 @@ def _update_node(self, node_name, geometry_name, geometry, transform): geometry_hash_new = geometry_hash(geometry) if self.vertex_list_hash.get(geometry_name) != geometry_hash_new: # if geometry has texture defined convert it to opengl form - if hasattr(geometry, 'visual') and hasattr( - geometry.visual, 'material'): + if hasattr(geometry, "visual") and hasattr(geometry.visual, "material"): tex = rendering.material_to_texture(geometry.visual.material) if tex is not None: self.textures[geometry_name] = tex @@ -266,7 +264,8 @@ def _update_node(self, node_name, geometry_name, 
geometry, transform): mesh_group = MeshGroup( transform=transform, texture=self.textures.get(geometry_name), - parent=self.scene_group) + parent=self.scene_group, + ) self.mesh_group[node_name] = mesh_group if self.vertex_list_hash.get(geometry_name) != geometry_hash_new: @@ -275,9 +274,8 @@ def _update_node(self, node_name, geometry_name, geometry, transform): # convert geometry to constructor args args = rendering.convert_to_vertexlist( - geometry, - group=mesh_group, - smooth=self._smooth) + geometry, group=mesh_group, smooth=self._smooth + ) # create the indexed vertex list self.vertex_list[geometry_name] = self.batch.add_indexed(*args) # save the MD5 of the geometry diff --git a/trimesh/viewer/windowed.py b/trimesh/viewer/windowed.py index 6d7a19dfc..7a360f8fb 100644 --- a/trimesh/viewer/windowed.py +++ b/trimesh/viewer/windowed.py @@ -16,16 +16,15 @@ # to shaders and we will likely support it by forking an entirely # new viewer `trimesh.viewer.shaders` and then basically keeping # `windowed` around for backwards-compatibility with no changes -if int(pyglet.version.split('.')[0]) >= 2: - raise ImportError( - '`trimesh.viewer.windowed` requires `pip install "pyglet<2"`') +if int(pyglet.version.split(".")[0]) >= 2: + raise ImportError('`trimesh.viewer.windowed` requires `pip install "pyglet<2"`') from .. import rendering, util from ..transformations import translation_matrix from ..visual import to_rgba from .trackball import Trackball -pyglet.options['shadow_window'] = False +pyglet.options["shadow_window"] = False import pyglet.gl as gl # NOQA @@ -34,25 +33,26 @@ class SceneViewer(pyglet.window.Window): - - def __init__(self, - scene, - smooth=True, - flags=None, - visible=True, - resolution=None, - start_loop=True, - callback=None, - callback_period=None, - caption=None, - fixed=None, - offset_lines=True, - line_settings=None, - background=None, - window_conf=None, - profile=False, - record=False, - **kwargs): + def __init__( + self, + scene, + smooth=True, + flags=None, + visible=True, + resolution=None, + start_loop=True, + callback=None, + callback_period=None, + caption=None, + fixed=None, + offset_lines=True, + line_settings=None, + background=None, + window_conf=None, + profile=False, + record=False, + **kwargs, + ): """ Create a window that will display a trimesh.Scene object in an OpenGL context via pyglet. 
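# A minimal usage sketch for the constructor documented above, assuming
# a working display and `pyglet<2` (the guard at the top of this module
# raises ImportError on pyglet 2); the `spin` callback is a made-up
# example, not part of the patch.
import trimesh
from trimesh.viewer import SceneViewer

scene = trimesh.Scene(trimesh.creation.box())

def spin(scene):
    # called with the scene roughly every `callback_period` seconds;
    # nudge the camera slightly around +Z on each tick
    scene.camera_transform = trimesh.transformations.rotation_matrix(
        0.01, [0, 0, 1]).dot(scene.camera_transform)

# `start_loop=True` (the default) blocks inside pyglet.app.run()
SceneViewer(scene, callback=spin, callback_period=1.0 / 30)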
@@ -109,7 +109,8 @@ def __init__(self, # a transform to offset lines slightly to avoid Z-fighting self._line_offset = translation_matrix( - [0, 0, scene.scale / 1000 if self.offset_lines else 0]) + [0, 0, scene.scale / 1000 if self.offset_lines else 0] + ) self.reset_view() self.batch = pyglet.graphics.Batch() @@ -118,12 +119,13 @@ def __init__(self, self._profile = bool(profile) if self._profile: from pyinstrument import Profiler + self.Profiler = Profiler self._record = bool(record) if self._record: # will save bytes here - self.scene.metadata['recording'] = [] + self.scene.metadata["recording"] = [] # store kwargs self.kwargs = kwargs @@ -154,8 +156,7 @@ def __init__(self, # set the default line settings to a fraction # of our resolution so the points aren't tiny scale = max(resolution) - self.line_settings = {'point_size': scale / 200, - 'line_width': scale / 400} + self.line_settings = {"point_size": scale / 200, "line_width": scale / 400} # if we've been passed line settings override the default if line_settings is not None: self.line_settings.update(line_settings) @@ -165,32 +166,37 @@ def __init__(self, try: # try enabling antialiasing # if you have a graphics card this will probably work - conf = gl.Config(sample_buffers=1, - samples=4, - depth_size=24, - double_buffer=True) - super().__init__(config=conf, - visible=visible, - resizable=True, - width=resolution[0], - height=resolution[1], - caption=caption) + conf = gl.Config( + sample_buffers=1, samples=4, depth_size=24, double_buffer=True + ) + super().__init__( + config=conf, + visible=visible, + resizable=True, + width=resolution[0], + height=resolution[1], + caption=caption, + ) except pyglet.window.NoSuchConfigException: conf = gl.Config(double_buffer=True) - super().__init__(config=conf, - resizable=True, - visible=visible, - width=resolution[0], - height=resolution[1], - caption=caption) + super().__init__( + config=conf, + resizable=True, + visible=visible, + width=resolution[0], + height=resolution[1], + caption=caption, + ) else: # window config was manually passed - super().__init__(config=window_conf, - resizable=True, - visible=visible, - width=resolution[0], - height=resolution[1], - caption=caption) + super().__init__( + config=window_conf, + resizable=True, + visible=visible, + width=resolution[0], + height=resolution[1], + caption=caption, + ) # add scene geometry to viewer geometry self._update_vertex_list() @@ -211,8 +217,7 @@ def __init__(self, # set up a do-nothing periodic task which will # trigger `self.on_draw` every `callback_period` # seconds if someone has passed a callback - pyglet.clock.schedule_interval(lambda x: x, - callback_period) + pyglet.clock.schedule_interval(lambda x: x, callback_period) if start_loop: pyglet.app.run() @@ -226,9 +231,7 @@ def _update_vertex_list(self): continue if geometry_hash(geom) == self.vertex_list_hash.get(name): continue - self.add_geometry(name=name, - geometry=geom, - smooth=bool(self._smooth)) + self.add_geometry(name=name, geometry=geom, smooth=bool(self._smooth)) def _update_meshes(self): # call the callback if specified @@ -254,8 +257,7 @@ def add_geometry(self, name, geometry, **kwargs): # convert geometry to constructor args args = rendering.convert_to_vertexlist(geometry, **kwargs) except BaseException: - util.log.warning(f'failed to add geometry `{name}`', - exc_info=True) + util.log.warning(f"failed to add geometry `{name}`", exc_info=True) return # create the indexed vertex list @@ -266,14 +268,14 @@ def add_geometry(self, name, geometry, **kwargs): 
self.vertex_list_mode[name] = args[1] # get the visual if the element has it - visual = getattr(geometry, 'visual', None) - if hasattr(visual, 'uv') and hasattr(visual, 'material'): + visual = getattr(geometry, "visual", None) + if hasattr(visual, "uv") and hasattr(visual, "material"): try: tex = rendering.material_to_texture(visual.material) if tex is not None: self.textures[name] = tex except BaseException: - util.log.warning('failed to load texture', exc_info=True) + util.log.warning("failed to load texture", exc_info=True) def cleanup_geometries(self): """ @@ -283,11 +285,9 @@ def cleanup_geometries(self): # shorthand to scene graph graph = self.scene.graph # which parts of the graph still have geometry - geom_keep = {graph[node][1] for - node in graph.nodes_geometry} + geom_keep = {graph[node][1] for node in graph.nodes_geometry} # which geometries no longer need to be kept - geom_delete = [geom for geom in self.vertex_list - if geom not in geom_keep] + geom_delete = [geom for geom in self.vertex_list if geom not in geom_keep] for geom in geom_delete: # remove stored vertex references self.vertex_list.pop(geom, None) @@ -330,16 +330,18 @@ def reset_view(self, flags=None): e.g. {'cull': False} """ self.view = { - 'cull': True, - 'axis': False, - 'grid': False, - 'fullscreen': False, - 'wireframe': False, - 'ball': Trackball( + "cull": True, + "axis": False, + "grid": False, + "fullscreen": False, + "wireframe": False, + "ball": Trackball( pose=self._initial_camera_transform, size=self.scene.camera.resolution, scale=self.scene.scale, - target=self.scene.centroid)} + target=self.scene.centroid, + ), + } try: # if any flags are passed override defaults if isinstance(flags, dict): @@ -398,34 +400,33 @@ def _gl_enable_depth(camera): @staticmethod def _gl_enable_color_material(): # do some openGL things - gl.glColorMaterial(gl.GL_FRONT_AND_BACK, - gl.GL_AMBIENT_AND_DIFFUSE) + gl.glColorMaterial(gl.GL_FRONT_AND_BACK, gl.GL_AMBIENT_AND_DIFFUSE) gl.glEnable(gl.GL_COLOR_MATERIAL) gl.glShadeModel(gl.GL_SMOOTH) - gl.glMaterialfv(gl.GL_FRONT, - gl.GL_AMBIENT, - rendering.vector_to_gl( - 0.192250, 0.192250, 0.192250)) - gl.glMaterialfv(gl.GL_FRONT, - gl.GL_DIFFUSE, - rendering.vector_to_gl( - 0.507540, 0.507540, 0.507540)) - gl.glMaterialfv(gl.GL_FRONT, - gl.GL_SPECULAR, - rendering.vector_to_gl( - .5082730, .5082730, .5082730)) - - gl.glMaterialf(gl.GL_FRONT, - gl.GL_SHININESS, - .4 * 128.0) + gl.glMaterialfv( + gl.GL_FRONT, + gl.GL_AMBIENT, + rendering.vector_to_gl(0.192250, 0.192250, 0.192250), + ) + gl.glMaterialfv( + gl.GL_FRONT, + gl.GL_DIFFUSE, + rendering.vector_to_gl(0.507540, 0.507540, 0.507540), + ) + gl.glMaterialfv( + gl.GL_FRONT, + gl.GL_SPECULAR, + rendering.vector_to_gl(0.5082730, 0.5082730, 0.5082730), + ) + + gl.glMaterialf(gl.GL_FRONT, gl.GL_SHININESS, 0.4 * 128.0) @staticmethod def _gl_enable_blending(): # enable blending for transparency gl.glEnable(gl.GL_BLEND) - gl.glBlendFunc(gl.GL_SRC_ALPHA, - gl.GL_ONE_MINUS_SRC_ALPHA) + gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA) @staticmethod def _gl_enable_smooth_lines(line_width=4, point_size=4): @@ -447,16 +448,15 @@ def _gl_enable_lighting(scene): # opengl only supports 7 lights? 
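        # (fixed-function OpenGL actually guarantees at least eight lights,
        # GL_LIGHT0 through GL_LIGHT7, so the [:7] slice below is conservative)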
for i, light in enumerate(scene.lights[:7]): # the index of which light we have - lightN = eval(f'gl.GL_LIGHT{i}') + lightN = eval(f"gl.GL_LIGHT{i}") # get the transform for the light by name matrix = scene.graph.get(light.name)[0] # convert light object to glLightfv calls multiargs = rendering.light_to_gl( - light=light, - transform=matrix, - lightN=lightN) + light=light, transform=matrix, lightN=lightN + ) # enable the light in question gl.glEnable(lightN) @@ -472,7 +472,7 @@ def toggle_culling(self): non- watertight meshes you probably want to be able to see the back sides. """ - self.view['cull'] = not self.view['cull'] + self.view["cull"] = not self.view["cull"] self.update_flags() def toggle_wireframe(self): @@ -481,14 +481,14 @@ def toggle_wireframe(self): Good for looking inside meshes, off by default. """ - self.view['wireframe'] = not self.view['wireframe'] + self.view["wireframe"] = not self.view["wireframe"] self.update_flags() def toggle_fullscreen(self): """ Toggle between fullscreen and windowed mode. """ - self.view['fullscreen'] = not self.view['fullscreen'] + self.view["fullscreen"] = not self.view["fullscreen"] self.update_flags() def toggle_axis(self): @@ -497,11 +497,11 @@ def toggle_axis(self): off, world frame, every frame """ # cycle through three axis states - states = [False, 'world', 'all', 'without_world'] + states = [False, "world", "all", "without_world"] # the state after toggling - index = (states.index(self.view['axis']) + 1) % len(states) + index = (states.index(self.view["axis"]) + 1) % len(states) # update state to next index - self.view['axis'] = states[index] + self.view["axis"] = states[index] # perform gl actions self.update_flags() @@ -510,7 +510,7 @@ def toggle_grid(self): Toggle a rendered grid. """ # update state to next index - self.view['grid'] = not self.view['grid'] + self.view["grid"] = not self.view["grid"] # perform gl actions self.update_flags() @@ -519,24 +519,25 @@ def update_flags(self): Check the view flags, and call required GL functions. """ # view mode, filled vs wirefrom - if self.view['wireframe']: + if self.view["wireframe"]: gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE) else: gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL) # set fullscreen or windowed - self.set_fullscreen(fullscreen=self.view['fullscreen']) + self.set_fullscreen(fullscreen=self.view["fullscreen"]) # backface culling on or off - if self.view['cull']: + if self.view["cull"]: gl.glEnable(gl.GL_CULL_FACE) else: gl.glDisable(gl.GL_CULL_FACE) # case where we WANT an axis and NO vertexlist # is stored internally - if self.view['axis'] and self._axis is None: + if self.view["axis"] and self._axis is None: from .. 
import creation + # create an axis marker sized relative to the scene axis = creation.axis(origin_size=self.scene.scale / 100) # create ordered args for a vertex list @@ -545,16 +546,17 @@ def update_flags(self): self._axis = self.batch.add_indexed(*args) # case where we DON'T want an axis but a vertexlist # IS stored internally - elif not self.view['axis'] and self._axis is not None: + elif not self.view["axis"] and self._axis is not None: # remove the axis from the rendering batch self._axis.delete() # set the reference to None self._axis = None - if self.view['grid'] and self._grid is None: + if self.view["grid"] and self._grid is None: try: # create a grid marker from ..path.creation import grid + bounds = self.scene.bounds center = bounds.mean(axis=0) # set the grid to the lowest Z position @@ -563,18 +565,14 @@ def update_flags(self): # choose the side length by maximum XY length side = bounds.ptp(axis=0)[:2].max() # create an axis marker sized relative to the scene - grid_mesh = grid( - side=side, - count=4, - transform=translation_matrix(center)) + grid_mesh = grid(side=side, count=4, transform=translation_matrix(center)) # convert the path to vertexlist args args = rendering.convert_to_vertexlist(grid_mesh) # create ordered args for a vertex list self._grid = self.batch.add_indexed(*args) except BaseException: - util.log.warning( - 'failed to create grid!', exc_info=True) - elif not self.view['grid'] and self._grid is not None: + util.log.warning("failed to create grid!", exc_info=True) + elif not self.view["grid"] and self._grid is not None: self._grid.delete() self._grid = None @@ -596,10 +594,9 @@ def _update_perspective(self, width, height): camera = self.scene.camera # set perspective from camera data - gl.gluPerspective(camera.fov[1], - width / float(height), - camera.z_near, - camera.z_far) + gl.gluPerspective( + camera.fov[1], width / float(height), camera.z_near, camera.z_far + ) gl.glMatrixMode(gl.GL_MODELVIEW) return width, height @@ -610,44 +607,44 @@ def on_resize(self, width, height): """ width, height = self._update_perspective(width, height) self.scene.camera.resolution = (width, height) - self.view['ball'].resize(self.scene.camera.resolution) - self.scene.camera_transform = self.view['ball'].pose + self.view["ball"].resize(self.scene.camera.resolution) + self.scene.camera_transform = self.view["ball"].pose def on_mouse_press(self, x, y, buttons, modifiers): """ Set the start point of the drag. 
""" - self.view['ball'].set_state(Trackball.STATE_ROTATE) - if (buttons == pyglet.window.mouse.LEFT): - ctrl = (modifiers & pyglet.window.key.MOD_CTRL) - shift = (modifiers & pyglet.window.key.MOD_SHIFT) - if (ctrl and shift): - self.view['ball'].set_state(Trackball.STATE_ZOOM) + self.view["ball"].set_state(Trackball.STATE_ROTATE) + if buttons == pyglet.window.mouse.LEFT: + ctrl = modifiers & pyglet.window.key.MOD_CTRL + shift = modifiers & pyglet.window.key.MOD_SHIFT + if ctrl and shift: + self.view["ball"].set_state(Trackball.STATE_ZOOM) elif shift: - self.view['ball'].set_state(Trackball.STATE_ROLL) + self.view["ball"].set_state(Trackball.STATE_ROLL) elif ctrl: - self.view['ball'].set_state(Trackball.STATE_PAN) - elif (buttons == pyglet.window.mouse.MIDDLE): - self.view['ball'].set_state(Trackball.STATE_PAN) - elif (buttons == pyglet.window.mouse.RIGHT): - self.view['ball'].set_state(Trackball.STATE_ZOOM) + self.view["ball"].set_state(Trackball.STATE_PAN) + elif buttons == pyglet.window.mouse.MIDDLE: + self.view["ball"].set_state(Trackball.STATE_PAN) + elif buttons == pyglet.window.mouse.RIGHT: + self.view["ball"].set_state(Trackball.STATE_ZOOM) - self.view['ball'].down(np.array([x, y])) - self.scene.camera_transform = self.view['ball'].pose + self.view["ball"].down(np.array([x, y])) + self.scene.camera_transform = self.view["ball"].pose def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers): """ Pan or rotate the view. """ - self.view['ball'].drag(np.array([x, y])) - self.scene.camera_transform = self.view['ball'].pose + self.view["ball"].drag(np.array([x, y])) + self.scene.camera_transform = self.view["ball"].pose def on_mouse_scroll(self, x, y, dx, dy): """ Zoom the view. """ - self.view['ball'].scroll(dy) - self.scene.camera_transform = self.view['ball'].pose + self.view["ball"].scroll(dy) + self.scene.camera_transform = self.view["ball"].pose def on_key_press(self, symbol, modifiers): """ @@ -672,20 +669,21 @@ def on_key_press(self, symbol, modifiers): self.toggle_fullscreen() if symbol in [ - pyglet.window.key.LEFT, - pyglet.window.key.RIGHT, - pyglet.window.key.DOWN, - pyglet.window.key.UP]: - self.view['ball'].down([0, 0]) + pyglet.window.key.LEFT, + pyglet.window.key.RIGHT, + pyglet.window.key.DOWN, + pyglet.window.key.UP, + ]: + self.view["ball"].down([0, 0]) if symbol == pyglet.window.key.LEFT: - self.view['ball'].drag([-magnitude, 0]) + self.view["ball"].drag([-magnitude, 0]) elif symbol == pyglet.window.key.RIGHT: - self.view['ball'].drag([magnitude, 0]) + self.view["ball"].drag([magnitude, 0]) elif symbol == pyglet.window.key.DOWN: - self.view['ball'].drag([0, -magnitude]) + self.view["ball"].drag([0, -magnitude]) elif symbol == pyglet.window.key.UP: - self.view['ball'].drag([0, magnitude]) - self.scene.camera_transform = self.view['ball'].pose + self.view["ball"].drag([0, magnitude]) + self.scene.camera_transform = self.view["ball"].pose def on_draw(self): """ @@ -714,7 +712,7 @@ def on_draw(self): count = -1 # if we are rendering an axis marker at the world - if self._axis and not self.view['axis'] == 'without_world': + if self._axis and not self.view["axis"] == "without_world": # we stored it as a vertex list self._axis.draw(mode=gl.GL_TRIANGLES) if self._grid: @@ -742,7 +740,8 @@ def on_draw(self): if self.fixed is not None and geometry_name in self.fixed: # remove altered camera transform from fixed geometry transform_fix = np.linalg.inv( - np.dot(self._initial_camera_transform, transform_camera)) + np.dot(self._initial_camera_transform, transform_camera) + ) # 
apply the transform so the fixed geometry doesn't move transform = np.dot(transform, transform_fix) @@ -758,11 +757,14 @@ def on_draw(self): # the best way to do this is probably a shader but this works fine if mode == gl.GL_LINES: # apply the offset in camera space - transform = util.multi_dot([ - transform, - np.linalg.inv(transform_camera), - self._line_offset, - transform_camera]) + transform = util.multi_dot( + [ + transform, + np.linalg.inv(transform_camera), + self._line_offset, + transform_camera, + ] + ) # add a new matrix to the model stack gl.glPushMatrix() @@ -770,16 +772,18 @@ def on_draw(self): gl.glMultMatrixf(rendering.matrix_to_gl(transform)) # draw an axis marker for each mesh frame - if self.view['axis'] == 'all': + if self.view["axis"] == "all": self._axis.draw(mode=gl.GL_TRIANGLES) - elif self.view['axis'] == 'without_world': + elif self.view["axis"] == "without_world": if not util.allclose(transform, np.eye(4), atol=1e-5): self._axis.draw(mode=gl.GL_TRIANGLES) # transparent things must be drawn last - if (hasattr(mesh, 'visual') and - hasattr(mesh.visual, 'transparency') - and mesh.visual.transparency): + if ( + hasattr(mesh, "visual") + and hasattr(mesh.visual, "transparency") + and mesh.visual.transparency + ): # put the current item onto the back of the queue if count < count_original: # add the node to be drawn last @@ -817,7 +821,7 @@ def flip(self): # seek start of file-like object img.seek(0) # save the bytes from the file object - self.scene.metadata['recording'].append(img.read()) + self.scene.metadata["recording"].append(img.read()) def save_image(self, file_obj): """ @@ -831,7 +835,7 @@ def save_image(self, file_obj): manager = pyglet.image.get_buffer_manager() colorbuffer = manager.get_color_buffer() # if passed a string save by name - if hasattr(file_obj, 'write'): + if hasattr(file_obj, "write"): colorbuffer.save(file=file_obj) else: colorbuffer.save(filename=file_obj) @@ -851,17 +855,14 @@ def geometry_hash(geometry): hash : str """ h = str(hash(geometry)) - if hasattr(geometry, 'visual'): + if hasattr(geometry, "visual"): # if visual properties are defined h += str(hash(geometry.visual)) return h -def render_scene(scene, - resolution=None, - visible=True, - **kwargs): +def render_scene(scene, resolution=None, visible=True, **kwargs): """ Render a preview of a scene to a PNG. 
Note that whether this works or not highly variable based on @@ -887,8 +888,8 @@ def render_scene(scene, Image in PNG format """ window = SceneViewer( - scene, start_loop=False, visible=visible, - resolution=resolution, **kwargs) + scene, start_loop=False, visible=visible, resolution=resolution, **kwargs + ) from ..util import BytesIO @@ -897,7 +898,7 @@ def render_scene(scene, pyglet.clock.tick() window.switch_to() window.dispatch_events() - window.dispatch_event('on_draw') + window.dispatch_event("on_draw") window.flip() if save: # save the color buffer data to memory diff --git a/trimesh/visual/material.py b/trimesh/visual/material.py index 2c5d808bc..cfdd41152 100644 --- a/trimesh/visual/material.py +++ b/trimesh/visual/material.py @@ -918,7 +918,6 @@ def pack_images(images): # which including deduplicating by hash, upsizing to the # nearest power of two, returning deterministically by seeding # and padding every side of the image by 1 pixel - # np.random.seed(42) return packing.images( images, deduplicate=True, @@ -1037,13 +1036,15 @@ def pack_images(images): new_uv = {} for group, img, offset in zip(mat_idx, images, offsets): # how big was the original image - scale_uv = img.size / final_size + scale = (np.array(img.size)) / (final_size) + # the units of offset are *pixels of the final image* + # thus to scale them to normalized UV coordinates we # what is the offset in fractions of final image - offset_uv = offset / final.size + offset / final_size # scale and translate each of the new UV coordinates # also make sure they are in 0.0-1.0 using modulus (i.e. wrap) for g in group: - g_uvs = uvs[g].copy() + uvs[g].copy() # only wrap pixels that are outside of 0.0-1.0. # use a small leeway of half a pixel for floating point inaccuracies and # the case of uv==1.0 @@ -1062,7 +1063,7 @@ def pack_images(images): new_uv[g] = (g_uvs * scale) + xy_off """ moved = (uvs[g] * scale_uv) + offset_uv - moved[np.logical_or(moved < -0.001, moved > 1.001)] %= 1.0 + moved[np.logical_or(moved < -0.00001, moved > 1.00001)] %= 1.0 new_uv[g] = moved # stack the new UV coordinates in the original order @@ -1106,10 +1107,10 @@ def pack_images(images): # note this is only true for simple colors # interpolation on complicated stuff can break this if not np.allclose(reference, compare): - from IPython import embed - - embed() - # assert np.allclose(reference, compare) + # from IPython import embed + # embed() + pass + assert np.allclose(reference, compare) if use_pbr: return ( diff --git a/trimesh/voxel/encoding.py b/trimesh/voxel/encoding.py index f350962f9..ea192c389 100644 --- a/trimesh/voxel/encoding.py +++ b/trimesh/voxel/encoding.py @@ -283,7 +283,9 @@ def __init__(self, indices, values, shape=None): raise ValueError("indices must be 2D, got shaped %s" % str(indices.shape)) if data["values"].shape != (indices.shape[0],): raise ValueError( - "values and indices shapes inconsistent: {} and {}".format(data["values"], data["indices"]) + "values and indices shapes inconsistent: {} and {}".format( + data["values"], data["indices"] + ) ) if shape is None: self._shape = tuple(data["indices"].max(axis=0) + 1) From 1f99f020dd95215590328ceebfe2516ebe570456 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 28 Sep 2023 16:37:17 -0400 Subject: [PATCH 113/144] check formatting in docker --- Dockerfile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Dockerfile b/Dockerfile index e2512e288..cc49b9042 100644 --- a/Dockerfile +++ b/Dockerfile @@ -69,6 +69,10 @@ USER user # install things like pytest RUN 
pip install -e .[all] +# check formatting +RUN ruff trimesh +RUN black --check trimesh + # run pytest wrapped with xvfb for simple viewer tests RUN xvfb-run pytest --cov=trimesh \ -p no:ALL_DEPENDENCIES \ From 4e7ff7bf0a245eeaa69801bf1d2a961ff74eb05c Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 28 Sep 2023 16:52:20 -0400 Subject: [PATCH 114/144] fix return type --- trimesh/base.py | 52 ++++++++++++++++++-------------------- trimesh/visual/material.py | 6 ++--- 2 files changed, 28 insertions(+), 30 deletions(-) diff --git a/trimesh/base.py b/trimesh/base.py index e1b1ef694..81123ba59 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -43,6 +43,7 @@ from .exchange.export import export_mesh from .parent import Geometry3D from .scene import Scene +from .triangles import MassProperties from .typed import ArrayLike, NDArray from .visual import ColorVisuals, TextureVisuals, create_visual @@ -617,7 +618,7 @@ def center_mass(self): center_mass : (3, ) float Volumetric center of mass of the mesh. """ - return self.mass_properties["center_mass"] + return self.mass_properties.center_mass @center_mass.setter def center_mass(self, value): @@ -636,26 +637,25 @@ def center_mass(self, value): self._cache.delete("mass_properties") @property - def density(self): + def density(self) -> float: """ The density of the mesh used in inertia calculations. Returns ----------- - density : float + density The density of the primitive. """ - density = self.mass_properties["density"] - return density + return self.mass_properties.density @density.setter - def density(self, value): + def density(self, value: float): """ Set the density of the primitive. Parameters ------------- - density : float + density Specify the density of the primitive to be used in inertia calculations. """ @@ -674,8 +674,7 @@ def volume(self) -> float64: volume : float Volume of the current mesh """ - volume = self.mass_properties["volume"] - return volume + return self.mass_properties.volume @property def mass(self) -> float64: @@ -688,8 +687,7 @@ def mass(self) -> float64: mass : float Mass of the current mesh """ - mass = self.mass_properties["mass"] - return mass + return self.mass_properties.mass @property def moment_inertia(self) -> NDArray[float64]: @@ -707,8 +705,7 @@ def moment_inertia(self) -> NDArray[float64]: Moment of inertia of the current mesh at the center of mass and aligned with the cartesian axis. """ - inertia = self.mass_properties["inertia"] - return inertia + return self.mass_properties.inertia def moment_inertia_frame(self, transform: NDArray[float64]) -> NDArray[float64]: """ @@ -2695,11 +2692,10 @@ def area_faces(self) -> NDArray[float64]: area_faces : (n, ) float Area of each face """ - area_faces = triangles.area(crosses=self.triangles_cross, sum=False) - return area_faces + return triangles.area(crosses=self.triangles_cross, sum=False) @caching.cache_decorator - def mass_properties(self) -> triangles.MassProperties: + def mass_properties(self) -> MassProperties: """ Returns the mass properties of the current mesh. 
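# A short sketch of what the MassProperties return type used above means
# for callers: the dataclass added in `trimesh.triangles` keeps dict-style
# access through __getitem__, so older key-based code keeps working.
import numpy as np

import trimesh

mesh = trimesh.creation.box(extents=(1.0, 2.0, 3.0))
props = mesh.mass_properties
# attribute access and the backwards-compatible key access agree
assert props.volume == props["volume"]
# with the default density of 1.0, mass equals volume: 1 * 2 * 3
assert np.isclose(props.mass, 6.0)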
@@ -3025,19 +3021,19 @@ def face_adjacency_tree(self) -> Index: Returns -------- - tree: rtree.index + tree Where each edge in self.face_adjacency has a rectangular cell """ # the (n,6) interleaved bounding box for every line segment - segment_bounds = np.column_stack( - ( - self.vertices[self.face_adjacency_edges].min(axis=1), - self.vertices[self.face_adjacency_edges].max(axis=1), + return util.bounds_tree( + np.column_stack( + ( + self.vertices[self.face_adjacency_edges].min(axis=1), + self.vertices[self.face_adjacency_edges].max(axis=1), + ) ) ) - tree = util.bounds_tree(segment_bounds) - return tree def copy(self, include_cache: bool = False) -> "Trimesh": """ @@ -3089,7 +3085,7 @@ def __copy__(self, *args) -> "Trimesh": # interpret shallow copy as "keep cached data" return self.copy(include_cache=True) - def eval_cached(self, statement, *args): + def eval_cached(self, statement: str, *args): """ Evaluate a statement and cache the result before returning. @@ -3111,9 +3107,11 @@ def eval_cached(self, statement, *args): r = mesh.eval_cached('np.dot(self.vertices, args[0])', [0, 0, 1]) """ - statement = str(statement) - key = "eval_cached_" + statement - key += "_".join(str(i) for i in args) + # store this by the combined hash of statement and args + hashable = [hash(statement)] + hashable.extend(hash(a) for a in args) + + key = f"eval_cached_{hash(tuple(hashable))}" if key in self._cache: return self._cache[key] diff --git a/trimesh/visual/material.py b/trimesh/visual/material.py index cfdd41152..5b917900b 100644 --- a/trimesh/visual/material.py +++ b/trimesh/visual/material.py @@ -1036,11 +1036,11 @@ def pack_images(images): new_uv = {} for group, img, offset in zip(mat_idx, images, offsets): # how big was the original image - scale = (np.array(img.size)) / (final_size) + uv_scale = np.array(img.size) / final_size # the units of offset are *pixels of the final image* # thus to scale them to normalized UV coordinates we # what is the offset in fractions of final image - offset / final_size + uv_offset = offset / final_size # scale and translate each of the new UV coordinates # also make sure they are in 0.0-1.0 using modulus (i.e. 
wrap) for g in group: @@ -1062,7 +1062,7 @@ def pack_images(images): g_uvs[wrap_mask] = g_uvs[wrap_mask] % 1.0 new_uv[g] = (g_uvs * scale) + xy_off """ - moved = (uvs[g] * scale_uv) + offset_uv + moved = (uvs[g] * uv_scale) + uv_offset moved[np.logical_or(moved < -0.00001, moved > 1.00001)] %= 1.0 new_uv[g] = moved From fb772506cf7c38ab84e669c1095141a2b83f4498 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 28 Sep 2023 16:54:15 -0400 Subject: [PATCH 115/144] ruff on tests --- tests/test_arc.py | 2 +- tests/test_splines.py | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/test_arc.py b/tests/test_arc.py index 2eac5922f..ac7d973b4 100644 --- a/tests/test_arc.py +++ b/tests/test_arc.py @@ -13,7 +13,7 @@ def test_center(self): points = test_points[0] res_center, res_radius = test_results[0] center_info = arc_center(points) - C, R, N, angle = ( + C, R, _N, _angle = ( center_info["center"], center_info["radius"], center_info["normal"], diff --git a/tests/test_splines.py b/tests/test_splines.py index 204caa35c..ed4f8c1a3 100644 --- a/tests/test_splines.py +++ b/tests/test_splines.py @@ -17,9 +17,7 @@ def test_bezier_example(self): # perimeter should be about right if it was discretized properly if not g.np.isclose(p.polygons_full[0].exterior.length, truth, atol=100.0): raise ValueError( - "perimeter wrong: {} != {}".format( - truth, p.polygons_full[0].exterior.length - ) + f"perimeter wrong: {truth} != {p.polygons_full[0].exterior.length}" ) From b33e440174cd65161b73b03d3a1d0643a418a021 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 28 Sep 2023 17:05:39 -0400 Subject: [PATCH 116/144] tests --- tests/test_inertia.py | 4 +--- trimesh/parent.py | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/test_inertia.py b/tests/test_inertia.py index 95770fa83..f001c646f 100644 --- a/tests/test_inertia.py +++ b/tests/test_inertia.py @@ -451,9 +451,7 @@ def test_mass(self): triangles=mesh.triangles, density=truth["density"], skip_inertia=False ) - for key, _value in calc.items(): - if key not in truth: - continue + for key in truth.keys(): if not g.np.allclose(calc[key], truth[key], atol=1e-2): raise ValueError( "{}({}):\n{}\n!=\n{}".format( diff --git a/trimesh/parent.py b/trimesh/parent.py index 7df64f547..e716179ae 100644 --- a/trimesh/parent.py +++ b/trimesh/parent.py @@ -277,7 +277,7 @@ def apply_obb(self, **kwargs): Transformation matrix that was applied to mesh to move it into OBB frame """ - if tol.strict: + if tol.strict and hasattr(self, "volume"): # in strict mode make sure volume is identical check = self.volume matrix = bounds.oriented_bounds(self, **kwargs) From cdc9902ee73569e9b5032126ba760bafa22246f8 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 28 Sep 2023 17:48:23 -0400 Subject: [PATCH 117/144] fix apply_obb --- trimesh/parent.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/trimesh/parent.py b/trimesh/parent.py index e716179ae..1ba633db0 100644 --- a/trimesh/parent.py +++ b/trimesh/parent.py @@ -280,10 +280,10 @@ def apply_obb(self, **kwargs): if tol.strict and hasattr(self, "volume"): # in strict mode make sure volume is identical check = self.volume - matrix = bounds.oriented_bounds(self, **kwargs) + matrix, _ = bounds.oriented_bounds(self, **kwargs) assert np.isclose(check, self.volume) else: # calculate the oriented bounding box - matrix = bounds.oriented_bounds(self, **kwargs) + matrix, _ = bounds.oriented_bounds(self, **kwargs) return matrix From 
e3d299de7fa3a44e4c92457b23ba76a57655f3c8 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Fri, 29 Sep 2023 13:04:03 -0400 Subject: [PATCH 118/144] fix test_inertia --- tests/test_inertia.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/tests/test_inertia.py b/tests/test_inertia.py index f001c646f..8b96b7754 100644 --- a/tests/test_inertia.py +++ b/tests/test_inertia.py @@ -452,12 +452,10 @@ def test_mass(self): ) for key in truth.keys(): - if not g.np.allclose(calc[key], truth[key], atol=1e-2): - raise ValueError( - "{}({}):\n{}\n!=\n{}".format( - truth["filename"], key, calc[key], g.np.array(truth[key]) - ) - ) + if key == "area": + assert g.np.isclose(mesh.area, truth[key], atol=1e-2) + elif key in dir(calc): + assert g.np.allclose(calc[key], truth[key], atol=1e-2) if __name__ == "__main__": From a1682878bcfba684bc03bf98e6a59054b9600c51 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Fri, 29 Sep 2023 13:18:55 -0400 Subject: [PATCH 119/144] actually apply obb transform --- trimesh/parent.py | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/trimesh/parent.py b/trimesh/parent.py index 1ba633db0..abea714d0 100644 --- a/trimesh/parent.py +++ b/trimesh/parent.py @@ -277,13 +277,20 @@ def apply_obb(self, **kwargs): Transformation matrix that was applied to mesh to move it into OBB frame """ + # save the pre-transform volume if tol.strict and hasattr(self, "volume"): - # in strict mode make sure volume is identical - check = self.volume - matrix, _ = bounds.oriented_bounds(self, **kwargs) - assert np.isclose(check, self.volume) - else: - # calculate the oriented bounding box - matrix, _ = bounds.oriented_bounds(self, **kwargs) + volume = self.volume + + # calculate the OBB passing keyword arguments through + matrix, extents = bounds.oriented_bounds(self, **kwargs) + # apply the transform + self.apply_transform(matrix) + + if tol.strict: + # obb transform should not have changed volume + if hasattr(self, "volume"): + assert np.isclose(self.volume, volume) + # overall extents should match what we expected + assert np.allclose(self.extents, extents) return matrix From fd06c9150aad40bd09a53c61ebaad0b89b1c7634 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Fri, 29 Sep 2023 14:23:30 -0400 Subject: [PATCH 120/144] add some tests and better error messages --- models/2D/loose.dxf | 2726 +++++++++++++++++++++++++++++++++++++++ tests/test_mesh.py | 4 +- tests/test_paths.py | 26 + trimesh/parent.py | 3 + trimesh/path/packing.py | 23 +- trimesh/path/path.py | 24 +- trimesh/util.py | 6 +- 7 files changed, 2781 insertions(+), 31 deletions(-) create mode 100644 models/2D/loose.dxf diff --git a/models/2D/loose.dxf b/models/2D/loose.dxf new file mode 100644 index 000000000..3f9e85822 --- /dev/null +++ b/models/2D/loose.dxf @@ -0,0 +1,2726 @@ +999 +dxfrw 0.6.3 + 0 +SECTION + 2 +HEADER + 9 +$ACADVER + 1 +AC1021 + 9 +$DWGCODEPAGE + 3 +ANSI_1252 + 9 +$INSBASE + 10 +0 + 20 +0 + 30 +0 + 9 +$EXTMIN + 10 +0 + 20 +0 + 30 +0 + 9 +$EXTMAX + 10 +487.3705806953521 + 20 +278.5 + 30 +0 + 9 +$LIMMIN + 10 +0 + 20 +0 + 9 +$LIMMAX + 10 +420 + 20 +297 + 9 +$ORTHOMODE + 70 + 0 + 9 +$REGENMODE + 70 + 1 + 9 +$FILLMODE + 70 + 1 + 9 +$QTEXTMODE + 70 + 0 + 9 +$MIRRTEXT + 70 + 0 + 9 +$LTSCALE + 40 +1 + 9 +$ATTMODE + 70 + 0 + 9 +$TEXTSIZE + 40 +2.5 + 9 +$TRACEWID + 40 +15.68 + 9 +$TEXTSTYLE + 7 +STANDARD + 9 +$CLAYER + 8 +0 + 9 +$CELTYPE + 6 +BYLAYER + 9 +$CECOLOR + 62 + 256 + 9 +$CELTSCALE + 40 +1 + 9 +$DISPSILH + 70 + 0 + 9 
+$DIMSCALE + 40 +2.5 + 9 +$DIMASZ + 40 +2.5 + 9 +$DIMEXO + 40 +0.625 + 9 +$DIMDLI + 40 +3.75 + 9 +$DIMRND + 40 +0 + 9 +$DIMDLE + 40 +0 + 9 +$DIMEXE + 40 +1.25 + 9 +$DIMTP + 40 +0 + 9 +$DIMTM + 40 +0 + 9 +$DIMTXT + 40 +2.5 + 9 +$DIMCEN + 40 +2.5 + 9 +$DIMTSZ + 40 +0 + 9 +$DIMTOL + 70 + 0 + 9 +$DIMLIM + 70 + 0 + 9 +$DIMTIH + 70 + 0 + 9 +$DIMTOH + 70 + 0 + 9 +$DIMSE1 + 70 + 0 + 9 +$DIMSE2 + 70 + 0 + 9 +$DIMTAD + 70 + 1 + 9 +$DIMZIN + 70 + 8 + 9 +$DIMBLK + 1 + + 9 +$DIMASO + 70 + 1 + 9 +$DIMSHO + 70 + 1 + 9 +$DIMPOST + 1 + + 9 +$DIMAPOST + 1 + + 9 +$DIMALT + 70 + 0 + 9 +$DIMALTD + 70 + 3 + 9 +$DIMALTF + 40 +0.03937 + 9 +$DIMLFAC + 40 +1 + 9 +$DIMTOFL + 70 + 1 + 9 +$DIMTVP + 40 +0 + 9 +$DIMTIX + 70 + 0 + 9 +$DIMSOXD + 70 + 0 + 9 +$DIMSAH + 70 + 0 + 9 +$DIMBLK1 + 1 + + 9 +$DIMBLK2 + 1 + + 9 +$DIMSTYLE + 2 +STANDARD + 9 +$DIMCLRD + 70 + 0 + 9 +$DIMCLRE + 70 + 0 + 9 +$DIMCLRT + 70 + 0 + 9 +$DIMTFAC + 40 +1 + 9 +$DIMGAP + 40 +0.625 + 9 +$DIMJUST + 70 + 0 + 9 +$DIMSD1 + 70 + 0 + 9 +$DIMSD2 + 70 + 0 + 9 +$DIMTOLJ + 70 + 0 + 9 +$DIMTZIN + 70 + 8 + 9 +$DIMALTZ + 70 + 0 + 9 +$DIMALTTZ + 70 + 0 + 9 +$DIMUPT + 70 + 0 + 9 +$DIMDEC + 70 + 2 + 9 +$DIMTDEC + 70 + 2 + 9 +$DIMALTU + 70 + 2 + 9 +$DIMALTTD + 70 + 3 + 9 +$DIMTXSTY + 7 +STANDARD + 9 +$DIMAUNIT + 70 + 0 + 9 +$DIMADEC + 70 + 0 + 9 +$DIMALTRND + 40 +0 + 9 +$DIMAZIN + 70 + 0 + 9 +$DIMDSEP + 70 + 44 + 9 +$DIMATFIT + 70 + 3 + 9 +$DIMFRAC + 70 + 0 + 9 +$DIMLDRBLK + 1 +STANDARD + 9 +$DIMLUNIT + 70 + 2 + 9 +$DIMLWD + 70 + -2 + 9 +$DIMLWE + 70 + -2 + 9 +$DIMTMOVE + 70 + 0 + 9 +$DIMFXL + 40 +1 + 9 +$DIMFXLON + 70 + 0 + 9 +$DIMJOGANG + 40 +0.7854 + 9 +$DIMTFILL + 70 + 0 + 9 +$DIMTFILLCLR + 70 + 0 + 9 +$DIMARCSYM + 70 + 0 + 9 +$DIMLTYPE + 6 + + 9 +$DIMLTEX1 + 6 + + 9 +$DIMLTEX2 + 6 + + 9 +$LUNITS + 70 + 2 + 9 +$LUPREC + 70 + 4 + 9 +$SKETCHINC + 40 +1 + 9 +$FILLETRAD + 40 +0 + 9 +$AUNITS + 70 + 0 + 9 +$AUPREC + 70 + 2 + 9 +$MENU + 1 +. 
+ 9 +$ELEVATION + 40 +0 + 9 +$PELEVATION + 40 +0 + 9 +$THICKNESS + 40 +0 + 9 +$LIMCHECK + 70 + 0 + 9 +$CHAMFERA + 40 +0 + 9 +$CHAMFERB + 40 +0 + 9 +$CHAMFERC + 40 +0 + 9 +$CHAMFERD + 40 +0 + 9 +$SKPOLY + 70 + 0 + 9 +$USRTIMER + 70 + 1 + 9 +$ANGBASE + 50 +0 + 9 +$ANGDIR + 70 + 0 + 9 +$PDMODE + 70 + 34 + 9 +$PDSIZE + 40 +0 + 9 +$PLINEWID + 40 +0 + 9 +$SPLFRAME + 70 + 0 + 9 +$SPLINETYPE + 70 + 2 + 9 +$SPLINESEGS + 70 + 8 + 9 +$HANDSEED + 5 +20000 + 9 +$SURFTAB1 + 70 + 6 + 9 +$SURFTAB2 + 70 + 6 + 9 +$SURFTYPE + 70 + 6 + 9 +$SURFU + 70 + 6 + 9 +$SURFV + 70 + 6 + 9 +$UCSBASE + 2 + + 9 +$UCSNAME + 2 + + 9 +$UCSORG + 10 +0 + 20 +0 + 30 +0 + 9 +$UCSXDIR + 10 +1 + 20 +0 + 30 +0 + 9 +$UCSYDIR + 10 +0 + 20 +1 + 30 +0 + 9 +$UCSORTHOREF + 2 + + 9 +$UCSORTHOVIEW + 70 + 0 + 9 +$UCSORGTOP + 10 +0 + 20 +0 + 30 +0 + 9 +$UCSORGBOTTOM + 10 +0 + 20 +0 + 30 +0 + 9 +$UCSORGLEFT + 10 +0 + 20 +0 + 30 +0 + 9 +$UCSORGRIGHT + 10 +0 + 20 +0 + 30 +0 + 9 +$UCSORGFRONT + 10 +0 + 20 +0 + 30 +0 + 9 +$UCSORGBACK + 10 +0 + 20 +0 + 30 +0 + 9 +$PUCSBASE + 2 + + 9 +$PUCSNAME + 2 + + 9 +$PUCSORG + 10 +0 + 20 +0 + 30 +0 + 9 +$PUCSXDIR + 10 +1 + 20 +0 + 30 +0 + 9 +$PUCSYDIR + 10 +0 + 20 +1 + 30 +0 + 9 +$PUCSORTHOREF + 2 + + 9 +$PUCSORTHOVIEW + 70 + 0 + 9 +$PUCSORGTOP + 10 +0 + 20 +0 + 30 +0 + 9 +$PUCSORGBOTTOM + 10 +0 + 20 +0 + 30 +0 + 9 +$PUCSORGLEFT + 10 +0 + 20 +0 + 30 +0 + 9 +$PUCSORGRIGHT + 10 +0 + 20 +0 + 30 +0 + 9 +$PUCSORGFRONT + 10 +0 + 20 +0 + 30 +0 + 9 +$PUCSORGBACK + 10 +0 + 20 +0 + 30 +0 + 9 +$USERI1 + 70 + 0 + 9 +$USERI2 + 70 + 0 + 9 +$USERI3 + 70 + 0 + 9 +$USERI4 + 70 + 0 + 9 +$USERI5 + 70 + 0 + 9 +$USERR1 + 40 +0 + 9 +$USERR2 + 40 +0 + 9 +$USERR3 + 40 +0 + 9 +$USERR4 + 40 +0 + 9 +$USERR5 + 40 +0 + 9 +$WORLDVIEW + 70 + 1 + 9 +$SHADEDGE + 70 + 3 + 9 +$SHADEDIF + 70 + 70 + 9 +$TILEMODE + 70 + 1 + 9 +$MAXACTVP + 70 + 64 + 9 +$PINSBASE + 10 +0 + 20 +0 + 30 +0 + 9 +$PLIMCHECK + 70 + 0 + 9 +$PEXTMIN + 10 +0 + 20 +0 + 30 +0 + 9 +$PEXTMAX + 10 +0 + 20 +0 + 30 +0 + 9 +$SNAPSTYLE + 70 + 0 + 9 +$PLIMMIN + 10 +0 + 20 +0 + 9 +$PLIMMAX + 10 +210 + 20 +297 + 9 +$UNITMODE + 70 + 0 + 9 +$VISRETAIN + 70 + 1 + 9 +$PLINEGEN + 70 + 0 + 9 +$PSLTSCALE + 70 + 1 + 9 +$TREEDEPTH + 70 + 3020 + 9 +$CMLSTYLE + 2 +Standard + 9 +$CMLJUST + 70 + 0 + 9 +$CMLSCALE + 40 +20 + 9 +$PROXYGRAPHICS + 70 + 1 + 9 +$MEASUREMENT + 70 + 1 + 9 +$CELWEIGHT +370 + -1 + 9 +$ENDCAPS +280 + 0 + 9 +$JOINSTYLE +280 + 0 + 9 +$LWDISPLAY +290 + 0 + 9 +$INSUNITS + 70 + 4 + 9 +$HYPERLINKBASE + 1 + + 9 +$STYLESHEET + 1 + + 9 +$XEDIT +290 + 1 + 9 +$CEPSNTYPE +380 + 0 + 9 +$PSTYLEMODE +290 + 1 + 9 +$EXTNAMES +290 + 1 + 9 +$PSVPSCALE + 40 +1 + 9 +$OLESTARTUP +290 + 0 + 9 +$SORTENTS +280 + 127 + 9 +$INDEXCTL +280 + 0 + 9 +$HIDETEXT +280 + 1 + 9 +$XCLIPFRAME +290 + 0 + 9 +$HALOGAP +280 + 0 + 9 +$OBSCOLOR + 70 + 257 + 9 +$OBSLTYPE +280 + 0 + 9 +$INTERSECTIONDISPLAY +280 + 0 + 9 +$INTERSECTIONCOLOR + 70 + 257 + 9 +$DIMASSOC +280 + 1 + 9 +$PROJECTNAME + 1 + + 9 +$CAMERADISPLAY +290 + 0 + 9 +$LENSLENGTH + 40 +50 + 9 +$CAMERAHEIGHT + 40 +0 + 9 +$STEPSPERSEC + 40 +2 + 9 +$STEPSIZE + 40 +50 + 9 +$3DDWFPREC + 40 +2 + 9 +$PSOLWIDTH + 40 +5 + 9 +$PSOLHEIGHT + 40 +80 + 9 +$LOFTANG1 + 40 +1.570796326794897 + 9 +$LOFTANG2 + 40 +1.570796326794897 + 9 +$LOFTMAG1 + 40 +0 + 9 +$LOFTMAG2 + 40 +0 + 9 +$LOFTPARAM + 70 + 7 + 9 +$LOFTNORMALS +280 + 1 + 9 +$LATITUDE + 40 +1 + 9 +$LONGITUDE + 40 +1 + 9 +$NORTHDIRECTION + 40 +0 + 9 +$TIMEZONE + 70 +-8000 + 9 +$LIGHTGLYPHDISPLAY +280 + 1 + 9 +$TILEMODELIGHTSYNCH +280 + 1 + 9 +$SOLIDHIST +280 + 1 + 9 +$SHOWHIST +280 + 1 + 9 +$DWFFRAME +280 + 2 + 9 
+$DGNFRAME +280 + 0 + 9 +$REALWORLDSCALE +290 + 1 + 9 +$INTERFERECOLOR + 62 + 1 + 9 +$CSHADOW +280 + 0 + 9 +$SHADOWPLANELOCATION + 40 +0 + 0 +ENDSEC + 0 +SECTION + 2 +CLASSES + 0 +ENDSEC + 0 +SECTION + 2 +TABLES + 0 +TABLE + 2 +VPORT + 5 +8 +330 +0 +100 +AcDbSymbolTable + 70 + 1 + 0 +VPORT + 5 +31 +330 +2 +100 +AcDbSymbolTableRecord +100 +AcDbViewportTableRecord + 2 +*ACTIVE + 70 + 0 + 10 +0 + 20 +0 + 11 +1 + 21 +1 + 12 +366.375 + 22 +205 + 13 +0 + 23 +0 + 14 +10 + 24 +10 + 15 +10 + 25 +10 + 16 +0 + 26 +0 + 36 +1 + 17 +0 + 27 +0 + 37 +0 + 40 +422.5 + 41 +1.763905325443787 + 42 +50 + 43 +0 + 44 +0 + 50 +0 + 51 +0 + 71 + 0 + 72 + 100 + 73 + 1 + 74 + 3 + 75 + 0 + 76 + 1 + 77 + 0 + 78 + 0 +281 + 0 + 65 + 1 +110 +0 +120 +0 +130 +0 +111 +1 +121 +0 +131 +0 +112 +0 +122 +1 +132 +0 + 79 + 0 +146 +0 +348 +10020 + 60 + 7 + 61 + 5 +292 +1 +282 + 1 +141 +0 +142 +0 + 63 + 250 +421 +3358443 + 0 +ENDTAB + 0 +TABLE + 2 +LTYPE + 5 +5 +330 +0 +100 +AcDbSymbolTable + 70 + 4 + 0 +LTYPE + 5 +14 +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +ByBlock + 70 + 0 + 3 + + 72 + 65 + 73 + 0 + 40 +0 + 0 +LTYPE + 5 +15 +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +ByLayer + 70 + 0 + 3 + + 72 + 65 + 73 + 0 + 40 +0 + 0 +LTYPE + 5 +16 +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +Continuous + 70 + 0 + 3 +Solid line + 72 + 65 + 73 + 0 + 40 +0 + 0 +LTYPE + 5 +32 +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +DOT + 70 + 0 + 3 +Dot . . . . . . . . . . . . . . . . . . . . . . + 72 + 65 + 73 + 2 + 40 +6.35 + 49 +0 + 74 + 0 + 49 +-6.35 + 74 + 0 + 0 +LTYPE + 5 +33 +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +DOTTINY + 70 + 0 + 3 +Dot (.15x) ..................................... + 72 + 65 + 73 + 2 + 40 +0.9525 + 49 +0 + 74 + 0 + 49 +-0.9525 + 74 + 0 + 0 +LTYPE + 5 +34 +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +DOT2 + 70 + 0 + 3 +Dot (.5x) ..................................... + 72 + 65 + 73 + 2 + 40 +3.175 + 49 +0 + 74 + 0 + 49 +-3.175 + 74 + 0 + 0 +LTYPE + 5 +35 +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +DOTX2 + 70 + 0 + 3 +Dot (2x) . . . . . . . . . . . . . + 72 + 65 + 73 + 2 + 40 +12.7 + 49 +0 + 74 + 0 + 49 +-12.7 + 74 + 0 + 0 +LTYPE + 5 +36 +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +DASHED + 70 + 0 + 3 +Dashed _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + 72 + 65 + 73 + 2 + 40 +19.05 + 49 +12.7 + 74 + 0 + 49 +-6.35 + 74 + 0 + 0 +LTYPE + 5 +37 +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +DASHEDTINY + 70 + 0 + 3 +Dashed (.15x) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + 72 + 65 + 73 + 2 + 40 +2.8575 + 49 +1.905 + 74 + 0 + 49 +-0.9525 + 74 + 0 + 0 +LTYPE + 5 +38 +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +DASHED2 + 70 + 0 + 3 +Dashed (.5x) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + 72 + 65 + 73 + 2 + 40 +9.524999999999999 + 49 +6.35 + 74 + 0 + 49 +-3.175 + 74 + 0 + 0 +LTYPE + 5 +39 +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +DASHEDX2 + 70 + 0 + 3 +Dashed (2x) ____ ____ ____ ____ ____ ___ + 72 + 65 + 73 + 2 + 40 +38.09999999999999 + 49 +25.4 + 74 + 0 + 49 +-12.7 + 74 + 0 + 0 +LTYPE + 5 +3A +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +DASHDOT + 70 + 0 + 3 +Dash dot __ . __ . __ . __ . __ . __ . __ . 
__ + 72 + 65 + 73 + 4 + 40 +25.4 + 49 +12.7 + 74 + 0 + 49 +-6.35 + 74 + 0 + 49 +0 + 74 + 0 + 49 +-6.35 + 74 + 0 + 0 +LTYPE + 5 +3B +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +DASHDOTTINY + 70 + 0 + 3 +Dash dot (.15x) _._._._._._._._._._._._._._._. + 72 + 65 + 73 + 4 + 40 +3.81 + 49 +1.905 + 74 + 0 + 49 +-0.9525 + 74 + 0 + 49 +0 + 74 + 0 + 49 +-0.9525 + 74 + 0 + 0 +LTYPE + 5 +3C +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +DASHDOT2 + 70 + 0 + 3 +Dash dot (.5x) _._._._._._._._._._._._._._._. + 72 + 65 + 73 + 4 + 40 +12.7 + 49 +6.35 + 74 + 0 + 49 +-3.175 + 74 + 0 + 49 +0 + 74 + 0 + 49 +-3.175 + 74 + 0 + 0 +LTYPE + 5 +3D +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +DASHDOTX2 + 70 + 0 + 3 +Dash dot (2x) ____ . ____ . ____ . ___ + 72 + 65 + 73 + 4 + 40 +50.8 + 49 +25.4 + 74 + 0 + 49 +-12.7 + 74 + 0 + 49 +0 + 74 + 0 + 49 +-12.7 + 74 + 0 + 0 +LTYPE + 5 +3E +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +DIVIDE + 70 + 0 + 3 +Divide ____ . . ____ . . ____ . . ____ . . ____ + 72 + 65 + 73 + 6 + 40 +31.75 + 49 +12.7 + 74 + 0 + 49 +-6.35 + 74 + 0 + 49 +0 + 74 + 0 + 49 +-6.35 + 74 + 0 + 49 +0 + 74 + 0 + 49 +-6.35 + 74 + 0 + 0 +LTYPE + 5 +3F +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +DIVIDETINY + 70 + 0 + 3 +Divide (.15x) __..__..__..__..__..__..__..__.._ + 72 + 65 + 73 + 6 + 40 +4.7625 + 49 +1.905 + 74 + 0 + 49 +-0.9525 + 74 + 0 + 49 +0 + 74 + 0 + 49 +-0.9525 + 74 + 0 + 49 +0 + 74 + 0 + 49 +-0.9525 + 74 + 0 + 0 +LTYPE + 5 +40 +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +DIVIDE2 + 70 + 0 + 3 +Divide (.5x) __..__..__..__..__..__..__..__.._ + 72 + 65 + 73 + 6 + 40 +15.875 + 49 +6.35 + 74 + 0 + 49 +-3.175 + 74 + 0 + 49 +0 + 74 + 0 + 49 +-3.175 + 74 + 0 + 49 +0 + 74 + 0 + 49 +-3.175 + 74 + 0 + 0 +LTYPE + 5 +41 +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +DIVIDEX2 + 70 + 0 + 3 +Divide (2x) ________ . . ________ . . _ + 72 + 65 + 73 + 6 + 40 +63.5 + 49 +25.4 + 74 + 0 + 49 +-12.7 + 74 + 0 + 49 +0 + 74 + 0 + 49 +-12.7 + 74 + 0 + 49 +0 + 74 + 0 + 49 +-12.7 + 74 + 0 + 0 +LTYPE + 5 +42 +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +BORDER + 70 + 0 + 3 +Border __ __ . __ __ . __ __ . __ __ . __ __ . + 72 + 65 + 73 + 6 + 40 +44.45 + 49 +12.7 + 74 + 0 + 49 +-6.35 + 74 + 0 + 49 +12.7 + 74 + 0 + 49 +-6.35 + 74 + 0 + 49 +0 + 74 + 0 + 49 +-6.35 + 74 + 0 + 0 +LTYPE + 5 +43 +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +BORDERTINY + 70 + 0 + 3 +Border (.15x) __.__.__.__.__.__.__.__.__.__.__. + 72 + 65 + 73 + 6 + 40 +6.6675 + 49 +1.905 + 74 + 0 + 49 +-0.9525 + 74 + 0 + 49 +1.905 + 74 + 0 + 49 +-0.9525 + 74 + 0 + 49 +0 + 74 + 0 + 49 +-0.9525 + 74 + 0 + 0 +LTYPE + 5 +44 +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +BORDER2 + 70 + 0 + 3 +Border (.5x) __.__.__.__.__.__.__.__.__.__.__. + 72 + 65 + 73 + 6 + 40 +22.225 + 49 +6.35 + 74 + 0 + 49 +-3.175 + 74 + 0 + 49 +6.35 + 74 + 0 + 49 +-3.175 + 74 + 0 + 49 +0 + 74 + 0 + 49 +-3.175 + 74 + 0 + 0 +LTYPE + 5 +45 +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +BORDERX2 + 70 + 0 + 3 +Border (2x) ____ ____ . ____ ____ . 
___ + 72 + 65 + 73 + 6 + 40 +88.89999999999999 + 49 +25.4 + 74 + 0 + 49 +-12.7 + 74 + 0 + 49 +25.4 + 74 + 0 + 49 +-12.7 + 74 + 0 + 49 +0 + 74 + 0 + 49 +-12.7 + 74 + 0 + 0 +LTYPE + 5 +46 +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +CENTER + 70 + 0 + 3 +Center ____ _ ____ _ ____ _ ____ _ ____ _ ____ + 72 + 65 + 73 + 4 + 40 +50.8 + 49 +31.75 + 74 + 0 + 49 +-6.35 + 74 + 0 + 49 +6.35 + 74 + 0 + 49 +-6.35 + 74 + 0 + 0 +LTYPE + 5 +47 +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +CENTERTINY + 70 + 0 + 3 +Center (.15x) ___ _ ___ _ ___ _ ___ _ ___ _ ___ + 72 + 65 + 73 + 4 + 40 +7.619999999999999 + 49 +4.7625 + 74 + 0 + 49 +-0.9525 + 74 + 0 + 49 +0.9525 + 74 + 0 + 49 +-0.9525 + 74 + 0 + 0 +LTYPE + 5 +48 +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +CENTER2 + 70 + 0 + 3 +Center (.5x) ___ _ ___ _ ___ _ ___ _ ___ _ ___ + 72 + 65 + 73 + 4 + 40 +28.575 + 49 +19.05 + 74 + 0 + 49 +-3.175 + 74 + 0 + 49 +3.175 + 74 + 0 + 49 +-3.175 + 74 + 0 + 0 +LTYPE + 5 +49 +330 +5 +100 +AcDbSymbolTableRecord +100 +AcDbLinetypeTableRecord + 2 +CENTERX2 + 70 + 0 + 3 +Center (2x) ________ __ ________ __ _____ + 72 + 65 + 73 + 4 + 40 +101.6 + 49 +63.5 + 74 + 0 + 49 +-12.7 + 74 + 0 + 49 +12.7 + 74 + 0 + 49 +-12.7 + 74 + 0 + 0 +ENDTAB + 0 +TABLE + 2 +LAYER + 5 +2 +330 +0 +100 +AcDbSymbolTable + 70 + 1 + 0 +LAYER + 5 +10 +330 +2 +100 +AcDbSymbolTableRecord +100 +AcDbLayerTableRecord + 2 +0 + 70 + 0 + 62 + 7 + 6 +CONTINUOUS +370 + 0 +390 +F + 0 +ENDTAB + 0 +TABLE + 2 +STYLE + 5 +3 +330 +0 +100 +AcDbSymbolTable + 70 + 3 + 0 +STYLE + 5 +4A +330 +2 +100 +AcDbSymbolTableRecord +100 +AcDbTextStyleTableRecord + 2 +Standard + 70 + 0 + 40 +0 + 41 +1 + 50 +0 + 71 + 0 + 42 +1 + 3 +txt + 4 + + 0 +ENDTAB + 0 +TABLE + 2 +VIEW + 5 +6 +330 +0 +100 +AcDbSymbolTable + 70 + 0 + 0 +ENDTAB + 0 +TABLE + 2 +UCS + 5 +7 +330 +0 +100 +AcDbSymbolTable + 70 + 0 + 0 +ENDTAB + 0 +TABLE + 2 +APPID + 5 +9 +330 +0 +100 +AcDbSymbolTable + 70 + 1 + 0 +APPID + 5 +12 +330 +9 +100 +AcDbSymbolTableRecord +100 +AcDbRegAppTableRecord + 2 +ACAD + 70 + 0 + 0 +APPID + 5 +4B +330 +9 +100 +AcDbSymbolTableRecord +100 +AcDbRegAppTableRecord + 2 +LibreCad + 70 + 0 + 0 +ENDTAB + 0 +TABLE + 2 +DIMSTYLE + 5 +A +330 +0 +100 +AcDbSymbolTable + 70 + 1 +100 +AcDbDimStyleTable + 71 + 1 + 0 +DIMSTYLE +105 +4C +330 +A +100 +AcDbSymbolTableRecord +100 +AcDbDimStyleTableRecord + 2 +Standard + 70 + 0 + 40 +1 + 41 +2.5 + 42 +0.625 + 43 +0.38 + 44 +1.25 + 45 +0 + 46 +0 + 47 +0 + 48 +0 + 49 +1 +140 +2.5 +141 +0.09 +142 +2.5 +143 +25.4 +144 +1 +145 +0 +146 +1 +147 +0.625 +148 +0 + 71 + 0 + 72 + 0 + 73 + 0 + 74 + 1 + 75 + 0 + 76 + 0 + 77 + 0 + 78 + 1 + 79 + 0 +170 + 0 +171 + 2 +172 + 0 +173 + 0 +174 + 0 +175 + 0 +176 + 0 +177 + 0 +178 + 0 +179 + 0 +271 + 2 +272 + 4 +273 + 2 +274 + 2 +275 + 0 +276 + 0 +277 + 2 +278 + 0 +279 + 0 +280 + 0 +281 + 0 +282 + 0 +283 + 1 +284 + 0 +285 + 0 +286 + 0 +288 + 0 +289 + 3 +340 +standard +341 + +371 + -2 +372 + -2 + 0 +ENDTAB + 0 +TABLE + 2 +BLOCK_RECORD + 5 +1 +330 +0 +100 +AcDbSymbolTable + 70 + 2 + 0 +BLOCK_RECORD + 5 +1F +330 +1 +100 +AcDbSymbolTableRecord +100 +AcDbBlockTableRecord + 2 +*Model_Space + 70 + 0 +280 + 1 +281 + 0 + 0 +BLOCK_RECORD + 5 +1E +330 +1 +100 +AcDbSymbolTableRecord +100 +AcDbBlockTableRecord + 2 +*Paper_Space + 70 + 0 +280 + 1 +281 + 0 + 0 +ENDTAB + 0 +ENDSEC + 0 +SECTION + 2 +BLOCKS + 0 +BLOCK + 5 +20 +330 +1F +100 +AcDbEntity + 8 +0 +100 +AcDbBlockBegin + 2 +*Model_Space + 70 + 0 + 10 +0 + 20 +0 + 30 +0 + 3 +*Model_Space + 1 + + 0 +ENDBLK + 5 +21 +330 +1F 
+100 +AcDbEntity + 8 +0 +100 +AcDbBlockEnd + 0 +BLOCK + 5 +1C +330 +1B +100 +AcDbEntity + 8 +0 +100 +AcDbBlockBegin + 2 +*Paper_Space + 70 + 0 + 10 +0 + 20 +0 + 30 +0 + 3 +*Paper_Space + 1 + + 0 +ENDBLK + 5 +1D +330 +1F +100 +AcDbEntity + 8 +0 +100 +AcDbBlockEnd + 0 +ENDSEC + 0 +SECTION + 2 +ENTITIES + 0 +CIRCLE + 5 +4D +100 +AcDbEntity + 8 +0 + 6 +ByLayer + 62 + 256 +370 + -1 +100 +AcDbCircle + 10 +400.75 + 20 +179.25 + 40 +86.62058069535207 + 0 +LINE + 5 +4E +100 +AcDbEntity + 8 +0 + 6 +ByLayer + 62 + 256 +370 + -1 +100 +AcDbLine + 10 +401.5 + 20 +278.5 + 11 +380.75 + 21 +230.75 + 0 +ENDSEC + 0 +SECTION + 2 +OBJECTS + 0 +DICTIONARY + 5 +C +330 +0 +100 +AcDbDictionary +281 + 1 + 3 +ACAD_GROUP +350 +D + 0 +DICTIONARY + 5 +D +330 +C +100 +AcDbDictionary +281 + 1 + 0 +ENDSEC + 0 +EOF diff --git a/tests/test_mesh.py b/tests/test_mesh.py index e4077dd47..33bcf927f 100644 --- a/tests/test_mesh.py +++ b/tests/test_mesh.py @@ -82,9 +82,7 @@ def test_meshes(self): assert abs(mesh.volume) > 0.0 - mesh.section( - plane_normal=[0, 0, 1], plane_origin=mesh.centroid - ) + mesh.section(plane_normal=[0, 0, 1], plane_origin=mesh.centroid) sample = mesh.sample(1000) even_sample = g.trimesh.sample.sample_surface_even(mesh, 100) # NOQA diff --git a/tests/test_paths.py b/tests/test_paths.py index c544dde13..c4e05aedc 100644 --- a/tests/test_paths.py +++ b/tests/test_paths.py @@ -211,6 +211,32 @@ def test_color(self): assert g.np.allclose(p.colors[0], color) assert p.colors.shape == (len(p.entities), 4) + p.colors = g.np.array( + [100, 100, 100] * len(p.entities), dtype=g.np.uint8 + ).reshape((-1, 3)) + assert g.np.allclose(p.colors[0], [100, 100, 100, 255]) + + def test_dangling(self): + p = g.get_mesh("2D/wrench.dxf") + assert len(p.dangling) == 0 + + b = g.get_mesh("2D/loose.dxf") + assert len(b.dangling) == 1 + assert len(b.polygons_full) == 1 + + def test_plot(self): + try: + # only run these if matplotlib is installed + import matplotlib.pyplot # NOQA + except BaseException: + g.log.debug("skipping `matplotlib.pyplot` tests") + + p = g.get_mesh("2D/wrench.dxf") + + # see if the logic crashes + p.plot_entities(show=False) + p.plot_discrete(show=False) + class SplitTest(g.unittest.TestCase): def test_split(self): diff --git a/trimesh/parent.py b/trimesh/parent.py index abea714d0..c9cbf54e9 100644 --- a/trimesh/parent.py +++ b/trimesh/parent.py @@ -90,6 +90,9 @@ def __repr__(self): if "Voxel" in type(self).__name__: # for VoxelGrid objects elements.append(str(self.shape)[1:-1]) + if "file_name" in self.metadata: + display = self.metadata["file_name"] + elements.append(f"name=`{display}`") return "".format(type(self).__name__, ", ".join(elements)) def apply_translation(self, translation): diff --git a/trimesh/path/packing.py b/trimesh/path/packing.py index ceb6c123b..8c856c150 100644 --- a/trimesh/path/packing.py +++ b/trimesh/path/packing.py @@ -513,6 +513,7 @@ def images( iterations: Optional[int] = 50, seed: Optional[int] = None, spacing: Optional[float] = None, + mode: Optional[str] = None, ): """ Pack a list of images and return result and offsets. @@ -528,6 +529,10 @@ def images( deduplicate Should images that have identical hashes be inserted more than once? + mode + If passed return an output image with the + requested mode, otherwise will be picked + from the input images. 
 Returns
 -----------

@@ -581,19 +586,25 @@ def images(
     # round up all dimensions to powers of 2
     size = (2 ** np.ceil(np.log2(size))).astype(np.int64)
 
+    if mode is None:
+        # get the mode of every input image
+        modes = list({i.mode for i in images})
+        # pick the longest mode as a simple heuristic
+        # which prefers "RGBA" over "RGB"
+        mode = modes[np.argmax([len(m) for m in modes])]
+
-    # create the image in the mode of the first image
-    result = Image.new(images[0].mode, tuple(size))
+    # create the image in the requested or inferred mode
+    result = Image.new(mode, tuple(size))
 
     done = set()
     # paste each image into the result
     for img, off in zip(images, offset):
-        if tuple(off) in done:
-            continue
-        else:
-            done.add(tuple(off))
-
-        # box is upper left corner
-        corner = (off[0], size[1] - img.size[1] - off[1])
-        result.paste(img, box=corner)
+        if tuple(off) not in done:
+            # record the offset so a deduplicated image
+            # sharing it is only pasted once
+            done.add(tuple(off))
+            # box is upper left corner
+            corner = (off[0], size[1] - img.size[1] - off[1])
+            result.paste(img, box=corner)
 
     return result, offset
 
diff --git a/trimesh/path/path.py b/trimesh/path/path.py
index b10d8ceb0..400dab746 100644
--- a/trimesh/path/path.py
+++ b/trimesh/path/path.py
@@ -7,7 +7,6 @@
 """
 import collections
 import copy
-import warnings
 from hashlib import sha256
 
 import numpy as np
@@ -246,10 +245,8 @@ def dangling(self):
         """
         if len(self.paths) == 0:
             return np.arange(len(self.entities))
-        else:
-            included = np.hstack(self.paths)
-            dangling = np.setdiff1d(np.arange(len(self.entities)), included)
-            return dangling
+
+        return np.setdiff1d(np.arange(len(self.entities)), np.hstack(self.paths))
 
     @caching.cache_decorator
     def kdtree(self):
@@ -1440,17 +1437,6 @@ def identifier_hash(self):
         as_int = (self.identifier * 1e4).astype(np.int64)
         return sha256(as_int.tobytes(order="C")).hexdigest()
 
-    @property
-    def identifier_md5(self):
-        warnings.warn(
-            "`geom.identifier_md5` is deprecated and will "
-            + "be removed in October 2023: replace "
-            + "with `geom.identifier_hash`",
-            category=DeprecationWarning,
-            stacklevel=2,
-        )
-        return self.identifier_hash
-
     @property
     def path_valid(self):
         """
@@ -1460,8 +1446,7 @@ def path_valid(self):
         Indexes of self.paths and self.polygons_closed
         which are valid polygons.
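 
         A rough usage sketch, assuming `path` is any loaded
         `Path2D` exposing the attributes named above:
 
         >>> keep = path.path_valid
         >>> valid = [p for p, ok in zip(path.polygons_closed, keep) if ok]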
""" - valid = np.array([i is not None for i in self.polygons_closed], dtype=bool) - return valid + return np.array([i is not None for i in self.polygons_closed], dtype=bool) @caching.cache_decorator def root(self): @@ -1519,5 +1504,4 @@ def enclosure_shell(self): """ pairs = [(r, self.connected_paths(r, include_self=False)) for r in self.root] # OrderedDict to maintain corresponding order - corresponding = collections.OrderedDict(pairs) - return corresponding + return collections.OrderedDict(pairs) diff --git a/trimesh/util.py b/trimesh/util.py index 4fa1be8b9..9b9dbe11f 100644 --- a/trimesh/util.py +++ b/trimesh/util.py @@ -1503,9 +1503,11 @@ def concatenate(a, b=None): try: # concatenate visuals visual = is_mesh[0].visual.concatenate([m.visual for m in is_mesh[1:]]) - except BaseException: - log.debug("failed to combine visuals", exc_info=True) + except BaseException as E: + log.debug(f"failed to combine visuals {_STRICT}", exc_info=True) visual = None + if _STRICT: + raise E # create the mesh object return trimesh_type( From 6781c47b625ac10f531a3dcf32b808d674e79769 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Fri, 29 Sep 2023 14:46:02 -0400 Subject: [PATCH 121/144] fix check and add matplotlib to test extra --- pyproject.toml | 27 ++++++++++++++++----------- tests/test_paths.py | 1 + trimesh/visual/material.py | 6 ++++-- 3 files changed, 21 insertions(+), 13 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 6da990a32..59d26b95b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -58,18 +58,8 @@ trimesh = [ ] [project.optional-dependencies] -test = [ - "pytest-cov", - "coveralls", - "mypy", - "ezdxf", - "pytest", - "pymeshlab", - "pyinstrument", - "ruff", - "black", -] +# this is the base extra most users will want easy = [ "colorlog", "mapbox-earcut", @@ -101,6 +91,21 @@ recommend = [ "pyVHACD", ] +# this is the list of everything that is ever added anywhere +# mostly useful for getting our test coverage up +test = [ + "pytest-cov", + "coveralls", + "mypy", + "ezdxf", + "pytest", + "pymeshlab", + "pyinstrument", + "matplotlib", + "ruff", + "black", +] + # requires pip >= 21.2 # https://hynek.me/articles/python-recursive-optional-dependencies/ all = ["trimesh[easy,recommend,test]"] diff --git a/tests/test_paths.py b/tests/test_paths.py index c4e05aedc..67284cb5f 100644 --- a/tests/test_paths.py +++ b/tests/test_paths.py @@ -230,6 +230,7 @@ def test_plot(self): import matplotlib.pyplot # NOQA except BaseException: g.log.debug("skipping `matplotlib.pyplot` tests") + return p = g.get_mesh("2D/wrench.dxf") diff --git a/trimesh/visual/material.py b/trimesh/visual/material.py index 5b917900b..c86cf2a6d 100644 --- a/trimesh/visual/material.py +++ b/trimesh/visual/material.py @@ -795,14 +795,16 @@ def get_base_color_texture(mat): img = None if isinstance(mat, PBRMaterial): if mat.baseColorTexture is not None: - img = multiply_factor(mat.baseColorTexture, mat.baseColorFactor, "RGBA") + img = multiply_factor( + mat.baseColorTexture, factor=mat.baseColorFactor, mode="RGBA" + ) elif mat.baseColorFactor is not None: c = color.to_rgba(mat.baseColorFactor) assert c.shape == (4,) assert c.dtype == np.uint8 img = Image.fromarray(c.reshape((1, 1, -1))) - if mat.alphaMode != "BLEND": + if img is not None and mat.alphaMode != "BLEND": # we can't handle alpha blending well, but we can bake alpha cutoff mode = img.mode img = np.array(img) From 4d14f4fb4cabbb098552817582fce734eb07a217 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Fri, 29 Sep 2023 19:06:08 -0400 
Subject: [PATCH 122/144] try uv packing check inline

---
 trimesh/__init__.py        | 72 ++++++++++++++++++++++++++-----
 trimesh/base.py            |  1 -
 trimesh/visual/material.py | 88 +++++++++++---------------------------
 3 files changed, 87 insertions(+), 74 deletions(-)

diff --git a/trimesh/__init__.py b/trimesh/__init__.py
index 83673dd3c..33b80819a 100644
--- a/trimesh/__init__.py
+++ b/trimesh/__init__.py
@@ -8,7 +8,34 @@
 and analysis, in the style of the Polygon object in the Shapely library.
 """
 # avoid a circular import in trimesh.base
-from . import bounds, collision, nsphere, primitives, smoothing, voxel
+from . import (
+    boolean,
+    caching,
+    comparison,
+    convex,
+    creation,
+    curvature,
+    decomposition,
+    geometry,
+    graph,
+    grouping,
+    inertia,
+    intersections,
+    permutate,
+    poses,
+    primitives,
+    proximity,
+    ray,
+    registration,
+    remesh,
+    repair,
+    sample,
+    smoothing,
+    transformations,
+    triangles,
+    units,
+    util,
+)
 
 # geometry objects
 from .base import Trimesh
@@ -36,23 +63,48 @@
     path = ExceptionWrapper(E)
 
 __all__ = [
-    "__version__",
-    "Trimesh",
     "PointCloud",
+    "Trimesh",
     "Scene",
-    "voxel",
-    "unitize",
+    "__version__",
+    "available_formats",
+    "boolean",
     "bounds",
+    "caching",
     "collision",
+    "comparison",
+    "convex",
+    "creation",
+    "curvature",
+    "decomposition",
+    "geometry",
+    "graph",
+    "grouping",
+    "inertia",
+    "intersections",
     "load",
     "load_mesh",
     "load_path",
     "load_remote",
+    "nsphere",
+    "path",
+    "permutate",
+    "poses",
     "primitives",
+    "proximity",
+    "ray",
+    "registration",
+    "remesh",
+    "repair",
+    "sample",
+    "smoothing",
+    "tol",
     "transform_points",
-    "available_formats",
+    "transformations",
+    "triangles",
+    "unitize",
+    "units",
+    "util",
+    "voxel",
 ]
diff --git a/trimesh/base.py b/trimesh/base.py
index 81123ba59..96e1a44e1 100644
--- a/trimesh/base.py
+++ b/trimesh/base.py
@@ -32,7 +32,6 @@
     remesh,
     repair,
     sample,
-    smoothing,  # noqa
     transformations,
     triangles,
     units,
diff --git a/trimesh/visual/material.py b/trimesh/visual/material.py
index c86cf2a6d..e372c095e 100644
--- a/trimesh/visual/material.py
+++ b/trimesh/visual/material.py
@@ -712,7 +712,7 @@ def empty_material(color=None):
     Returns
     -------------
     material : SimpleMaterial
-      Image is a a one pixel RGB
+      Image is a four pixel RGB
     """
     try:
         from PIL import Image
@@ -915,12 +915,19 @@ def resize_images(images, sizes):
             resized.append(img)
         return resized
 
+    packed = {}
+
     def pack_images(images):
+        key = hash(tuple(sorted([id(i) for i in images])))
+        if key in packed:
+            return packed[key]
+
         # run image packing with our material-specific settings
         # which includes deduplicating by hash, upsizing to the
         # nearest power of two, returning deterministically by seeding
         # and padding every side of the image by 1 pixel
-        return packing.images(
+        result = packing.images(
             images,
             deduplicate=True,
             power_resize=True,
@@ -928,6 +935,8 @@ def pack_images(images):
             iterations=10,
             spacing=int(padding),
         )
+        packed[key] = result
+        return result
 
     if deduplicate:
         # start by collecting a list of indexes for each material hash
@@ -1038,82 +1047,35 @@ def pack_images(images):
     new_uv = {}
     for group, img, offset in zip(mat_idx, images, offsets):
         # how big was the original image
-        uv_scale = np.array(img.size) / final_size
+        uv_scale = (np.array(img.size) - 1) / final_size
 
         # the units of offset are *pixels of the final image*
         # thus to scale them to normalized UV coordinates
         # we need the offset as a fraction of the final image
-        uv_offset =
offset / final_size + uv_offset = offset / (final_size - 1) # scale and translate each of the new UV coordinates # also make sure they are in 0.0-1.0 using modulus (i.e. wrap) for g in group: - uvs[g].copy() # only wrap pixels that are outside of 0.0-1.0. # use a small leeway of half a pixel for floating point inaccuracies and # the case of uv==1.0 - """" - half_pixel_width = 1.0 / (2 * img.size[0]) - half_pixel_height = 1.0 / (2 * img.size[1]) - wrap_mask_u = (g_uvs[:, 0] <= -half_pixel_width) | ( - g_uvs[:, 0] >= (1.0 + half_pixel_width) - ) - wrap_mask_v = (g_uvs[:, 1] <= -half_pixel_height) | ( - g_uvs[:, 1] >= (1.0 + half_pixel_height) - ) - wrap_mask = np.stack([wrap_mask_u, wrap_mask_v], axis=-1) + uvg = uvs[g] + + moved = (uvg * uv_scale) + uv_offset + # wrap by half-pic + half = 0.5 / np.array(img.size) + wrap = np.logical_or(uvg < -half, uvg > (1.0 + half)) + moved[wrap] %= 1.0 + + if tol.strict: + old = color.uv_to_color(uvg, img) + new = color.uv_to_color(moved, final) + assert np.allclose(old, new, atol=10) - g_uvs[wrap_mask] = g_uvs[wrap_mask] % 1.0 - new_uv[g] = (g_uvs * scale) + xy_off - """ - moved = (uvs[g] * uv_scale) + uv_offset - moved[np.logical_or(moved < -0.00001, moved > 1.00001)] %= 1.0 new_uv[g] = moved # stack the new UV coordinates in the original order stacked = np.vstack([new_uv[i] for i in range(len(uvs))]) - # check to make sure the packed result image matches - # the original input image exactly in unit tests - if tol.strict: - # get the pixel color from the original image - material_textures = [(get_base_color_texture, final)] - if use_pbr: - material_textures.append( - (get_metallic_roughness_texture, final_metallic_roughness) - ) - if final_emissive: - material_textures.append((get_emissive_texture, final_emissive)) - if final_normals: - material_textures.append((get_normal_texture, final_normals)) - if final_occlusion: - material_textures.append((get_occlusion_texture, final_occlusion)) - - check = [] - for uv, mat in zip(uvs, materials): - # get the image from the material and whether or not - # it had to fill in with default data - material_textures_values = [] - for texture_load_fn, _ in material_textures: - orig_img = texture_load_fn(mat) - current = color.uv_to_interpolated_color(image=orig_img, uv=uv) - material_textures_values.append(current) - check.append(material_textures_values) - - check_flat = [] - for texture_idx in range(len(material_textures)): - check_flat.append(np.vstack([c[texture_idx] for c in check])) - - for reference, (_, final_texture) in zip(check_flat, material_textures): - # get the pixel color from the packed image - compare = color.uv_to_interpolated_color(uv=stacked, image=final_texture) - # should be exactly identical - # note this is only true for simple colors - # interpolation on complicated stuff can break this - if not np.allclose(reference, compare): - # from IPython import embed - # embed() - pass - assert np.allclose(reference, compare) - if use_pbr: return ( PBRMaterial( From 6b3b83bd0afef95b5531be50ecc36f63f534bd90 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Fri, 29 Sep 2023 19:21:48 -0400 Subject: [PATCH 123/144] add collision to init --- trimesh/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/trimesh/__init__.py b/trimesh/__init__.py index 33b80819a..a200c7af4 100644 --- a/trimesh/__init__.py +++ b/trimesh/__init__.py @@ -11,6 +11,7 @@ from . 
import ( boolean, caching, + collision, comparison, convex, creation, From 67a37272f1daced85c5c2b7245cf2bc9c7840beb Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Sat, 30 Sep 2023 01:16:42 -0400 Subject: [PATCH 124/144] try to include examples as md/rst --- docs/Makefile | 17 ++++++--------- docs/conf.py | 14 ++----------- docs/examples.py | 47 ++++++++++++++++++++++++------------------ docs/examples.template | 2 +- docs/index.rst | 2 +- docs/requirements.txt | 6 +++--- 6 files changed, 40 insertions(+), 48 deletions(-) diff --git a/docs/Makefile b/docs/Makefile index f993b0d68..b3d512665 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -15,14 +15,13 @@ example_notebooks := $(wildcard ../examples/*.ipynb) example_notebooks := $(filter-out ../examples/save_image.ipynb, $(example_notebooks)) example_names = $(foreach path, $(example_notebooks), $(basename $(notdir $(path)))) -example_htmls = $(foreach name, $(example_names), $(STATICDIR)/examples/$(name).html) -example_rsts = $(foreach name, $(example_names), examples.$(name).rst) -html: conf.py index.rst *.md README.rst trimesh.rst examples.md $(example_rsts) $(example_htmls) .deps +example_rsts = $(foreach name, $(example_names), examples.$(name).md) + +html: conf.py index.rst *.md trimesh.rst README.rst examples.md $(example_rsts) .deps @$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) touch "$(BUILDDIR)/html/.nojekyll" echo "trimesh.org" > "$(BUILDDIR)/html/CNAME" - mv "$(BUILDDIR)/html/_static/examples" "$(BUILDDIR)/html/examples" || true mv "$(BUILDDIR)/html/_static/images" "$(BUILDDIR)/html/images" || true cp "$(STATICDIR)/favicon.ico" "$(BUILDDIR)/html/favicon.ico" || true @@ -30,12 +29,8 @@ html: conf.py index.rst *.md README.rst trimesh.rst examples.md $(example_rsts) $(PIP) install -r requirements.txt $(PIP) freeze > .deps -$(STATICDIR)/examples/%.html: ../examples/%.ipynb .deps - mkdir -p "$(STATICDIR)/examples" - $(NBCONVERT) nbconvert --execute --to html --output $(abspath $@) $< - -examples.%.rst: $(STATICDIR)/examples/%.html examples.template - $(PYTHON) -c "open('$@', 'w').write(open('examples.template').read().format(name='$(*F)', url='$<'))" +examples.%.md : ../examples/%.ipynb .deps + $(NBCONVERT) nbconvert --execute --to markdown --output $(abspath $@) $< examples.md: .deps $(PYTHON) "examples.py" @@ -47,4 +42,4 @@ README.rst: ../README.md .deps pandoc --from=gfm --to=rst --output=README.rst ../README.md clean: - rm -rvf "$(BUILDDIR)" "$(STATICDIR)/examples" examples.*.rst trimesh*.rst .deps + rm -rvf "$(BUILDDIR)" "$(STATICDIR)/examples" examples.*.* trimesh*.rst .deps diff --git a/docs/conf.py b/docs/conf.py index 62aa3a568..da44fa425 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -79,18 +79,8 @@ def abspath(rel): # The theme to use for HTML and HTML Help pages html_theme = "furo" -# options for rtd-theme -html_theme_options = { - "display_version": True, - "prev_next_buttons_location": "bottom", - "style_external_links": False, - # toc options - "collapse_navigation": True, - "sticky_navigation": True, - "navigation_depth": 4, - "includehidden": True, - "titles_only": False, -} +# options for furo +html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. 
They are copied after the builtin static files, diff --git a/docs/examples.py b/docs/examples.py index a1e90570a..7f95be214 100644 --- a/docs/examples.py +++ b/docs/examples.py @@ -11,21 +11,18 @@ import os import sys -log = logging.getLogger('trimesh') +log = logging.getLogger("trimesh") log.addHandler(logging.StreamHandler(sys.stdout)) log.setLevel(logging.DEBUG) # current working directory -pwd = os.path.abspath(os.path.expanduser( - os.path.dirname(__file__))) +pwd = os.path.abspath(os.path.expanduser(os.path.dirname(__file__))) # where are our notebooks to render -source = os.path.abspath(os.path.join( - pwd, '..', 'examples')) +source = os.path.abspath(os.path.join(pwd, "..", "examples")) # which index file are we generating -target = os.path.abspath(os.path.join( - pwd, "examples.md")) +target = os.path.abspath(os.path.join(pwd, "examples.rst")) def extract_docstring(loaded): @@ -45,33 +42,43 @@ def extract_docstring(loaded): Cleaned up docstring. """ - source = loaded['cells'][0]['source'] + source = loaded["cells"][0]["source"] assert source[0].strip() == '"""' assert source[-1].strip() == '"""' - return ' '.join(i.strip() for i in source[1:-1]) + return " ".join(i.strip() for i in source[1:-1]) -if __name__ == '__main__': +base = """ +{name} +========== +.. toctree:: + :maxdepth: 2 - markdown = ['# Examples', - 'Several examples are available as rendered IPython notebooks.', '', ] + {markdown} +""" + +if __name__ == "__main__": + markdown = [ + "# Examples", + "Several examples are available as rendered IPython notebooks.", + "", + ] for fn in os.listdir(source): - if not fn.lower().endswith('.ipynb'): + if not fn.lower().endswith(".ipynb"): continue path = os.path.join(source, fn) with open(path) as f: raw = json.load(f) doc = extract_docstring(raw) - log.info(f'`{fn}`: "{doc}"\n') - link = f'examples.{fn.split(".")[0]}.html' - markdown.append(f'### [{fn}]({link})') - markdown.append(doc) - markdown.append('') + name = fn.split(".")[0] + file_name = f"examples.{name}.md" + + markdown.append(base.format(name=name, markdown=file_name)) - final = '\n'.join(markdown) - with open(target, 'w') as f: + final = "\n".join(markdown) + with open(target, "w") as f: f.write(final) diff --git a/docs/examples.template b/docs/examples.template index 1d78a1a13..1212d6db7 100644 --- a/docs/examples.template +++ b/docs/examples.template @@ -1,5 +1,5 @@ {name} ===================== -.. raw:: html +.. raw:: markdown :file: {url} diff --git a/docs/index.rst b/docs/index.rst index 32e6a3a71..c4723bb89 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -20,7 +20,7 @@ Examples .. 
toctree:: :maxdepth: 2 - examples.md + examples.rst Contributing ========== diff --git a/docs/requirements.txt b/docs/requirements.txt index 2121f623c..c27590be5 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -3,11 +3,11 @@ recommonmark==0.7.1 jupyter==1.0.0 # get sphinx version range from furo install -furo==2023.8.19 +furo==2023.9.10 myst-parser==2.0.0 pyopenssl==23.2.0 autodocsumm==0.2.11 jinja2==3.1.2 -matplotlib==3.7.2 -nbconvert==7.7.4 +matplotlib==3.8.0 +nbconvert==7.8.0 From 878ee235c241649ac74f11000b61b5ab56ca378d Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Sat, 30 Sep 2023 01:19:32 -0400 Subject: [PATCH 125/144] switch to rst --- docs/Makefile | 6 +++--- docs/examples.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/Makefile b/docs/Makefile index b3d512665..6d92d8d33 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -16,7 +16,7 @@ example_notebooks := $(filter-out ../examples/save_image.ipynb, $(example_notebo example_names = $(foreach path, $(example_notebooks), $(basename $(notdir $(path)))) -example_rsts = $(foreach name, $(example_names), examples.$(name).md) +example_rsts = $(foreach name, $(example_names), examples.$(name).rst) html: conf.py index.rst *.md trimesh.rst README.rst examples.md $(example_rsts) .deps @$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) @@ -29,8 +29,8 @@ html: conf.py index.rst *.md trimesh.rst README.rst examples.md $(example_rsts) $(PIP) install -r requirements.txt $(PIP) freeze > .deps -examples.%.md : ../examples/%.ipynb .deps - $(NBCONVERT) nbconvert --execute --to markdown --output $(abspath $@) $< +examples.%.rst : ../examples/%.ipynb .deps + $(NBCONVERT) nbconvert --execute --to rst --output $(abspath $@) $< examples.md: .deps $(PYTHON) "examples.py" diff --git a/docs/examples.py b/docs/examples.py index 7f95be214..f2a6af27e 100644 --- a/docs/examples.py +++ b/docs/examples.py @@ -74,9 +74,9 @@ def extract_docstring(loaded): raw = json.load(f) doc = extract_docstring(raw) + # name = fn.split(".")[0] - file_name = f"examples.{name}.md" - + file_name = f"examples.{name}.rst" markdown.append(base.format(name=name, markdown=file_name)) final = "\n".join(markdown) From 0d069683ce4dc269ba6abac8021050c62bf28461 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Sat, 30 Sep 2023 15:05:21 -0400 Subject: [PATCH 126/144] slightly closer --- Dockerfile | 2 +- docs/Makefile | 17 ++++++++----- docs/{guides => content}/contributing.md | 0 docs/{guides => content}/docker.md | 0 docs/{guides => content}/install.md | 0 docs/{guides => content}/nricp.md | 0 docs/examples.py | 31 +++++++++++++++++------- docs/index.rst | 12 ++++----- trimesh/base.py | 8 ++---- 9 files changed, 42 insertions(+), 28 deletions(-) rename docs/{guides => content}/contributing.md (100%) rename docs/{guides => content}/docker.md (100%) rename docs/{guides => content}/install.md (100%) rename docs/{guides => content}/nricp.md (100%) diff --git a/Dockerfile b/Dockerfile index cc49b9042..e3f3548c5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -105,7 +105,7 @@ RUN make ### Copy just the docs so we can output them FROM scratch as docs -COPY --from=build_docs /home/user/docs/_build/html/ ./ +COPY --from=build_docs /home/user/docs/built/html/ ./ ### Make sure the output stage is the last stage so a simple # "docker build ." 
still outputs an expected image diff --git a/docs/Makefile b/docs/Makefile index 889b2f18b..ae7832a4e 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -7,10 +7,13 @@ NBCONVERT ?= jupyter PYTHON ?= python PIP ?= pip SOURCEDIR = . -BUILDDIR = _build +BUILDDIR = built TEMPLATESDIR = templates +# where to put generated RST files +GENDIR = generated STATICDIR = _static + example_notebooks := $(wildcard ../examples/*.ipynb) example_notebooks := $(filter-out ../examples/save_image.ipynb, $(example_notebooks)) @@ -28,18 +31,20 @@ html: conf.py index.rst trimesh.rst README.rst examples.rst $(example_rsts) .dep .deps: requirements.txt $(PIP) install -r requirements.txt $(PIP) freeze > .deps + mkdir -p $(GENDIR) + mkdir -p $(BUILDDIR) examples.%.rst : ../examples/%.ipynb .deps - $(NBCONVERT) nbconvert --execute --to rst --output $(abspath $@) $< + $(NBCONVERT) nbconvert --execute --to rst --output-dir $(GENDIR) $< examples.rst: .deps - $(PYTHON) "examples.py" + $(PYTHON) "examples.py" "--source=../examples" "--target=$(GENDIR)/examples.rst" trimesh.rst: .deps - $(SPHINXGEN) -eTf -t "$(TEMPLATESDIR)" -o "$(SOURCEDIR)" ../trimesh + $(SPHINXGEN) -eTf -t "$(TEMPLATESDIR)" -o "$(GENDIR)" ../trimesh README.rst: ../README.md .deps - pandoc --from=gfm --to=rst --output=README.rst ../README.md + pandoc --from=gfm --to=rst --output="$(GENDIR)/README.rst" ../README.md clean: - rm -rvf "$(BUILDDIR)" "$(STATICDIR)/examples" examples.*.* trimesh*.rst .deps + rm -rvf "$(BUILDDIR)" "$(GENDIR)" .deps diff --git a/docs/guides/contributing.md b/docs/content/contributing.md similarity index 100% rename from docs/guides/contributing.md rename to docs/content/contributing.md diff --git a/docs/guides/docker.md b/docs/content/docker.md similarity index 100% rename from docs/guides/docker.md rename to docs/content/docker.md diff --git a/docs/guides/install.md b/docs/content/install.md similarity index 100% rename from docs/guides/install.md rename to docs/content/install.md diff --git a/docs/guides/nricp.md b/docs/content/nricp.md similarity index 100% rename from docs/guides/nricp.md rename to docs/content/nricp.md diff --git a/docs/examples.py b/docs/examples.py index 91f4667a1..2d8c22257 100644 --- a/docs/examples.py +++ b/docs/examples.py @@ -18,12 +18,6 @@ # current working directory pwd = os.path.abspath(os.path.expanduser(os.path.dirname(__file__))) -# where are our notebooks to render -source = os.path.abspath(os.path.join(pwd, "..", "examples")) - -# which index file are we generating -target = os.path.abspath(os.path.join(pwd, "examples.rst")) - def extract_docstring(loaded): """ @@ -51,16 +45,34 @@ def extract_docstring(loaded): base = """ -{name} +{title} ========== .. 
toctree:: :maxdepth: 2 - {markdown} + {file_name} """ + if __name__ == "__main__": + import argparse + + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument( + "--source", type=str, help="a directory containing `ipynb` files", required=True + ) + parser.add_argument( + "--target", type=str, help="Where the generated .rst file goes", required=True + ) + args = parser.parse_args() + + source = os.path.abspath(args.source) + target = os.path.abspath(args.target) + markdown = [ "# Examples", + "===========", "Several examples are available as rendered IPython notebooks.", "", ] @@ -76,7 +88,8 @@ def extract_docstring(loaded): # name = fn.split(".")[0] file_name = f"examples.{name}.rst" - markdown.append(base.format(name=name, markdown=file_name)) + title = " ".join(name.split("_")).title() + markdown.append(base.format(title=title, file_name=file_name)) final = "\n".join(markdown) with open(target, "w") as f: diff --git a/docs/index.rst b/docs/index.rst index 1365f55bf..e9ccea816 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,4 +1,4 @@ -.. include:: README.rst +.. include:: generated/README.rst Links ========== @@ -10,28 +10,28 @@ Install .. toctree:: :maxdepth: 2 - guides/install.md + content/install.md Examples ========== .. toctree:: - :maxdepth: 2 + :maxdepth: 1 - examples.rst + Examples Contributing ========== .. toctree:: :maxdepth: 1 - Contributing + Contributing Docker ========== .. toctree:: :maxdepth: 1 - Docker + Docker API Reference ============= diff --git a/trimesh/base.py b/trimesh/base.py index 96e1a44e1..e15086108 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -2467,13 +2467,9 @@ def apply_transform(self, matrix: NDArray[float64]) -> "Trimesh": # transform overridden center of mass if "center_mass" in self._data: - center_mass = self._data["center_mass"] + center_mass = [self._data["center_mass"]] self.center_mass = transformations.transform_points( - np.array( - [ - center_mass, - ] - ), + center_mass, matrix, )[0] From e5f751f7fcc0902e8d65a1de7445c5b782b5e108 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Sat, 30 Sep 2023 20:02:52 -0400 Subject: [PATCH 127/144] postprocess --- docs/Makefile | 24 ++-- docs/conf.py | 2 +- docs/{ => content}/index.rst | 22 ++-- docs/examples.py | 116 +++++++++++++----- docs/{_static => static}/custom.css | 0 docs/{_static => static}/favicon.ico | Bin docs/{_static => static}/images/favicon.svg | 0 .../{_static => static}/images/logotype-a.svg | 0 .../{_static => static}/images/logotype-b.svg | 0 .../images/trimesh-logo.png | Bin 10 files changed, 107 insertions(+), 57 deletions(-) rename docs/{ => content}/index.rst (76%) rename docs/{_static => static}/custom.css (100%) rename docs/{_static => static}/favicon.ico (100%) rename docs/{_static => static}/images/favicon.svg (100%) rename docs/{_static => static}/images/logotype-a.svg (100%) rename docs/{_static => static}/images/logotype-b.svg (100%) rename docs/{_static => static}/images/trimesh-logo.png (100%) diff --git a/docs/Makefile b/docs/Makefile index ae7832a4e..6135a072b 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -6,39 +6,39 @@ SPHINXGEN ?= sphinx-apidoc NBCONVERT ?= jupyter PYTHON ?= python PIP ?= pip -SOURCEDIR = . 
BUILDDIR = built TEMPLATESDIR = templates # where to put generated RST files -GENDIR = generated -STATICDIR = _static +GENDIR = generate +CONTENT = content +STATICDIR = static example_notebooks := $(wildcard ../examples/*.ipynb) example_notebooks := $(filter-out ../examples/save_image.ipynb, $(example_notebooks)) - example_names = $(foreach path, $(example_notebooks), $(basename $(notdir $(path)))) - example_rsts = $(foreach name, $(example_names), examples.$(name).rst) -html: conf.py index.rst trimesh.rst README.rst examples.rst $(example_rsts) .deps - @$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - touch "$(BUILDDIR)/html/.nojekyll" + +html: conf.py # $(CONTENT)/index.rst trimesh.rst README.rst $(example_rsts) examples.rst .deps + @$(SPHINXBUILD) -M html "$(GENDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) echo "trimesh.org" > "$(BUILDDIR)/html/CNAME" - mv "$(BUILDDIR)/html/_static/images" "$(BUILDDIR)/html/images" || true - cp "$(STATICDIR)/favicon.ico" "$(BUILDDIR)/html/favicon.ico" || true + touch "$(BUILDDIR)/html/.nojekyll" + cp -R "$(STATICDIR)/images" "$(BUILDDIR)/html/images" || true + cp "$(STATICDIR)/favicon.ico" "$(BUILDDIR)/html/favicon.ico" || true .deps: requirements.txt $(PIP) install -r requirements.txt $(PIP) freeze > .deps mkdir -p $(GENDIR) mkdir -p $(BUILDDIR) + cp conf.py $(CONTENT)/* $(GENDIR) examples.%.rst : ../examples/%.ipynb .deps $(NBCONVERT) nbconvert --execute --to rst --output-dir $(GENDIR) $< examples.rst: .deps - $(PYTHON) "examples.py" "--source=../examples" "--target=$(GENDIR)/examples.rst" + $(PYTHON) examples.py --source=../examples --target=$(GENDIR)/examples.rst trimesh.rst: .deps $(SPHINXGEN) -eTf -t "$(TEMPLATESDIR)" -o "$(GENDIR)" ../trimesh @@ -47,4 +47,4 @@ README.rst: ../README.md .deps pandoc --from=gfm --to=rst --output="$(GENDIR)/README.rst" ../README.md clean: - rm -rvf "$(BUILDDIR)" "$(GENDIR)" .deps + rm -rvf "$(BUILDDIR)" "$(GENDIR)" .deps diff --git a/docs/conf.py b/docs/conf.py index 738f50085..7c2650075 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -85,7 +85,7 @@ def abspath(rel): # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] +html_static_path = ["static"] html_logo = "images/trimesh-logo.png" # custom css diff --git a/docs/index.rst b/docs/content/index.rst similarity index 76% rename from docs/index.rst rename to docs/content/index.rst index e9ccea816..09575140f 100644 --- a/docs/index.rst +++ b/docs/content/index.rst @@ -1,4 +1,4 @@ -.. include:: generated/README.rst +.. include:: README.rst Links ========== @@ -10,28 +10,28 @@ Install .. toctree:: :maxdepth: 2 - content/install.md - -Examples -========== -.. toctree:: - :maxdepth: 1 - - Examples + install.md Contributing ========== .. toctree:: :maxdepth: 1 - Contributing + Contributing Docker ========== .. toctree:: :maxdepth: 1 - Docker + Docker + +Examples +========== +.. 
toctree:: + :maxdepth: 1 + + Examples API Reference ============= diff --git a/docs/examples.py b/docs/examples.py index 2d8c22257..9c6620a2e 100644 --- a/docs/examples.py +++ b/docs/examples.py @@ -2,11 +2,10 @@ examples.py ------------ -Generate `examples.md` from the contents +Convert `ipynb` to a web-renderable format from the contents of `../examples/*.ipynb` """ -import json import logging import os import sys @@ -45,17 +44,90 @@ def extract_docstring(loaded): base = """ -{title} -========== -.. toctree:: - :maxdepth: 2 - - {file_name} +{title} """ -if __name__ == "__main__": - import argparse +def generate_index(source: str, target: str) -> str: + """ + Go through a directory of source `ipynb` files and write + an RST index with a toctree. + + Also postprocesses the results of `jupyter nbconvert` + """ + + lines = [ + "Examples", + "===========", + "Several examples are available as rendered IPython notebooks.", + "", + ".. toctree::", + " :maxdepth: 2", + "", + ] + + target_dir = os.path.dirname(target) + + for fn in os.listdir(source): + if not fn.lower().endswith(".ipynb"): + continue + + name = fn.rsplit(".")[0] + title = name.replace("_", " ").title() + # notebook converted to RST + convert = os.path.join(target_dir, f"{name}.rst") + if not os.path.exists(convert): + print(f"no RST for {name}.rst") + continue + + with open(convert) as f: + doc, post = postprocess(f.read(), title=title) + with open(convert, "w") as f: + f.write(post) + + lines.append(f" {name}") + # lines.append(doc) + lines.append("") + + return "\n".join(lines) + + +def postprocess(text: str, title: str) -> str: + """ + Postprocess an RST generated from `jupyter nbconvert` + """ + lines = str.splitlines(text) + + # already has a title so exit + if "===" in "".join(lines[:4]): + return "", text + + head = [] + index = 0 + ready = False + for i, L in enumerate(lines): + if "parsed-literal" in L: + ready = True + continue + if ready: + if "code::" in L: + index = i + break + else: + head.append(L) + + # clean up the "parsed literal" + docstring = ( + " ".join(" ".join(head).replace("\\n", " ").split()).strip().strip("'").strip() + ) + + # add a title and the docstring as a header + clip = f"{title}\n=============\n{docstring}\n\n" + "\n".join(lines[index:]) + + return docstring, clip + + +if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() @@ -70,27 +142,5 @@ def extract_docstring(loaded): source = os.path.abspath(args.source) target = os.path.abspath(args.target) - markdown = [ - "# Examples", - "===========", - "Several examples are available as rendered IPython notebooks.", - "", - ] - - for fn in os.listdir(source): - if not fn.lower().endswith(".ipynb"): - continue - path = os.path.join(source, fn) - with open(path) as f: - raw = json.load(f) - doc = extract_docstring(raw) - - # - name = fn.split(".")[0] - file_name = f"examples.{name}.rst" - title = " ".join(name.split("_")).title() - markdown.append(base.format(title=title, file_name=file_name)) - - final = "\n".join(markdown) with open(target, "w") as f: - f.write(final) + f.write(generate_index(source=source, target=target)) diff --git a/docs/_static/custom.css b/docs/static/custom.css similarity index 100% rename from docs/_static/custom.css rename to docs/static/custom.css diff --git a/docs/_static/favicon.ico b/docs/static/favicon.ico similarity index 100% rename from docs/_static/favicon.ico rename to docs/static/favicon.ico diff --git a/docs/_static/images/favicon.svg b/docs/static/images/favicon.svg similarity index 100% 
rename from docs/_static/images/favicon.svg rename to docs/static/images/favicon.svg diff --git a/docs/_static/images/logotype-a.svg b/docs/static/images/logotype-a.svg similarity index 100% rename from docs/_static/images/logotype-a.svg rename to docs/static/images/logotype-a.svg diff --git a/docs/_static/images/logotype-b.svg b/docs/static/images/logotype-b.svg similarity index 100% rename from docs/_static/images/logotype-b.svg rename to docs/static/images/logotype-b.svg diff --git a/docs/_static/images/trimesh-logo.png b/docs/static/images/trimesh-logo.png similarity index 100% rename from docs/_static/images/trimesh-logo.png rename to docs/static/images/trimesh-logo.png From 08a59e72da510c45a2ed77f4cacff0c6ee9c1cda Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Sat, 30 Sep 2023 20:05:53 -0400 Subject: [PATCH 128/144] restore deps --- docs/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/Makefile b/docs/Makefile index 6135a072b..bb42beb03 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -20,7 +20,7 @@ example_names = $(foreach path, $(example_notebooks), $(basename $(notdir $(path example_rsts = $(foreach name, $(example_names), examples.$(name).rst) -html: conf.py # $(CONTENT)/index.rst trimesh.rst README.rst $(example_rsts) examples.rst .deps +html: conf.py $(CONTENT)/index.rst trimesh.rst README.rst $(example_rsts) examples.rst .deps @$(SPHINXBUILD) -M html "$(GENDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) echo "trimesh.org" > "$(BUILDDIR)/html/CNAME" touch "$(BUILDDIR)/html/.nojekyll" From bd1ac44706645f76dab933d1a31f978926f30b88 Mon Sep 17 00:00:00 2001 From: iory Date: Sun, 1 Oct 2023 17:34:19 +0900 Subject: [PATCH 129/144] Modified to skip processing when the vertex attribute of primitive is None --- trimesh/exchange/dae.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/trimesh/exchange/dae.py b/trimesh/exchange/dae.py index 9e0595483..fda557f8a 100644 --- a/trimesh/exchange/dae.py +++ b/trimesh/exchange/dae.py @@ -191,6 +191,8 @@ def _parse_node( primitive = primitive.triangleset() if isinstance(primitive, collada.triangleset.TriangleSet): vertex = primitive.vertex + if vertex is None: + continue vertex_index = primitive.vertex_index vertices = vertex[vertex_index].reshape(len(vertex_index) * 3, 3) From afb2aa359be9204d2ec7682a069685fd78ce6056 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Sun, 1 Oct 2023 14:53:11 -0400 Subject: [PATCH 130/144] fix link and bump to rc2 --- docs/content/install.md | 3 +-- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/content/install.md b/docs/content/install.md index bd015a2bf..862fb126b 100644 --- a/docs/content/install.md +++ b/docs/content/install.md @@ -29,8 +29,7 @@ Conda Packages If you prefer a `conda` environment, `trimesh` is available on `conda-forge` ([trimesh-feedstock repo](https://github.com/conda-forge/trimesh-feedstock)) - -If you install [Miniconda](https://conda.io/docs/install/quick.html) you can then run: +If you install [Miniconda](https://docs.conda.io/projects/miniconda/en/latest/) you can then run: ``` conda install -c conda-forge trimesh diff --git a/pyproject.toml b/pyproject.toml index 59d26b95b..588cc918b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ requires = ["setuptools >= 61.0", "wheel"] [project] name = "trimesh" requires-python = ">=3.7" -version = "4.0.0.rc1" +version = "4.0.0.rc2" authors = [{name = "Michael Dawson-Haggerty", email = "mikedh@kerfed.com"}] license = {text = "MIT"} 
description = "Import, export, process, analyze and view triangular meshes." From 780b61c0b0b5d014b73d14b204e5b24a3018e8ef Mon Sep 17 00:00:00 2001 From: Nicholas Pfaff Date: Mon, 2 Oct 2023 13:25:40 -0400 Subject: [PATCH 131/144] Allow custom VHACD parameters --- trimesh/base.py | 8 ++++++-- trimesh/decomposition.py | 20 ++++++++++++++++++-- 2 files changed, 24 insertions(+), 4 deletions(-) diff --git a/trimesh/base.py b/trimesh/base.py index e15086108..114b141d0 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -2860,7 +2860,7 @@ def to_dict(self) -> Dict[str, Union[str, List[List[float]], List[List[int]]]]: "faces": self.faces.tolist(), } - def convex_decomposition(self) -> List["Trimesh"]: + def convex_decomposition(self, **kwargs) -> List["Trimesh"]: """ Compute an approximate convex decomposition of a mesh using `pip install pyVHACD`. @@ -2869,8 +2869,12 @@ def convex_decomposition(self) -> List["Trimesh"]: ------- meshes List of convex meshes that approximate the original + **kwargs : VHACD keyword arguments """ - return [Trimesh(**kwargs) for kwargs in decomposition.convex_decomposition(self)] + return [ + Trimesh(**kwargs) + for kwargs in decomposition.convex_decomposition(self, **kwargs) + ] def union( self, other: "Trimesh", engine: Optional[str] = None, **kwargs diff --git a/trimesh/decomposition.py b/trimesh/decomposition.py index 12cab73bc..840966113 100644 --- a/trimesh/decomposition.py +++ b/trimesh/decomposition.py @@ -3,14 +3,30 @@ import numpy as np -def convex_decomposition(mesh) -> List[Dict]: +def convex_decomposition(mesh, **kwargs) -> List[Dict]: """ Compute an approximate convex decomposition of a mesh. + VHACD Parameters which can be passed as kwargs: + + Name Default + ----------------------------------------- + maxConvexHulls 64 + resolution 400000 + minimumVolumePercentErrorAllowed 1.0 + maxRecursionDepth 10 + shrinkWrap True + fillMode "flood" + maxNumVerticesPerCH 64 + asyncACD True + minEdgeLength 2 + findBestPlane False + Parameters ---------- mesh : trimesh.Trimesh Mesh to be decomposed into convex parts + **kwargs : VHACD keyword arguments Returns ------- @@ -31,5 +47,5 @@ def convex_decomposition(mesh) -> List[Dict]: return [ {"vertices": v, "faces": f.reshape((-1, 4))[:, 1:]} - for v, f in compute_vhacd(mesh.vertices, faces) + for v, f in compute_vhacd(mesh.vertices, faces, **kwargs) ] From ca994d09cb6c39ef89a1d59181b41f399212ebbb Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 5 Oct 2023 15:05:58 -0400 Subject: [PATCH 132/144] combine version logic into one function --- pyproject.toml | 2 +- trimesh/version.py | 82 +++++++++++++++++++++------------------------- 2 files changed, 39 insertions(+), 45 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 588cc918b..f319ce56d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ name = "trimesh" requires-python = ">=3.7" version = "4.0.0.rc2" authors = [{name = "Michael Dawson-Haggerty", email = "mikedh@kerfed.com"}] -license = {text = "MIT"} +license = {file = "LICENSE.md"} description = "Import, export, process, analyze and view triangular meshes." keywords = ["graphics", "mesh", "geometry", "3D"] classifiers = [ diff --git a/trimesh/version.py b/trimesh/version.py index 32f93f3f1..fae20eb45 100644 --- a/trimesh/version.py +++ b/trimesh/version.py @@ -4,57 +4,51 @@ Get the current version from package metadata or pyproject.toml if everything else fails. 
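 
 A hedged sanity check of whatever gets resolved, assuming the
 package imports at all (the resolver below may return None):
 
 >>> import trimesh
 >>> trimesh.__version__ is None or isinstance(trimesh.__version__, str)
 True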
""" +import json +import os +from typing import Optional -def _get_version(): +def _get_version() -> Optional[str]: """ Try all our methods to get the version. """ - for method in [_importlib, _pkgresources, _pyproject]: - try: - return method() - except BaseException: - pass - return None - - -def _importlib() -> str: - """ - Get the version string using package metadata on Python >= 3.8 - """ - - from importlib.metadata import version - - return version("trimesh") - - -def _pkgresources() -> str: - """ - Get the version string using package metadata on Python < 3.8 - """ - from pkg_resources import get_distribution - - return get_distribution("trimesh").version - -def _pyproject() -> str: - """ - Get the version string from the pyproject.toml file. - """ - import json - import os - - # use a path relative to this file - pyproject = os.path.abspath( - os.path.join( - os.path.dirname(os.path.abspath(os.path.expanduser(__file__))), - "..", - "pyproject.toml", + try: + # Get the version string using package metadata on Python >= 3.8 + from importlib.metadata import version + + return version("trimesh") + except BaseException: + pass + + try: + # Get the version string using package metadata on Python < 3.8 + from pkg_resources import get_distribution + + return get_distribution("trimesh").version + except BaseException: + pass + + try: + # Get the version string from the pyproject.toml file using + # relative paths. This will be the only option if the library + # has not been installed (i.e. in a CI or container environment) + pyproject = os.path.abspath( + os.path.join( + os.path.dirname(os.path.abspath(os.path.expanduser(__file__))), + "..", + "pyproject.toml", + ) ) - ) - with open(pyproject) as f: - # json.loads cleans up the string and removes the quotes - return next(json.loads(L.split("=")[1]) for L in f if "version" in L) + with open(pyproject) as f: + # json.loads cleans up the string and removes the quotes + # this logic requires the first use of "version" be the actual version + return next(json.loads(L.split("=")[1]) for L in f if "version" in L) + except BaseException: + pass + + return None # try all our tricks From 6fe2af10f491e119178e316fbc251fc25be091c7 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 9 Oct 2023 19:46:46 -0400 Subject: [PATCH 133/144] use vhacdx --- docs/content/install.md | 2 +- pyproject.toml | 2 +- tests/test_decomposition.py | 2 +- trimesh/decomposition.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/content/install.md b/docs/content/install.md index 862fb126b..9a7697227 100644 --- a/docs/content/install.md +++ b/docs/content/install.md @@ -80,4 +80,4 @@ Trimesh has a lot of soft-required upstream packages. We try to make sure they'r |`pytest`| A test runner. | | `test`| |`pytest-cov`| A plugin to calculate test coverage. | | `test`| |`pyinstrument`| A sampling based profiler for performance tweaking. 
| | `test`| -|`pyvhacd`| A binding for VHACD which provides convex decompositions | | `recommend`| +|`vhacdx`| A binding for VHACD which provides convex decompositions | | `recommend`| diff --git a/pyproject.toml b/pyproject.toml index f319ce56d..d755528f7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -88,7 +88,7 @@ recommend = [ "xatlas", "scikit-image", "python-fcl", - "pyVHACD", + "vhacdx", ] # this is the list of everything that is ever added anywhere diff --git a/tests/test_decomposition.py b/tests/test_decomposition.py index 75ae8cd49..b8733aa29 100644 --- a/tests/test_decomposition.py +++ b/tests/test_decomposition.py @@ -7,7 +7,7 @@ class DecompositionTest(g.unittest.TestCase): def test_convex_decomposition(self): try: - import pyVHACD # noqa + import vhacdx # noqa except ImportError: return diff --git a/trimesh/decomposition.py b/trimesh/decomposition.py index 840966113..4b7537914 100644 --- a/trimesh/decomposition.py +++ b/trimesh/decomposition.py @@ -34,7 +34,7 @@ def convex_decomposition(mesh, **kwargs) -> List[Dict]: List of **kwargs for Trimeshes that are nearly convex and approximate the original. """ - from pyVHACD import compute_vhacd + from vhacdx import compute_vhacd # the faces are triangulated in a (len(face), ...vertex-index) # for vtkPolyData From c5b11ace8796c78dfbe5e569e78323ae303d1c7c Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 9 Oct 2023 20:53:23 -0400 Subject: [PATCH 134/144] address suggestions in #2017 --- trimesh/visual/material.py | 83 ++++++++++++++++++++++++-------------- 1 file changed, 52 insertions(+), 31 deletions(-) diff --git a/trimesh/visual/material.py b/trimesh/visual/material.py index e372c095e..ae4ed0ee8 100644 --- a/trimesh/visual/material.py +++ b/trimesh/visual/material.py @@ -11,8 +11,14 @@ from .. import exceptions, util from ..constants import tol +from ..typed import NDArray, Optional from . import color +try: + from PIL import Image +except BaseException as E: + Image = exceptions.ExceptionWrapper(E) + # epsilon for comparing floating point _eps = 1e-5 @@ -700,7 +706,7 @@ def __hash__(self): ) -def empty_material(color=None): +def empty_material(color: Optional[NDArray[np.uint8]] = None) -> SimpleMaterial: """ Return an empty material set to a single color @@ -714,25 +720,36 @@ def empty_material(color=None): material : SimpleMaterial Image is a a four pixel RGB """ - try: - from PIL import Image - except BaseException as E: - return exceptions.ExceptionWrapper(E) - - final = np.array([255, 255, 255, 255], dtype=np.uint8) - if np.shape(color) in ((3,), (4,)): - final[: len(color)] = color # create a one pixel RGB image - image = Image.fromarray(final.reshape((1, 1, 4)).astype(np.uint8)) - return SimpleMaterial(image=image) + return SimpleMaterial(image=color_image(color=color)) + + +def color_image(color: Optional[NDArray[np.uint8]] = None) -> "Image": + """ + Generate an image with one color. + + Parameters + ---------- + color + Optional uint8 color + + Returns + ---------- + image + A (2, 2) RGBA image with the specified color. 
+ """ + single = np.array([100, 100, 100, 255], dtype=np.uint8) + if np.shape(color) in ((3,), (4,)): + single[: len(color)] = color + return Image.fromarray(np.tile(single, 4).reshape((2, 2, 4)).astype(np.uint8)) def pack( materials, uvs, deduplicate=True, - padding: int = 1, + padding: int = 2, max_tex_size_individual=8192, max_tex_size_fused=8192, ): @@ -802,7 +819,7 @@ def get_base_color_texture(mat): c = color.to_rgba(mat.baseColorFactor) assert c.shape == (4,) assert c.dtype == np.uint8 - img = Image.fromarray(c.reshape((1, 1, -1))) + img = color_image(c) if img is not None and mat.alphaMode != "BLEND": # we can't handle alpha blending well, but we can bake alpha cutoff @@ -818,15 +835,11 @@ def get_base_color_texture(mat): img = mat.image elif np.shape(getattr(mat, "diffuse", [])) == (4,): # return a one pixel image - img = Image.fromarray( - np.reshape(color.to_rgba(mat.diffuse), (1, 1, 4)).astype(np.uint8) - ) + img = color_image(mat.diffuse) if img is None: # return a one pixel image - img = Image.fromarray( - np.reshape([100, 100, 100, 255], (1, 1, 4)).astype(np.uint8) - ) + img = color_image() # make sure we're always returning in RGBA mode return img.convert("RGBA") @@ -918,15 +931,18 @@ def resize_images(images, sizes): packed = {} def pack_images(images): + # run image packing with our material-specific settings + # which including deduplicating by hash, upsizing to the + # nearest power of two, returning deterministically by seeding + # and padding every side of the image by 1 pixel + + # see if we've already run this packing image key = hash(tuple(sorted([id(i) for i in images]))) assert key not in packed if key in packed: return packed[key] - # run image packing with our material-specific settings - # which including deduplicating by hash, upsizing to the - # nearest power of two, returning deterministically by seeding - # and padding every side of the image by 1 pixel + # otherwise run packing now result = packing.images( images, deduplicate=True, @@ -1054,22 +1070,27 @@ def pack_images(images): uv_offset = offset / (final_size - 1) # scale and translate each of the new UV coordinates # also make sure they are in 0.0-1.0 using modulus (i.e. wrap) + half = 0.5 / np.array(img.size) + for g in group: # only wrap pixels that are outside of 0.0-1.0. 
# use a small leeway of half a pixel for floating point inaccuracies and # the case of uv==1.0 - uvg = uvs[g] + uvg = uvs[g].copy() - moved = (uvg * uv_scale) + uv_offset - # wrap by half-pic - half = 0.5 / np.array(img.size) + # wrap before scaling and offsetting wrap = np.logical_or(uvg < -half, uvg > (1.0 + half)) - moved[wrap] %= 1.0 + uvg[wrap] %= 1.0 + + # apply the scale and offset + moved = (uvg * uv_scale) + uv_offset if tol.strict: - old = color.uv_to_color(uvg, img) - new = color.uv_to_color(moved, final) - assert np.allclose(old, new, atol=10) + # the color from the original coordinates and image + old = color.uv_to_interpolated_color(uvs[g], img) + # the color from the packed image + new = color.uv_to_interpolated_color(moved, final) + assert np.allclose(old, new, atol=4) new_uv[g] = moved From 000551b776dad6f71dd11db6d4969df1659ebabf Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 9 Oct 2023 21:02:19 -0400 Subject: [PATCH 135/144] soft dep --- trimesh/visual/material.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/trimesh/visual/material.py b/trimesh/visual/material.py index ae4ed0ee8..77eca1153 100644 --- a/trimesh/visual/material.py +++ b/trimesh/visual/material.py @@ -739,9 +739,14 @@ def color_image(color: Optional[NDArray[np.uint8]] = None) -> "Image": image A (2, 2) RGBA image with the specified color. """ + # only raise an error further down the line + if isinstance(Image, exceptions.ExceptionWrapper): + return Image + # start with a single default RGBA color single = np.array([100, 100, 100, 255], dtype=np.uint8) if np.shape(color) in ((3,), (4,)): single[: len(color)] = color + # tile into a (2, 2) image and return return Image.fromarray(np.tile(single, 4).reshape((2, 2, 4)).astype(np.uint8)) From 400242db024902ed9752724edd06f8ab8d33f966 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Mon, 9 Oct 2023 23:14:07 -0400 Subject: [PATCH 136/144] match upstream vhacdx --- trimesh/decomposition.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trimesh/decomposition.py b/trimesh/decomposition.py index 4b7537914..5793ae52b 100644 --- a/trimesh/decomposition.py +++ b/trimesh/decomposition.py @@ -46,6 +46,6 @@ def convex_decomposition(mesh, **kwargs) -> List[Dict]: ) return [ - {"vertices": v, "faces": f.reshape((-1, 4))[:, 1:]} + {"vertices": v, "faces": f} for v, f in compute_vhacd(mesh.vertices, faces, **kwargs) ] From 8344ad4ba6ba3337ed5c71f4b8f11537ec0dd90f Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 10 Oct 2023 15:03:00 -0400 Subject: [PATCH 137/144] remove redundant path method --- trimesh/path/path.py | 76 +++++++++++++++----------------------------- trimesh/typed.py | 4 +-- 2 files changed, 28 insertions(+), 52 deletions(-) diff --git a/trimesh/path/path.py b/trimesh/path/path.py index 400dab746..1f9fd321b 100644 --- a/trimesh/path/path.py +++ b/trimesh/path/path.py @@ -17,6 +17,7 @@ from ..constants import tol_path as tol from ..geometry import plane_transform from ..points import plane_fit +from ..typed import Dict, List, NDArray, Optional, float64 from ..visual import to_rgba from . 
import ( creation, # NOQA @@ -25,6 +26,7 @@ simplify, traversal, ) +from .entities import Entity from .exchange.export import export_path from .util import concatenate @@ -66,10 +68,10 @@ class Path(parent.Geometry): def __init__( self, - entities=None, - vertices=None, - metadata=None, - process=True, + entities: Optional[List[Entity]] = None, + vertices: Optional[NDArray[float64]] = None, + metadata: Optional[Dict] = None, + process: bool = True, colors=None, **kwargs, ): @@ -116,11 +118,12 @@ def __repr__(self): def process(self): """ - Apply basic cleaning functions to the Path object in- place. + Apply basic cleaning functions to the Path object in-place. """ with self._cache: - for func in self._process_functions(): - func() + self.merge_vertices() + self.remove_duplicate_entities() + self.remove_unreferenced_vertices() return self @property @@ -169,7 +172,7 @@ def vertices(self): return self._vertices @vertices.setter - def vertices(self, values): + def vertices(self, values: NDArray[float64]): self._vertices = caching.tracked_array(values, dtype=np.float64) @property @@ -272,9 +275,8 @@ def scale(self): scale : float Approximate size of the world holding this path """ - # use vertices peak-peak rather than exact extents - scale = float((self.vertices.ptp(axis=0) ** 2).sum() ** 0.5) - return scale + # return the diagonal length of the AABB + return np.linalg.norm(self.vertices.ptp(axis=0)) @caching.cache_decorator def length(self): @@ -672,27 +674,8 @@ def remove_unreferenced_vertices(self): self.replace_vertex_references(mask=mask) self.vertices = self.vertices[unique] - def discretize_path(self, path): - """ - Given a list of entities, return a list of connected points. - - Parameters - ----------- - path: (n,) int - Indexes of self.entities - - Returns - ----------- - discrete : (m, dimension) - Linear segment path. - """ - discrete = traversal.discretize_path( - self.entities, self.vertices, path, scale=self.scale - ) - return discrete - @caching.cache_decorator - def discrete(self): + def discrete(self) -> List[NDArray[float64]]: """ A sequence of connected vertices in space, corresponding to self.paths. @@ -702,8 +685,18 @@ def discrete(self): discrete : (len(self.paths),) A sequence of (m*, dimension) float """ - discrete = [self.discretize_path(i) for i in self.paths] - return discrete + # avoid cache hits in the loop + scale = self.scale + entities = self.entities + vertices = self.vertices + + # discretize each path + return [ + traversal.discretize_path( + entities=entities, vertices=vertices, path=path, scale=scale + ) + for path in self.paths + ] def export(self, file_obj=None, file_type=None, **kwargs): """ @@ -809,13 +802,6 @@ class Path3D(Path): Hold multiple vector curves (lines, arcs, splines, etc) in 3D. """ - def _process_functions(self): - return [ - self.merge_vertices, - self.remove_duplicate_entities, - self.remove_unreferenced_vertices, - ] - def to_planar(self, to_2D=None, normal=None, check=True): """ Check to see if current vectors are all coplanar. 
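The `Path.discrete` rewrite above folds the old per-path `discretize_path` method into a single cached property, as the updated `test_paths.py` below also reflects. A minimal migration sketch; `drawing.dxf` is a hypothetical input file, and any `Path2D`/`Path3D` source behaves the same way:

```python
import trimesh

# hypothetical 2D drawing file; load_path also accepts arrays and Path objects
drawing = trimesh.load_path("drawing.dxf")

# before this patch: discretize each entity-index path yourself
# discrete = [drawing.discretize_path(p) for p in drawing.paths]

# after this patch: the cached property discretizes every path in one pass
discrete = drawing.discrete
assert len(discrete) == len(drawing.paths)
```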
@@ -934,16 +920,6 @@ def show(self, annotations=True): else: self.plot_entities(show=True, annotations=annotations) - def _process_functions(self): - """ - Return a list of functions to clean up a Path2D - """ - return [ - self.merge_vertices, - self.remove_duplicate_entities, - self.remove_unreferenced_vertices, - ] - def apply_obb(self): """ Transform the current path so that its OBB is axis aligned diff --git a/trimesh/typed.py b/trimesh/typed.py index 3226760c1..12a0cdeee 100644 --- a/trimesh/typed.py +++ b/trimesh/typed.py @@ -1,4 +1,4 @@ -from typing import List, Optional, Sequence, Tuple +from typing import Dict, List, Optional, Sequence, Tuple # our default integer and floating point types from numpy import float64, int64 @@ -10,4 +10,4 @@ ArrayLike = Sequence -__all__ = ["NDArray", "ArrayLike", "Optional", "List", "Tuple", "float64", "int64"] +__all__ = ["NDArray", "ArrayLike", "Optional", "List", "Dict", "Tuple", "float64", "int64"] From 623c5d8967b71e10be9e403d94db56121385d14f Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Tue, 10 Oct 2023 15:34:06 -0400 Subject: [PATCH 138/144] use stream loading in enclosure tree --- trimesh/path/path.py | 8 ++++---- trimesh/path/polygons.py | 33 +++++++++++++++++---------------- 2 files changed, 21 insertions(+), 20 deletions(-) diff --git a/trimesh/path/path.py b/trimesh/path/path.py index 1f9fd321b..d8bf60d6b 100644 --- a/trimesh/path/path.py +++ b/trimesh/path/path.py @@ -47,8 +47,11 @@ cKDTree = exceptions.ExceptionWrapper(E) try: from shapely.geometry import Polygon + from shapely.prepared import prep except BaseException as E: Polygon = exceptions.ExceptionWrapper(E) + prep = exceptions.ExceptionWrapper(E) + try: import networkx as nx except BaseException as E: @@ -1093,8 +1096,7 @@ def polygons_closed(self): """ # will attempt to recover invalid garbage geometry # and will be None if geometry is unrecoverable - polys = polygons.paths_to_polygons(self.discrete) - return polys + return polygons.paths_to_polygons(self.discrete) @caching.cache_decorator def polygons_full(self): @@ -1125,8 +1127,6 @@ def polygons_full(self): shell = closed[root].exterior # create a polygon with interiors full[i] = polygons.repair_invalid(Polygon(shell=shell, holes=holes)) - # so we can use advanced indexing - full = np.array(full) return full diff --git a/trimesh/path/polygons.py b/trimesh/path/polygons.py index 45c192d39..b9b0b25f2 100644 --- a/trimesh/path/polygons.py +++ b/trimesh/path/polygons.py @@ -19,12 +19,12 @@ nx = ExceptionWrapper(E) try: - from rtree import Rtree + from rtree.index import Index except BaseException as E: # create a dummy module which will raise the ImportError from ..exceptions import ExceptionWrapper - Rtree = ExceptionWrapper(E) + Index = ExceptionWrapper(E) def enclosure_tree(polygons: List[Polygon]): @@ -49,24 +49,26 @@ def enclosure_tree(polygons: List[Polygon]): Edges indicate a polygon is contained by another polygon """ - tree = Rtree() + + # get the bounds for every valid polygon + bounds = { + i: polygon.bounds + for i, polygon in enumerate(polygons) + if len(getattr(polygon, "bounds", [])) == 4 + } + # nodes are indexes in polygons contains = nx.DiGraph() - for i, polygon in enumerate(polygons): - # if a polygon is None it means creation - # failed due to weird geometry so ignore it - if polygon is None or len(polygon.bounds) != 4: - continue - # insert polygon bounds into rtree - tree.insert(i, polygon.bounds) - # make sure every valid polygon has a node - contains.add_node(i) + # make sure we don't 
have orphaned polygon + contains.add_nodes_from(bounds.keys()) + + # create an rtree from the bounds + tree = Index(zip(bounds.keys(), bounds.values(), [None] * len(bounds))) # loop through every polygon - for i in contains.nodes(): - polygon = polygons[i] + for i, b in bounds.items(): # we first query for bounding box intersections from the R-tree - for j in tree.intersection(polygon.bounds): + for j in tree.intersection(b): # if we are checking a polygon against itself continue if i == j: continue @@ -90,7 +92,6 @@ def enclosure_tree(polygons: List[Polygon]): if len(degrees) > 0 and degrees.max() > 1: # collect new edges for graph edges = [] - # order the roots so they are sorted by degree roots = roots[np.argsort([degree[r] for r in roots])] # find edges of subgraph for each root and children From cf273faa4aa8b0d46d975c07c5ebd4ea79a075d3 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Wed, 11 Oct 2023 18:33:59 -0400 Subject: [PATCH 139/144] only create tree if any valid bounds exist --- trimesh/path/polygons.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/trimesh/path/polygons.py b/trimesh/path/polygons.py index b9b0b25f2..2075c9039 100644 --- a/trimesh/path/polygons.py +++ b/trimesh/path/polygons.py @@ -52,9 +52,11 @@ def enclosure_tree(polygons: List[Polygon]): # get the bounds for every valid polygon bounds = { - i: polygon.bounds - for i, polygon in enumerate(polygons) - if len(getattr(polygon, "bounds", [])) == 4 + k: v + for k, v in { + i: getattr(polygon, "bounds", []) for i, polygon in enumerate(polygons) + }.items() + if len(v) == 4 } # nodes are indexes in polygons @@ -62,8 +64,12 @@ def enclosure_tree(polygons: List[Polygon]): # make sure we don't have orphaned polygon contains.add_nodes_from(bounds.keys()) - # create an rtree from the bounds - tree = Index(zip(bounds.keys(), bounds.values(), [None] * len(bounds))) + if len(bounds) > 0: + # if there are no valid bounds tree creation will fail + # and we won't be calling `tree.intersection` anywhere + # we could return here but having multiple return paths + # seems more dangerous than iterating through an empty graph + tree = Index(zip(bounds.keys(), bounds.values(), [None] * len(bounds))) # loop through every polygon for i, b in bounds.items(): From e3c156b1f1d85cf34cd111e68146d679137824b6 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Wed, 11 Oct 2023 18:36:00 -0400 Subject: [PATCH 140/144] check path discrete --- tests/test_paths.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_paths.py b/tests/test_paths.py index 67284cb5f..bbbe84d10 100644 --- a/tests/test_paths.py +++ b/tests/test_paths.py @@ -45,8 +45,8 @@ def test_discrete(self): if d.metadata["file_name"][-3:] == "dxf": assert len(d.layers) == len(d.entities) - for path in d.paths: - verts = d.discretize_path(path) + for path, verts in zip(d.paths, d.discrete): + assert len(path) >= 1 dists = g.np.sum((g.np.diff(verts, axis=0)) ** 2, axis=1) ** 0.5 if not g.np.all(dists > g.tol_path.zero): From df181f1f3ec3cf4942bc7ac2c18e76eb10ca4f6a Mon Sep 17 00:00:00 2001 From: "Benjamin A. Beasley" Date: Thu, 12 Oct 2023 07:19:11 -0400 Subject: [PATCH 141/144] Fix some assorted typos found with codespell These affect comments, docstrings, documentation, and exception messages. 
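Regarding the two enclosure-tree commits above: `rtree` supports bulk "stream" loading, where the index is built from a generator of `(id, bounds, object)` triples instead of repeated `insert` calls, and per the added comment the constructor fails on an empty stream, hence the `len(bounds) > 0` guard. A minimal sketch of that pattern with hypothetical hand-written AABBs standing in for real polygon bounds:

```python
from rtree.index import Index

# hypothetical (minx, miny, maxx, maxy) bounds keyed by polygon index
bounds = {0: (0.0, 0.0, 10.0, 10.0), 1: (2.0, 2.0, 4.0, 4.0)}

if len(bounds) > 0:
    # stream-load every entry in one constructor call
    tree = Index(zip(bounds.keys(), bounds.values(), [None] * len(bounds)))
    # indexes of AABBs intersecting the first polygon's bounds: {0, 1}
    hits = set(tree.intersection(bounds[0]))
```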
--- docker/trimesh-setup | 4 ++-- docs/content/docker.md | 2 +- docs/content/nricp.md | 12 ++++++------ models/jacked.obj | 2 +- models/plane.xaml | 2 +- tests/generic.py | 2 +- tests/test_bounds.py | 2 +- tests/test_cache.py | 2 +- tests/test_gltf.py | 2 +- tests/test_polygons.py | 4 ++-- tests/test_primitives.py | 4 ++-- tests/test_resolvers.py | 2 +- tests/test_sample.py | 2 +- trimesh/base.py | 2 +- trimesh/exchange/threemf.py | 2 +- trimesh/transformations.py | 2 +- trimesh/visual/material.py | 2 +- 17 files changed, 25 insertions(+), 25 deletions(-) diff --git a/docker/trimesh-setup b/docker/trimesh-setup index 7646d27b3..5ed0f0d26 100755 --- a/docker/trimesh-setup +++ b/docker/trimesh-setup @@ -136,7 +136,7 @@ def fetch(url, sha256): Location of remote resource. sha256: str The SHA256 hash of the resource once retrieved, - wil raise a `ValueError` if the hash doesn't match. + will raise a `ValueError` if the hash doesn't match. Returns ------------- @@ -362,7 +362,7 @@ if __name__ == "__main__": "apt": lambda x: apt_select.extend(x), } - # allow comma delimeters and de-duplicate + # allow comma delimiters and de-duplicate if args.install is None: parser.print_help() exit() diff --git a/docs/content/docker.md b/docs/content/docker.md index d2e91aef9..9e5def77a 100644 --- a/docs/content/docker.md +++ b/docs/content/docker.md @@ -18,7 +18,7 @@ RUN pip install trimesh[easy] ### Using Prebuilt Images -The `trimesh/trimesh` docker images are based on the offical Python base image, currently `python:3.11-slim-bullseye`. They are built and pushed to Docker Hub automatically in Github Actions for every release. +The `trimesh/trimesh` docker images are based on the official Python base image, currently `python:3.11-slim-bullseye`. They are built and pushed to Docker Hub automatically in Github Actions for every release. If you need some of the more demanding dependencies they can be a good option. The `trimesh/trimesh` images are pushed with three tags: `latest` (for latest :), semantic version (i.e. `3.15.5`), or git short hash (i.e. `1c6178d`). These images include `embree` and `trimesh[all]` which is run in a multi-stage build to avoid including intermediate files in the final image. diff --git a/docs/content/nricp.md b/docs/content/nricp.md index 271606884..dacc19074 100644 --- a/docs/content/nricp.md +++ b/docs/content/nricp.md @@ -2,7 +2,7 @@ Non-Rigid Registration ===================== Mesh non-rigid registration methods are capable of aligning (*i.e.* superimposing) a *source mesh* on a *target geometry* which can be any 3D structure that enables nearest point query. In Trimesh, the target geometry can either be a mesh `trimesh.Trimesh` or a point cloud `trimesh.PointCloud`. This process is often used to build dense correspondence, needed for the creation of [3D Morphable Models](https://www.face-rec.org/algorithms/3d_morph/morphmod2.pdf). -The "non-rigid" part means that the vertices of the source mesh are not scaled, rotated and translated together to match the target geometry as with [Iterative Closest Points](https://en.wikipedia.org/wiki/Iterative_closest_point) (ICP) methods. Instead, they are allowed to move *more or less independantly* to land on the target geometry. +The "non-rigid" part means that the vertices of the source mesh are not scaled, rotated and translated together to match the target geometry as with [Iterative Closest Points](https://en.wikipedia.org/wiki/Iterative_closest_point) (ICP) methods. 
Instead, they are allowed to move *more or less independently* to land on the target geometry. Trimesh implements two mesh non-rigid registrations algorithms which are both extensions of ICP. They are called Non-Rigid ICP methods : @@ -30,7 +30,7 @@ $\mathbf{x}_4$ is basically the point at the tip of the triangle normal starting Each deformed vertex $\tilde{\mathbf{v}}_i$ is computed from the vertex $\mathbf{v}_i$ via an affine transformation $\{\mathbf{T}, \mathbf{d}\}_i$ with $\mathbf{T}_i \in \mathbb{R}^{3\times3}$ being its scaling/rotational part and $\mathbf{d}_i$ being its translational part. We get $\tilde{\mathbf{v}}_i = \mathbf{T}_i\mathbf{v}_i + \mathbf{d}_i$. -The main idea is to subtract $\mathbf{d}$ from the previous equation. To do this, we substract $\mathbf{x}_1$ from each tetrahedron to obtain frames $\mathbf{V}_i$ and $\tilde{\mathbf{V}}_i \in \mathbb{R}^{3\times3}$ : +The main idea is to subtract $\mathbf{d}$ from the previous equation. To do this, we subtract $\mathbf{x}_1$ from each tetrahedron to obtain frames $\mathbf{V}_i$ and $\tilde{\mathbf{V}}_i \in \mathbb{R}^{3\times3}$ : $$ \begin{matrix} @@ -103,7 +103,7 @@ Then we either start the next iteration or return the result. The number of iterations is determined by the length of the `steps` argument. `steps` should be an iterable of five floats iterables `[[wc_1, wi_1, ws_1, wl_1, wn_1], ..., [wc_n, wi_n, ws_n, wl_n, wn_n]]`. The floats should correspond to $w_C, w_I, w_S, w_L$ and $w_N$. The extra weight $w_N$ is related to outlier robustness. ### Robustness to outliers -The target geometry can be noisy or incomplete which can lead to bad closest points $\mathbf{c}$. To remedy this issue, the linear equations related to $E_C$ are also weighted by *closest point validity weights*. First, if the distance to the closest point greater than the user specified threshold `distance_threshold`, the corresponding linear equations are multiplied by 0 (*i.e.* removed). Second, one may need the normals at the source mesh vertices and the normals at target geoemtry closest points to coincide. We use the dot product to the power $w_N$ to determine if normals are well aligned and use it to weight the linear equations. Eventually, the *closest point validity weights* are : +The target geometry can be noisy or incomplete which can lead to bad closest points $\mathbf{c}$. To remedy this issue, the linear equations related to $E_C$ are also weighted by *closest point validity weights*. First, if the distance to the closest point greater than the user specified threshold `distance_threshold`, the corresponding linear equations are multiplied by 0 (*i.e.* removed). Second, one may need the normals at the source mesh vertices and the normals at target geometry closest points to coincide. We use the dot product to the power $w_N$ to determine if normals are well aligned and use it to weight the linear equations. Eventually, the *closest point validity weights* are : $$ \boldsymbol{\alpha}=\left[ @@ -115,7 +115,7 @@ $$ $$ -With $d_{max}$ being the threshold given with the argument `distance_threshold`, and $\mathbf{n}_v$ and $\mathbf{n}_c$ the normals mentionned above. +With $d_{max}$ being the threshold given with the argument `distance_threshold`, and $\mathbf{n}_v$ and $\mathbf{n}_c$ the normals mentioned above. 
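A minimal `numpy` sketch of these validity weights, using hypothetical per-vertex arrays and assuming the normals are already unit length:

```python
import numpy as np

# hypothetical data: distance to each closest point and unit normals
dist = np.array([0.01, 0.5, 0.02])
n_v = np.array([[0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]])
n_c = np.array([[0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]])
d_max, w_n = 0.1, 2.0  # distance_threshold and the outlier weight w_N

# dot product of the unit normals raised to the power w_N ...
alpha = np.einsum("ij,ij->i", n_v, n_c) ** w_n
# ... zeroed wherever the closest point is farther than d_max
alpha[dist > d_max] = 0.0
```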
### Summary @@ -148,7 +148,7 @@ Three energies are minimized : $$E_C = \sum\limits^n_{i=1} \boldsymbol{\alpha}_i \lVert \mathbf{w}_i^\text{T}\mathbf{X}_i - \mathbf{c}_i \rVert^2$$ -- The **deformation smoothness term** (stiffness term) $E_S$. In the following, $\mathbf{G}=[1,1,1,\gamma]$ is used to weights differences in the rotational and skew part of the deformation, and can be accesed via the argument `gamma`. Two vertices are adjacent if they share an edge. +- The **deformation smoothness term** (stiffness term) $E_S$. In the following, $\mathbf{G}=[1,1,1,\gamma]$ is used to weights differences in the rotational and skew part of the deformation, and can be accessed via the argument `gamma`. Two vertices are adjacent if they share an edge. $$E_S = \sum\limits^n_{j\in\text{adj}(i)} \lVert (\mathbf{X}_i - \mathbf{X}_j) \mathbf{G} \rVert^2$$ @@ -206,7 +206,7 @@ The [same implementation](#robustness-to-outliers) than `nricp_sumner` is used, - $j ← j + 1$ -> In contrast to `nricp_sumner`, the matrix $\mathbf{A}_C$ is built only once at initilization. +> In contrast to `nricp_sumner`, the matrix $\mathbf{A}_C$ is built only once at initialization. ## Comparison of the two methods The main difference between `nricp_sumner` and `nricp_amberg` is the kind of transformations that is optimized. `nricp_sumner` involves frames with an extra vertex representing the orientation of the triangles, and solves implicitly for transformations that act on these frames. In `nricp_amberg`, per-vertex transformations are explicitly solved for which allows to construct the correspondence cost matrix $\mathbf{A}_C$ only once. As a result, `nricp_sumner` tends to output smoother results with less high frequencies. The users are advised to try both algorithms with different parameter sets, especially different `steps` arguments, and find which suits better their problem. `nricp_amberg` appears to be easier to tune, though. diff --git a/models/jacked.obj b/models/jacked.obj index 7eb6aa693..420408304 100644 --- a/models/jacked.obj +++ b/models/jacked.obj @@ -1,5 +1,5 @@ # https://github.com/mikedh/trimesh -mtllib nonexistant.mtl +mtllib nonexistent.mtl usemtl material0 v -0.50000000 -0.50000000 -0.50000000 v -0.50000000 -0.50000000 0.50000000 diff --git a/models/plane.xaml b/models/plane.xaml index ed4fbd416..5fa216c51 100644 --- a/models/plane.xaml +++ b/models/plane.xaml @@ -27,7 +27,7 @@ - + NDArray[bool]: Returns -------- unique : (len(faces),) bool - A mask where the first occurance of a unique face is true. + A mask where the first occurrence of a unique face is true. 
""" mask = np.zeros(len(self.faces), dtype=bool) mask[grouping.unique_rows(np.sort(self.faces, axis=1))[0]] = True diff --git a/trimesh/exchange/threemf.py b/trimesh/exchange/threemf.py index f2905b1e4..c6a254f19 100644 --- a/trimesh/exchange/threemf.py +++ b/trimesh/exchange/threemf.py @@ -290,7 +290,7 @@ def model_id(x): with xf.element("object", **attribs): with xf.element("mesh"): with xf.element("vertices"): - # vertex nodes are writed directly to the file + # vertex nodes are written directly to the file # so make sure lxml's buffer is flushed xf.flush() for i in range(0, len(m.vertices), batch_size): diff --git a/trimesh/transformations.py b/trimesh/transformations.py index 384f9aa5f..5525d93f3 100644 --- a/trimesh/transformations.py +++ b/trimesh/transformations.py @@ -2086,7 +2086,7 @@ def planar_matrix_to_3D(matrix_2D): matrix_2D = np.asanyarray(matrix_2D, dtype=np.float64) if matrix_2D.shape != (3, 3): - raise ValueError("Homogenous 2D transformation matrix required!") + raise ValueError("Homogeneous 2D transformation matrix required!") matrix_3D = np.eye(4) # translation diff --git a/trimesh/visual/material.py b/trimesh/visual/material.py index e372c095e..0ccd3b7c5 100644 --- a/trimesh/visual/material.py +++ b/trimesh/visual/material.py @@ -758,7 +758,7 @@ def pack( max_tex_size_fused : int | None Maximum size of the combined texture. Individual texture size will be reduced to fit. - Set to None to allow infite size. + Set to None to allow infinite size. Returns ------------ From 0bf4939acb550e260e5edb27650a38dacf8ca2cd Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 12 Oct 2023 20:19:31 -0400 Subject: [PATCH 142/144] add clamping strategy --- trimesh/visual/material.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/trimesh/visual/material.py b/trimesh/visual/material.py index 77eca1153..c478c5210 100644 --- a/trimesh/visual/material.py +++ b/trimesh/visual/material.py @@ -1083,9 +1083,10 @@ def pack_images(images): # the case of uv==1.0 uvg = uvs[g].copy() - # wrap before scaling and offsetting - wrap = np.logical_or(uvg < -half, uvg > (1.0 + half)) - uvg[wrap] %= 1.0 + # now wrap anything more than half a pixel outside + uvg[np.logical_or(uvg < -half, uvg > (1.0 + half))] %= 1.0 + # clamp to half a pixel + uvg = np.clip(uvg, half, 1.0 - half) # apply the scale and offset moved = (uvg * uv_scale) + uv_offset @@ -1095,7 +1096,7 @@ def pack_images(images): old = color.uv_to_interpolated_color(uvs[g], img) # the color from the packed image new = color.uv_to_interpolated_color(moved, final) - assert np.allclose(old, new, atol=4) + assert np.allclose(old, new, atol=10) new_uv[g] = moved From 0df357cbe1fa758d3424a48a401a3b3c0d79e640 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 12 Oct 2023 20:57:50 -0400 Subject: [PATCH 143/144] run black --- trimesh/typed.py | 11 ++++++++++- trimesh/visual/material.py | 2 +- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/trimesh/typed.py b/trimesh/typed.py index 12a0cdeee..b374f03f0 100644 --- a/trimesh/typed.py +++ b/trimesh/typed.py @@ -10,4 +10,13 @@ ArrayLike = Sequence -__all__ = ["NDArray", "ArrayLike", "Optional", "List", "Dict", "Tuple", "float64", "int64"] +__all__ = [ + "NDArray", + "ArrayLike", + "Optional", + "List", + "Dict", + "Tuple", + "float64", + "int64", +] diff --git a/trimesh/visual/material.py b/trimesh/visual/material.py index c478c5210..8777a662b 100644 --- a/trimesh/visual/material.py +++ b/trimesh/visual/material.py @@ -1096,7 +1096,7 @@ def 
pack_images(images): old = color.uv_to_interpolated_color(uvs[g], img) # the color from the packed image new = color.uv_to_interpolated_color(moved, final) - assert np.allclose(old, new, atol=10) + assert np.allclose(old, new, atol=6) new_uv[g] = moved From 03bdeea74686a45e982a6886e45c35f98e6f2760 Mon Sep 17 00:00:00 2001 From: Michael Dawson-Haggerty Date: Thu, 12 Oct 2023 21:52:40 -0400 Subject: [PATCH 144/144] major version bump --- README.md | 8 -------- pyproject.toml | 2 +- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/README.md b/README.md index edc805641..d16adcc02 100644 --- a/README.md +++ b/README.md @@ -4,14 +4,6 @@ [![Github Actions](https://github.com/mikedh/trimesh/workflows/Release%20Trimesh/badge.svg)](https://github.com/mikedh/trimesh/actions) [![codecov](https://codecov.io/gh/mikedh/trimesh/branch/main/graph/badge.svg?token=4PVRQXyl2h)](https://codecov.io/gh/mikedh/trimesh) [![Docker Image Version (latest by date)](https://img.shields.io/docker/v/trimesh/trimesh?label=docker&sort=semver)](https://hub.docker.com/r/trimesh/trimesh/tags) [![PyPI version](https://badge.fury.io/py/trimesh.svg)](https://badge.fury.io/py/trimesh) - -| :warning: WARNING | -|---------------------------| -| `trimesh >= 4.0.0` on `release-candidate` makes minimum Python 3.7 is in pre-release | -| Testing with `pip install --pre trimesh` would be much appreciated! | -| Projects that support `python<3.7` should update requirement to `trimesh<4` | - - Trimesh is a pure Python 3.7+ library for loading and using [triangular meshes](https://en.wikipedia.org/wiki/Triangle_mesh) with an emphasis on watertight surfaces. The goal of the library is to provide a full featured and well tested Trimesh object which allows for easy manipulation and analysis, in the style of the Polygon object in the [Shapely library](https://github.com/Toblerity/Shapely). The API is mostly stable, but this should not be relied on and is not guaranteed: install a specific version if you plan on deploying something using trimesh. diff --git a/pyproject.toml b/pyproject.toml index d755528f7..07a1979c6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ requires = ["setuptools >= 61.0", "wheel"] [project] name = "trimesh" requires-python = ">=3.7" -version = "4.0.0.rc2" +version = "4.0.0" authors = [{name = "Michael Dawson-Haggerty", email = "mikedh@kerfed.com"}] license = {file = "LICENSE.md"} description = "Import, export, process, analyze and view triangular meshes."
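Taken together, the kwargs pass-through from [PATCH 131/144] and the switch to the `vhacdx` backend in [PATCH 133/144] and [PATCH 136/144] mean the 4.0.0 release lets callers tune the decomposition directly. A hedged usage sketch, with `part.stl` standing in for any watertight input mesh:

```python
import trimesh

# hypothetical input file; any watertight Trimesh works the same way
mesh = trimesh.load("part.stl")

# VHACD parameters from the table documented in decomposition.convex_decomposition:
# maxConvexHulls caps the number of returned pieces and resolution trades
# speed for fidelity (the documented defaults are 64 and 400000)
pieces = mesh.convex_decomposition(maxConvexHulls=16, resolution=100000)

# each piece is a nearly-convex Trimesh approximating part of the input
print(len(pieces), sum(p.volume for p in pieces))
```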