diff --git a/igniter/bootstrap_repos.py b/igniter/bootstrap_repos.py
index 8c081b86146..b49a2f6e7fa 100644
--- a/igniter/bootstrap_repos.py
+++ b/igniter/bootstrap_repos.py
@@ -508,7 +508,7 @@ def _create_openpype_zip(self, zip_path: Path, openpype_path: Path) -> None:
                 processed_path = file
                 self._print(f"- processing {processed_path}")

-                zip_file.write(file, file.relative_to(openpype_root))
+                zip_file.write(file, file.resolve().relative_to(openpype_root))

         # test if zip is ok
         zip_file.testzip()
diff --git a/openpype/hosts/blender/api/plugin.py b/openpype/hosts/blender/api/plugin.py
index de30da33194..50b73ade2b9 100644
--- a/openpype/hosts/blender/api/plugin.py
+++ b/openpype/hosts/blender/api/plugin.py
@@ -5,11 +5,12 @@

 import bpy

-from avalon import api
-import avalon.blender
+from avalon import api, blender
+from avalon.blender import ops
+from avalon.blender.pipeline import AVALON_CONTAINERS
 from openpype.api import PypeCreatorMixin

-VALID_EXTENSIONS = [".blend", ".json", ".abc"]
+VALID_EXTENSIONS = [".blend", ".json", ".abc", ".fbx"]


 def asset_name(
@@ -27,32 +28,24 @@ def get_unique_number(
     asset: str, subset: str
 ) -> str:
     """Return a unique number based on the asset name."""
-    avalon_containers = [
-        c for c in bpy.data.collections
-        if c.name == 'AVALON_CONTAINERS'
-    ]
-    containers = []
-    # First, add the children of avalon containers
-    for c in avalon_containers:
-        containers.extend(c.children)
-    # then keep looping to include all the children
-    for c in containers:
-        containers.extend(c.children)
-    container_names = [
-        c.name for c in containers
-    ]
+    avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
+    if not avalon_container:
+        return "01"
+    asset_groups = avalon_container.all_objects
+
+    container_names = [c.name for c in asset_groups if c.type == 'EMPTY']
     count = 1
-    name = f"{asset}_{count:0>2}_{subset}_CON"
+    name = f"{asset}_{count:0>2}_{subset}"
     while name in container_names:
         count += 1
-        name = f"{asset}_{count:0>2}_{subset}_CON"
+        name = f"{asset}_{count:0>2}_{subset}"
     return f"{count:0>2}"


 def prepare_data(data, container_name):
     name = data.name
     local_data = data.make_local()
-    local_data.name = f"{name}:{container_name}"
+    local_data.name = f"{container_name}:{name}"
     return local_data


@@ -102,7 +95,7 @@ def get_local_collection_with_name(name):
     return None


-class Creator(PypeCreatorMixin, avalon.blender.Creator):
+class Creator(PypeCreatorMixin, blender.Creator):
     pass
@@ -173,6 +166,16 @@ def load(self,
              name: Optional[str] = None,
              namespace: Optional[str] = None,
              options: Optional[Dict] = None) -> Optional[bpy.types.Collection]:
+        """Run the loader on the Blender main thread."""
+        mti = ops.MainThreadItem(self._load, context, name, namespace, options)
+        ops.execute_in_main_thread(mti)
+
+    def _load(self,
+              context: dict,
+              name: Optional[str] = None,
+              namespace: Optional[str] = None,
+              options: Optional[Dict] = None
+              ) -> Optional[bpy.types.Collection]:
         """Load asset via database

         Arguments:
@@ -218,16 +221,26 @@
         # loader=self.__class__.__name__,
         # )

-        asset = context["asset"]["name"]
-        subset = context["subset"]["name"]
-        instance_name = asset_name(asset, subset, unique_number) + '_CON'
+        # asset = context["asset"]["name"]
+        # subset = context["subset"]["name"]
+        # instance_name = asset_name(asset, subset, unique_number) + '_CON'

-        return self._get_instance_collection(instance_name, nodes)
+        # return self._get_instance_collection(instance_name, nodes)

-    def update(self, container: Dict, representation: Dict):
+    def exec_update(self, container: Dict, representation: Dict):
         """Must be implemented by a sub-class"""
         raise NotImplementedError("Must be implemented by a sub-class")

-    def remove(self, container: Dict) -> bool:
+    def update(self, container: Dict, representation: Dict):
+        """Run the update on the Blender main thread."""
+        mti = ops.MainThreadItem(self.exec_update, container, representation)
+        ops.execute_in_main_thread(mti)
+
+    def exec_remove(self, container: Dict) -> bool:
         """Must be implemented by a sub-class"""
         raise NotImplementedError("Must be implemented by a sub-class")
+
+    def remove(self, container: Dict) -> bool:
+        """Run the remove on the Blender main thread."""
+        mti = ops.MainThreadItem(self.exec_remove, container)
+        ops.execute_in_main_thread(mti)
diff --git a/openpype/hosts/blender/plugins/create/create_animation.py b/openpype/hosts/blender/plugins/create/create_animation.py
index 9aebf7e9b7e..f7887b7e80d 100644
--- a/openpype/hosts/blender/plugins/create/create_animation.py
+++ b/openpype/hosts/blender/plugins/create/create_animation.py
@@ -2,11 +2,13 @@

 import bpy

-from avalon import api, blender
-import openpype.hosts.blender.api.plugin
+from avalon import api
+from avalon.blender import lib, ops
+from avalon.blender.pipeline import AVALON_INSTANCES
+from openpype.hosts.blender.api import plugin


-class CreateAnimation(openpype.hosts.blender.api.plugin.Creator):
+class CreateAnimation(plugin.Creator):
     """Animation output for character rigs"""

     name = "animationMain"
@@ -15,16 +17,36 @@ class CreateAnimation(openpype.hosts.blender.api.plugin.Creator):
     icon = "male"

     def process(self):
+        """Run the creator on the Blender main thread."""
+        mti = ops.MainThreadItem(self._process)
+        ops.execute_in_main_thread(mti)
+
+    def _process(self):
+        # Get the instance container, or create it if it does not exist
+        instances = bpy.data.collections.get(AVALON_INSTANCES)
+        if not instances:
+            instances = bpy.data.collections.new(name=AVALON_INSTANCES)
+            bpy.context.scene.collection.children.link(instances)
+
+        # Create instance object
+        # name = self.name
+        # if not name:
         asset = self.data["asset"]
         subset = self.data["subset"]
-        name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
-        collection = bpy.data.collections.new(name=name)
-        bpy.context.scene.collection.children.link(collection)
+        name = plugin.asset_name(asset, subset)
+        # asset_group = bpy.data.objects.new(name=name, object_data=None)
+        # asset_group.empty_display_type = 'SINGLE_ARROW'
+        asset_group = bpy.data.collections.new(name=name)
+        instances.children.link(asset_group)
         self.data['task'] = api.Session.get('AVALON_TASK')
-        blender.lib.imprint(collection, self.data)
+        lib.imprint(asset_group, self.data)

         if (self.options or {}).get("useSelection"):
-            for obj in blender.lib.get_selection():
-                collection.objects.link(obj)
-
-        return collection
+            selected = lib.get_selection()
+            for obj in selected:
+                asset_group.objects.link(obj)
+        elif (self.options or {}).get("asset_group"):
+            obj = (self.options or {}).get("asset_group")
+            asset_group.objects.link(obj)
+
+        return asset_group
diff --git a/openpype/hosts/blender/plugins/create/create_layout.py b/openpype/hosts/blender/plugins/create/create_layout.py
index 5404cec5875..831261f0277 100644
--- a/openpype/hosts/blender/plugins/create/create_layout.py
+++ b/openpype/hosts/blender/plugins/create/create_layout.py
@@ -3,11 +3,12 @@
 import bpy

 from avalon import api
-from avalon.blender import lib
-import openpype.hosts.blender.api.plugin
+from avalon.blender import lib, ops
+from avalon.blender.pipeline import AVALON_INSTANCES
+from openpype.hosts.blender.api import plugin


-class CreateLayout(openpype.hosts.blender.api.plugin.Creator):
+class CreateLayout(plugin.Creator):
     """Layout output for character rigs"""

     name = "layoutMain"
@@ -16,13 +17,34 @@ class CreateLayout(openpype.hosts.blender.api.plugin.Creator):
     icon = "cubes"

     def process(self):
-
+        """Run the creator on the Blender main thread."""
+        mti = ops.MainThreadItem(self._process)
+        ops.execute_in_main_thread(mti)
+
+    def _process(self):
+        # Get the instance container, or create it if it does not exist
+        instances = bpy.data.collections.get(AVALON_INSTANCES)
+        if not instances:
+            instances = bpy.data.collections.new(name=AVALON_INSTANCES)
+            bpy.context.scene.collection.children.link(instances)
+
+        # Create instance object
         asset = self.data["asset"]
         subset = self.data["subset"]
-        name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
-        collection = bpy.context.collection
-        collection.name = name
+        name = plugin.asset_name(asset, subset)
+        asset_group = bpy.data.objects.new(name=name, object_data=None)
+        asset_group.empty_display_type = 'SINGLE_ARROW'
+        instances.objects.link(asset_group)
         self.data['task'] = api.Session.get('AVALON_TASK')
-        lib.imprint(collection, self.data)
-
-        return collection
+        lib.imprint(asset_group, self.data)
+
+        # Add selected objects to instance
+        if (self.options or {}).get("useSelection"):
+            bpy.context.view_layer.objects.active = asset_group
+            selected = lib.get_selection()
+            for obj in selected:
+                obj.select_set(True)
+            selected.append(asset_group)
+            bpy.ops.object.parent_set(keep_transform=True)
+
+        return asset_group
diff --git a/openpype/hosts/blender/plugins/create/create_model.py b/openpype/hosts/blender/plugins/create/create_model.py
index 921d86513b9..e778f5b74f7 100644
--- a/openpype/hosts/blender/plugins/create/create_model.py
+++ b/openpype/hosts/blender/plugins/create/create_model.py
@@ -3,11 +3,12 @@
 import bpy

 from avalon import api
-from avalon.blender import lib
-import openpype.hosts.blender.api.plugin
+from avalon.blender import lib, ops
+from avalon.blender.pipeline import AVALON_INSTANCES
+from openpype.hosts.blender.api import plugin


-class CreateModel(openpype.hosts.blender.api.plugin.Creator):
+class CreateModel(plugin.Creator):
     """Polygonal static geometry"""

     name = "modelMain"
@@ -16,17 +17,34 @@ class CreateModel(openpype.hosts.blender.api.plugin.Creator):
     icon = "cube"

     def process(self):
-
+        """Run the creator on the Blender main thread."""
+        mti = ops.MainThreadItem(self._process)
+        ops.execute_in_main_thread(mti)
+
+    def _process(self):
+        # Get the instance container, or create it if it does not exist
+        instances = bpy.data.collections.get(AVALON_INSTANCES)
+        if not instances:
+            instances = bpy.data.collections.new(name=AVALON_INSTANCES)
+            bpy.context.scene.collection.children.link(instances)
+
+        # Create instance object
         asset = self.data["asset"]
         subset = self.data["subset"]
-        name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
-        collection = bpy.data.collections.new(name=name)
-        bpy.context.scene.collection.children.link(collection)
+        name = plugin.asset_name(asset, subset)
+        asset_group = bpy.data.objects.new(name=name, object_data=None)
+        asset_group.empty_display_type = 'SINGLE_ARROW'
+        instances.objects.link(asset_group)
         self.data['task'] = api.Session.get('AVALON_TASK')
-        lib.imprint(collection, self.data)
+        lib.imprint(asset_group, self.data)

+        # Add selected objects to instance
         if (self.options or {}).get("useSelection"):
-            for obj in lib.get_selection():
-                collection.objects.link(obj)
-
-        return collection
+            bpy.context.view_layer.objects.active = asset_group
+            selected = lib.get_selection()
+            for obj in selected:
+                obj.select_set(True)
+            selected.append(asset_group)
+            bpy.ops.object.parent_set(keep_transform=True)
+
+        return asset_group
diff --git a/openpype/hosts/blender/plugins/create/create_rig.py b/openpype/hosts/blender/plugins/create/create_rig.py
index 116fb9f7426..2e1c71f5709 100644
--- a/openpype/hosts/blender/plugins/create/create_rig.py
+++ b/openpype/hosts/blender/plugins/create/create_rig.py
@@ -3,11 +3,12 @@
 import bpy

 from avalon import api
-from avalon.blender import lib
-import openpype.hosts.blender.api.plugin
+from avalon.blender import lib, ops
+from avalon.blender.pipeline import AVALON_INSTANCES
+from openpype.hosts.blender.api import plugin


-class CreateRig(openpype.hosts.blender.api.plugin.Creator):
+class CreateRig(plugin.Creator):
     """Artist-friendly rig with controls to direct motion"""

     name = "rigMain"
@@ -16,26 +17,35 @@ class CreateRig(openpype.hosts.blender.api.plugin.Creator):
     icon = "wheelchair"

     def process(self):
-
+        """Run the creator on the Blender main thread."""
+        mti = ops.MainThreadItem(self._process)
+        ops.execute_in_main_thread(mti)
+
+    def _process(self):
+        # Get the instance container, or create it if it does not exist
+        instances = bpy.data.collections.get(AVALON_INSTANCES)
+        if not instances:
+            instances = bpy.data.collections.new(name=AVALON_INSTANCES)
+            bpy.context.scene.collection.children.link(instances)
+
+        # Create instance object
         asset = self.data["asset"]
         subset = self.data["subset"]
-        name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
-        collection = bpy.data.collections.new(name=name)
-        bpy.context.scene.collection.children.link(collection)
+        name = plugin.asset_name(asset, subset)
+        asset_group = bpy.data.objects.new(name=name, object_data=None)
+        asset_group.empty_display_type = 'SINGLE_ARROW'
+        instances.objects.link(asset_group)
         self.data['task'] = api.Session.get('AVALON_TASK')
-        lib.imprint(collection, self.data)
-
-        # Add the rig object and all the children meshes to
-        # a set and link them all at the end to avoid duplicates.
-        # Blender crashes if trying to link an object that is already linked.
-        # This links automatically the children meshes if they were not
-        # selected, and doesn't link them twice if they, insted,
-        # were manually selected by the user.
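+        # Imprint the instance data on the asset group as custom properties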
+        lib.imprint(asset_group, self.data)
+        # Add selected objects to instance
         if (self.options or {}).get("useSelection"):
-            for obj in lib.get_selection():
-                for child in obj.users_collection[0].children:
-                    collection.children.link(child)
-                collection.objects.link(obj)
-
-        return collection
+            bpy.context.view_layer.objects.active = asset_group
+            selected = lib.get_selection()
+            for obj in selected:
+                obj.select_set(True)
+            selected.append(asset_group)
+            bpy.ops.object.parent_set(keep_transform=True)
+
+        return asset_group
diff --git a/openpype/hosts/blender/plugins/load/load_abc.py b/openpype/hosts/blender/plugins/load/load_abc.py
index 4248cffd699..92656fac9ee 100644
--- a/openpype/hosts/blender/plugins/load/load_abc.py
+++ b/openpype/hosts/blender/plugins/load/load_abc.py
@@ -4,9 +4,14 @@
 from pprint import pformat
 from typing import Dict, List, Optional

-from avalon import api, blender
 import bpy
-import openpype.hosts.blender.api.plugin as plugin
+
+from avalon import api
+from avalon.blender import lib
+from avalon.blender.pipeline import AVALON_CONTAINERS
+from avalon.blender.pipeline import AVALON_CONTAINER_ID
+from avalon.blender.pipeline import AVALON_PROPERTY
+from openpype.hosts.blender.api import plugin


 class CacheModelLoader(plugin.AssetLoader):
@@ -21,24 +26,30 @@ class CacheModelLoader(plugin.AssetLoader):
     families = ["model", "pointcache"]
     representations = ["abc"]

-    label = "Link Alembic"
+    label = "Load Alembic"
     icon = "code-fork"
     color = "orange"

-    def _remove(self, objects, container):
-        for obj in list(objects):
+    def _remove(self, asset_group):
+        objects = list(asset_group.children)
+        empties = []
+
+        for obj in objects:
             if obj.type == 'MESH':
+                for material_slot in list(obj.material_slots):
+                    bpy.data.materials.remove(material_slot.material)
                 bpy.data.meshes.remove(obj.data)
             elif obj.type == 'EMPTY':
-                bpy.data.objects.remove(obj)
+                objects.extend(obj.children)
+                empties.append(obj)

-        bpy.data.collections.remove(container)
+        for empty in empties:
+            bpy.data.objects.remove(empty)

-    def _process(self, libpath, container_name, parent_collection):
+    def _process(self, libpath, asset_group, group_name):
         bpy.ops.object.select_all(action='DESELECT')

-        view_layer = bpy.context.view_layer
-        view_layer_collection = view_layer.active_layer_collection.collection
+        collection = bpy.context.view_layer.active_layer_collection.collection

         relative = bpy.context.preferences.filepaths.use_relative_paths
         bpy.ops.wm.alembic_import(
@@ -46,34 +57,61 @@
             relative_path=relative
         )

-        parent = parent_collection
+        parent = bpy.context.scene.collection
+
+        imported = lib.get_selection()
+
+        empties = [obj for obj in imported if obj.type == 'EMPTY']
+
+        container = None
+
+        for empty in empties:
+            if not empty.parent:
+                container = empty
+                break
+
+        assert container, "No asset group found"
+
+        # Children must be linked before parents,
+        # otherwise the hierarchy will break
+        objects = []
+        nodes = list(container.children)

-        if parent is None:
-            parent = bpy.context.scene.collection
+        for obj in nodes:
+            obj.parent = asset_group

-        model_container = bpy.data.collections.new(container_name)
-        parent.children.link(model_container)
-        for obj in bpy.context.selected_objects:
-            model_container.objects.link(obj)
-            view_layer_collection.objects.unlink(obj)
+        bpy.data.objects.remove(container)

+        for obj in nodes:
+            objects.append(obj)
+            nodes.extend(list(obj.children))
+
+        objects.reverse()
+
+        for obj in objects:
+            parent.objects.link(obj)
+            collection.objects.unlink(obj)
+
+        for obj in objects:
             name = obj.name
-            obj.name = f"{name}:{container_name}"
+            obj.name = f"{group_name}:{name}"
+            if obj.type != 'EMPTY':
+                name_data = obj.data.name
+                obj.data.name = f"{group_name}:{name_data}"

-            # Groups are imported as Empty objects in Blender
-            if obj.type == 'MESH':
-                data_name = obj.data.name
-                obj.data.name = f"{data_name}:{container_name}"
+                for material_slot in obj.material_slots:
+                    name_mat = material_slot.material.name
+                    material_slot.material.name = f"{group_name}:{name_mat}"

-            if not obj.get(blender.pipeline.AVALON_PROPERTY):
-                obj[blender.pipeline.AVALON_PROPERTY] = dict()
+            if not obj.get(AVALON_PROPERTY):
+                obj[AVALON_PROPERTY] = dict()

-            avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
-            avalon_info.update({"container_name": container_name})
+            avalon_info = obj[AVALON_PROPERTY]
+            avalon_info.update({"container_name": group_name})

         bpy.ops.object.select_all(action='DESELECT')

-        return model_container
+        return objects

     def process_asset(
         self, context: dict, name: str, namespace: Optional[str] = None,
@@ -91,47 +129,41 @@ def process_asset(
         asset = context["asset"]["name"]
         subset = context["subset"]["name"]
-        lib_container = plugin.asset_name(
-            asset, subset
-        )
-        unique_number = plugin.get_unique_number(
-            asset, subset
-        )
+        asset_name = plugin.asset_name(asset, subset)
+        unique_number = plugin.get_unique_number(asset, subset)
+        group_name = plugin.asset_name(asset, subset, unique_number)
         namespace = namespace or f"{asset}_{unique_number}"
-        container_name = plugin.asset_name(
-            asset, subset, unique_number
-        )

-        container = bpy.data.collections.new(lib_container)
-        container.name = container_name
-        blender.pipeline.containerise_existing(
-            container,
-            name,
-            namespace,
-            context,
-            self.__class__.__name__,
-        )
-
-        container_metadata = container.get(
-            blender.pipeline.AVALON_PROPERTY)
+        avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
+        if not avalon_container:
+            avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
+            bpy.context.scene.collection.children.link(avalon_container)

-        container_metadata["libpath"] = libpath
-        container_metadata["lib_container"] = lib_container
+        asset_group = bpy.data.objects.new(group_name, object_data=None)
+        avalon_container.objects.link(asset_group)

-        obj_container = self._process(
-            libpath, container_name, None)
+        objects = self._process(libpath, asset_group, group_name)

-        container_metadata["obj_container"] = obj_container
+        bpy.context.scene.collection.objects.link(asset_group)

-        # Save the list of objects in the metadata container
-        container_metadata["objects"] = obj_container.all_objects
+        asset_group[AVALON_PROPERTY] = {
+            "schema": "openpype:container-2.0",
+            "id": AVALON_CONTAINER_ID,
+            "name": name,
+            "namespace": namespace or '',
+            "loader": str(self.__class__.__name__),
+            "representation": str(context["representation"]["_id"]),
+            "libpath": libpath,
+            "asset_name": asset_name,
+            "parent": str(context["representation"]["parent"]),
+            "family": context["representation"]["context"]["family"],
+            "objectName": group_name
+        }

-        nodes = list(container.objects)
-        nodes.append(container)
-        self[:] = nodes
-        return nodes
+        self[:] = objects
+        return objects

-    def update(self, container: Dict, representation: Dict):
+    def exec_update(self, container: Dict, representation: Dict):
         """Update the loaded asset.

         This will remove all objects of the current collection, load the new
@@ -143,9 +175,8 @@
         ones and add them to the collection.
         If the objects of the collection are used in another collection they
         will not be removed, only unlinked. Normally this should not be the
         case though.

         Warning:
             No nested collections are supported at the moment!
         """
-        collection = bpy.data.collections.get(
-            container["objectName"]
-        )
+        object_name = container["objectName"]
+        asset_group = bpy.data.objects.get(object_name)
         libpath = Path(api.get_representation_path(representation))
         extension = libpath.suffix.lower()
@@ -155,12 +186,9 @@
         self.log.info(
             "Container: %s\nRepresentation: %s",
             pformat(container, indent=2),
             pformat(representation, indent=2),
         )

-        assert collection, (
+        assert asset_group, (
             f"The asset is not loaded: {container['objectName']}"
         )
-        assert not (collection.children), (
-            "Nested collections are not supported."
-        )
         assert libpath, (
             "No existing library file found for {container['objectName']}"
         )
@@ -171,45 +199,34 @@
             f"Unsupported file: {libpath}"
         )

-        collection_metadata = collection.get(
-            blender.pipeline.AVALON_PROPERTY)
-        collection_libpath = collection_metadata["libpath"]
-
-        obj_container = plugin.get_local_collection_with_name(
-            collection_metadata["obj_container"].name
-        )
-        objects = obj_container.all_objects
-
-        container_name = obj_container.name
+        metadata = asset_group.get(AVALON_PROPERTY)
+        group_libpath = metadata["libpath"]

-        normalized_collection_libpath = (
-            str(Path(bpy.path.abspath(collection_libpath)).resolve())
+        normalized_group_libpath = (
+            str(Path(bpy.path.abspath(group_libpath)).resolve())
         )
         normalized_libpath = (
             str(Path(bpy.path.abspath(str(libpath))).resolve())
         )
         self.log.debug(
-            "normalized_collection_libpath:\n  %s\nnormalized_libpath:\n  %s",
-            normalized_collection_libpath,
+            "normalized_group_libpath:\n  %s\nnormalized_libpath:\n  %s",
+            normalized_group_libpath,
             normalized_libpath,
         )
-        if normalized_collection_libpath == normalized_libpath:
+        if normalized_group_libpath == normalized_libpath:
             self.log.info("Library already loaded, not updating...")
             return

-        parent = plugin.get_parent_collection(obj_container)
+        mat = asset_group.matrix_basis.copy()
+        self._remove(asset_group)

-        self._remove(objects, obj_container)
+        self._process(str(libpath), asset_group, object_name)
+        asset_group.matrix_basis = mat

-        obj_container = self._process(
-            str(libpath), container_name, parent)
+        metadata["libpath"] = str(libpath)
+        metadata["representation"] = str(representation["_id"])

-        collection_metadata["obj_container"] = obj_container
-        collection_metadata["objects"] = obj_container.all_objects
-        collection_metadata["libpath"] = str(libpath)
-        collection_metadata["representation"] = str(representation["_id"])
-
-    def remove(self, container: Dict) -> bool:
+    def exec_remove(self, container: Dict) -> bool:
         """Remove an existing container from a Blender scene.

         Arguments:
             container (openpype:container-1.0): Container to remove,
                 from `host.ls()`.
@@ -222,25 +239,14 @@
         Returns:
             bool: Whether the container was deleted.

         Warning:
             No nested collections are supported at the moment!
         """
-        collection = bpy.data.collections.get(
-            container["objectName"]
-        )
-        if not collection:
-            return False
-        assert not (collection.children), (
-            "Nested collections are not supported."
-        )
+        object_name = container["objectName"]
+        asset_group = bpy.data.objects.get(object_name)

-        collection_metadata = collection.get(
-            blender.pipeline.AVALON_PROPERTY)
-
-        obj_container = plugin.get_local_collection_with_name(
-            collection_metadata["obj_container"].name
-        )
-        objects = obj_container.all_objects
+        if not asset_group:
+            return False

-        self._remove(objects, obj_container)
+        self._remove(asset_group)

-        bpy.data.collections.remove(collection)
+        bpy.data.objects.remove(asset_group)

         return True
diff --git a/openpype/hosts/blender/plugins/load/load_animation.py b/openpype/hosts/blender/plugins/load/load_animation.py
index 4025fdfa74b..47c48248b2f 100644
--- a/openpype/hosts/blender/plugins/load/load_animation.py
+++ b/openpype/hosts/blender/plugins/load/load_animation.py
@@ -1,20 +1,19 @@
 """Load an animation in Blender."""

 import logging
-from pathlib import Path
-from pprint import pformat
 from typing import Dict, List, Optional

-from avalon import api, blender
 import bpy
-import openpype.hosts.blender.api.plugin
+
+from avalon.blender.pipeline import AVALON_PROPERTY
+from openpype.hosts.blender.api import plugin


 logger = logging.getLogger("openpype").getChild(
     "blender").getChild("load_animation")


-class BlendAnimationLoader(openpype.hosts.blender.api.plugin.AssetLoader):
+class BlendAnimationLoader(plugin.AssetLoader):
     """Load animations from a .blend file.

     Warning:
@@ -29,67 +28,6 @@
     icon = "code-fork"
     color = "orange"

-    def _remove(self, objects, lib_container):
-        for obj in list(objects):
-            if obj.type == 'ARMATURE':
-                bpy.data.armatures.remove(obj.data)
-            elif obj.type == 'MESH':
-                bpy.data.meshes.remove(obj.data)
-
-        bpy.data.collections.remove(bpy.data.collections[lib_container])
-
-    def _process(self, libpath, lib_container, container_name):
-
-        relative = bpy.context.preferences.filepaths.use_relative_paths
-        with bpy.data.libraries.load(
-            libpath, link=True, relative=relative
-        ) as (_, data_to):
-            data_to.collections = [lib_container]
-
-        scene = bpy.context.scene
-
-        scene.collection.children.link(bpy.data.collections[lib_container])
-
-        anim_container = scene.collection.children[lib_container].make_local()
-
-        meshes = [obj for obj in anim_container.objects if obj.type == 'MESH']
-        armatures = [
-            obj for obj in anim_container.objects if obj.type == 'ARMATURE']
-
-        # Should check if there is only an armature?
-
-        objects_list = []
-
-        # Link meshes first, then armatures.
-        # The armature is unparented for all the non-local meshes,
-        # when it is made local.
-        for obj in meshes + armatures:
-
-            obj = obj.make_local()
-
-            obj.data.make_local()
-
-            anim_data = obj.animation_data
-
-            if anim_data is not None and anim_data.action is not None:
-
-                anim_data.action.make_local()
-
-            if not obj.get(blender.pipeline.AVALON_PROPERTY):
-
-                obj[blender.pipeline.AVALON_PROPERTY] = dict()
-
-            avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
-            avalon_info.update({"container_name": container_name})
-
-            objects_list.append(obj)
-
-        anim_container.pop(blender.pipeline.AVALON_PROPERTY)
-
-        bpy.ops.object.select_all(action='DESELECT')
-
-        return objects_list
-
     def process_asset(
         self, context: dict, name: str, namespace: Optional[str] = None,
         options: Optional[Dict] = None
     ) -> Optional[List]:
@@ -101,148 +39,32 @@ def process_asset(
         context: Full parenthood of representation to load
         options: Additional settings dictionary
         """
-        libpath = self.fname
-        asset = context["asset"]["name"]
-        subset = context["subset"]["name"]
-        lib_container = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
-        container_name = openpype.hosts.blender.api.plugin.asset_name(
-            asset, subset, namespace
-        )
-
-        container = bpy.data.collections.new(lib_container)
-        container.name = container_name
-        blender.pipeline.containerise_existing(
-            container,
-            name,
-            namespace,
-            context,
-            self.__class__.__name__,
-        )
-
-        container_metadata = container.get(
-            blender.pipeline.AVALON_PROPERTY)
-
-        container_metadata["libpath"] = libpath
-        container_metadata["lib_container"] = lib_container
-
-        objects_list = self._process(
-            libpath, lib_container, container_name)
-
-        # Save the list of objects in the metadata container
-        container_metadata["objects"] = objects_list
-
-        nodes = list(container.objects)
-        nodes.append(container)
-        self[:] = nodes
-        return nodes
-
-    def update(self, container: Dict, representation: Dict):
-        """Update the loaded asset.
-
-        This will remove all objects of the current collection, load the new
-        ones and add them to the collection.
-        If the objects of the collection are used in another collection they
-        will not be removed, only unlinked. Normally this should not be the
-        case though.
-
-        Warning:
-            No nested collections are supported at the moment!
-        """
-
-        collection = bpy.data.collections.get(
-            container["objectName"]
-        )
-
-        libpath = Path(api.get_representation_path(representation))
-        extension = libpath.suffix.lower()
-
-        logger.info(
-            "Container: %s\nRepresentation: %s",
-            pformat(container, indent=2),
-            pformat(representation, indent=2),
-        )
-
-        assert collection, (
-            f"The asset is not loaded: {container['objectName']}"
-        )
-        assert not (collection.children), (
-            "Nested collections are not supported."
-        )
-        assert libpath, (
-            "No existing library file found for {container['objectName']}"
-        )
-        assert libpath.is_file(), (
-            f"The file doesn't exist: {libpath}"
-        )
-        assert extension in openpype.hosts.blender.api.plugin.VALID_EXTENSIONS, (
-            f"Unsupported file: {libpath}"
-        )
-
-        collection_metadata = collection.get(
-            blender.pipeline.AVALON_PROPERTY)
-
-        collection_libpath = collection_metadata["libpath"]
-        normalized_collection_libpath = (
-            str(Path(bpy.path.abspath(collection_libpath)).resolve())
-        )
-        normalized_libpath = (
-            str(Path(bpy.path.abspath(str(libpath))).resolve())
-        )
-        logger.debug(
-            "normalized_collection_libpath:\n  %s\nnormalized_libpath:\n  %s",
-            normalized_collection_libpath,
-            normalized_libpath,
-        )
-        if normalized_collection_libpath == normalized_libpath:
-            logger.info("Library already loaded, not updating...")
-            return
-
-        objects = collection_metadata["objects"]
-        lib_container = collection_metadata["lib_container"]
-
-        self._remove(objects, lib_container)
-
-        objects_list = self._process(
-            str(libpath), lib_container, collection.name)
-
-        # Save the list of objects in the metadata container
-        collection_metadata["objects"] = objects_list
-        collection_metadata["libpath"] = str(libpath)
-        collection_metadata["representation"] = str(representation["_id"])
-
-        bpy.ops.object.select_all(action='DESELECT')
-
-    def remove(self, container: Dict) -> bool:
-        """Remove an existing container from a Blender scene.

-        Arguments:
-            container (openpype:container-1.0): Container to remove,
-                from `host.ls()`.
+        with bpy.data.libraries.load(
+            libpath, link=True, relative=False
+        ) as (data_from, data_to):
+            data_to.objects = data_from.objects
+            data_to.actions = data_from.actions

-        Returns:
-            bool: Whether the container was deleted.
+        container = data_to.objects[0]

-        Warning:
-            No nested collections are supported at the moment!
-        """
+        assert container, "No asset group found"

-        collection = bpy.data.collections.get(
-            container["objectName"]
-        )
-        if not collection:
-            return False
-        assert not (collection.children), (
-            "Nested collections are not supported."
-        )
+        target_namespace = container.get(AVALON_PROPERTY).get('namespace')

-        collection_metadata = collection.get(
-            blender.pipeline.AVALON_PROPERTY)
-        objects = collection_metadata["objects"]
-        lib_container = collection_metadata["lib_container"]
+        action = data_to.actions[0].make_local().copy()

-        self._remove(objects, lib_container)
+        for obj in bpy.data.objects:
+            if obj.get(AVALON_PROPERTY) and obj.get(AVALON_PROPERTY).get(
+                    'namespace') == target_namespace:
+                if obj.children:
+                    if not obj.children[0].animation_data:
+                        obj.children[0].animation_data_create()
+                    obj.children[0].animation_data.action = action
+                break

-        bpy.data.collections.remove(collection)
+        bpy.data.objects.remove(container)

-        return True
+        library = bpy.data.libraries.get(bpy.path.basename(libpath))
+        bpy.data.libraries.remove(library)
diff --git a/openpype/hosts/blender/plugins/load/load_fbx.py b/openpype/hosts/blender/plugins/load/load_fbx.py
new file mode 100644
index 00000000000..b80dc69adc6
--- /dev/null
+++ b/openpype/hosts/blender/plugins/load/load_fbx.py
@@ -0,0 +1,277 @@
+"""Load an asset in Blender from an FBX file."""
+
+from pathlib import Path
+from pprint import pformat
+from typing import Dict, List, Optional
+
+import bpy
+
+from avalon import api
+from avalon.blender import lib
+from avalon.blender.pipeline import AVALON_CONTAINERS
+from avalon.blender.pipeline import AVALON_CONTAINER_ID
+from avalon.blender.pipeline import AVALON_PROPERTY
+from openpype.hosts.blender.api import plugin
+
+
+class FbxModelLoader(plugin.AssetLoader):
+    """Load FBX models.
+
+    Stores the imported asset in an empty named after the asset.
+    """
+
+    families = ["model", "rig"]
+    representations = ["fbx"]
+
+    label = "Load FBX"
+    icon = "code-fork"
+    color = "orange"
+
+    def _remove(self, asset_group):
+        objects = list(asset_group.children)
+
+        for obj in objects:
+            if obj.type == 'MESH':
+                for material_slot in list(obj.material_slots):
+                    if material_slot.material:
+                        bpy.data.materials.remove(material_slot.material)
+                bpy.data.meshes.remove(obj.data)
+            elif obj.type == 'ARMATURE':
+                objects.extend(obj.children)
+                bpy.data.armatures.remove(obj.data)
+            elif obj.type == 'CURVE':
+                bpy.data.curves.remove(obj.data)
+            elif obj.type == 'EMPTY':
+                objects.extend(obj.children)
+                bpy.data.objects.remove(obj)
+
+    def _process(self, libpath, asset_group, group_name, action):
+        bpy.ops.object.select_all(action='DESELECT')
+
+        collection = bpy.context.view_layer.active_layer_collection.collection
+
+        bpy.ops.import_scene.fbx(filepath=libpath)
+
+        parent = bpy.context.scene.collection
+
+        imported = lib.get_selection()
+
+        empties = [obj for obj in imported if obj.type == 'EMPTY']
+
+        container = None
+
+        for empty in empties:
+            if not empty.parent:
+                container = empty
+                break
+
+        assert container, "No asset group found"
+
+        # Children must be linked before parents,
+        # otherwise the hierarchy will break
+        objects = []
+        nodes = list(container.children)
+
+        for obj in nodes:
+            obj.parent = asset_group
+
+        bpy.data.objects.remove(container)
+
+        for obj in nodes:
+            objects.append(obj)
+            nodes.extend(list(obj.children))
+
+        objects.reverse()
+
+        for obj in objects:
+            parent.objects.link(obj)
+            collection.objects.unlink(obj)
+
+        for obj in objects:
+            name = obj.name
+            obj.name = f"{group_name}:{name}"
+            if obj.type != 'EMPTY':
+                name_data = obj.data.name
+                obj.data.name = f"{group_name}:{name_data}"
+
+                if obj.type == 'MESH':
+                    for material_slot in obj.material_slots:
+                        name_mat = material_slot.material.name
+                        material_slot.material.name = f"{group_name}:{name_mat}"
+                elif obj.type == 'ARMATURE':
+                    anim_data = obj.animation_data
+                    if action is not None:
+                        anim_data.action = action
+                    elif anim_data.action is not None:
+                        name_action = anim_data.action.name
+                        anim_data.action.name = f"{group_name}:{name_action}"
+
+            if not obj.get(AVALON_PROPERTY):
+                obj[AVALON_PROPERTY] = dict()
+
+            avalon_info = obj[AVALON_PROPERTY]
+            avalon_info.update({"container_name": group_name})
+
+        bpy.ops.object.select_all(action='DESELECT')
+
+        return objects
+
+    def process_asset(
+        self, context: dict, name: str, namespace: Optional[str] = None,
+        options: Optional[Dict] = None
+    ) -> Optional[List]:
+        """
+        Arguments:
+            name: Use pre-defined name
+            namespace: Use pre-defined namespace
+            context: Full parenthood of representation to load
+            options: Additional settings dictionary
+        """
+        libpath = self.fname
+        asset = context["asset"]["name"]
+        subset = context["subset"]["name"]
+
+        asset_name = plugin.asset_name(asset, subset)
+        unique_number = plugin.get_unique_number(asset, subset)
+        group_name = plugin.asset_name(asset, subset, unique_number)
+        namespace = namespace or f"{asset}_{unique_number}"
+
+        avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
+        if not avalon_container:
+            avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
+            bpy.context.scene.collection.children.link(avalon_container)
+
+        asset_group = bpy.data.objects.new(group_name, object_data=None)
+        avalon_container.objects.link(asset_group)
+
+        objects = self._process(libpath, asset_group, group_name, None)
+
+        objects = []
+        nodes = list(asset_group.children)
+
+        for obj in nodes:
+            objects.append(obj)
+            nodes.extend(list(obj.children))
+
+        bpy.context.scene.collection.objects.link(asset_group)
+
+        asset_group[AVALON_PROPERTY] = {
+            "schema": "openpype:container-2.0",
+            "id": AVALON_CONTAINER_ID,
+            "name": name,
+            "namespace": namespace or '',
+            "loader": str(self.__class__.__name__),
+            "representation": str(context["representation"]["_id"]),
+            "libpath": libpath,
+            "asset_name": asset_name,
+            "parent": str(context["representation"]["parent"]),
+            "family": context["representation"]["context"]["family"],
+            "objectName": group_name
+        }
+
+        self[:] = objects
+        return objects
+
+    def exec_update(self, container: Dict, representation: Dict):
+        """Update the loaded asset.
+
+        This will remove all objects of the current collection, load the new
+        ones and add them to the collection.
+        If the objects of the collection are used in another collection they
+        will not be removed, only unlinked. Normally this should not be the
+        case though.
+
+        Warning:
+            No nested collections are supported at the moment!
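+
+        Note:
+            The asset group's local transform (matrix_basis) is
+            preserved across the update.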
+ """ + object_name = container["objectName"] + asset_group = bpy.data.objects.get(object_name) + libpath = Path(api.get_representation_path(representation)) + extension = libpath.suffix.lower() + + self.log.info( + "Container: %s\nRepresentation: %s", + pformat(container, indent=2), + pformat(representation, indent=2), + ) + + assert asset_group, ( + f"The asset is not loaded: {container['objectName']}" + ) + assert libpath, ( + "No existing library file found for {container['objectName']}" + ) + assert libpath.is_file(), ( + f"The file doesn't exist: {libpath}" + ) + assert extension in plugin.VALID_EXTENSIONS, ( + f"Unsupported file: {libpath}" + ) + + metadata = asset_group.get(AVALON_PROPERTY) + group_libpath = metadata["libpath"] + + normalized_group_libpath = ( + str(Path(bpy.path.abspath(group_libpath)).resolve()) + ) + normalized_libpath = ( + str(Path(bpy.path.abspath(str(libpath))).resolve()) + ) + self.log.debug( + "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s", + normalized_group_libpath, + normalized_libpath, + ) + if normalized_group_libpath == normalized_libpath: + self.log.info("Library already loaded, not updating...") + return + + # Get the armature of the rig + objects = asset_group.children + armatures = [obj for obj in objects if obj.type == 'ARMATURE'] + action = None + + if armatures: + armature = armatures[0] + + if armature.animation_data and armature.animation_data.action: + action = armature.animation_data.action + + mat = asset_group.matrix_basis.copy() + self._remove(asset_group) + + self._process(str(libpath), asset_group, object_name, action) + + asset_group.matrix_basis = mat + + metadata["libpath"] = str(libpath) + metadata["representation"] = str(representation["_id"]) + + def exec_remove(self, container: Dict) -> bool: + """Remove an existing container from a Blender scene. + + Arguments: + container (openpype:container-1.0): Container to remove, + from `host.ls()`. + + Returns: + bool: Whether the container was deleted. + + Warning: + No nested collections are supported at the moment! 
+ """ + object_name = container["objectName"] + asset_group = bpy.data.objects.get(object_name) + + if not asset_group: + return False + + self._remove(asset_group) + + bpy.data.objects.remove(asset_group) + + return True diff --git a/openpype/hosts/blender/plugins/load/load_layout.py b/openpype/hosts/blender/plugins/load/load_layout.py deleted file mode 100644 index 2092be9139c..00000000000 --- a/openpype/hosts/blender/plugins/load/load_layout.py +++ /dev/null @@ -1,664 +0,0 @@ -"""Load a layout in Blender.""" - -import json -from logging import log, warning -import math - -import logging -from pathlib import Path -from pprint import pformat -from typing import Dict, List, Optional - -from avalon import api, blender, pipeline -import bpy -import openpype.hosts.blender.api.plugin as plugin -from openpype.lib import get_creator_by_name - - -class BlendLayoutLoader(plugin.AssetLoader): - """Load layout from a .blend file.""" - - families = ["layout"] - representations = ["blend"] - - label = "Link Layout" - icon = "code-fork" - color = "orange" - - def _remove(self, objects, obj_container): - for obj in list(objects): - if obj.type == 'ARMATURE': - bpy.data.armatures.remove(obj.data) - elif obj.type == 'MESH': - bpy.data.meshes.remove(obj.data) - elif obj.type == 'CAMERA': - bpy.data.cameras.remove(obj.data) - elif obj.type == 'CURVE': - bpy.data.curves.remove(obj.data) - - for element_container in obj_container.children: - for child in element_container.children: - bpy.data.collections.remove(child) - bpy.data.collections.remove(element_container) - - bpy.data.collections.remove(obj_container) - - def _process(self, libpath, lib_container, container_name, actions): - relative = bpy.context.preferences.filepaths.use_relative_paths - with bpy.data.libraries.load( - libpath, link=True, relative=relative - ) as (_, data_to): - data_to.collections = [lib_container] - - scene = bpy.context.scene - - scene.collection.children.link(bpy.data.collections[lib_container]) - - layout_container = scene.collection.children[lib_container].make_local() - layout_container.name = container_name - - objects_local_types = ['MESH', 'CAMERA', 'CURVE'] - - objects = [] - armatures = [] - - containers = list(layout_container.children) - - for container in layout_container.children: - if container.name == blender.pipeline.AVALON_CONTAINERS: - containers.remove(container) - - for container in containers: - container.make_local() - objects.extend([ - obj for obj in container.objects - if obj.type in objects_local_types - ]) - armatures.extend([ - obj for obj in container.objects - if obj.type == 'ARMATURE' - ]) - containers.extend(list(container.children)) - - # Link meshes first, then armatures. - # The armature is unparented for all the non-local meshes, - # when it is made local. 
- for obj in objects + armatures: - local_obj = obj.make_local() - if obj.data: - obj.data.make_local() - - if not local_obj.get(blender.pipeline.AVALON_PROPERTY): - local_obj[blender.pipeline.AVALON_PROPERTY] = dict() - - avalon_info = local_obj[blender.pipeline.AVALON_PROPERTY] - avalon_info.update({"container_name": container_name}) - - action = actions.get(local_obj.name, None) - - if local_obj.type == 'ARMATURE' and action is not None: - local_obj.animation_data.action = action - - layout_container.pop(blender.pipeline.AVALON_PROPERTY) - - bpy.ops.object.select_all(action='DESELECT') - - return layout_container - - def process_asset( - self, context: dict, name: str, namespace: Optional[str] = None, - options: Optional[Dict] = None - ) -> Optional[List]: - """ - Arguments: - name: Use pre-defined name - namespace: Use pre-defined namespace - context: Full parenthood of representation to load - options: Additional settings dictionary - """ - - libpath = self.fname - asset = context["asset"]["name"] - subset = context["subset"]["name"] - lib_container = plugin.asset_name( - asset, subset - ) - unique_number = plugin.get_unique_number( - asset, subset - ) - namespace = namespace or f"{asset}_{unique_number}" - container_name = plugin.asset_name( - asset, subset, unique_number - ) - - container = bpy.data.collections.new(lib_container) - container.name = container_name - blender.pipeline.containerise_existing( - container, - name, - namespace, - context, - self.__class__.__name__, - ) - - container_metadata = container.get( - blender.pipeline.AVALON_PROPERTY) - - container_metadata["libpath"] = libpath - container_metadata["lib_container"] = lib_container - - obj_container = self._process( - libpath, lib_container, container_name, {}) - - container_metadata["obj_container"] = obj_container - - # Save the list of objects in the metadata container - container_metadata["objects"] = obj_container.all_objects - - # nodes = list(container.objects) - # nodes.append(container) - nodes = [container] - self[:] = nodes - return nodes - - def update(self, container: Dict, representation: Dict): - """Update the loaded asset. - - This will remove all objects of the current collection, load the new - ones and add them to the collection. - If the objects of the collection are used in another collection they - will not be removed, only unlinked. Normally this should not be the - case though. - - Warning: - No nested collections are supported at the moment! - """ - collection = bpy.data.collections.get( - container["objectName"] - ) - - libpath = Path(api.get_representation_path(representation)) - extension = libpath.suffix.lower() - - self.log.info( - "Container: %s\nRepresentation: %s", - pformat(container, indent=2), - pformat(representation, indent=2), - ) - - assert collection, ( - f"The asset is not loaded: {container['objectName']}" - ) - assert not (collection.children), ( - "Nested collections are not supported." 
- ) - assert libpath, ( - "No existing library file found for {container['objectName']}" - ) - assert libpath.is_file(), ( - f"The file doesn't exist: {libpath}" - ) - assert extension in plugin.VALID_EXTENSIONS, ( - f"Unsupported file: {libpath}" - ) - - collection_metadata = collection.get( - blender.pipeline.AVALON_PROPERTY) - collection_libpath = collection_metadata["libpath"] - objects = collection_metadata["objects"] - lib_container = collection_metadata["lib_container"] - obj_container = collection_metadata["obj_container"] - - normalized_collection_libpath = ( - str(Path(bpy.path.abspath(collection_libpath)).resolve()) - ) - normalized_libpath = ( - str(Path(bpy.path.abspath(str(libpath))).resolve()) - ) - self.log.debug( - "normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s", - normalized_collection_libpath, - normalized_libpath, - ) - if normalized_collection_libpath == normalized_libpath: - self.log.info("Library already loaded, not updating...") - return - - actions = {} - - for obj in objects: - if obj.type == 'ARMATURE': - if obj.animation_data and obj.animation_data.action: - actions[obj.name] = obj.animation_data.action - - self._remove(objects, obj_container) - - obj_container = self._process( - str(libpath), lib_container, collection.name, actions) - - # Save the list of objects in the metadata container - collection_metadata["obj_container"] = obj_container - collection_metadata["objects"] = obj_container.all_objects - collection_metadata["libpath"] = str(libpath) - collection_metadata["representation"] = str(representation["_id"]) - - bpy.ops.object.select_all(action='DESELECT') - - def remove(self, container: Dict) -> bool: - """Remove an existing container from a Blender scene. - - Arguments: - container (openpype:container-1.0): Container to remove, - from `host.ls()`. - - Returns: - bool: Whether the container was deleted. - - Warning: - No nested collections are supported at the moment! - """ - - collection = bpy.data.collections.get( - container["objectName"] - ) - if not collection: - return False - assert not (collection.children), ( - "Nested collections are not supported." 
- ) - - collection_metadata = collection.get( - blender.pipeline.AVALON_PROPERTY) - objects = collection_metadata["objects"] - obj_container = collection_metadata["obj_container"] - - self._remove(objects, obj_container) - - bpy.data.collections.remove(collection) - - return True - - -class UnrealLayoutLoader(plugin.AssetLoader): - """Load layout published from Unreal.""" - - families = ["layout"] - representations = ["json"] - - label = "Link Layout" - icon = "code-fork" - color = "orange" - - animation_creator_name = "CreateAnimation" - - def _remove_objects(self, objects): - for obj in list(objects): - if obj.type == 'ARMATURE': - bpy.data.armatures.remove(obj.data) - elif obj.type == 'MESH': - bpy.data.meshes.remove(obj.data) - elif obj.type == 'CAMERA': - bpy.data.cameras.remove(obj.data) - elif obj.type == 'CURVE': - bpy.data.curves.remove(obj.data) - else: - self.log.error( - f"Object {obj.name} of type {obj.type} not recognized.") - - def _remove_collections(self, collection): - if collection.children: - for child in collection.children: - self._remove_collections(child) - bpy.data.collections.remove(child) - - def _remove(self, layout_container): - layout_container_metadata = layout_container.get( - blender.pipeline.AVALON_PROPERTY) - - if layout_container.children: - for child in layout_container.children: - child_container = child.get(blender.pipeline.AVALON_PROPERTY) - child_container['objectName'] = child.name - api.remove(child_container) - - for c in bpy.data.collections: - metadata = c.get('avalon') - if metadata: - print("metadata.get('id')") - print(metadata.get('id')) - if metadata and metadata.get('id') == 'pyblish.avalon.instance': - print("metadata.get('dependencies')") - print(metadata.get('dependencies')) - print("layout_container_metadata.get('representation')") - print(layout_container_metadata.get('representation')) - if metadata.get('dependencies') == layout_container_metadata.get('representation'): - - for child in c.children: - bpy.data.collections.remove(child) - bpy.data.collections.remove(c) - break - - def _get_loader(self, loaders, family): - name = "" - if family == 'rig': - name = "BlendRigLoader" - elif family == 'model': - name = "BlendModelLoader" - - if name == "": - return None - - for loader in loaders: - if loader.__name__ == name: - return loader - - return None - - def set_transform(self, obj, transform): - location = transform.get('translation') - rotation = transform.get('rotation') - scale = transform.get('scale') - - # Y position is inverted in sign because Unreal and Blender have the - # Y axis mirrored - obj.location = ( - location.get('x'), - location.get('y'), - location.get('z') - ) - obj.rotation_euler = ( - rotation.get('x'), - rotation.get('y'), - rotation.get('z') - ) - obj.scale = ( - scale.get('x'), - scale.get('y'), - scale.get('z') - ) - - def _process( - self, libpath, layout_container, container_name, representation, - actions, parent_collection - ): - with open(libpath, "r") as fp: - data = json.load(fp) - - scene = bpy.context.scene - layout_collection = bpy.data.collections.new(container_name) - scene.collection.children.link(layout_collection) - - parent = parent_collection - - if parent is None: - parent = scene.collection - - all_loaders = api.discover(api.Loader) - - avalon_container = bpy.data.collections.get( - blender.pipeline.AVALON_CONTAINERS) - - for element in data: - reference = element.get('reference') - family = element.get('family') - - loaders = api.loaders_from_representation(all_loaders, reference) - 
loader = self._get_loader(loaders, family) - - if not loader: - continue - - instance_name = element.get('instance_name') - - element_container = api.load( - loader, - reference, - namespace=instance_name - ) - - if not element_container: - continue - - avalon_container.children.unlink(element_container) - layout_container.children.link(element_container) - - element_metadata = element_container.get( - blender.pipeline.AVALON_PROPERTY) - - # Unlink the object's collection from the scene collection and - # link it in the layout collection - element_collection = element_metadata.get('obj_container') - scene.collection.children.unlink(element_collection) - layout_collection.children.link(element_collection) - - objects = element_metadata.get('objects') - element_metadata['instance_name'] = instance_name - - objects_to_transform = [] - - creator_plugin = get_creator_by_name(self.animation_creator_name) - if not creator_plugin: - raise ValueError("Creator plugin \"{}\" was not found.".format( - self.animation_creator_name - )) - - if family == 'rig': - for o in objects: - if o.type == 'ARMATURE': - objects_to_transform.append(o) - # Create an animation subset for each rig - o.select_set(True) - asset = api.Session["AVALON_ASSET"] - c = api.create( - creator_plugin, - name="animation_" + element_collection.name, - asset=asset, - options={"useSelection": True}, - data={"dependencies": representation}) - scene.collection.children.unlink(c) - parent.children.link(c) - o.select_set(False) - break - elif family == 'model': - objects_to_transform = objects - - for o in objects_to_transform: - self.set_transform(o, element.get('transform')) - - if actions: - if o.type == 'ARMATURE': - action = actions.get(instance_name, None) - - if action: - if o.animation_data is None: - o.animation_data_create() - o.animation_data.action = action - - return layout_collection - - def process_asset(self, - context: dict, - name: str, - namespace: Optional[str] = None, - options: Optional[Dict] = None): - """ - Arguments: - name: Use pre-defined name - namespace: Use pre-defined namespace - context: Full parenthood of representation to load - options: Additional settings dictionary - """ - libpath = self.fname - asset = context["asset"]["name"] - subset = context["subset"]["name"] - lib_container = plugin.asset_name( - asset, subset - ) - unique_number = plugin.get_unique_number( - asset, subset - ) - namespace = namespace or f"{asset}_{unique_number}" - container_name = plugin.asset_name( - asset, subset, unique_number - ) - - layout_container = bpy.data.collections.new(container_name) - blender.pipeline.containerise_existing( - layout_container, - name, - namespace, - context, - self.__class__.__name__, - ) - - container_metadata = layout_container.get( - blender.pipeline.AVALON_PROPERTY) - - container_metadata["libpath"] = libpath - container_metadata["lib_container"] = lib_container - - layout_collection = self._process( - libpath, layout_container, container_name, - str(context["representation"]["_id"]), None, None) - - container_metadata["obj_container"] = layout_collection - - # Save the list of objects in the metadata container - container_metadata["objects"] = layout_collection.all_objects - - nodes = [layout_container] - self[:] = nodes - return nodes - - def update(self, container: Dict, representation: Dict): - """Update the loaded asset. - - This will remove all objects of the current collection, load the new - ones and add them to the collection. 
- If the objects of the collection are used in another collection they - will not be removed, only unlinked. Normally this should not be the - case though. - """ - layout_container = bpy.data.collections.get( - container["objectName"] - ) - if not layout_container: - return False - - libpath = Path(api.get_representation_path(representation)) - extension = libpath.suffix.lower() - - self.log.info( - "Container: %s\nRepresentation: %s", - pformat(container, indent=2), - pformat(representation, indent=2), - ) - - assert layout_container, ( - f"The asset is not loaded: {container['objectName']}" - ) - assert libpath, ( - "No existing library file found for {container['objectName']}" - ) - assert libpath.is_file(), ( - f"The file doesn't exist: {libpath}" - ) - assert extension in plugin.VALID_EXTENSIONS, ( - f"Unsupported file: {libpath}" - ) - - layout_container_metadata = layout_container.get( - blender.pipeline.AVALON_PROPERTY) - collection_libpath = layout_container_metadata["libpath"] - lib_container = layout_container_metadata["lib_container"] - obj_container = plugin.get_local_collection_with_name( - layout_container_metadata["obj_container"].name - ) - objects = obj_container.all_objects - - container_name = obj_container.name - - normalized_collection_libpath = ( - str(Path(bpy.path.abspath(collection_libpath)).resolve()) - ) - normalized_libpath = ( - str(Path(bpy.path.abspath(str(libpath))).resolve()) - ) - self.log.debug( - "normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s", - normalized_collection_libpath, - normalized_libpath, - ) - if normalized_collection_libpath == normalized_libpath: - self.log.info("Library already loaded, not updating...") - return - - actions = {} - - for obj in objects: - if obj.type == 'ARMATURE': - if obj.animation_data and obj.animation_data.action: - obj_cont_name = obj.get( - blender.pipeline.AVALON_PROPERTY).get('container_name') - obj_cont = plugin.get_local_collection_with_name( - obj_cont_name) - element_metadata = obj_cont.get( - blender.pipeline.AVALON_PROPERTY) - instance_name = element_metadata.get('instance_name') - actions[instance_name] = obj.animation_data.action - - self._remove(layout_container) - - bpy.data.collections.remove(obj_container) - - creator_plugin = get_creator_by_name(self.setdress_creator_name) - if not creator_plugin: - raise ValueError("Creator plugin \"{}\" was not found.".format( - self.setdress_creator_name - )) - - parent = api.create( - creator_plugin, - name="animation", - asset=api.Session["AVALON_ASSET"], - options={"useSelection": True}, - data={"dependencies": str(representation["_id"])}) - - layout_collection = self._process( - libpath, layout_container, container_name, - str(representation["_id"]), actions, parent) - - layout_container_metadata["obj_container"] = layout_collection - layout_container_metadata["objects"] = layout_collection.all_objects - layout_container_metadata["libpath"] = str(libpath) - layout_container_metadata["representation"] = str( - representation["_id"]) - - def remove(self, container: Dict) -> bool: - """Remove an existing container from a Blender scene. - - Arguments: - container (openpype:container-1.0): Container to remove, - from `host.ls()`. - - Returns: - bool: Whether the container was deleted. 
- """ - layout_container = bpy.data.collections.get( - container["objectName"] - ) - if not layout_container: - return False - - layout_container_metadata = layout_container.get( - blender.pipeline.AVALON_PROPERTY) - obj_container = plugin.get_local_collection_with_name( - layout_container_metadata["obj_container"].name - ) - - self._remove(layout_container) - - bpy.data.collections.remove(obj_container) - bpy.data.collections.remove(layout_container) - - return True diff --git a/openpype/hosts/blender/plugins/load/load_layout_blend.py b/openpype/hosts/blender/plugins/load/load_layout_blend.py new file mode 100644 index 00000000000..85cb4dfbd37 --- /dev/null +++ b/openpype/hosts/blender/plugins/load/load_layout_blend.py @@ -0,0 +1,337 @@ +"""Load a layout in Blender.""" + +from pathlib import Path +from pprint import pformat +from typing import Dict, List, Optional + +import bpy + +from avalon import api +from avalon.blender.pipeline import AVALON_CONTAINERS +from avalon.blender.pipeline import AVALON_CONTAINER_ID +from avalon.blender.pipeline import AVALON_PROPERTY +from openpype.hosts.blender.api import plugin + + +class BlendLayoutLoader(plugin.AssetLoader): + """Load layout from a .blend file.""" + + families = ["layout"] + representations = ["blend"] + + label = "Link Layout" + icon = "code-fork" + color = "orange" + + def _remove(self, asset_group): + objects = list(asset_group.children) + + for obj in objects: + if obj.type == 'MESH': + for material_slot in list(obj.material_slots): + if material_slot.material: + bpy.data.materials.remove(material_slot.material) + bpy.data.meshes.remove(obj.data) + elif obj.type == 'ARMATURE': + objects.extend(obj.children) + bpy.data.armatures.remove(obj.data) + elif obj.type == 'CURVE': + bpy.data.curves.remove(obj.data) + elif obj.type == 'EMPTY': + objects.extend(obj.children) + bpy.data.objects.remove(obj) + + def _remove_asset_and_library(self, asset_group): + libpath = asset_group.get(AVALON_PROPERTY).get('libpath') + + # Check how many assets use the same library + count = 0 + for obj in bpy.data.collections.get(AVALON_CONTAINERS).all_objects: + if obj.get(AVALON_PROPERTY).get('libpath') == libpath: + count += 1 + + self._remove(asset_group) + + bpy.data.objects.remove(asset_group) + + # If it is the last object to use that library, remove it + if count == 1: + library = bpy.data.libraries.get(bpy.path.basename(libpath)) + bpy.data.libraries.remove(library) + + def _process(self, libpath, asset_group, group_name, actions): + with bpy.data.libraries.load( + libpath, link=True, relative=False + ) as (data_from, data_to): + data_to.objects = data_from.objects + + parent = bpy.context.scene.collection + + empties = [obj for obj in data_to.objects if obj.type == 'EMPTY'] + + container = None + + for empty in empties: + if empty.get(AVALON_PROPERTY): + container = empty + break + + assert container, "No asset group found" + + # Children must be linked before parents, + # otherwise the hierarchy will break + objects = [] + nodes = list(container.children) + + for obj in nodes: + obj.parent = asset_group + + for obj in nodes: + objects.append(obj) + nodes.extend(list(obj.children)) + + objects.reverse() + + constraints = [] + + armatures = [obj for obj in objects if obj.type == 'ARMATURE'] + + for armature in armatures: + for bone in armature.pose.bones: + for constraint in bone.constraints: + if hasattr(constraint, 'target'): + constraints.append(constraint) + + for obj in objects: + parent.objects.link(obj) + + for obj in objects: + local_obj = 
plugin.prepare_data(obj, group_name) + + action = None + + if actions: + action = actions.get(local_obj.name, None) + + if local_obj.type == 'MESH': + plugin.prepare_data(local_obj.data, group_name) + + if obj != local_obj: + for constraint in constraints: + if constraint.target == obj: + constraint.target = local_obj + + for material_slot in local_obj.material_slots: + if material_slot.material: + plugin.prepare_data(material_slot.material, group_name) + elif local_obj.type == 'ARMATURE': + plugin.prepare_data(local_obj.data, group_name) + + if action is not None: + local_obj.animation_data.action = action + elif local_obj.animation_data.action is not None: + plugin.prepare_data( + local_obj.animation_data.action, group_name) + + # Set link the drivers to the local object + if local_obj.data.animation_data: + for d in local_obj.data.animation_data.drivers: + for v in d.driver.variables: + for t in v.targets: + t.id = local_obj + + if not local_obj.get(AVALON_PROPERTY): + local_obj[AVALON_PROPERTY] = dict() + + avalon_info = local_obj[AVALON_PROPERTY] + avalon_info.update({"container_name": group_name}) + + objects.reverse() + + bpy.data.orphans_purge(do_local_ids=False) + + bpy.ops.object.select_all(action='DESELECT') + + return objects + + def process_asset( + self, context: dict, name: str, namespace: Optional[str] = None, + options: Optional[Dict] = None + ) -> Optional[List]: + """ + Arguments: + name: Use pre-defined name + namespace: Use pre-defined namespace + context: Full parenthood of representation to load + options: Additional settings dictionary + """ + libpath = self.fname + asset = context["asset"]["name"] + subset = context["subset"]["name"] + + asset_name = plugin.asset_name(asset, subset) + unique_number = plugin.get_unique_number(asset, subset) + group_name = plugin.asset_name(asset, subset, unique_number) + namespace = namespace or f"{asset}_{unique_number}" + + avalon_container = bpy.data.collections.get(AVALON_CONTAINERS) + if not avalon_container: + avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS) + bpy.context.scene.collection.children.link(avalon_container) + + asset_group = bpy.data.objects.new(group_name, object_data=None) + asset_group.empty_display_type = 'SINGLE_ARROW' + avalon_container.objects.link(asset_group) + + objects = self._process(libpath, asset_group, group_name, None) + + for child in asset_group.children: + if child.get(AVALON_PROPERTY): + avalon_container.objects.link(child) + + bpy.context.scene.collection.objects.link(asset_group) + + asset_group[AVALON_PROPERTY] = { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "name": name, + "namespace": namespace or '', + "loader": str(self.__class__.__name__), + "representation": str(context["representation"]["_id"]), + "libpath": libpath, + "asset_name": asset_name, + "parent": str(context["representation"]["parent"]), + "family": context["representation"]["context"]["family"], + "objectName": group_name + } + + self[:] = objects + return objects + + def update(self, container: Dict, representation: Dict): + """Update the loaded asset. + + This will remove all objects of the current collection, load the new + ones and add them to the collection. + If the objects of the collection are used in another collection they + will not be removed, only unlinked. Normally this should not be the + case though. + + Warning: + No nested collections are supported at the moment! 
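The driver fix-up in `_process` above is easy to miss: after `make_local()`, driver variables on the object's data still target the linked original. A distilled sketch of just that step (object/data names assumed from this diff):

```python
def retarget_drivers(local_obj):
    """Point driver variable targets on the object's data back at the
    local object, so the drivers keep evaluating after make_local()."""
    anim = local_obj.data.animation_data
    if not anim:
        return
    for fcurve in anim.drivers:
        for variable in fcurve.driver.variables:
            for target in variable.targets:
                target.id = local_obj
```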
+ """ + object_name = container["objectName"] + asset_group = bpy.data.objects.get(object_name) + libpath = Path(api.get_representation_path(representation)) + extension = libpath.suffix.lower() + + self.log.info( + "Container: %s\nRepresentation: %s", + pformat(container, indent=2), + pformat(representation, indent=2), + ) + + assert asset_group, ( + f"The asset is not loaded: {container['objectName']}" + ) + assert libpath, ( + "No existing library file found for {container['objectName']}" + ) + assert libpath.is_file(), ( + f"The file doesn't exist: {libpath}" + ) + assert extension in plugin.VALID_EXTENSIONS, ( + f"Unsupported file: {libpath}" + ) + + metadata = asset_group.get(AVALON_PROPERTY) + group_libpath = metadata["libpath"] + + normalized_group_libpath = ( + str(Path(bpy.path.abspath(group_libpath)).resolve()) + ) + normalized_libpath = ( + str(Path(bpy.path.abspath(str(libpath))).resolve()) + ) + self.log.debug( + "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s", + normalized_group_libpath, + normalized_libpath, + ) + if normalized_group_libpath == normalized_libpath: + self.log.info("Library already loaded, not updating...") + return + + actions = {} + + for obj in asset_group.children: + obj_meta = obj.get(AVALON_PROPERTY) + if obj_meta.get('family') == 'rig': + rig = None + for child in obj.children: + if child.type == 'ARMATURE': + rig = child + break + if not rig: + raise Exception("No armature in the rig asset group.") + if rig.animation_data and rig.animation_data.action: + instance_name = obj_meta.get('instance_name') + actions[instance_name] = rig.animation_data.action + + mat = asset_group.matrix_basis.copy() + + # Remove the children of the asset_group first + for child in list(asset_group.children): + self._remove_asset_and_library(child) + + # Check how many assets use the same library + count = 0 + for obj in bpy.data.collections.get(AVALON_CONTAINERS).objects: + if obj.get(AVALON_PROPERTY).get('libpath') == group_libpath: + count += 1 + + self._remove(asset_group) + + # If it is the last object to use that library, remove it + if count == 1: + library = bpy.data.libraries.get(bpy.path.basename(group_libpath)) + bpy.data.libraries.remove(library) + + self._process(str(libpath), asset_group, object_name, actions) + + avalon_container = bpy.data.collections.get(AVALON_CONTAINERS) + for child in asset_group.children: + if child.get(AVALON_PROPERTY): + avalon_container.objects.link(child) + + asset_group.matrix_basis = mat + + metadata["libpath"] = str(libpath) + metadata["representation"] = str(representation["_id"]) + + def exec_remove(self, container: Dict) -> bool: + """Remove an existing container from a Blender scene. + + Arguments: + container (openpype:container-1.0): Container to remove, + from `host.ls()`. + + Returns: + bool: Whether the container was deleted. + + Warning: + No nested collections are supported at the moment! 
+ """ + object_name = container["objectName"] + asset_group = bpy.data.objects.get(object_name) + + if not asset_group: + return False + + # Remove the children of the asset_group first + for child in list(asset_group.children): + self._remove_asset_and_library(child) + + self._remove_asset_and_library(asset_group) + + return True diff --git a/openpype/hosts/blender/plugins/load/load_layout_json.py b/openpype/hosts/blender/plugins/load/load_layout_json.py new file mode 100644 index 00000000000..1a4dbbb5cb2 --- /dev/null +++ b/openpype/hosts/blender/plugins/load/load_layout_json.py @@ -0,0 +1,259 @@ +"""Load a layout in Blender.""" + +from pathlib import Path +from pprint import pformat +from typing import Dict, Optional + +import bpy +import json + +from avalon import api +from avalon.blender.pipeline import AVALON_CONTAINERS +from avalon.blender.pipeline import AVALON_CONTAINER_ID +from avalon.blender.pipeline import AVALON_PROPERTY +from avalon.blender.pipeline import AVALON_INSTANCES +from openpype.hosts.blender.api import plugin + + +class JsonLayoutLoader(plugin.AssetLoader): + """Load layout published from Unreal.""" + + families = ["layout"] + representations = ["json"] + + label = "Load Layout" + icon = "code-fork" + color = "orange" + + animation_creator_name = "CreateAnimation" + + def _remove(self, asset_group): + objects = list(asset_group.children) + + for obj in objects: + api.remove(obj.get(AVALON_PROPERTY)) + + def _remove_animation_instances(self, asset_group): + instances = bpy.data.collections.get(AVALON_INSTANCES) + if instances: + for obj in list(asset_group.children): + anim_collection = instances.children.get( + obj.name + "_animation") + if anim_collection: + bpy.data.collections.remove(anim_collection) + + def _get_loader(self, loaders, family): + name = "" + if family == 'rig': + name = "BlendRigLoader" + elif family == 'model': + name = "BlendModelLoader" + + if name == "": + return None + + for loader in loaders: + if loader.__name__ == name: + return loader + + return None + + def _process(self, libpath, asset, asset_group, actions): + bpy.ops.object.select_all(action='DESELECT') + + with open(libpath, "r") as fp: + data = json.load(fp) + + all_loaders = api.discover(api.Loader) + + for element in data: + reference = element.get('reference') + family = element.get('family') + + loaders = api.loaders_from_representation(all_loaders, reference) + loader = self._get_loader(loaders, family) + + if not loader: + continue + + instance_name = element.get('instance_name') + + action = None + + if actions: + action = actions.get(instance_name, None) + + options = { + 'parent': asset_group, + 'transform': element.get('transform'), + 'action': action, + 'create_animation': True if family == 'rig' else False, + 'animation_asset': asset + } + + # This should return the loaded asset, but the load call will be + # added to the queue to run in the Blender main thread, so + # at this time it will not return anything. The assets will be + # loaded in the next Blender cycle, so we use the options to + # set the transform, parent and assign the action, if there is one. 
+ api.load( + loader, + reference, + namespace=instance_name, + options=options + ) + + def process_asset(self, + context: dict, + name: str, + namespace: Optional[str] = None, + options: Optional[Dict] = None): + """ + Arguments: + name: Use pre-defined name + namespace: Use pre-defined namespace + context: Full parenthood of representation to load + options: Additional settings dictionary + """ + libpath = self.fname + asset = context["asset"]["name"] + subset = context["subset"]["name"] + + asset_name = plugin.asset_name(asset, subset) + unique_number = plugin.get_unique_number(asset, subset) + group_name = plugin.asset_name(asset, subset, unique_number) + namespace = namespace or f"{asset}_{unique_number}" + + avalon_container = bpy.data.collections.get(AVALON_CONTAINERS) + if not avalon_container: + avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS) + bpy.context.scene.collection.children.link(avalon_container) + + asset_group = bpy.data.objects.new(group_name, object_data=None) + asset_group.empty_display_type = 'SINGLE_ARROW' + avalon_container.objects.link(asset_group) + + self._process(libpath, asset, asset_group, None) + + bpy.context.scene.collection.objects.link(asset_group) + + asset_group[AVALON_PROPERTY] = { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "name": name, + "namespace": namespace or '', + "loader": str(self.__class__.__name__), + "representation": str(context["representation"]["_id"]), + "libpath": libpath, + "asset_name": asset_name, + "parent": str(context["representation"]["parent"]), + "family": context["representation"]["context"]["family"], + "objectName": group_name + } + + self[:] = asset_group.children + return asset_group.children + + def exec_update(self, container: Dict, representation: Dict): + """Update the loaded asset. + + This will remove all objects of the current collection, load the new + ones and add them to the collection. + If the objects of the collection are used in another collection they + will not be removed, only unlinked. Normally this should not be the + case though. 
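Every loader in this PR stamps the same `openpype:container-2.0` dictionary onto its asset group; a hedged reader-side sketch (the key list is taken from the dicts in this diff, `parse_container` itself is a hypothetical helper):

```python
from avalon.blender.pipeline import AVALON_PROPERTY

CONTAINER_KEYS = (
    "schema", "id", "name", "namespace", "loader", "representation",
    "libpath", "asset_name", "parent", "family", "objectName",
)


def parse_container(asset_group):
    """Return the container metadata stored on an asset group empty."""
    data = dict(asset_group.get(AVALON_PROPERTY) or {})
    missing = [key for key in CONTAINER_KEYS if key not in data]
    assert not missing, f"Container is missing keys: {missing}"
    return data
```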
+ """ + object_name = container["objectName"] + asset_group = bpy.data.objects.get(object_name) + libpath = Path(api.get_representation_path(representation)) + extension = libpath.suffix.lower() + + self.log.info( + "Container: %s\nRepresentation: %s", + pformat(container, indent=2), + pformat(representation, indent=2), + ) + + assert asset_group, ( + f"The asset is not loaded: {container['objectName']}" + ) + assert libpath, ( + "No existing library file found for {container['objectName']}" + ) + assert libpath.is_file(), ( + f"The file doesn't exist: {libpath}" + ) + assert extension in plugin.VALID_EXTENSIONS, ( + f"Unsupported file: {libpath}" + ) + + metadata = asset_group.get(AVALON_PROPERTY) + group_libpath = metadata["libpath"] + + normalized_group_libpath = ( + str(Path(bpy.path.abspath(group_libpath)).resolve()) + ) + normalized_libpath = ( + str(Path(bpy.path.abspath(str(libpath))).resolve()) + ) + self.log.debug( + "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s", + normalized_group_libpath, + normalized_libpath, + ) + if normalized_group_libpath == normalized_libpath: + self.log.info("Library already loaded, not updating...") + return + + actions = {} + + for obj in asset_group.children: + obj_meta = obj.get(AVALON_PROPERTY) + if obj_meta.get('family') == 'rig': + rig = None + for child in obj.children: + if child.type == 'ARMATURE': + rig = child + break + if not rig: + raise Exception("No armature in the rig asset group.") + if rig.animation_data and rig.animation_data.action: + namespace = obj_meta.get('namespace') + actions[namespace] = rig.animation_data.action + + mat = asset_group.matrix_basis.copy() + + self._remove_animation_instances(asset_group) + + self._remove(asset_group) + + self._process(str(libpath), asset_group, actions) + + asset_group.matrix_basis = mat + + metadata["libpath"] = str(libpath) + metadata["representation"] = str(representation["_id"]) + + def exec_remove(self, container: Dict) -> bool: + """Remove an existing container from a Blender scene. + + Arguments: + container (openpype:container-1.0): Container to remove, + from `host.ls()`. + + Returns: + bool: Whether the container was deleted. 
+ """ + object_name = container["objectName"] + asset_group = bpy.data.objects.get(object_name) + + if not asset_group: + return False + + self._remove_animation_instances(asset_group) + + self._remove(asset_group) + + bpy.data.objects.remove(asset_group) + + return True diff --git a/openpype/hosts/blender/plugins/load/load_model.py b/openpype/hosts/blender/plugins/load/load_model.py index 35a241b98e6..af5591c2998 100644 --- a/openpype/hosts/blender/plugins/load/load_model.py +++ b/openpype/hosts/blender/plugins/load/load_model.py @@ -1,13 +1,16 @@ """Load a model asset in Blender.""" -import logging from pathlib import Path from pprint import pformat from typing import Dict, List, Optional -from avalon import api, blender import bpy -import openpype.hosts.blender.api.plugin as plugin + +from avalon import api +from avalon.blender.pipeline import AVALON_CONTAINERS +from avalon.blender.pipeline import AVALON_CONTAINER_ID +from avalon.blender.pipeline import AVALON_PROPERTY +from openpype.hosts.blender.api import plugin class BlendModelLoader(plugin.AssetLoader): @@ -24,52 +27,75 @@ class BlendModelLoader(plugin.AssetLoader): icon = "code-fork" color = "orange" - def _remove(self, objects, container): - for obj in list(objects): - for material_slot in list(obj.material_slots): - bpy.data.materials.remove(material_slot.material) - bpy.data.meshes.remove(obj.data) + def _remove(self, asset_group): + objects = list(asset_group.children) - bpy.data.collections.remove(container) + for obj in objects: + if obj.type == 'MESH': + for material_slot in list(obj.material_slots): + bpy.data.materials.remove(material_slot.material) + bpy.data.meshes.remove(obj.data) + elif obj.type == 'EMPTY': + objects.extend(obj.children) + bpy.data.objects.remove(obj) - def _process( - self, libpath, lib_container, container_name, - parent_collection - ): - relative = bpy.context.preferences.filepaths.use_relative_paths + def _process(self, libpath, asset_group, group_name): with bpy.data.libraries.load( - libpath, link=True, relative=relative - ) as (_, data_to): - data_to.collections = [lib_container] + libpath, link=True, relative=False + ) as (data_from, data_to): + data_to.objects = data_from.objects + + parent = bpy.context.scene.collection + + empties = [obj for obj in data_to.objects if obj.type == 'EMPTY'] + + container = None + + for empty in empties: + if empty.get(AVALON_PROPERTY): + container = empty + break - parent = parent_collection + assert container, "No asset group found" - if parent is None: - parent = bpy.context.scene.collection + # Children must be linked before parents, + # otherwise the hierarchy will break + objects = [] + nodes = list(container.children) - parent.children.link(bpy.data.collections[lib_container]) + for obj in nodes: + obj.parent = asset_group - model_container = parent.children[lib_container].make_local() - model_container.name = container_name + for obj in nodes: + objects.append(obj) + nodes.extend(list(obj.children)) - for obj in model_container.objects: - local_obj = plugin.prepare_data(obj, container_name) - plugin.prepare_data(local_obj.data, container_name) + objects.reverse() - for material_slot in local_obj.material_slots: - plugin.prepare_data(material_slot.material, container_name) + for obj in objects: + parent.objects.link(obj) - if not obj.get(blender.pipeline.AVALON_PROPERTY): - local_obj[blender.pipeline.AVALON_PROPERTY] = dict() + for obj in objects: + local_obj = plugin.prepare_data(obj, group_name) + if local_obj.type != 'EMPTY': + 
plugin.prepare_data(local_obj.data, group_name) - avalon_info = local_obj[blender.pipeline.AVALON_PROPERTY] - avalon_info.update({"container_name": container_name}) + for material_slot in local_obj.material_slots: + plugin.prepare_data(material_slot.material, group_name) - model_container.pop(blender.pipeline.AVALON_PROPERTY) + if not local_obj.get(AVALON_PROPERTY): + local_obj[AVALON_PROPERTY] = dict() + + avalon_info = local_obj[AVALON_PROPERTY] + avalon_info.update({"container_name": group_name}) + + objects.reverse() + + bpy.data.orphans_purge(do_local_ids=False) bpy.ops.object.select_all(action='DESELECT') - return model_container + return objects def process_asset( self, context: dict, name: str, namespace: Optional[str] = None, @@ -82,54 +108,80 @@ def process_asset( context: Full parenthood of representation to load options: Additional settings dictionary """ - libpath = self.fname asset = context["asset"]["name"] subset = context["subset"]["name"] - lib_container = plugin.asset_name( - asset, subset - ) - unique_number = plugin.get_unique_number( - asset, subset - ) + asset_name = plugin.asset_name(asset, subset) + unique_number = plugin.get_unique_number(asset, subset) + group_name = plugin.asset_name(asset, subset, unique_number) namespace = namespace or f"{asset}_{unique_number}" - container_name = plugin.asset_name( - asset, subset, unique_number - ) - - container = bpy.data.collections.new(lib_container) - container.name = container_name - blender.pipeline.containerise_existing( - container, - name, - namespace, - context, - self.__class__.__name__, - ) - - metadata = container.get(blender.pipeline.AVALON_PROPERTY) - - metadata["libpath"] = libpath - metadata["lib_container"] = lib_container - - obj_container = self._process( - libpath, lib_container, container_name, None) - - metadata["obj_container"] = obj_container - # Save the list of objects in the metadata container - metadata["objects"] = obj_container.all_objects + avalon_container = bpy.data.collections.get(AVALON_CONTAINERS) + if not avalon_container: + avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS) + bpy.context.scene.collection.children.link(avalon_container) - metadata["parent"] = str(context["representation"]["parent"]) - metadata["family"] = context["representation"]["context"]["family"] + asset_group = bpy.data.objects.new(group_name, object_data=None) + asset_group.empty_display_type = 'SINGLE_ARROW' + avalon_container.objects.link(asset_group) - nodes = list(container.objects) - nodes.append(container) - self[:] = nodes - return nodes + bpy.ops.object.select_all(action='DESELECT') - def update(self, container: Dict, representation: Dict): + if options is not None: + parent = options.get('parent') + transform = options.get('transform') + + if parent and transform: + location = transform.get('translation') + rotation = transform.get('rotation') + scale = transform.get('scale') + + asset_group.location = ( + location.get('x'), + location.get('y'), + location.get('z') + ) + asset_group.rotation_euler = ( + rotation.get('x'), + rotation.get('y'), + rotation.get('z') + ) + asset_group.scale = ( + scale.get('x'), + scale.get('y'), + scale.get('z') + ) + + bpy.context.view_layer.objects.active = parent + asset_group.select_set(True) + + bpy.ops.object.parent_set(keep_transform=True) + + bpy.ops.object.select_all(action='DESELECT') + + objects = self._process(libpath, asset_group, group_name) + + bpy.context.scene.collection.objects.link(asset_group) + + asset_group[AVALON_PROPERTY] = { + 
"schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "name": name, + "namespace": namespace or '', + "loader": str(self.__class__.__name__), + "representation": str(context["representation"]["_id"]), + "libpath": libpath, + "asset_name": asset_name, + "parent": str(context["representation"]["parent"]), + "family": context["representation"]["context"]["family"], + "objectName": group_name + } + + self[:] = objects + return objects + + def exec_update(self, container: Dict, representation: Dict): """Update the loaded asset. This will remove all objects of the current collection, load the new @@ -137,13 +189,9 @@ def update(self, container: Dict, representation: Dict): If the objects of the collection are used in another collection they will not be removed, only unlinked. Normally this should not be the case though. - - Warning: - No nested collections are supported at the moment! """ - collection = bpy.data.collections.get( - container["objectName"] - ) + object_name = container["objectName"] + asset_group = bpy.data.objects.get(object_name) libpath = Path(api.get_representation_path(representation)) extension = libpath.suffix.lower() @@ -153,12 +201,9 @@ def update(self, container: Dict, representation: Dict): pformat(representation, indent=2), ) - assert collection, ( + assert asset_group, ( f"The asset is not loaded: {container['objectName']}" ) - assert not (collection.children), ( - "Nested collections are not supported." - ) assert libpath, ( "No existing library file found for {container['objectName']}" ) @@ -169,47 +214,47 @@ def update(self, container: Dict, representation: Dict): f"Unsupported file: {libpath}" ) - collection_metadata = collection.get( - blender.pipeline.AVALON_PROPERTY) - collection_libpath = collection_metadata["libpath"] - lib_container = collection_metadata["lib_container"] + metadata = asset_group.get(AVALON_PROPERTY) + group_libpath = metadata["libpath"] - obj_container = plugin.get_local_collection_with_name( - collection_metadata["obj_container"].name - ) - objects = obj_container.all_objects - - container_name = obj_container.name - - normalized_collection_libpath = ( - str(Path(bpy.path.abspath(collection_libpath)).resolve()) + normalized_group_libpath = ( + str(Path(bpy.path.abspath(group_libpath)).resolve()) ) normalized_libpath = ( str(Path(bpy.path.abspath(str(libpath))).resolve()) ) self.log.debug( - "normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s", - normalized_collection_libpath, + "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s", + normalized_group_libpath, normalized_libpath, ) - if normalized_collection_libpath == normalized_libpath: + if normalized_group_libpath == normalized_libpath: self.log.info("Library already loaded, not updating...") return - parent = plugin.get_parent_collection(obj_container) + # Check how many assets use the same library + count = 0 + for obj in bpy.data.collections.get(AVALON_CONTAINERS).objects: + if obj.get(AVALON_PROPERTY).get('libpath') == group_libpath: + count += 1 + + mat = asset_group.matrix_basis.copy() + + self._remove(asset_group) - self._remove(objects, obj_container) + # If it is the last object to use that library, remove it + if count == 1: + library = bpy.data.libraries.get(bpy.path.basename(group_libpath)) + bpy.data.libraries.remove(library) - obj_container = self._process( - str(libpath), lib_container, container_name, parent) + self._process(str(libpath), asset_group, object_name) - # Save the list of objects in the metadata container - 
collection_metadata["obj_container"] = obj_container - collection_metadata["objects"] = obj_container.all_objects - collection_metadata["libpath"] = str(libpath) - collection_metadata["representation"] = str(representation["_id"]) + asset_group.matrix_basis = mat - def remove(self, container: Dict) -> bool: + metadata["libpath"] = str(libpath) + metadata["representation"] = str(representation["_id"]) + + def exec_remove(self, container: Dict) -> bool: """Remove an existing container from a Blender scene. Arguments: @@ -218,29 +263,27 @@ def remove(self, container: Dict) -> bool: Returns: bool: Whether the container was deleted. - - Warning: - No nested collections are supported at the moment! """ - collection = bpy.data.collections.get( - container["objectName"] - ) - if not collection: - return False - assert not (collection.children), ( - "Nested collections are not supported." - ) + object_name = container["objectName"] + asset_group = bpy.data.objects.get(object_name) + libpath = asset_group.get(AVALON_PROPERTY).get('libpath') - collection_metadata = collection.get( - blender.pipeline.AVALON_PROPERTY) + # Check how many assets use the same library + count = 0 + for obj in bpy.data.collections.get(AVALON_CONTAINERS).objects: + if obj.get(AVALON_PROPERTY).get('libpath') == libpath: + count += 1 - obj_container = plugin.get_local_collection_with_name( - collection_metadata["obj_container"].name - ) - objects = obj_container.all_objects + if not asset_group: + return False + + self._remove(asset_group) - self._remove(objects, obj_container) + bpy.data.objects.remove(asset_group) - bpy.data.collections.remove(collection) + # If it is the last object to use that library, remove it + if count == 1: + library = bpy.data.libraries.get(bpy.path.basename(libpath)) + bpy.data.libraries.remove(library) return True diff --git a/openpype/hosts/blender/plugins/load/load_rig.py b/openpype/hosts/blender/plugins/load/load_rig.py index b6be8f4cf61..5573c081e12 100644 --- a/openpype/hosts/blender/plugins/load/load_rig.py +++ b/openpype/hosts/blender/plugins/load/load_rig.py @@ -1,21 +1,21 @@ """Load a rig asset in Blender.""" -import logging from pathlib import Path from pprint import pformat from typing import Dict, List, Optional -from avalon import api, blender import bpy -import openpype.hosts.blender.api.plugin as plugin +from avalon import api +from avalon.blender.pipeline import AVALON_CONTAINERS +from avalon.blender.pipeline import AVALON_CONTAINER_ID +from avalon.blender.pipeline import AVALON_PROPERTY +from openpype import lib +from openpype.hosts.blender.api import plugin -class BlendRigLoader(plugin.AssetLoader): - """Load rigs from a .blend file. - Because they come from a .blend file we can simply link the collection that - contains the model. There is no further need to 'containerise' it. 
- """ +class BlendRigLoader(plugin.AssetLoader): + """Load rigs from a .blend file.""" families = ["rig"] representations = ["blend"] @@ -24,105 +24,113 @@ class BlendRigLoader(plugin.AssetLoader): icon = "code-fork" color = "orange" - def _remove(self, objects, obj_container): - for obj in list(objects): - if obj.type == 'ARMATURE': - bpy.data.armatures.remove(obj.data) - elif obj.type == 'MESH': + def _remove(self, asset_group): + objects = list(asset_group.children) + + for obj in objects: + if obj.type == 'MESH': + for material_slot in list(obj.material_slots): + if material_slot.material: + bpy.data.materials.remove(material_slot.material) bpy.data.meshes.remove(obj.data) + elif obj.type == 'ARMATURE': + objects.extend(obj.children) + bpy.data.armatures.remove(obj.data) elif obj.type == 'CURVE': bpy.data.curves.remove(obj.data) + elif obj.type == 'EMPTY': + objects.extend(obj.children) + bpy.data.objects.remove(obj) - for child in obj_container.children: - bpy.data.collections.remove(child) - - bpy.data.collections.remove(obj_container) - - def make_local_and_metadata(self, obj, collection_name): - local_obj = plugin.prepare_data(obj, collection_name) - plugin.prepare_data(local_obj.data, collection_name) - - if not local_obj.get(blender.pipeline.AVALON_PROPERTY): - local_obj[blender.pipeline.AVALON_PROPERTY] = dict() - - avalon_info = local_obj[blender.pipeline.AVALON_PROPERTY] - avalon_info.update({"container_name": collection_name + '_CON'}) - - return local_obj - - def _process( - self, libpath, lib_container, collection_name, - action, parent_collection - ): - relative = bpy.context.preferences.filepaths.use_relative_paths + def _process(self, libpath, asset_group, group_name, action): with bpy.data.libraries.load( - libpath, link=True, relative=relative - ) as (_, data_to): - data_to.collections = [lib_container] + libpath, link=True, relative=False + ) as (data_from, data_to): + data_to.objects = data_from.objects + + parent = bpy.context.scene.collection - parent = parent_collection + empties = [obj for obj in data_to.objects if obj.type == 'EMPTY'] - if parent is None: - parent = bpy.context.scene.collection + container = None - parent.children.link(bpy.data.collections[lib_container]) + for empty in empties: + if empty.get(AVALON_PROPERTY): + container = empty + break - rig_container = parent.children[lib_container].make_local() - rig_container.name = collection_name + assert container, "No asset group found" + # Children must be linked before parents, + # otherwise the hierarchy will break objects = [] - armatures = [ - obj for obj in rig_container.objects - if obj.type == 'ARMATURE' - ] + nodes = list(container.children) - for child in rig_container.children: - local_child = plugin.prepare_data(child, collection_name) - objects.extend(local_child.objects) + for obj in nodes: + obj.parent = asset_group - # for obj in bpy.data.objects: - # obj.select_set(False) + for obj in nodes: + objects.append(obj) + nodes.extend(list(obj.children)) + + objects.reverse() constraints = [] + armatures = [obj for obj in objects if obj.type == 'ARMATURE'] + for armature in armatures: for bone in armature.pose.bones: for constraint in bone.constraints: if hasattr(constraint, 'target'): constraints.append(constraint) - # Link armatures after other objects. - # The armature is unparented for all the non-local meshes, - # when it is made local. 
for obj in objects: - local_obj = self.make_local_and_metadata(obj, collection_name) + parent.objects.link(obj) - if obj != local_obj: - for constraint in constraints: - if constraint.target == obj: - constraint.target = local_obj + for obj in objects: + local_obj = plugin.prepare_data(obj, group_name) - for armature in armatures: - local_obj = self.make_local_and_metadata(armature, collection_name) + if local_obj.type == 'MESH': + plugin.prepare_data(local_obj.data, group_name) + + if obj != local_obj: + for constraint in constraints: + if constraint.target == obj: + constraint.target = local_obj - if action is not None: - local_obj.animation_data.action = action - elif local_obj.animation_data.action is not None: - plugin.prepare_data( - local_obj.animation_data.action, collection_name) + for material_slot in local_obj.material_slots: + if material_slot.material: + plugin.prepare_data(material_slot.material, group_name) + elif local_obj.type == 'ARMATURE': + plugin.prepare_data(local_obj.data, group_name) - # Set link the drivers to the local object - if local_obj.data.animation_data: - for d in local_obj.data.animation_data.drivers: - for v in d.driver.variables: - for t in v.targets: - t.id = local_obj + if action is not None: + local_obj.animation_data.action = action + elif local_obj.animation_data.action is not None: + plugin.prepare_data( + local_obj.animation_data.action, group_name) - rig_container.pop(blender.pipeline.AVALON_PROPERTY) + # Set link the drivers to the local object + if local_obj.data.animation_data: + for d in local_obj.data.animation_data.drivers: + for v in d.driver.variables: + for t in v.targets: + t.id = local_obj + + if not local_obj.get(AVALON_PROPERTY): + local_obj[AVALON_PROPERTY] = dict() + + avalon_info = local_obj[AVALON_PROPERTY] + avalon_info.update({"container_name": group_name}) + + objects.reverse() + + bpy.data.orphans_purge(do_local_ids=False) bpy.ops.object.select_all(action='DESELECT') - return rig_container + return objects def process_asset( self, context: dict, name: str, namespace: Optional[str] = None, @@ -138,61 +146,111 @@ def process_asset( libpath = self.fname asset = context["asset"]["name"] subset = context["subset"]["name"] - lib_container = plugin.asset_name( - asset, subset - ) - unique_number = plugin.get_unique_number( - asset, subset - ) - namespace = namespace or f"{asset}_{unique_number}" - collection_name = plugin.asset_name( - asset, subset, unique_number - ) - - container = bpy.data.collections.new(collection_name) - blender.pipeline.containerise_existing( - container, - name, - namespace, - context, - self.__class__.__name__, - ) - - metadata = container.get(blender.pipeline.AVALON_PROPERTY) - metadata["libpath"] = libpath - metadata["lib_container"] = lib_container + asset_name = plugin.asset_name(asset, subset) + unique_number = plugin.get_unique_number(asset, subset) + group_name = plugin.asset_name(asset, subset, unique_number) + namespace = namespace or f"{asset}_{unique_number}" - obj_container = self._process( - libpath, lib_container, collection_name, None, None) + avalon_container = bpy.data.collections.get(AVALON_CONTAINERS) + if not avalon_container: + avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS) + bpy.context.scene.collection.children.link(avalon_container) - metadata["obj_container"] = obj_container - # Save the list of objects in the metadata container - metadata["objects"] = obj_container.all_objects + asset_group = bpy.data.objects.new(group_name, object_data=None) + 
asset_group.empty_display_type = 'SINGLE_ARROW' + avalon_container.objects.link(asset_group) - metadata["parent"] = str(context["representation"]["parent"]) - metadata["family"] = context["representation"]["context"]["family"] + action = None - nodes = list(container.objects) - nodes.append(container) - self[:] = nodes - return nodes + bpy.ops.object.select_all(action='DESELECT') - def update(self, container: Dict, representation: Dict): + create_animation = False + + if options is not None: + parent = options.get('parent') + transform = options.get('transform') + action = options.get('action') + create_animation = options.get('create_animation') + + if parent and transform: + location = transform.get('translation') + rotation = transform.get('rotation') + scale = transform.get('scale') + + asset_group.location = ( + location.get('x'), + location.get('y'), + location.get('z') + ) + asset_group.rotation_euler = ( + rotation.get('x'), + rotation.get('y'), + rotation.get('z') + ) + asset_group.scale = ( + scale.get('x'), + scale.get('y'), + scale.get('z') + ) + + bpy.context.view_layer.objects.active = parent + asset_group.select_set(True) + + bpy.ops.object.parent_set(keep_transform=True) + + bpy.ops.object.select_all(action='DESELECT') + + objects = self._process(libpath, asset_group, group_name, action) + + if create_animation: + creator_plugin = lib.get_creator_by_name("CreateAnimation") + if not creator_plugin: + raise ValueError("Creator plugin \"CreateAnimation\" was " + "not found.") + + asset_group.select_set(True) + + animation_asset = options.get('animation_asset') + + api.create( + creator_plugin, + name=namespace + "_animation", + # name=f"{unique_number}_{subset}_animation", + asset=animation_asset, + options={"useSelection": False, "asset_group": asset_group}, + data={"dependencies": str(context["representation"]["_id"])} + ) + + bpy.ops.object.select_all(action='DESELECT') + + bpy.context.scene.collection.objects.link(asset_group) + + asset_group[AVALON_PROPERTY] = { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "name": name, + "namespace": namespace or '', + "loader": str(self.__class__.__name__), + "representation": str(context["representation"]["_id"]), + "libpath": libpath, + "asset_name": asset_name, + "parent": str(context["representation"]["parent"]), + "family": context["representation"]["context"]["family"], + "objectName": group_name + } + + self[:] = objects + return objects + + def exec_update(self, container: Dict, representation: Dict): """Update the loaded asset. - This will remove all objects of the current collection, load the new - ones and add them to the collection. - If the objects of the collection are used in another collection they - will not be removed, only unlinked. Normally this should not be the - case though. - - Warning: - No nested collections are supported at the moment! + This will remove all children of the asset group, load the new ones + and add them as children of the group. 
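The translation/rotation/scale unpacking above is duplicated almost verbatim in `BlendModelLoader.process_asset`; a sketch of the shared shape (dict keys per the Unreal layout JSON in this diff), which could be factored into a helper:

```python
def apply_layout_transform(asset_group, transform):
    """Apply a layout transform dict to the asset group empty."""
    location = transform.get('translation')
    rotation = transform.get('rotation')
    scale = transform.get('scale')

    asset_group.location = (location['x'], location['y'], location['z'])
    asset_group.rotation_euler = (rotation['x'], rotation['y'], rotation['z'])
    asset_group.scale = (scale['x'], scale['y'], scale['z'])
```

Factoring it out would keep the two loaders from drifting apart when the key names change.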
""" - collection = bpy.data.collections.get( - container["objectName"] - ) + object_name = container["objectName"] + asset_group = bpy.data.objects.get(object_name) libpath = Path(api.get_representation_path(representation)) extension = libpath.suffix.lower() @@ -202,12 +260,9 @@ def update(self, container: Dict, representation: Dict): pformat(representation, indent=2), ) - assert collection, ( + assert asset_group, ( f"The asset is not loaded: {container['objectName']}" ) - assert not (collection.children), ( - "Nested collections are not supported." - ) assert libpath, ( "No existing library file found for {container['objectName']}" ) @@ -218,89 +273,84 @@ def update(self, container: Dict, representation: Dict): f"Unsupported file: {libpath}" ) - collection_metadata = collection.get( - blender.pipeline.AVALON_PROPERTY) - collection_libpath = collection_metadata["libpath"] - lib_container = collection_metadata["lib_container"] - - obj_container = plugin.get_local_collection_with_name( - collection_metadata["obj_container"].name - ) - objects = obj_container.all_objects - - container_name = obj_container.name + metadata = asset_group.get(AVALON_PROPERTY) + group_libpath = metadata["libpath"] - normalized_collection_libpath = ( - str(Path(bpy.path.abspath(collection_libpath)).resolve()) + normalized_group_libpath = ( + str(Path(bpy.path.abspath(group_libpath)).resolve()) ) normalized_libpath = ( str(Path(bpy.path.abspath(str(libpath))).resolve()) ) self.log.debug( - "normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s", - normalized_collection_libpath, + "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s", + normalized_group_libpath, normalized_libpath, ) - if normalized_collection_libpath == normalized_libpath: + if normalized_group_libpath == normalized_libpath: self.log.info("Library already loaded, not updating...") return + # Check how many assets use the same library + count = 0 + for obj in bpy.data.collections.get(AVALON_CONTAINERS).objects: + if obj.get(AVALON_PROPERTY).get('libpath') == group_libpath: + count += 1 + # Get the armature of the rig - armatures = [obj for obj in objects if obj.type == 'ARMATURE'] - assert(len(armatures) == 1) + objects = asset_group.children + armature = [obj for obj in objects if obj.type == 'ARMATURE'][0] action = None - if armatures[0].animation_data and armatures[0].animation_data.action: - action = armatures[0].animation_data.action + if armature.animation_data and armature.animation_data.action: + action = armature.animation_data.action - parent = plugin.get_parent_collection(obj_container) + mat = asset_group.matrix_basis.copy() - self._remove(objects, obj_container) + self._remove(asset_group) - obj_container = self._process( - str(libpath), lib_container, container_name, action, parent) + # If it is the last object to use that library, remove it + if count == 1: + library = bpy.data.libraries.get(bpy.path.basename(group_libpath)) + bpy.data.libraries.remove(library) - # Save the list of objects in the metadata container - collection_metadata["obj_container"] = obj_container - collection_metadata["objects"] = obj_container.all_objects - collection_metadata["libpath"] = str(libpath) - collection_metadata["representation"] = str(representation["_id"]) + self._process(str(libpath), asset_group, object_name, action) - bpy.ops.object.select_all(action='DESELECT') + asset_group.matrix_basis = mat + + metadata["libpath"] = str(libpath) + metadata["representation"] = str(representation["_id"]) - def remove(self, container: Dict) -> 
bool:
-        """Remove an existing container from a Blender scene.
+    def exec_remove(self, container: Dict) -> bool:
+        """Remove an existing asset group from a Blender scene.
 
         Arguments:
             container (openpype:container-1.0): Container to remove,
                 from `host.ls()`.
 
         Returns:
-            bool: Whether the container was deleted.
-
-        Warning:
-            No nested collections are supported at the moment!
+            bool: Whether the asset group was deleted.
         """
+        object_name = container["objectName"]
+        asset_group = bpy.data.objects.get(object_name)
+        if not asset_group:
+            return False
+        libpath = (asset_group.get(AVALON_PROPERTY) or {}).get('libpath')
 
-        collection = bpy.data.collections.get(
-            container["objectName"]
-        )
-        if not collection:
-            return False
-        assert not (collection.children), (
-            "Nested collections are not supported."
-        )
+        # Check how many assets use the same library
+        count = 0
+        for obj in bpy.data.collections.get(AVALON_CONTAINERS).objects:
+            if (obj.get(AVALON_PROPERTY) or {}).get('libpath') == libpath:
+                count += 1
 
-        collection_metadata = collection.get(
-            blender.pipeline.AVALON_PROPERTY)
+        self._remove(asset_group)
 
-        obj_container = plugin.get_local_collection_with_name(
-            collection_metadata["obj_container"].name
-        )
-        objects = obj_container.all_objects
+        bpy.data.objects.remove(asset_group)
 
-        self._remove(objects, obj_container)
+        # If it is the last object to use that library, remove it
+        if count == 1:
+            library = bpy.data.libraries.get(bpy.path.basename(libpath))
+            bpy.data.libraries.remove(library)
 
-        bpy.data.collections.remove(collection)
 
         return True
diff --git a/openpype/hosts/blender/plugins/publish/collect_instances.py b/openpype/hosts/blender/plugins/publish/collect_instances.py
index 1d3693216d0..0d683dace45 100644
--- a/openpype/hosts/blender/plugins/publish/collect_instances.py
+++ b/openpype/hosts/blender/plugins/publish/collect_instances.py
@@ -5,6 +5,7 @@
 import pyblish.api
 
 from avalon.blender.pipeline import AVALON_PROPERTY
+from avalon.blender.pipeline import AVALON_INSTANCES
 
 
 class CollectInstances(pyblish.api.ContextPlugin):
@@ -14,6 +15,22 @@ class CollectInstances(pyblish.api.ContextPlugin):
     label = "Collect Instances"
     order = pyblish.api.CollectorOrder
 
+    @staticmethod
+    def get_asset_groups() -> Generator:
+        """Yield the asset group empties stored in AVALON_INSTANCES.
+
+        Only objects tagged with the 'pyblish.avalon.instance' id are
+        publishable instances; loaded containers and plain objects in
+        the collection are skipped.
+        """
+        instances = bpy.data.collections.get(AVALON_INSTANCES)
+        if not instances:
+            return
+        for obj in instances.objects:
+            avalon_prop = obj.get(AVALON_PROPERTY) or dict()
+            if avalon_prop.get('id') == 'pyblish.avalon.instance':
+                yield obj
+
     @staticmethod
     def get_collections() -> Generator:
         """Return all 'model' collections.
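Putting the two pieces together (the generator above and the member walk in the hunk that follows), instance collection reduces to roughly this; note the extend-while-iterating idiom, which visits every descendant level without recursion:

```python
def collect_asset_group_members(group):
    """Gather the asset group and all of its descendants."""
    objects = list(group.children)
    members = {group}
    for obj in objects:
        objects.extend(obj.children)  # grows the list as deeper levels appear
        members.add(obj)
    return members
```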
@@ -29,8 +44,35 @@ def get_collections() -> Generator: def process(self, context): """Collect the models from the current Blender scene.""" + asset_groups = self.get_asset_groups() collections = self.get_collections() + for group in asset_groups: + avalon_prop = group[AVALON_PROPERTY] + asset = avalon_prop['asset'] + family = avalon_prop['family'] + subset = avalon_prop['subset'] + task = avalon_prop['task'] + name = f"{asset}_{subset}" + instance = context.create_instance( + name=name, + family=family, + families=[family], + subset=subset, + asset=asset, + task=task, + ) + objects = list(group.children) + members = set() + for obj in objects: + objects.extend(list(obj.children)) + members.add(obj) + members.add(group) + instance[:] = list(members) + self.log.debug(json.dumps(instance.data, indent=4)) + for obj in instance: + self.log.debug(obj) + for collection in collections: avalon_prop = collection[AVALON_PROPERTY] asset = avalon_prop['asset'] @@ -47,6 +89,12 @@ def process(self, context): task=task, ) members = list(collection.objects) + if family == "animation": + for obj in collection.objects: + if obj.type == 'EMPTY' and obj.get(AVALON_PROPERTY): + for child in obj.children: + if child.type == 'ARMATURE': + members.append(child) members.append(collection) instance[:] = members self.log.debug(json.dumps(instance.data, indent=4)) diff --git a/openpype/hosts/blender/plugins/publish/extract_abc.py b/openpype/hosts/blender/plugins/publish/extract_abc.py index a6315908fc6..4696da3db4b 100644 --- a/openpype/hosts/blender/plugins/publish/extract_abc.py +++ b/openpype/hosts/blender/plugins/publish/extract_abc.py @@ -1,12 +1,13 @@ import os -import openpype.api -import openpype.hosts.blender.api.plugin +from openpype import api +from openpype.hosts.blender.api import plugin +from avalon.blender.pipeline import AVALON_PROPERTY import bpy -class ExtractABC(openpype.api.Extractor): +class ExtractABC(api.Extractor): """Extract as ABC.""" label = "Extract ABC" @@ -16,7 +17,6 @@ class ExtractABC(openpype.api.Extractor): def process(self, instance): # Define extract output file path - stagingdir = self.staging_dir(instance) filename = f"{instance.name}.abc" filepath = os.path.join(stagingdir, filename) @@ -28,57 +28,29 @@ def process(self, instance): # Perform extraction self.log.info("Performing extraction..") - collections = [ - obj for obj in instance if type(obj) is bpy.types.Collection] - - assert len(collections) == 1, "There should be one and only one " \ - "collection collected for this asset" - - old_active_layer_collection = view_layer.active_layer_collection - - layers = view_layer.layer_collection.children - - # Get the layer collection from the collection we need to export. - # This is needed because in Blender you can only set the active - # collection with the layer collection, and there is no way to get - # the layer collection from the collection - # (but there is the vice versa). 
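The comment being deleted from `extract_abc.py` above documents a real Blender quirk: only layer collections can be made active, and there is no direct collection-to-layer-collection lookup. The removed code searched a single level; a general recursive version of that lookup looks like:

```python
def find_layer_collection(layer_collection, collection):
    """Depth-first search for the layer collection wrapping `collection`."""
    if layer_collection.collection == collection:
        return layer_collection
    for child in layer_collection.children:
        found = find_layer_collection(child, collection)
        if found:
            return found
    return None


# view_layer.active_layer_collection = find_layer_collection(
#     view_layer.layer_collection, target_collection)
```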
- layer_collections = [ - layer for layer in layers if layer.collection == collections[0]] - - assert len(layer_collections) == 1 - - view_layer.active_layer_collection = layer_collections[0] - - old_scale = scene.unit_settings.scale_length - bpy.ops.object.select_all(action='DESELECT') - selected = list() + selected = [] + asset_group = None for obj in instance: - try: - obj.select_set(True) - selected.append(obj) - except: - continue + obj.select_set(True) + selected.append(obj) + if obj.get(AVALON_PROPERTY): + asset_group = obj - new_context = openpype.hosts.blender.api.plugin.create_blender_context( - active=selected[0], selected=selected) - - # We set the scale of the scene for the export - scene.unit_settings.scale_length = 0.01 + context = plugin.create_blender_context( + active=asset_group, selected=selected) # We export the abc bpy.ops.wm.alembic_export( - new_context, + context, filepath=filepath, - selected=True + selected=True, + flatten=False ) - view_layer.active_layer_collection = old_active_layer_collection - - scene.unit_settings.scale_length = old_scale + bpy.ops.object.select_all(action='DESELECT') if "representations" not in instance.data: instance.data["representations"] = [] diff --git a/openpype/hosts/blender/plugins/publish/extract_blend.py b/openpype/hosts/blender/plugins/publish/extract_blend.py index 890c8b5ffdd..6687c9fe765 100644 --- a/openpype/hosts/blender/plugins/publish/extract_blend.py +++ b/openpype/hosts/blender/plugins/publish/extract_blend.py @@ -1,6 +1,8 @@ import os -import avalon.blender.workio +import bpy + +# import avalon.blender.workio import openpype.api @@ -9,7 +11,7 @@ class ExtractBlend(openpype.api.Extractor): label = "Extract Blend" hosts = ["blender"] - families = ["model", "camera", "rig", "action", "layout", "animation"] + families = ["model", "camera", "rig", "action", "layout"] optional = True def process(self, instance): @@ -22,15 +24,12 @@ def process(self, instance): # Perform extraction self.log.info("Performing extraction..") - # Just save the file to a temporary location. At least for now it's no - # problem to have (possibly) extra stuff in the file. 
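With the rewrite above, the extractor drives `wm.alembic_export` through a context override instead of juggling the active layer collection. `create_blender_context` presumably assembles a dict of this shape (a sketch, assuming the positional-override style used throughout this diff, which Blender supported before 3.2):

```python
import bpy


def create_export_context(active, selected):
    """Minimal context override for running export operators headless."""
    override = bpy.context.copy()
    override['active_object'] = active
    override['selected_objects'] = selected
    return override


# bpy.ops.wm.alembic_export(
#     create_export_context(asset_group, selected),
#     filepath=filepath, selected=True, flatten=False)
```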
- avalon.blender.workio.save_file(filepath, copy=True) - # - # # Store reference for integration - # if "files" not in instance.data: - # instance.data["files"] = list() - # - # # instance.data["files"].append(filename) + data_blocks = set() + + for obj in instance: + data_blocks.add(obj) + + bpy.data.libraries.write(filepath, data_blocks) if "representations" not in instance.data: instance.data["representations"] = [] diff --git a/openpype/hosts/blender/plugins/publish/extract_blend_animation.py b/openpype/hosts/blender/plugins/publish/extract_blend_animation.py new file mode 100644 index 00000000000..239ca53f98e --- /dev/null +++ b/openpype/hosts/blender/plugins/publish/extract_blend_animation.py @@ -0,0 +1,53 @@ +import os + +import bpy + +import openpype.api + + +class ExtractBlendAnimation(openpype.api.Extractor): + """Extract a blend file.""" + + label = "Extract Blend" + hosts = ["blender"] + families = ["animation"] + optional = True + + def process(self, instance): + # Define extract output file path + + stagingdir = self.staging_dir(instance) + filename = f"{instance.name}.blend" + filepath = os.path.join(stagingdir, filename) + + # Perform extraction + self.log.info("Performing extraction..") + + data_blocks = set() + + for obj in instance: + if isinstance(obj, bpy.types.Object) and obj.type == 'EMPTY': + child = obj.children[0] + if child and child.type == 'ARMATURE': + if not obj.animation_data: + obj.animation_data_create() + obj.animation_data.action = child.animation_data.action + obj.animation_data_clear() + data_blocks.add(child.animation_data.action) + data_blocks.add(obj) + + bpy.data.libraries.write(filepath, data_blocks) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': 'blend', + 'ext': 'blend', + 'files': filename, + "stagingDir": stagingdir, + } + instance.data["representations"].append(representation) + + self.log.info("Extracted instance '%s' to: %s", + instance.name, representation) diff --git a/openpype/hosts/blender/plugins/publish/extract_fbx.py b/openpype/hosts/blender/plugins/publish/extract_fbx.py index 05149eacc16..b91f2a75efa 100644 --- a/openpype/hosts/blender/plugins/publish/extract_fbx.py +++ b/openpype/hosts/blender/plugins/publish/extract_fbx.py @@ -1,11 +1,13 @@ import os -import openpype.api +from openpype import api +from openpype.hosts.blender.api import plugin +from avalon.blender.pipeline import AVALON_PROPERTY import bpy -class ExtractFBX(openpype.api.Extractor): +class ExtractFBX(api.Extractor): """Extract as FBX.""" label = "Extract FBX" @@ -15,71 +17,56 @@ class ExtractFBX(openpype.api.Extractor): def process(self, instance): # Define extract output file path - stagingdir = self.staging_dir(instance) filename = f"{instance.name}.fbx" filepath = os.path.join(stagingdir, filename) - context = bpy.context - scene = context.scene - view_layer = context.view_layer - # Perform extraction self.log.info("Performing extraction..") - collections = [ - obj for obj in instance if type(obj) is bpy.types.Collection] - - assert len(collections) == 1, "There should be one and only one " \ - "collection collected for this asset" - - old_active_layer_collection = view_layer.active_layer_collection + bpy.ops.object.select_all(action='DESELECT') - layers = view_layer.layer_collection.children + selected = [] + asset_group = None - # Get the layer collection from the collection we need to export. 
- # This is needed because in Blender you can only set the active - # collection with the layer collection, and there is no way to get - # the layer collection from the collection - # (but there is the vice versa). - layer_collections = [ - layer for layer in layers if layer.collection == collections[0]] + for obj in instance: + obj.select_set(True) + selected.append(obj) + if obj.get(AVALON_PROPERTY): + asset_group = obj - assert len(layer_collections) == 1 - - view_layer.active_layer_collection = layer_collections[0] - - old_scale = scene.unit_settings.scale_length - - # We set the scale of the scene for the export - scene.unit_settings.scale_length = 0.01 + context = plugin.create_blender_context( + active=asset_group, selected=selected) new_materials = [] + new_materials_objs = [] + objects = list(asset_group.children) - for obj in collections[0].all_objects: - if obj.type == 'MESH': + for obj in objects: + objects.extend(obj.children) + if obj.type == 'MESH' and len(obj.data.materials) == 0: mat = bpy.data.materials.new(obj.name) obj.data.materials.append(mat) new_materials.append(mat) + new_materials_objs.append(obj) # We export the fbx bpy.ops.export_scene.fbx( + context, filepath=filepath, - use_active_collection=True, + use_active_collection=False, + use_selection=True, mesh_smooth_type='FACE', add_leaf_bones=False ) - view_layer.active_layer_collection = old_active_layer_collection - - scene.unit_settings.scale_length = old_scale + bpy.ops.object.select_all(action='DESELECT') for mat in new_materials: bpy.data.materials.remove(mat) - for obj in collections[0].all_objects: - if obj.type == 'MESH': - obj.data.materials.pop() + for obj in new_materials_objs: + obj.data.materials.pop() if "representations" not in instance.data: instance.data["representations"] = [] diff --git a/openpype/hosts/blender/plugins/publish/extract_fbx_animation.py b/openpype/hosts/blender/plugins/publish/extract_fbx_animation.py index 8312114c7b9..16443b760c6 100644 --- a/openpype/hosts/blender/plugins/publish/extract_fbx_animation.py +++ b/openpype/hosts/blender/plugins/publish/extract_fbx_animation.py @@ -1,14 +1,16 @@ import os import json -import openpype.api - import bpy import bpy_extras import bpy_extras.anim_utils +from openpype import api +from openpype.hosts.blender.api import plugin +from avalon.blender.pipeline import AVALON_PROPERTY + -class ExtractAnimationFBX(openpype.api.Extractor): +class ExtractAnimationFBX(api.Extractor): """Extract as animation.""" label = "Extract FBX" @@ -20,33 +22,26 @@ def process(self, instance): # Define extract output file path stagingdir = self.staging_dir(instance) - context = bpy.context - scene = context.scene - # Perform extraction self.log.info("Performing extraction..") - collections = [ - obj for obj in instance if type(obj) is bpy.types.Collection] - - assert len(collections) == 1, "There should be one and only one " \ - "collection collected for this asset" - - old_scale = scene.unit_settings.scale_length + # The first collection object in the instance is taken, as there + # should be only one that contains the asset group. + collection = [ + obj for obj in instance if type(obj) is bpy.types.Collection][0] - # We set the scale of the scene for the export - scene.unit_settings.scale_length = 0.01 + # Again, the first object in the collection is taken , as there + # should be only the asset group in the collection. 
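The placeholder-material dance in `extract_fbx.py` above guards against the FBX exporter dropping material slots on meshes without materials. Isolated, and with the cleanup made exception-safe (a sketch; the actual extractor restores the materials inline):

```python
import bpy


def with_placeholder_materials(mesh_objects, export):
    """Give material-less meshes a throwaway material for the export."""
    new_materials = []
    new_material_objs = []
    for obj in mesh_objects:
        if obj.type == 'MESH' and len(obj.data.materials) == 0:
            mat = bpy.data.materials.new(obj.name)
            obj.data.materials.append(mat)
            new_materials.append(mat)
            new_material_objs.append(obj)
    try:
        export()  # e.g. a closure around bpy.ops.export_scene.fbx(...)
    finally:
        for mat in new_materials:
            bpy.data.materials.remove(mat)
        for obj in new_material_objs:
            obj.data.materials.pop()
```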
+ asset_group = collection.objects[0] - armatures = [ - obj for obj in collections[0].objects if obj.type == 'ARMATURE'] + armature = [ + obj for obj in asset_group.children if obj.type == 'ARMATURE'][0] - assert len(collections) == 1, "There should be one and only one " \ - "armature collected for this asset" - - armature = armatures[0] + asset_group_name = asset_group.name + asset_group.name = asset_group.get(AVALON_PROPERTY).get("asset_name") armature_name = armature.name - original_name = armature_name.split(':')[0] + original_name = armature_name.split(':')[1] armature.name = original_name object_action_pairs = [] @@ -89,27 +84,29 @@ def process(self, instance): for obj in bpy.data.objects: obj.select_set(False) + asset_group.select_set(True) armature.select_set(True) fbx_filename = f"{instance.name}_{armature.name}.fbx" filepath = os.path.join(stagingdir, fbx_filename) - override = bpy.context.copy() - override['selected_objects'] = [armature] + override = plugin.create_blender_context( + active=asset_group, selected=[asset_group, armature]) bpy.ops.export_scene.fbx( override, filepath=filepath, + use_active_collection=False, use_selection=True, bake_anim_use_nla_strips=False, bake_anim_use_all_actions=False, add_leaf_bones=False, armature_nodetype='ROOT', - object_types={'ARMATURE'} + object_types={'EMPTY', 'ARMATURE'} ) armature.name = armature_name + asset_group.name = asset_group_name + asset_group.select_set(False) armature.select_set(False) - scene.unit_settings.scale_length = old_scale - # We delete the baked action and set the original one back for i in range(0, len(object_action_pairs)): pair = object_action_pairs[i] @@ -125,18 +122,20 @@ def process(self, instance): json_filename = f"{instance.name}.json" json_path = os.path.join(stagingdir, json_filename) - json_dict = {} + json_dict = { + "instance_name": asset_group.get(AVALON_PROPERTY).get("namespace") + } - collection = instance.data.get("name") - container = None - for obj in bpy.data.collections[collection].objects: - if obj.type == "ARMATURE": - container_name = obj.get("avalon").get("container_name") - container = bpy.data.collections[container_name] - if container: - json_dict = { - "instance_name": container.get("avalon").get("instance_name") - } + # collection = instance.data.get("name") + # container = None + # for obj in bpy.data.collections[collection].objects: + # if obj.type == "ARMATURE": + # container_name = obj.get("avalon").get("container_name") + # container = bpy.data.collections[container_name] + # if container: + # json_dict = { + # "instance_name": container.get("avalon").get("instance_name") + # } with open(json_path, "w+") as file: json.dump(json_dict, fp=file, indent=2) @@ -159,6 +158,5 @@ def process(self, instance): instance.data["representations"].append(fbx_representation) instance.data["representations"].append(json_representation) - self.log.info("Extracted instance '{}' to: {}".format( instance.name, fbx_representation)) diff --git a/openpype/hosts/blender/plugins/publish/extract_layout.py b/openpype/hosts/blender/plugins/publish/extract_layout.py index c6c9bf67f53..cd081b44797 100644 --- a/openpype/hosts/blender/plugins/publish/extract_layout.py +++ b/openpype/hosts/blender/plugins/publish/extract_layout.py @@ -3,7 +3,8 @@ import bpy -from avalon import blender, io +from avalon import io +from avalon.blender.pipeline import AVALON_PROPERTY import openpype.api @@ -24,52 +25,49 @@ def process(self, instance): json_data = [] - for collection in instance: - for asset in collection.children: 
-            collection = bpy.data.collections[asset.name]
-            container = bpy.data.collections[asset.name + '_CON']
-            metadata = container.get(blender.pipeline.AVALON_PROPERTY)
-
-            parent = metadata["parent"]
-            family = metadata["family"]
-
-            self.log.debug("Parent: {}".format(parent))
-            blend = io.find_one(
-                {
-                    "type": "representation",
-                    "parent": io.ObjectId(parent),
-                    "name": "blend"
-                },
-                projection={"_id": True})
-            blend_id = blend["_id"]
-
-            json_element = {}
-            json_element["reference"] = str(blend_id)
-            json_element["family"] = family
-            json_element["instance_name"] = asset.name
-            json_element["asset_name"] = metadata["lib_container"]
-            json_element["file_path"] = metadata["libpath"]
-
-            obj = collection.objects[0]
-
-            json_element["transform"] = {
-                "translation": {
-                    "x": obj.location.x,
-                    "y": obj.location.y,
-                    "z": obj.location.z
-                },
-                "rotation": {
-                    "x": obj.rotation_euler.x,
-                    "y": obj.rotation_euler.y,
-                    "z": obj.rotation_euler.z,
-                },
-                "scale": {
-                    "x": obj.scale.x,
-                    "y": obj.scale.y,
-                    "z": obj.scale.z
-                }
+        asset_group = bpy.data.objects[str(instance)]
+
+        for asset in asset_group.children:
+            metadata = asset.get(AVALON_PROPERTY)
+
+            parent = metadata["parent"]
+            family = metadata["family"]
+
+            self.log.debug("Parent: {}".format(parent))
+            blend = io.find_one(
+                {
+                    "type": "representation",
+                    "parent": io.ObjectId(parent),
+                    "name": "blend"
+                },
+                projection={"_id": True})
+            blend_id = blend["_id"]
+
+            json_element = {}
+            json_element["reference"] = str(blend_id)
+            json_element["family"] = family
+            json_element["instance_name"] = asset.name
+            json_element["asset_name"] = metadata["asset_name"]
+            json_element["file_path"] = metadata["libpath"]
+
+            json_element["transform"] = {
+                "translation": {
+                    "x": asset.location.x,
+                    "y": asset.location.y,
+                    "z": asset.location.z
+                },
+                "rotation": {
+                    "x": asset.rotation_euler.x,
+                    "y": asset.rotation_euler.y,
+                    "z": asset.rotation_euler.z,
+                },
+                "scale": {
+                    "x": asset.scale.x,
+                    "y": asset.scale.y,
+                    "z": asset.scale.z
                 }
-            json_data.append(json_element)
+            }
+            json_data.append(json_element)
 
         json_filename = "{}.json".format(instance.name)
         json_path = os.path.join(stagingdir, json_filename)
diff --git a/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py b/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py
new file mode 100644
index 00000000000..261ff864d59
--- /dev/null
+++ b/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py
@@ -0,0 +1,40 @@
+from typing import List
+
+import pyblish.api
+import openpype.api
+import openpype.hosts.blender.api.action
+
+
+class ValidateNoColonsInName(pyblish.api.InstancePlugin):
+    """There cannot be colons in names.
+
+    Object or bone names cannot include colons. Other software does not
+    handle colons correctly.
+ + """ + + order = openpype.api.ValidateContentsOrder + hosts = ["blender"] + families = ["model", "rig"] + version = (0, 1, 0) + label = "No Colons in names" + actions = [openpype.hosts.blender.api.action.SelectInvalidAction] + + @classmethod + def get_invalid(cls, instance) -> List: + invalid = [] + for obj in [obj for obj in instance]: + if ':' in obj.name: + invalid.append(obj) + if obj.type == 'ARMATURE': + for bone in obj.data.bones: + if ':' in bone.name: + invalid.append(obj) + break + return invalid + + def process(self, instance): + invalid = self.get_invalid(instance) + if invalid: + raise RuntimeError( + f"Objects found with colon in name: {invalid}") diff --git a/openpype/hosts/blender/plugins/publish/validate_transform_zero.py b/openpype/hosts/blender/plugins/publish/validate_transform_zero.py new file mode 100644 index 00000000000..7456dbc4238 --- /dev/null +++ b/openpype/hosts/blender/plugins/publish/validate_transform_zero.py @@ -0,0 +1,40 @@ +from typing import List + +import mathutils + +import pyblish.api +import openpype.hosts.blender.api.action + + +class ValidateTransformZero(pyblish.api.InstancePlugin): + """Transforms can't have any values + + To solve this issue, try freezing the transforms. So long + as the transforms, rotation and scale values are zero, + you're all good. + + """ + + order = openpype.api.ValidateContentsOrder + hosts = ["blender"] + families = ["model"] + category = "geometry" + version = (0, 1, 0) + label = "Transform Zero" + actions = [openpype.hosts.blender.api.action.SelectInvalidAction] + + _identity = mathutils.Matrix() + + @classmethod + def get_invalid(cls, instance) -> List: + invalid = [] + for obj in [obj for obj in instance]: + if obj.matrix_basis != cls._identity: + invalid.append(obj) + return invalid + + def process(self, instance): + invalid = self.get_invalid(instance) + if invalid: + raise RuntimeError( + f"Object found in instance is not in Object Mode: {invalid}") diff --git a/openpype/hosts/houdini/api/__init__.py b/openpype/hosts/houdini/api/__init__.py index 21f4ae41c3d..7328236b97a 100644 --- a/openpype/hosts/houdini/api/__init__.py +++ b/openpype/hosts/houdini/api/__init__.py @@ -1,17 +1,21 @@ import os +import sys import logging +import contextlib import hou from pyblish import api as pyblish - from avalon import api as avalon -from avalon.houdini import pipeline as houdini import openpype.hosts.houdini from openpype.hosts.houdini.api import lib -from openpype.lib import any_outdated +from openpype.lib import ( + any_outdated +) + +from .lib import get_asset_fps log = logging.getLogger("openpype.hosts.houdini") @@ -22,6 +26,7 @@ CREATE_PATH = os.path.join(PLUGINS_DIR, "create") INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") + def install(): pyblish.register_plugin_path(PUBLISH_PATH) @@ -29,19 +34,28 @@ def install(): avalon.register_plugin_path(avalon.Creator, CREATE_PATH) log.info("Installing callbacks ... 
") - avalon.on("init", on_init) + # avalon.on("init", on_init) avalon.before("save", before_save) avalon.on("save", on_save) avalon.on("open", on_open) + avalon.on("new", on_new) pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled) log.info("Setting default family states for loader..") - avalon.data["familiesStateToggled"] = ["imagesequence"] + avalon.data["familiesStateToggled"] = [ + "imagesequence", + "review" + ] + + # add houdini vendor packages + hou_pythonpath = os.path.join(os.path.dirname(HOST_DIR), "vendor") + sys.path.append(hou_pythonpath) -def on_init(*args): - houdini.on_houdini_initialize() + # Set asset FPS for the empty scene directly after launch of Houdini + # so it initializes into the correct scene FPS + _set_asset_fps() def before_save(*args): @@ -59,10 +73,18 @@ def on_save(*args): def on_open(*args): + if not hou.isUIAvailable(): + log.debug("Batch mode detected, ignoring `on_open` callbacks..") + return + avalon.logger.info("Running callback on open..") + # Validate FPS after update_task_from_path to + # ensure it is using correct FPS for the asset + lib.validate_fps() + if any_outdated(): - from ..widgets import popup + from openpype.widgets import popup log.warning("Scene has outdated content.") @@ -70,7 +92,7 @@ def on_open(*args): parent = hou.ui.mainQtWindow() if parent is None: log.info("Skipping outdated content pop-up " - "because Maya window can't be found.") + "because Houdini window can't be found.") else: # Show outdated pop-up @@ -79,15 +101,52 @@ def _on_show_inventory(): tool.show(parent=parent) dialog = popup.Popup(parent=parent) - dialog.setWindowTitle("Maya scene has outdated content") + dialog.setWindowTitle("Houdini scene has outdated content") dialog.setMessage("There are outdated containers in " - "your Maya scene.") - dialog.on_show.connect(_on_show_inventory) + "your Houdini scene.") + dialog.on_clicked.connect(_on_show_inventory) dialog.show() +def on_new(_): + """Set project resolution and fps when create a new file""" + avalon.logger.info("Running callback on new..") + _set_asset_fps() + + +def _set_asset_fps(): + """Set Houdini scene FPS to the default required for current asset""" + + # Set new scene fps + fps = get_asset_fps() + print("Setting scene FPS to %i" % fps) + lib.set_scene_fps(fps) + + def on_pyblish_instance_toggled(instance, new_value, old_value): """Toggle saver tool passthrough states on instance toggles.""" + @contextlib.contextmanager + def main_take(no_update=True): + """Enter root take during context""" + original_take = hou.takes.currentTake() + original_update_mode = hou.updateModeSetting() + root = hou.takes.rootTake() + has_changed = False + try: + if original_take != root: + has_changed = True + if no_update: + hou.setUpdateMode(hou.updateMode.Manual) + hou.takes.setCurrentTake(root) + yield + finally: + if has_changed: + if no_update: + hou.setUpdateMode(original_update_mode) + hou.takes.setCurrentTake(original_take) + + if not instance.data.get("_allowToggleBypass", True): + return nodes = instance[:] if not nodes: @@ -96,8 +155,20 @@ def on_pyblish_instance_toggled(instance, new_value, old_value): # Assume instance node is first node instance_node = nodes[0] + if not hasattr(instance_node, "isBypassed"): + # Likely not a node that can actually be bypassed + log.debug("Can't bypass node: %s", instance_node.path()) + return + if instance_node.isBypassed() != (not old_value): print("%s old bypass state didn't match old instance state, " "updating anyway.." 
% instance_node.path()) - instance_node.bypass(not new_value) + try: + # Go into the main take, because when in another take changing + # the bypass state of a note cannot be done due to it being locked + # by default. + with main_take(no_update=True): + instance_node.bypass(not new_value) + except hou.PermissionError as exc: + log.warning("%s - %s", instance_node.path(), exc) diff --git a/openpype/hosts/houdini/api/lib.py b/openpype/hosts/houdini/api/lib.py index 1f0f90811ff..53f0e59ea92 100644 --- a/openpype/hosts/houdini/api/lib.py +++ b/openpype/hosts/houdini/api/lib.py @@ -1,14 +1,19 @@ import uuid - +import logging from contextlib import contextmanager +from openpype.api import get_asset +from avalon import api, io +from avalon.houdini import lib as houdini + import hou -from openpype import lib +log = logging.getLogger(__name__) -from avalon import api, io -from avalon.houdini import lib as houdini +def get_asset_fps(): + """Return current asset fps.""" + return get_asset()["data"].get("fps") def set_id(node, unique_id, overwrite=False): @@ -171,10 +176,10 @@ def get_output_parameter(node): node_type = node.type().name() if node_type == "geometry": return node.parm("sopoutput") - elif node_type == "alembic": return node.parm("filename") - + elif node_type == "comp": + return node.parm("copoutput") else: raise TypeError("Node type '%s' not supported" % node_type) @@ -205,7 +210,7 @@ def validate_fps(): """ - fps = lib.get_asset()["data"]["fps"] + fps = get_asset_fps() current_fps = hou.fps() # returns float if current_fps != fps: @@ -217,18 +222,123 @@ def validate_fps(): if parent is None: pass else: - dialog = popup.Popup2(parent=parent) + dialog = popup.Popup(parent=parent) dialog.setModal(True) - dialog.setWindowTitle("Houdini scene not in line with project") - dialog.setMessage("The FPS is out of sync, please fix it") + dialog.setWindowTitle("Houdini scene does not match project FPS") + dialog.setMessage("Scene %i FPS does not match project %i FPS" % + (current_fps, fps)) + dialog.setButtonText("Fix") - # Set new text for button (add optional argument for the popup?) - toggle = dialog.widgets["toggle"] - toggle.setEnabled(False) - dialog.on_show.connect(lambda: set_scene_fps(fps)) + # on_show is the Fix button clicked callback + dialog.on_clicked.connect(lambda: set_scene_fps(fps)) dialog.show() return False return True + + +def create_remote_publish_node(force=True): + """Function to create a remote publish node in /out + + This is a hacked "Shell" node that does *nothing* except for triggering + `colorbleed.lib.publish_remote()` as pre-render script. + + All default attributes of the Shell node are hidden to the Artist to + avoid confusion. + + Additionally some custom attributes are added that can be collected + by a Collector to set specific settings for the publish, e.g. whether + to separate the jobs per instance or process in one single job. + + """ + + cmd = "import colorbleed.lib; colorbleed.lib.publish_remote()" + + existing = hou.node("/out/REMOTE_PUBLISH") + if existing: + if force: + log.warning("Removing existing '/out/REMOTE_PUBLISH' node..") + existing.destroy() + else: + raise RuntimeError("Node already exists /out/REMOTE_PUBLISH. 
" + "Please remove manually or set `force` to " + "True.") + + # Create the shell node + out = hou.node("/out") + node = out.createNode("shell", node_name="REMOTE_PUBLISH") + node.moveToGoodPosition() + + # Set color make it stand out (avalon/pyblish color) + node.setColor(hou.Color(0.439, 0.709, 0.933)) + + # Set the pre-render script + node.setParms({ + "prerender": cmd, + "lprerender": "python" # command language + }) + + # Lock the attributes to ensure artists won't easily mess things up. + node.parm("prerender").lock(True) + node.parm("lprerender").lock(True) + + # Lock up the actual shell command + command_parm = node.parm("command") + command_parm.set("") + command_parm.lock(True) + shellexec_parm = node.parm("shellexec") + shellexec_parm.set(False) + shellexec_parm.lock(True) + + # Get the node's parm template group so we can customize it + template = node.parmTemplateGroup() + + # Hide default tabs + template.hideFolder("Shell", True) + template.hideFolder("Scripts", True) + + # Hide default settings + template.hide("execute", True) + template.hide("renderdialog", True) + template.hide("trange", True) + template.hide("f", True) + template.hide("take", True) + + # Add custom settings to this node. + parm_folder = hou.FolderParmTemplate("folder", "Submission Settings") + + # Separate Jobs per Instance + parm = hou.ToggleParmTemplate(name="separateJobPerInstance", + label="Separate Job per Instance", + default_value=False) + parm_folder.addParmTemplate(parm) + + # Add our custom Submission Settings folder + template.append(parm_folder) + + # Apply template back to the node + node.setParmTemplateGroup(template) + + +def render_rop(ropnode): + """Render ROP node utility for Publishing. + + This renders a ROP node with the settings we want during Publishing. + """ + # Print verbose when in batch mode without UI + verbose = not hou.isUIAvailable() + + # Render + try: + ropnode.render(verbose=verbose, + # Allow Deadline to capture completion percentage + output_progress=verbose) + except hou.Error as exc: + # The hou.Error is not inherited from a Python Exception class, + # so we explicitly capture the houdini error, otherwise pyblish + # will remain hanging. 
+ import traceback + traceback.print_exc() + raise RuntimeError("Render failed: {0}".format(exc)) diff --git a/openpype/hosts/houdini/api/plugin.py b/openpype/hosts/houdini/api/plugin.py index 9820ed49c3c..efdaa600849 100644 --- a/openpype/hosts/houdini/api/plugin.py +++ b/openpype/hosts/houdini/api/plugin.py @@ -1,6 +1,26 @@ +# -*- coding: utf-8 -*- +"""Houdini specific Avalon/Pyblish plugin definitions.""" +import sys from avalon import houdini +import six + +import hou from openpype.api import PypeCreatorMixin -class Creator(PypeCreatorMixin, houdini.Creator): +class OpenPypeCreatorError(Exception): pass + + +class Creator(PypeCreatorMixin, houdini.Creator): + def process(self): + try: + # re-raise as standard Python exception so + # Avalon can catch it + instance = super(Creator, self).process() + self._process(instance) + except hou.Error as er: + six.reraise( + OpenPypeCreatorError, + OpenPypeCreatorError("Creator error: {}".format(er)), + sys.exc_info()[2]) diff --git a/openpype/hosts/houdini/api/usd.py b/openpype/hosts/houdini/api/usd.py new file mode 100644 index 00000000000..850ffb60e51 --- /dev/null +++ b/openpype/hosts/houdini/api/usd.py @@ -0,0 +1,255 @@ +"""Houdini-specific USD Library functions.""" + +import contextlib + +import logging +from Qt import QtCore, QtGui +from avalon.tools.widgets import AssetWidget +from avalon import style + +from pxr import Sdf + + +log = logging.getLogger(__name__) + + +def pick_asset(node): + """Show a user interface to select an Asset in the project + + When double clicking an asset it will set the Asset value in the + 'asset' parameter. + + """ + + pos = QtGui.QCursor.pos() + + parm = node.parm("asset_name") + if not parm: + log.error("Node has no 'asset' parameter: %s", node) + return + + # Construct the AssetWidget as a frameless popup so it automatically + # closes when clicked outside of it. + global tool + tool = AssetWidget(silo_creatable=False) + tool.setContentsMargins(5, 5, 5, 5) + tool.setWindowTitle("Pick Asset") + tool.setStyleSheet(style.load_stylesheet()) + tool.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.Popup) + tool.refresh() + + # Select the current asset if there is any + name = parm.eval() + if name: + from avalon import io + + db_asset = io.find_one({"name": name, "type": "asset"}) + if db_asset: + silo = db_asset.get("silo") + if silo: + tool.set_silo(silo) + tool.select_assets([name], expand=True) + + # Show cursor (top right of window) near cursor + tool.resize(250, 400) + tool.move(tool.mapFromGlobal(pos) - QtCore.QPoint(tool.width(), 0)) + + def set_parameter_callback(index): + name = index.data(tool.model.DocumentRole)["name"] + parm.set(name) + tool.close() + + tool.view.doubleClicked.connect(set_parameter_callback) + tool.show() + + +def add_usd_output_processor(ropnode, processor): + """Add USD Output Processor to USD Rop node. + + Args: + ropnode (hou.RopNode): The USD Rop node. + processor (str): The output processor name. This is the basename of + the python file that contains the Houdini USD Output Processor. + + """ + + import loputils + + loputils.handleOutputProcessorAdd( + { + "node": ropnode, + "parm": ropnode.parm("outputprocessors"), + "script_value": processor, + } + ) + + +def remove_usd_output_processor(ropnode, processor): + """Removes USD Output Processor from USD Rop node. + + Args: + ropnode (hou.RopNode): The USD Rop node. + processor (str): The output processor name. This is the basename of + the python file that contains the Houdini USD Output Processor. 
+ + """ + import loputils + + parm = ropnode.parm(processor + "_remove") + if not parm: + raise RuntimeError( + "Output Processor %s does not " + "exist on %s" % (processor, ropnode.name()) + ) + + loputils.handleOutputProcessorRemove({"node": ropnode, "parm": parm}) + + +@contextlib.contextmanager +def outputprocessors(ropnode, processors=tuple(), disable_all_others=True): + """Context manager to temporarily add Output Processors to USD ROP node. + + Args: + ropnode (hou.RopNode): The USD Rop node. + processors (tuple or list): The processors to add. + disable_all_others (bool, Optional): Whether to disable all + output processors currently on the ROP node that are not in the + `processors` list passed to this function. + + """ + # TODO: Add support for forcing the correct Order of the processors + + original = [] + prefix = "enableoutputprocessor_" + processor_parms = ropnode.globParms(prefix + "*") + for parm in processor_parms: + original.append((parm, parm.eval())) + + if disable_all_others: + for parm in processor_parms: + parm.set(False) + + added = [] + for processor in processors: + + parm = ropnode.parm(prefix + processor) + if parm: + # If processor already exists, just enable it + parm.set(True) + + else: + # Else add the new processor + add_usd_output_processor(ropnode, processor) + added.append(processor) + + try: + yield + finally: + + # Remove newly added processors + for processor in added: + remove_usd_output_processor(ropnode, processor) + + # Revert to original values + for parm, value in original: + if parm: + parm.set(value) + + +def get_usd_rop_loppath(node): + + # Get sop path + node_type = node.type().name() + if node_type == "usd": + return node.parm("loppath").evalAsNode() + + elif node_type in {"usd_rop", "usdrender_rop"}: + # Inside Solaris e.g. /stage (not in ROP context) + # When incoming connection is present it takes it directly + inputs = node.inputs() + if inputs: + return inputs[0] + else: + return node.parm("loppath").evalAsNode() + + +def get_layer_save_path(layer): + """Get custom HoudiniLayerInfo->HoudiniSavePath from SdfLayer. + + Args: + layer (pxr.Sdf.Layer): The Layer to retrieve the save pah data from. + + Returns: + str or None: Path to save to when data exists. + + """ + hou_layer_info = layer.rootPrims.get("HoudiniLayerInfo") + if not hou_layer_info: + return + + save_path = hou_layer_info.customData.get("HoudiniSavePath", None) + if save_path: + # Unfortunately this doesn't actually resolve the full absolute path + return layer.ComputeAbsolutePath(save_path) + + +def get_referenced_layers(layer): + """Return SdfLayers for all external references of the current layer + + Args: + layer (pxr.Sdf.Layer): The Layer to retrieve the save pah data from. + + Returns: + list: List of pxr.Sdf.Layer that are external references to this layer + + """ + + layers = [] + for layer_id in layer.GetExternalReferences(): + layer = Sdf.Layer.Find(layer_id) + if not layer: + # A file may not be in memory and is + # referenced from disk. As such it cannot + # be found. We will ignore those layers. 
+ continue + + layers.append(layer) + + return layers + + +def iter_layer_recursive(layer): + """Recursively iterate all 'external' referenced layers""" + + layers = get_referenced_layers(layer) + traversed = set(layers) # Avoid recursion to itself (if even possible) + traverse = list(layers) + for layer in traverse: + + # Include children layers (recursion) + children_layers = get_referenced_layers(layer) + children_layers = [x for x in children_layers if x not in traversed] + traverse.extend(children_layers) + traversed.update(children_layers) + + yield layer + + +def get_configured_save_layers(usd_rop): + + lop_node = get_usd_rop_loppath(usd_rop) + stage = lop_node.stage(apply_viewport_overrides=False) + if not stage: + raise RuntimeError( + "No valid USD stage for ROP node: " "%s" % usd_rop.path() + ) + + root_layer = stage.GetRootLayer() + + save_layers = [] + for layer in iter_layer_recursive(root_layer): + save_path = get_layer_save_path(layer) + if save_path is not None: + save_layers.append(layer) + + return save_layers diff --git a/openpype/hosts/houdini/hooks/set_paths.py b/openpype/hosts/houdini/hooks/set_paths.py new file mode 100644 index 00000000000..cd2f98fb762 --- /dev/null +++ b/openpype/hosts/houdini/hooks/set_paths.py @@ -0,0 +1,18 @@ +from openpype.lib import PreLaunchHook +import os + + +class SetPath(PreLaunchHook): + """Set current dir to workdir. + + Hook `GlobalHostDataHook` must be executed before this hook. + """ + app_groups = ["houdini"] + + def execute(self): + workdir = self.launch_context.env.get("AVALON_WORKDIR", "") + if not workdir: + self.log.warning("BUG: Workdir is not filled.") + return + + os.chdir(workdir) diff --git a/openpype/hosts/houdini/plugins/create/create_alembic_camera.py b/openpype/hosts/houdini/plugins/create/create_alembic_camera.py index adcfb48539b..eef86005f5b 100644 --- a/openpype/hosts/houdini/plugins/create/create_alembic_camera.py +++ b/openpype/hosts/houdini/plugins/create/create_alembic_camera.py @@ -18,28 +18,29 @@ def __init__(self, *args, **kwargs): # Set node type to create for output self.data.update({"node_type": "alembic"}) - def process(self): - instance = super(CreateAlembicCamera, self).process() + def _process(self, instance): + """Creator main entry point. + Args: + instance (hou.Node): Created Houdini instance. + + """ parms = { "filename": "$HIP/pyblish/%s.abc" % self.name, - "use_sop_path": False + "use_sop_path": False, } if self.nodes: node = self.nodes[0] path = node.path() - # Split the node path into the first root and the remainder # So we can set the root and objects parameters correctly _, root, remainder = path.split("/", 2) - parms.update({ - "root": "/" + root, - "objects": remainder - }) + parms.update({"root": "/" + root, "objects": remainder}) instance.setParms(parms) # Lock the Use Sop Path setting so the # user doesn't accidentally enable it. 
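All the Houdini creators in this changeset follow the same split: the base `Creator.process()` (see plugin.py above) creates the node and wraps `hou.Error`, while each subclass only implements `_process(instance)`. A minimal illustrative subclass under that convention (the family name and parm values here are made up):

```python
from openpype.hosts.houdini.api import plugin


class CreateExampleCache(plugin.Creator):
    """Example creator using the new _process() convention."""

    label = "Example Cache"
    family = "example"
    icon = "gears"

    def __init__(self, *args, **kwargs):
        super(CreateExampleCache, self).__init__(*args, **kwargs)

        # Type of ROP node to create
        self.data.update({"node_type": "geometry"})

    def _process(self, instance):
        # `instance` is the hou.Node created by the base Creator.process().
        instance.setParms({
            "sopoutput": "$HIP/pyblish/%s.$F4.bgeo.sc" % self.name,
            "trange": 1,
        })
```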
instance.parm("use_sop_path").lock(True) + instance.parm("trange").set(1) diff --git a/openpype/hosts/houdini/plugins/create/create_composite.py b/openpype/hosts/houdini/plugins/create/create_composite.py new file mode 100644 index 00000000000..e2787080761 --- /dev/null +++ b/openpype/hosts/houdini/plugins/create/create_composite.py @@ -0,0 +1,44 @@ +from openpype.hosts.houdini.api import plugin + + +class CreateCompositeSequence(plugin.Creator): + """Composite ROP to Image Sequence""" + + label = "Composite (Image Sequence)" + family = "imagesequence" + icon = "gears" + + def __init__(self, *args, **kwargs): + super(CreateCompositeSequence, self).__init__(*args, **kwargs) + + # Remove the active, we are checking the bypass flag of the nodes + self.data.pop("active", None) + + # Type of ROP node to create + self.data.update({"node_type": "comp"}) + + def _process(self, instance): + """Creator main entry point. + + Args: + instance (hou.Node): Created Houdini instance. + + """ + parms = {"copoutput": "$HIP/pyblish/%s.$F4.exr" % self.name} + + if self.nodes: + node = self.nodes[0] + parms.update({"coppath": node.path()}) + + instance.setParms(parms) + + # Lock any parameters in this list + to_lock = ["prim_to_detail_pattern"] + for name in to_lock: + try: + parm = instance.parm(name) + parm.lock(True) + except AttributeError: + # missing lock pattern + self.log.debug( + "missing lock pattern {}".format(name)) diff --git a/openpype/hosts/houdini/plugins/create/create_pointcache.py b/openpype/hosts/houdini/plugins/create/create_pointcache.py index 6be854ac284..feb683edf60 100644 --- a/openpype/hosts/houdini/plugins/create/create_pointcache.py +++ b/openpype/hosts/houdini/plugins/create/create_pointcache.py @@ -17,21 +17,29 @@ def __init__(self, *args, **kwargs): self.data.update({"node_type": "alembic"}) - def process(self): - instance = super(CreatePointCache, self).process() - - parms = {"use_sop_path": True, # Export single node from SOP Path - "build_from_path": True, # Direct path of primitive in output - "path_attrib": "path", # Pass path attribute for output - "prim_to_detail_pattern": "cbId", - "format": 2, # Set format to Ogawa - "filename": "$HIP/pyblish/%s.abc" % self.name} + def _process(self, instance): + """Creator main entry point. + + Args: + instance (hou.Node): Created Houdini instance. 
+ + """ + parms = { + "use_sop_path": True, # Export single node from SOP Path + "build_from_path": True, # Direct path of primitive in output + "path_attrib": "path", # Pass path attribute for output + "prim_to_detail_pattern": "cbId", + "format": 2, # Set format to Ogawa + "facesets": 0, # No face sets (by default exclude them) + "filename": "$HIP/pyblish/%s.abc" % self.name, + } if self.nodes: node = self.nodes[0] parms.update({"sop_path": node.path()}) instance.setParms(parms) + instance.parm("trange").set(1) # Lock any parameters in this list to_lock = ["prim_to_detail_pattern"] diff --git a/openpype/hosts/houdini/plugins/create/create_redshift_rop.py b/openpype/hosts/houdini/plugins/create/create_redshift_rop.py new file mode 100644 index 00000000000..6949ca169b3 --- /dev/null +++ b/openpype/hosts/houdini/plugins/create/create_redshift_rop.py @@ -0,0 +1,70 @@ +import hou +from openpype.hosts.houdini.api import plugin + + +class CreateRedshiftROP(plugin.Creator): + """Redshift ROP""" + + label = "Redshift ROP" + family = "redshift_rop" + icon = "magic" + defaults = ["master"] + + def __init__(self, *args, **kwargs): + super(CreateRedshiftROP, self).__init__(*args, **kwargs) + + # Clear the family prefix from the subset + subset = self.data["subset"] + subset_no_prefix = subset[len(self.family):] + subset_no_prefix = subset_no_prefix[0].lower() + subset_no_prefix[1:] + self.data["subset"] = subset_no_prefix + + # Add chunk size attribute + self.data["chunkSize"] = 10 + + # Remove the active, we are checking the bypass flag of the nodes + self.data.pop("active", None) + + self.data.update({"node_type": "Redshift_ROP"}) + + def _process(self, instance): + """Creator main entry point. + + Args: + instance (hou.Node): Created Houdini instance. + + """ + basename = instance.name() + instance.setName(basename + "_ROP", unique_name=True) + + # Also create the linked Redshift IPR Rop + try: + ipr_rop = self.parent.createNode( + "Redshift_IPR", node_name=basename + "_IPR" + ) + except hou.OperationFailed: + raise Exception(("Cannot create Redshift node. 
Is Redshift " + "installed and enabled?")) + + # Move it to directly under the Redshift ROP + ipr_rop.setPosition(instance.position() + hou.Vector2(0, -1)) + + # Set the linked rop to the Redshift ROP + ipr_rop.parm("linked_rop").set(ipr_rop.relativePathTo(instance)) + + prefix = '${HIP}/render/${HIPNAME}/`chs("subset")`.${AOV}.$F4.exr' + parms = { + # Render frame range + "trange": 1, + # Redshift ROP settings + "RS_outputFileNamePrefix": prefix, + "RS_outputMultilayerMode": 0, # no multi-layered exr + "RS_outputBeautyAOVSuffix": "beauty", + } + instance.setParms(parms) + + # Lock some Avalon attributes + to_lock = ["family", "id"] + for name in to_lock: + parm = instance.parm(name) + parm.lock(True) diff --git a/openpype/hosts/houdini/plugins/create/create_usd.py b/openpype/hosts/houdini/plugins/create/create_usd.py new file mode 100644 index 00000000000..5bcb7840c06 --- /dev/null +++ b/openpype/hosts/houdini/plugins/create/create_usd.py @@ -0,0 +1,47 @@ +from openpype.hosts.houdini.api import plugin + + +class CreateUSD(plugin.Creator): + """Universal Scene Description""" + + label = "USD (experimental)" + family = "usd" + icon = "gears" + enabled = False + + def __init__(self, *args, **kwargs): + super(CreateUSD, self).__init__(*args, **kwargs) + + # Remove the active, we are checking the bypass flag of the nodes + self.data.pop("active", None) + + self.data.update({"node_type": "usd"}) + + def _process(self, instance): + """Creator main entry point. + + Args: + instance (hou.Node): Created Houdini instance. + + """ + parms = { + "lopoutput": "$HIP/pyblish/%s.usd" % self.name, + "enableoutputprocessor_simplerelativepaths": False, + } + + if self.nodes: + node = self.nodes[0] + parms.update({"loppath": node.path()}) + + instance.setParms(parms) + + # Lock any parameters in this list + to_lock = [ + "fileperframe", + # Lock some Avalon attributes + "family", + "id", + ] + for name in to_lock: + parm = instance.parm(name) + parm.lock(True) diff --git a/openpype/hosts/houdini/plugins/create/create_usdrender.py b/openpype/hosts/houdini/plugins/create/create_usdrender.py new file mode 100644 index 00000000000..cb3fe3f02b6 --- /dev/null +++ b/openpype/hosts/houdini/plugins/create/create_usdrender.py @@ -0,0 +1,42 @@ +import hou +from openpype.hosts.houdini.api import plugin + + +class CreateUSDRender(plugin.Creator): + """USD Render ROP in /stage""" + + label = "USD Render (experimental)" + family = "usdrender" + icon = "magic" + + def __init__(self, *args, **kwargs): + super(CreateUSDRender, self).__init__(*args, **kwargs) + + self.parent = hou.node("/stage") + + # Remove the active, we are checking the bypass flag of the nodes + self.data.pop("active", None) + + self.data.update({"node_type": "usdrender"}) + + def _process(self, instance): + """Creator main entry point. + + Args: + instance (hou.Node): Created Houdini instance. 
+ + """ + parms = { + # Render frame range + "trange": 1 + } + if self.nodes: + node = self.nodes[0] + parms.update({"loppath": node.path()}) + instance.setParms(parms) + + # Lock some Avalon attributes + to_lock = ["family", "id"] + for name in to_lock: + parm = instance.parm(name) + parm.lock(True) diff --git a/openpype/hosts/houdini/plugins/create/create_vbd_cache.py b/openpype/hosts/houdini/plugins/create/create_vbd_cache.py index f8f3bbf9c3e..242c21fc723 100644 --- a/openpype/hosts/houdini/plugins/create/create_vbd_cache.py +++ b/openpype/hosts/houdini/plugins/create/create_vbd_cache.py @@ -18,11 +18,18 @@ def __init__(self, *args, **kwargs): # Set node type to create for output self.data["node_type"] = "geometry" - def process(self): - instance = super(CreateVDBCache, self).process() - - parms = {"sopoutput": "$HIP/pyblish/%s.$F4.vdb" % self.name, - "initsim": True} + def _process(self, instance): + """Creator main entry point. + + Args: + instance (hou.Node): Created Houdini instance. + + """ + parms = { + "sopoutput": "$HIP/pyblish/%s.$F4.vdb" % self.name, + "initsim": True, + "trange": 1 + } if self.nodes: node = self.nodes[0] diff --git a/openpype/hosts/houdini/plugins/load/actions.py b/openpype/hosts/houdini/plugins/load/actions.py new file mode 100644 index 00000000000..6e9410ff58e --- /dev/null +++ b/openpype/hosts/houdini/plugins/load/actions.py @@ -0,0 +1,86 @@ +"""A module containing generic loader actions that will display in the Loader. + +""" + +from avalon import api + + +class SetFrameRangeLoader(api.Loader): + """Set Houdini frame range""" + + families = [ + "animation", + "camera", + "pointcache", + "vdbcache", + "usd", + ] + representations = ["abc", "vdb", "usd"] + + label = "Set frame range" + order = 11 + icon = "clock-o" + color = "white" + + def load(self, context, name, namespace, data): + + import hou + + version = context["version"] + version_data = version.get("data", {}) + + start = version_data.get("startFrame", None) + end = version_data.get("endFrame", None) + + if start is None or end is None: + print( + "Skipping setting frame range because start or " + "end frame data is missing.." + ) + return + + hou.playbar.setFrameRange(start, end) + hou.playbar.setPlaybackRange(start, end) + + +class SetFrameRangeWithHandlesLoader(api.Loader): + """Set Maya frame range including pre- and post-handles""" + + families = [ + "animation", + "camera", + "pointcache", + "vdbcache", + "usd", + ] + representations = ["abc", "vdb", "usd"] + + label = "Set frame range (with handles)" + order = 12 + icon = "clock-o" + color = "white" + + def load(self, context, name, namespace, data): + + import hou + + version = context["version"] + version_data = version.get("data", {}) + + start = version_data.get("startFrame", None) + end = version_data.get("endFrame", None) + + if start is None or end is None: + print( + "Skipping setting frame range because start or " + "end frame data is missing.." 
+ ) + return + + # Include handles + handles = version_data.get("handles", 0) + start -= handles + end += handles + + hou.playbar.setFrameRange(start, end) + hou.playbar.setPlaybackRange(start, end) diff --git a/openpype/hosts/houdini/plugins/load/load_alembic.py b/openpype/hosts/houdini/plugins/load/load_alembic.py index 8fc2b6a61ae..cd0f0f0d2d3 100644 --- a/openpype/hosts/houdini/plugins/load/load_alembic.py +++ b/openpype/hosts/houdini/plugins/load/load_alembic.py @@ -6,9 +6,7 @@ class AbcLoader(api.Loader): """Specific loader of Alembic for the avalon.animation family""" - families = ["model", - "animation", - "pointcache"] + families = ["model", "animation", "pointcache", "gpuCache"] label = "Load Alembic" representations = ["abc"] order = -10 @@ -68,8 +66,9 @@ def load(self, context, name=None, namespace=None, data=None): null = container.createNode("null", node_name="OUT".format(name)) null.setInput(0, normal_node) - # Set display on last node - null.setDisplayFlag(True) + # Ensure display flag is on the Alembic input node and not on the OUT + # node to optimize "debug" displaying in the viewport. + alembic.setDisplayFlag(True) # Set new position for unpack node else it gets cluttered nodes = [container, alembic, unpack, normal_node, null] @@ -78,18 +77,22 @@ def load(self, context, name=None, namespace=None, data=None): self[:] = nodes - return pipeline.containerise(node_name, - namespace, - nodes, - context, - self.__class__.__name__) + return pipeline.containerise( + node_name, + namespace, + nodes, + context, + self.__class__.__name__, + suffix="", + ) def update(self, container, representation): node = container["node"] try: - alembic_node = next(n for n in node.children() if - n.type().name() == "alembic") + alembic_node = next( + n for n in node.children() if n.type().name() == "alembic" + ) except StopIteration: self.log.error("Could not find node of type `alembic`") return diff --git a/openpype/hosts/houdini/plugins/load/load_camera.py b/openpype/hosts/houdini/plugins/load/load_camera.py index a3d67f6e5ed..83246b7d971 100644 --- a/openpype/hosts/houdini/plugins/load/load_camera.py +++ b/openpype/hosts/houdini/plugins/load/load_camera.py @@ -1,8 +1,79 @@ from avalon import api - from avalon.houdini import pipeline, lib +ARCHIVE_EXPRESSION = ('__import__("_alembic_hom_extensions")' + '.alembicGetCameraDict') + + +def transfer_non_default_values(src, dest, ignore=None): + """Copy parm from src to dest. + + Because the Alembic Archive rebuilds the entire node + hierarchy on triggering "Build Hierarchy" we want to + preserve any local tweaks made by the user on the camera + for ease of use. That could be a background image, a + resolution change or even Redshift camera parameters. + + We try to do so by finding all Parms that exist on both + source and destination node, include only those that both + are not at their default value, they must be visible, + we exclude those that have the special "alembic archive" + channel expression and ignore certain Parm types. + + """ + import hou + + src.updateParmStates() + + for parm in src.allParms(): + + if ignore and parm.name() in ignore: + continue + + # If destination parm does not exist, ignore.. 
+ dest_parm = dest.parm(parm.name()) + if not dest_parm: + continue + + # Ignore values that are currently at default + if parm.isAtDefault() and dest_parm.isAtDefault(): + continue + + if not parm.isVisible(): + # Ignore hidden parameters, assume they + # are implementation details + continue + + expression = None + try: + expression = parm.expression() + except hou.OperationFailed: + # No expression present + pass + + if expression is not None and ARCHIVE_EXPRESSION in expression: + # Assume it's part of the automated connections that the + # Alembic Archive makes on loading of the camera and thus we do + # not want to transfer the expression + continue + + # Ignore folders, separators, etc. + ignore_types = { + hou.parmTemplateType.Toggle, + hou.parmTemplateType.Menu, + hou.parmTemplateType.Button, + hou.parmTemplateType.FolderSet, + hou.parmTemplateType.Separator, + hou.parmTemplateType.Label, + } + if parm.parmTemplate().type() in ignore_types: + continue + + print("Preserving attribute: %s" % parm.name()) + dest_parm.setFromParm(parm) + + class CameraLoader(api.Loader): """Specific loader of Alembic for the avalon.animation family""" @@ -30,7 +101,7 @@ def load(self, context, name=None, namespace=None, data=None): counter = 1 asset_name = context["asset"]["name"] - namespace = namespace if namespace else asset_name + namespace = namespace or asset_name formatted = "{}_{}".format(namespace, name) if namespace else name node_name = "{0}_{1:03d}".format(formatted, counter) @@ -59,7 +130,8 @@ def load(self, context, name=None, namespace=None, data=None): namespace, nodes, context, - self.__class__.__name__) + self.__class__.__name__, + suffix="") def update(self, container, representation): @@ -73,14 +145,40 @@ def update(self, container, representation): node.setParms({"fileName": file_path, "representation": str(representation["_id"])}) + # Store the cam temporarily next to the Alembic Archive + # so that we can preserve parm values the user set on it + # after build hierarchy was triggered. 
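A condensed version of the filter that `transfer_non_default_values()` applies: a parm is transferred only if it is visible, not at its default on both nodes, and not driven by the Alembic Archive camera expression. This sketch isolates that decision (names are illustrative):

```python
import hou

def should_transfer(parm, dest_parm, archive_expression):
    """Return whether `parm` should be copied onto `dest_parm`."""
    if parm.isAtDefault() and dest_parm.isAtDefault():
        return False
    if not parm.isVisible():
        # Hidden parameters are assumed to be implementation details.
        return False
    try:
        if archive_expression in parm.expression():
            # Part of the automated Alembic Archive connections; skip.
            return False
    except hou.OperationFailed:
        pass  # no expression on this parm
    return True
```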
+ old_camera = self._get_camera(node) + temp_camera = old_camera.copyTo(node.parent()) + # Rebuild node.parm("buildHierarchy").pressButton() + # Apply values to the new camera + new_camera = self._get_camera(node) + transfer_non_default_values(temp_camera, + new_camera, + # The hidden uniform scale attribute + # gets a default connection to + # "icon_scale" just skip that completely + ignore={"scale"}) + + temp_camera.destroy() + def remove(self, container): node = container["node"] node.destroy() + def _get_camera(self, node): + import hou + cameras = node.recursiveGlob("*", + filter=hou.nodeTypeFilter.ObjCamera, + include_subnets=False) + + assert len(cameras) == 1, "Camera instance must have only one camera" + return cameras[0] + def create_and_connect(self, node, node_type, name=None): """Create a node within a node which and connect it to the input @@ -93,27 +191,10 @@ def create_and_connect(self, node, node_type, name=None): hou.Node """ + if name: + new_node = node.createNode(node_type, node_name=name) + else: + new_node = node.createNode(node_type) - import hou - - try: - - if name: - new_node = node.createNode(node_type, node_name=name) - else: - new_node = node.createNode(node_type) - - new_node.moveToGoodPosition() - - try: - input_node = next(i for i in node.allItems() if - isinstance(i, hou.SubnetIndirectInput)) - except StopIteration: - return new_node - - new_node.setInput(0, input_node) - return new_node - - except Exception: - raise RuntimeError("Could not created node type `%s` in node `%s`" - % (node_type, node)) + new_node.moveToGoodPosition() + return new_node diff --git a/openpype/hosts/houdini/plugins/load/load_image.py b/openpype/hosts/houdini/plugins/load/load_image.py new file mode 100644 index 00000000000..4ff2777d77c --- /dev/null +++ b/openpype/hosts/houdini/plugins/load/load_image.py @@ -0,0 +1,123 @@ +import os + +from avalon import api +from avalon.houdini import pipeline, lib + +import hou + + +def get_image_avalon_container(): + """The COP2 files must be in a COP2 network. + + So we maintain a single entry point within AVALON_CONTAINERS, + just for ease of use. + + """ + + path = pipeline.AVALON_CONTAINERS + avalon_container = hou.node(path) + if not avalon_container: + # Let's create avalon container secretly + # but make sure the pipeline still is built the + # way we anticipate it was built, asserting it. 
+ assert path == "/obj/AVALON_CONTAINERS" + + parent = hou.node("/obj") + avalon_container = parent.createNode( + "subnet", node_name="AVALON_CONTAINERS" + ) + + image_container = hou.node(path + "/IMAGES") + if not image_container: + image_container = avalon_container.createNode( + "cop2net", node_name="IMAGES" + ) + image_container.moveToGoodPosition() + + return image_container + + +class ImageLoader(api.Loader): + """Specific loader of Alembic for the avalon.animation family""" + + families = ["colorbleed.imagesequence"] + label = "Load Image (COP2)" + representations = ["*"] + order = -10 + + icon = "code-fork" + color = "orange" + + def load(self, context, name=None, namespace=None, data=None): + + # Format file name, Houdini only wants forward slashes + file_path = os.path.normpath(self.fname) + file_path = file_path.replace("\\", "/") + file_path = self._get_file_sequence(file_path) + + # Get the root node + parent = get_image_avalon_container() + + # Define node name + namespace = namespace if namespace else context["asset"]["name"] + node_name = "{}_{}".format(namespace, name) if namespace else name + + node = parent.createNode("file", node_name=node_name) + node.moveToGoodPosition() + + node.setParms({"filename1": file_path}) + + # Imprint it manually + data = { + "schema": "avalon-core:container-2.0", + "id": pipeline.AVALON_CONTAINER_ID, + "name": node_name, + "namespace": namespace, + "loader": str(self.__class__.__name__), + "representation": str(context["representation"]["_id"]), + } + + # todo: add folder="Avalon" + lib.imprint(node, data) + + return node + + def update(self, container, representation): + + node = container["node"] + + # Update the file path + file_path = api.get_representation_path(representation) + file_path = file_path.replace("\\", "/") + file_path = self._get_file_sequence(file_path) + + # Update attributes + node.setParms( + { + "filename1": file_path, + "representation": str(representation["_id"]), + } + ) + + def remove(self, container): + + node = container["node"] + + # Let's clean up the IMAGES COP2 network + # if it ends up being empty and we deleted + # the last file node. Store the parent + # before we delete the node. 
+ parent = node.parent() + + node.destroy() + + if not parent.children(): + parent.destroy() + + def _get_file_sequence(self, root): + files = sorted(os.listdir(root)) + + first_fname = files[0] + prefix, padding, suffix = first_fname.rsplit(".", 2) + fname = ".".join([prefix, "$F{}".format(len(padding)), suffix]) + return os.path.join(root, fname).replace("\\", "/") diff --git a/openpype/hosts/houdini/plugins/load/load_usd_layer.py b/openpype/hosts/houdini/plugins/load/load_usd_layer.py new file mode 100644 index 00000000000..74831014094 --- /dev/null +++ b/openpype/hosts/houdini/plugins/load/load_usd_layer.py @@ -0,0 +1,80 @@ +from avalon import api +from avalon.houdini import pipeline, lib + + +class USDSublayerLoader(api.Loader): + """Sublayer USD file in Solaris""" + + families = [ + "colorbleed.usd", + "colorbleed.pointcache", + "colorbleed.animation", + "colorbleed.camera", + "usdCamera", + ] + label = "Sublayer USD" + representations = ["usd", "usda", "usdlc", "usdnc", "abc"] + order = 1 + + icon = "code-fork" + color = "orange" + + def load(self, context, name=None, namespace=None, data=None): + + import os + import hou + + # Format file name, Houdini only wants forward slashes + file_path = os.path.normpath(self.fname) + file_path = file_path.replace("\\", "/") + + # Get the root node + stage = hou.node("/stage") + + # Define node name + namespace = namespace if namespace else context["asset"]["name"] + node_name = "{}_{}".format(namespace, name) if namespace else name + + # Create USD reference + container = stage.createNode("sublayer", node_name=node_name) + container.setParms({"filepath1": file_path}) + container.moveToGoodPosition() + + # Imprint it manually + data = { + "schema": "avalon-core:container-2.0", + "id": pipeline.AVALON_CONTAINER_ID, + "name": node_name, + "namespace": namespace, + "loader": str(self.__class__.__name__), + "representation": str(context["representation"]["_id"]), + } + + # todo: add folder="Avalon" + lib.imprint(container, data) + + return container + + def update(self, container, representation): + + node = container["node"] + + # Update the file path + file_path = api.get_representation_path(representation) + file_path = file_path.replace("\\", "/") + + # Update attributes + node.setParms( + { + "filepath1": file_path, + "representation": str(representation["_id"]), + } + ) + + # Reload files + node.parm("reload").pressButton() + + def remove(self, container): + + node = container["node"] + node.destroy() diff --git a/openpype/hosts/houdini/plugins/load/load_usd_reference.py b/openpype/hosts/houdini/plugins/load/load_usd_reference.py new file mode 100644 index 00000000000..cab3cb52696 --- /dev/null +++ b/openpype/hosts/houdini/plugins/load/load_usd_reference.py @@ -0,0 +1,80 @@ +from avalon import api +from avalon.houdini import pipeline, lib + + +class USDReferenceLoader(api.Loader): + """Reference USD file in Solaris""" + + families = [ + "colorbleed.usd", + "colorbleed.pointcache", + "colorbleed.animation", + "colorbleed.camera", + "usdCamera", + ] + label = "Reference USD" + representations = ["usd", "usda", "usdlc", "usdnc", "abc"] + order = -8 + + icon = "code-fork" + color = "orange" + + def load(self, context, name=None, namespace=None, data=None): + + import os + import hou + + # Format file name, Houdini only wants forward slashes + file_path = os.path.normpath(self.fname) + file_path = file_path.replace("\\", "/") + + # Get the root node + stage = hou.node("/stage") + + # Define node name + namespace = namespace if namespace else 
context["asset"]["name"] + node_name = "{}_{}".format(namespace, name) if namespace else name + + # Create USD reference + container = stage.createNode("reference", node_name=node_name) + container.setParms({"filepath1": file_path}) + container.moveToGoodPosition() + + # Imprint it manually + data = { + "schema": "avalon-core:container-2.0", + "id": pipeline.AVALON_CONTAINER_ID, + "name": node_name, + "namespace": namespace, + "loader": str(self.__class__.__name__), + "representation": str(context["representation"]["_id"]), + } + + # todo: add folder="Avalon" + lib.imprint(container, data) + + return container + + def update(self, container, representation): + + node = container["node"] + + # Update the file path + file_path = api.get_representation_path(representation) + file_path = file_path.replace("\\", "/") + + # Update attributes + node.setParms( + { + "filepath1": file_path, + "representation": str(representation["_id"]), + } + ) + + # Reload files + node.parm("reload").pressButton() + + def remove(self, container): + + node = container["node"] + node.destroy() diff --git a/openpype/hosts/houdini/plugins/load/load_vdb.py b/openpype/hosts/houdini/plugins/load/load_vdb.py new file mode 100644 index 00000000000..5f7e400b394 --- /dev/null +++ b/openpype/hosts/houdini/plugins/load/load_vdb.py @@ -0,0 +1,110 @@ +import os +import re +from avalon import api + +from avalon.houdini import pipeline + + +class VdbLoader(api.Loader): + """Specific loader of Alembic for the avalon.animation family""" + + families = ["vdbcache"] + label = "Load VDB" + representations = ["vdb"] + order = -10 + icon = "code-fork" + color = "orange" + + def load(self, context, name=None, namespace=None, data=None): + + import hou + + # Get the root node + obj = hou.node("/obj") + + # Define node name + namespace = namespace if namespace else context["asset"]["name"] + node_name = "{}_{}".format(namespace, name) if namespace else name + + # Create a new geo node + container = obj.createNode("geo", node_name=node_name) + + # Remove the file node, it only loads static meshes + # Houdini 17 has removed the file node from the geo node + file_node = container.node("file1") + if file_node: + file_node.destroy() + + # Explicitly create a file node + file_node = container.createNode("file", node_name=node_name) + file_node.setParms({"file": self.format_path(self.fname)}) + + # Set display on last node + file_node.setDisplayFlag(True) + + nodes = [container, file_node] + self[:] = nodes + + return pipeline.containerise( + node_name, + namespace, + nodes, + context, + self.__class__.__name__, + suffix="", + ) + + def format_path(self, path): + """Format file path correctly for single vdb or vdb sequence.""" + if not os.path.exists(path): + raise RuntimeError("Path does not exist: %s" % path) + + # The path is either a single file or sequence in a folder. 
+        is_single_file = os.path.isfile(path)
+        if is_single_file:
+            filename = path
+        else:
+            # The path points to the publish .vdb sequence folder so we
+            # find the first file in there that ends with .vdb
+            files = sorted(os.listdir(path))
+            first = next((x for x in files if x.endswith(".vdb")), None)
+            if first is None:
+                raise RuntimeError(
+                    "Couldn't find first .vdb file of "
+                    "sequence in: %s" % path
+                )
+
+            # Set .vdb to $F.vdb
+            first = re.sub(r"\.(\d+)\.vdb$", ".$F.vdb", first)
+
+            filename = os.path.join(path, first)
+
+        filename = os.path.normpath(filename)
+        filename = filename.replace("\\", "/")
+
+        return filename
+
+    def update(self, container, representation):
+
+        node = container["node"]
+        try:
+            file_node = next(
+                n for n in node.children() if n.type().name() == "file"
+            )
+        except StopIteration:
+            self.log.error("Could not find node of type `file`")
+            return
+
+        # Update the file path
+        file_path = api.get_representation_path(representation)
+        file_path = self.format_path(file_path)
+
+        file_node.setParms({"file": file_path})
+
+        # Update attribute
+        node.setParms({"representation": str(representation["_id"])})
+
+    def remove(self, container):
+
+        node = container["node"]
+        node.destroy()
diff --git a/openpype/hosts/houdini/plugins/load/show_usdview.py b/openpype/hosts/houdini/plugins/load/show_usdview.py
new file mode 100644
index 00000000000..f23974094ec
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/load/show_usdview.py
@@ -0,0 +1,43 @@
+from avalon import api
+
+
+class ShowInUsdview(api.Loader):
+    """Open USD file in usdview"""
+
+    families = ["colorbleed.usd"]
+    label = "Show in usdview"
+    representations = ["usd", "usda", "usdlc", "usdnc"]
+    order = 10
+
+    icon = "code-fork"
+    color = "white"
+
+    def load(self, context, name=None, namespace=None, data=None):
+
+        import os
+        import subprocess
+
+        import avalon.lib as lib
+
+        usdview = lib.which("usdview")
+
+        filepath = os.path.normpath(self.fname)
+        filepath = filepath.replace("\\", "/")
+
+        if not os.path.exists(filepath):
+            self.log.error("File does not exist: %s" % filepath)
+            return
+
+        self.log.info("Start houdini variant of usdview...")
+
+        # For now avoid some pipeline environment variables that initialize
+        # Avalon in Houdini as it is redundant for usdview and slows boot time
+        env = os.environ.copy()
+        env.pop("PYTHONPATH", None)
+        env.pop("HOUDINI_SCRIPT_PATH", None)
+        env.pop("HOUDINI_MENU_PATH", None)
+
+        # Force string to avoid unicode issues
+        env = {str(key): str(value) for key, value in env.items()}
+
+        subprocess.Popen([usdview, filepath, "--renderer", "GL"], env=env)
diff --git a/openpype/hosts/houdini/plugins/publish/collect_active_state.py b/openpype/hosts/houdini/plugins/publish/collect_active_state.py
new file mode 100644
index 00000000000..1193f0cd19f
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/collect_active_state.py
@@ -0,0 +1,38 @@
+import pyblish.api
+
+
+class CollectInstanceActiveState(pyblish.api.InstancePlugin):
+    """Collect default active state for instance from its node bypass state.
+
+    This is done at the very end of the CollectorOrder so that any required
+    collecting of data iterating over instances (with InstancePlugin) will
+    actually collect the data for when the user enables the state in the UI.
+    Otherwise potentially required data might have been skipped during
+    collection.
diff --git a/openpype/hosts/houdini/plugins/publish/collect_active_state.py b/openpype/hosts/houdini/plugins/publish/collect_active_state.py
new file mode 100644
index 00000000000..1193f0cd19f
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/collect_active_state.py
@@ -0,0 +1,38 @@
+import pyblish.api
+
+
+class CollectInstanceActiveState(pyblish.api.InstancePlugin):
+    """Collect default active state for instance from its node bypass state.
+
+    This is done at the very end of the CollectorOrder so that any required
+    data collection that iterates over instances (with InstancePlugin) will
+    actually collect the data for when the user enables the state in the UI.
+    Otherwise potentially required data might not have been collected.
+
+    """
+
+    order = pyblish.api.CollectorOrder + 0.299
+    families = ["*"]
+    hosts = ["houdini"]
+    label = "Instance Active State"
+
+    def process(self, instance):
+
+        # Must have node to check for bypass state
+        if len(instance) == 0:
+            return
+
+        # Check bypass state and reverse
+        node = instance[0]
+        active = not node.isBypassed()
+
+        # Set instance active state
+        instance.data.update(
+            {
+                "active": active,
+                # temporarily translation of `active` to `publish` till
+                # issue has been resolved:
+                # https://github.com/pyblish/pyblish-base/issues/307
+                "publish": active,
+            }
+        )
diff --git a/openpype/hosts/houdini/plugins/publish/collect_current_file.py b/openpype/hosts/houdini/plugins/publish/collect_current_file.py
index b35a9438331..c0b987ebbcc 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_current_file.py
+++ b/openpype/hosts/houdini/plugins/publish/collect_current_file.py
@@ -9,7 +9,7 @@ class CollectHoudiniCurrentFile(pyblish.api.ContextPlugin):
 
     order = pyblish.api.CollectorOrder - 0.5
     label = "Houdini Current File"
-    hosts = ['houdini']
+    hosts = ["houdini"]
 
     def process(self, context):
         """Inject the current working file"""
@@ -27,8 +27,10 @@ def process(self, context):
         # could have existed already. We will allow it if the file exists,
         # but show a warning for this edge case to clarify the potential
         # false positive.
-        self.log.warning("Current file is 'untitled.hip' and we are "
-                         "unable to detect whether the current scene is "
-                         "saved correctly.")
+        self.log.warning(
+            "Current file is 'untitled.hip' and we are "
+            "unable to detect whether the current scene is "
+            "saved correctly."
+        )
 
-        context.data['currentFile'] = filepath
+        context.data["currentFile"] = filepath
diff --git a/openpype/hosts/houdini/plugins/publish/collect_frames.py b/openpype/hosts/houdini/plugins/publish/collect_frames.py
index 1d664aeaeb0..ef77c3230bc 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_frames.py
+++ b/openpype/hosts/houdini/plugins/publish/collect_frames.py
@@ -6,11 +6,11 @@
 
 
 class CollectFrames(pyblish.api.InstancePlugin):
-    """Collect all frames which would be a resukl"""
+    """Collect all frames which would be saved from the ROP nodes"""
 
     order = pyblish.api.CollectorOrder
     label = "Collect Frames"
-    families = ["vdbcache"]
+    families = ["vdbcache", "imagesequence"]
 
     def process(self, instance):
 
@@ -19,10 +19,17 @@ def process(self, instance):
         output_parm = lib.get_output_parameter(ropnode)
         output = output_parm.eval()
 
+        _, ext = os.path.splitext(output)
         file_name = os.path.basename(output)
-        match = re.match("(\w+)\.(\d+)\.vdb", file_name)
         result = file_name
 
+        # Get the filename pattern match from the output
+        # path so we can compute all frames that would
+        # come out from rendering the ROP node if there
+        # is a frame pattern in the name
+        pattern = r"\w+\.(\d+)" + re.escape(ext)
+        match = re.match(pattern, file_name)
+
         start_frame = instance.data.get("frameStart", None)
         end_frame = instance.data.get("frameEnd", None)
 
@@ -31,10 +38,12 @@ def process(self, instance):
         # Check if frames are bigger than 1 (file collection)
         # override the result
         if end_frame - start_frame > 1:
-            result = self.create_file_list(match,
-                                           int(start_frame),
-                                           int(end_frame))
+            result = self.create_file_list(
+                match, int(start_frame), int(end_frame)
+            )
 
+        # todo: `frames` currently conflicts with "explicit frames" for
+        # a custom frame list. So this should be refactored.
         instance.data.update({"frames": result})
 
     def create_file_list(self, match, start_frame, end_frame):
@@ -50,17 +59,24 @@
 
         """
 
+        # Get the padding length
+        frame = match.group(1)
+        padding = len(frame)
+
+        # Get the parts of the filename surrounding the frame number
+        # so we can put our own frame numbers in.
+        span = match.span(1)
+        prefix = match.string[: span[0]]
+        suffix = match.string[span[1]:]
+
+        # Generate filenames for all frames
         result = []
+        for i in range(start_frame, end_frame + 1):
-            padding = len(match.group(2))
-            name = match.group(1)
-            padding_format = "{number:0{width}d}"
+            # Format frame number by the padding amount
+            str_frame = "{number:0{width}d}".format(number=i, width=padding)
-            count = start_frame
-            while count <= end_frame:
-                str_count = padding_format.format(number=count, width=padding)
-                file_name = "{}.{}.vdb".format(name, str_count)
+            file_name = prefix + str_frame + suffix
             result.append(file_name)
-            count += 1
 
         return result
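Aside: the span() trick in create_file_list() makes the frame expansion extension-agnostic. A runnable sketch of the same computation, with hypothetical values:

import re

match = re.match(r"\w+\.(\d+)" + re.escape(".vdb"), "smoke.0098.vdb")
frame = match.group(1)                  # "0098"
padding = len(frame)                    # 4
start, end = match.span(1)              # slice covering the frame digits
prefix, suffix = match.string[:start], match.string[end:]

files = [
    prefix + "{0:0{1}d}".format(i, padding) + suffix
    for i in range(98, 101)
]
print(files)  # ['smoke.0098.vdb', 'smoke.0099.vdb', 'smoke.0100.vdb']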
diff --git a/openpype/hosts/houdini/plugins/publish/collect_inputs.py b/openpype/hosts/houdini/plugins/publish/collect_inputs.py
new file mode 100644
index 00000000000..39e2737e8c1
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/collect_inputs.py
@@ -0,0 +1,120 @@
+import avalon.api as api
+import pyblish.api
+
+
+def collect_input_containers(nodes):
+    """Collect containers that contain any of the nodes in `nodes`.
+
+    This will return any loaded Avalon container that contains at least one of
+    the nodes. As such, the Avalon container is an input for it. Or in short:
+    some of the nodes are members of that container.
+
+    Returns:
+        list: Input avalon containers
+
+    """
+
+    # Use a set for fast membership lookups of the nodes
+    lookup = frozenset(nodes)
+
+    containers = []
+    host = api.registered_host()
+    for container in host.ls():
+
+        node = container["node"]
+
+        # Usually the loaded containers don't have any complex references
+        # and the contained children should be all we need. So we disregard
+        # checking for .references() on the nodes.
+        members = set(node.allSubChildren())
+        members.add(node)  # include the node itself
+
+        # If there's an intersection
+        if not lookup.isdisjoint(members):
+            containers.append(container)
+
+    return containers
+
+
+def iter_upstream(node):
+    """Yields all upstream inputs for the current node.
+
+    This includes all `node.inputAncestors()` but also traverses through all
+    `node.references()` for the node itself and for any of the upstream nodes.
+    This method has no max-depth and will collect all upstream inputs.
+
+    Yields:
+        hou.Node: The upstream nodes, including references.
+
+    """
+
+    upstream = node.inputAncestors(
+        include_ref_inputs=True, follow_subnets=True
+    )
+
+    # Initialize process queue with the node's ancestors itself
+    queue = list(upstream)
+    collected = set(upstream)
+
+    # Traverse upstream references for all nodes and yield them as we
+    # process the queue.
+    while queue:
+        upstream_node = queue.pop()
+        yield upstream_node
+
+        # Find its references that are not collected yet.
+        references = upstream_node.references()
+        references = [n for n in references if n not in collected]
+
+        queue.extend(references)
+        collected.update(references)
+
+        # Include the references' ancestors that have not been collected yet.
+ for reference in references: + ancestors = reference.inputAncestors( + include_ref_inputs=True, follow_subnets=True + ) + ancestors = [n for n in ancestors if n not in collected] + + queue.extend(ancestors) + collected.update(ancestors) + + +class CollectUpstreamInputs(pyblish.api.InstancePlugin): + """Collect source input containers used for this publish. + + This will include `inputs` data of which loaded publishes were used in the + generation of this publish. This leaves an upstream trace to what was used + as input. + + """ + + label = "Collect Inputs" + order = pyblish.api.CollectorOrder + 0.4 + hosts = ["houdini"] + + def process(self, instance): + # We can't get the "inputAncestors" directly from the ROP + # node, so we find the related output node (set in SOP/COP path) + # and include that together with its ancestors + output = instance.data.get("output_node") + + if output is None: + # If no valid output node is set then ignore it as validation + # will be checking those cases. + self.log.debug( + "No output node found, skipping " "collecting of inputs.." + ) + return + + # Collect all upstream parents + nodes = list(iter_upstream(output)) + nodes.append(output) + + # Collect containers for the given set of nodes + containers = collect_input_containers(nodes) + + inputs = [c["representation"] for c in containers] + instance.data["inputs"] = inputs + + self.log.info("Collected inputs: %s" % inputs) diff --git a/openpype/hosts/houdini/plugins/publish/collect_instances.py b/openpype/hosts/houdini/plugins/publish/collect_instances.py index 2e294face22..1b365267838 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_instances.py +++ b/openpype/hosts/houdini/plugins/publish/collect_instances.py @@ -31,6 +31,13 @@ class CollectInstances(pyblish.api.ContextPlugin): def process(self, context): nodes = hou.node("/out").children() + + # Include instances in USD stage only when it exists so it + # remains backwards compatible with version before houdini 18 + stage = hou.node("/stage") + if stage: + nodes += stage.recursiveGlob("*", filter=hou.nodeTypeFilter.Rop) + for node in nodes: if not node.parm("id"): @@ -55,6 +62,8 @@ def process(self, context): # Create nice name if the instance has a frame range. label = data.get("name", node.name()) + label += " (%s)" % data["asset"] # include asset in name + if "frameStart" in data and "frameEnd" in data: frames = "[{frameStart} - {frameEnd}]".format(**data) label = "{} {}".format(label, frames) diff --git a/openpype/hosts/houdini/plugins/publish/collect_instances_usd_layered.py b/openpype/hosts/houdini/plugins/publish/collect_instances_usd_layered.py new file mode 100644 index 00000000000..7df5e8b6f24 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/collect_instances_usd_layered.py @@ -0,0 +1,152 @@ +import hou +import pyblish.api +from avalon.houdini import lib +import openpype.hosts.houdini.api.usd as hou_usdlib +import openpype.lib.usdlib as usdlib + + +class CollectInstancesUsdLayered(pyblish.api.ContextPlugin): + """Collect Instances from a ROP Network and its configured layer paths. + + The output nodes of the ROP node will only be published when *any* of the + layers remain set to 'publish' by the user. + + This works differently from most of our Avalon instances in the pipeline. + As opposed to storing `pyblish.avalon.instance` as id on the node we store + `pyblish.avalon.usdlayered`. + + Additionally this instance has no need for storing family, asset, subset + or name on the nodes. 
Instead all information is retrieved solely from + the output filepath, which is an Avalon URI: + avalon://{asset}/{subset}.{representation} + + Each final ROP node is considered a dependency for any of the Configured + Save Path layers it sets along the way. As such, the instances shown in + the Pyblish UI are solely the configured layers. The encapsulating usd + files are generated whenever *any* of the dependencies is published. + + These dependency instances are stored in: + instance.data["publishDependencies"] + + """ + + order = pyblish.api.CollectorOrder - 0.01 + label = "Collect Instances (USD Configured Layers)" + hosts = ["houdini"] + + def process(self, context): + + stage = hou.node("/stage") + if not stage: + # Likely Houdini version <18 + return + + nodes = stage.recursiveGlob("*", filter=hou.nodeTypeFilter.Rop) + for node in nodes: + + if not node.parm("id"): + continue + + if node.evalParm("id") != "pyblish.avalon.usdlayered": + continue + + has_family = node.evalParm("family") + assert has_family, "'%s' is missing 'family'" % node.name() + + self.process_node(node, context) + + def sort_by_family(instance): + """Sort by family""" + return instance.data.get("families", instance.data.get("family")) + + # Sort/grouped by family (preserving local index) + context[:] = sorted(context, key=sort_by_family) + + return context + + def process_node(self, node, context): + + # Allow a single ROP node or a full ROP network of USD ROP nodes + # to be processed as a single entry that should "live together" on + # a publish. + if node.type().name() == "ropnet": + # All rop nodes inside ROP Network + ropnodes = node.recursiveGlob("*", filter=hou.nodeTypeFilter.Rop) + else: + # A single node + ropnodes = [node] + + data = lib.read(node) + + # Don't use the explicit "colorbleed.usd.layered" family for publishing + # instead use the "colorbleed.usd" family to integrate. + data["publishFamilies"] = ["colorbleed.usd"] + + # For now group ALL of them into USD Layer subset group + # Allow this subset to be grouped into a USD Layer on creation + data["subsetGroup"] = "USD Layer" + + instances = list() + dependencies = [] + for ropnode in ropnodes: + + # Create a dependency instance per ROP Node. + lopoutput = ropnode.evalParm("lopoutput") + dependency_save_data = self.get_save_data(lopoutput) + dependency = context.create_instance(dependency_save_data["name"]) + dependency.append(ropnode) + dependency.data.update(data) + dependency.data.update(dependency_save_data) + dependency.data["family"] = "colorbleed.usd.dependency" + dependency.data["optional"] = False + dependencies.append(dependency) + + # Hide the dependency instance from the context + context.pop() + + # Get all configured layers for this USD ROP node + # and create a Pyblish instance for each one + layers = hou_usdlib.get_configured_save_layers(ropnode) + for layer in layers: + save_path = hou_usdlib.get_layer_save_path(layer) + save_data = self.get_save_data(save_path) + if not save_data: + continue + self.log.info(save_path) + + instance = context.create_instance(save_data["name"]) + instance[:] = [node] + + # Set the instance data + instance.data.update(data) + instance.data.update(save_data) + instance.data["usdLayer"] = layer + + # Don't allow the Pyblish `instanceToggled` we have installed + # to set this node to bypass. 
+ instance.data["_allowToggleBypass"] = False + + instances.append(instance) + + # Store the collected ROP node dependencies + self.log.debug("Collected dependencies: %s" % (dependencies,)) + for instance in instances: + instance.data["publishDependencies"] = dependencies + + def get_save_data(self, save_path): + + # Resolve Avalon URI + uri_data = usdlib.parse_avalon_uri(save_path) + if not uri_data: + self.log.warning("Non Avalon URI Layer Path: %s" % save_path) + return {} + + # Collect asset + subset from URI + name = "{subset} ({asset})".format(**uri_data) + fname = "{asset}_{subset}.{ext}".format(**uri_data) + + data = dict(uri_data) + data["usdSavePath"] = save_path + data["usdFilename"] = fname + data["name"] = name + return data diff --git a/openpype/hosts/houdini/plugins/publish/collect_output_node.py b/openpype/hosts/houdini/plugins/publish/collect_output_node.py index c0587d5336a..938ee81cc3f 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_output_node.py +++ b/openpype/hosts/houdini/plugins/publish/collect_output_node.py @@ -2,13 +2,20 @@ class CollectOutputSOPPath(pyblish.api.InstancePlugin): - """Collect the out node's SOP Path value.""" + """Collect the out node's SOP/COP Path value.""" order = pyblish.api.CollectorOrder - families = ["pointcache", - "vdbcache"] + families = [ + "pointcache", + "camera", + "vdbcache", + "imagesequence", + "usd", + "usdrender", + ] + hosts = ["houdini"] - label = "Collect Output SOP Path" + label = "Collect Output Node Path" def process(self, instance): @@ -17,12 +24,44 @@ def process(self, instance): node = instance[0] # Get sop path - if node.type().name() == "alembic": - sop_path_parm = "sop_path" + node_type = node.type().name() + if node_type == "geometry": + out_node = node.parm("soppath").evalAsNode() + + elif node_type == "alembic": + + # Alembic can switch between using SOP Path or object + if node.parm("use_sop_path").eval(): + out_node = node.parm("sop_path").evalAsNode() + else: + root = node.parm("root").eval() + objects = node.parm("objects").eval() + path = root + "/" + objects + out_node = hou.node(path) + + elif node_type == "comp": + out_node = node.parm("coppath").evalAsNode() + + elif node_type == "usd" or node_type == "usdrender": + out_node = node.parm("loppath").evalAsNode() + + elif node_type == "usd_rop" or node_type == "usdrender_rop": + # Inside Solaris e.g. /stage (not in ROP context) + # When incoming connection is present it takes it directly + inputs = node.inputs() + if inputs: + out_node = inputs[0] + else: + out_node = node.parm("loppath").evalAsNode() + else: - sop_path_parm = "soppath" + raise ValueError( + "ROP node type '%s' is" " not supported." 
% node_type
+            )
 
-        sop_path = node.parm(sop_path_parm).eval()
-        out_node = hou.node(sop_path)
+        if not out_node:
+            self.log.warning("No output node collected.")
+            return
 
+        self.log.debug("Output node: %s" % out_node.path())
         instance.data["output_node"] = out_node
diff --git a/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py b/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py
new file mode 100644
index 00000000000..72b554b567e
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py
@@ -0,0 +1,135 @@
+import re
+import os
+
+import hou
+import pyblish.api
+
+
+def get_top_referenced_parm(parm):
+
+    processed = set()  # disallow infinite loop
+    while True:
+        if parm.path() in processed:
+            raise RuntimeError("Parameter references result in cycle.")
+
+        processed.add(parm.path())
+
+        ref = parm.getReferencedParm()
+        if ref.path() == parm.path():
+            # It returns itself when it doesn't reference
+            # another parameter
+            return ref
+        else:
+            parm = ref
+
+
+def evalParmNoFrame(node, parm, pad_character="#"):
+
+    parameter = node.parm(parm)
+    assert parameter, "Parameter does not exist: %s.%s" % (node, parm)
+
+    # If the parameter has a parameter reference, then get that
+    # parameter instead as otherwise `unexpandedString()` fails.
+    parameter = get_top_referenced_parm(parameter)
+
+    # Substitute out the frame numbering with padded characters
+    try:
+        raw = parameter.unexpandedString()
+    except hou.Error as exc:
+        print("Failed: %s" % parameter)
+        raise RuntimeError(exc)
+
+    def replace(match):
+        padding = 1
+        n = match.group(2)
+        if n and int(n):
+            padding = int(n)
+        return pad_character * padding
+
+    expression = re.sub(r"(\$F([0-9]*))", replace, raw)
+
+    with hou.ScriptEvalContext(parameter):
+        return hou.expandStringAtFrame(expression, 0)
+
+
+class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
+    """Collect Redshift ROP Render Products
+
+    Collects the instance.data["files"] for the render products.
+ + Provides: + instance -> files + + """ + + label = "Redshift ROP Render Products" + order = pyblish.api.CollectorOrder + 0.4 + hosts = ["houdini"] + families = ["redshift_rop"] + + def process(self, instance): + + rop = instance[0] + + # Collect chunkSize + chunk_size_parm = rop.parm("chunkSize") + if chunk_size_parm: + chunk_size = int(chunk_size_parm.eval()) + instance.data["chunkSize"] = chunk_size + self.log.debug("Chunk Size: %s" % chunk_size) + + default_prefix = evalParmNoFrame(rop, "RS_outputFileNamePrefix") + beauty_suffix = rop.evalParm("RS_outputBeautyAOVSuffix") + render_products = [] + + # Default beauty AOV + beauty_product = self.get_render_product_name( + prefix=default_prefix, suffix=beauty_suffix + ) + render_products.append(beauty_product) + + num_aovs = rop.evalParm("RS_aov") + for index in range(num_aovs): + i = index + 1 + + # Skip disabled AOVs + if not rop.evalParm("RS_aovEnable_%s" % i): + continue + + aov_suffix = rop.evalParm("RS_aovSuffix_%s" % i) + aov_prefix = evalParmNoFrame(rop, "RS_aovCustomPrefix_%s" % i) + if not aov_prefix: + aov_prefix = default_prefix + + aov_product = self.get_render_product_name(aov_prefix, aov_suffix) + render_products.append(aov_product) + + for product in render_products: + self.log.debug("Found render product: %s" % product) + + filenames = list(render_products) + instance.data["files"] = filenames + + def get_render_product_name(self, prefix, suffix): + """Return the output filename using the AOV prefix and suffix""" + + # When AOV is explicitly defined in prefix we just swap it out + # directly with the AOV suffix to embed it. + # Note: ${AOV} seems to be evaluated in the parameter as %AOV% + has_aov_in_prefix = "%AOV%" in prefix + if has_aov_in_prefix: + # It seems that when some special separator characters are present + # before the %AOV% token that Redshift will secretly remove it if + # there is no suffix for the current product, for example: + # foo_%AOV% -> foo.exr + pattern = "%AOV%" if suffix else "[._-]?%AOV%" + product_name = re.sub(pattern, suffix, prefix, flags=re.IGNORECASE) + else: + if suffix: + # Add ".{suffix}" before the extension + prefix_base, ext = os.path.splitext(prefix) + product_name = prefix_base + "." 
+ suffix + ext + else: + product_name = prefix + + return product_name diff --git a/openpype/hosts/houdini/plugins/publish/collect_remote_publish.py b/openpype/hosts/houdini/plugins/publish/collect_remote_publish.py new file mode 100644 index 00000000000..3ae16efe56f --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/collect_remote_publish.py @@ -0,0 +1,30 @@ +import pyblish.api +import openpype.api + +import hou +from avalon.houdini import lib + + +class CollectRemotePublishSettings(pyblish.api.ContextPlugin): + """Collect custom settings of the Remote Publish node.""" + + order = pyblish.api.CollectorOrder + families = ["*"] + hosts = ["houdini"] + targets = ["deadline"] + label = "Remote Publish Submission Settings" + actions = [openpype.api.RepairAction] + + def process(self, context): + + node = hou.node("/out/REMOTE_PUBLISH") + if not node: + return + + attributes = lib.read(node) + + # Debug the settings we have collected + for key, value in sorted(attributes.items()): + self.log.debug("Collected %s: %s" % (key, value)) + + context.data.update(attributes) diff --git a/openpype/hosts/houdini/plugins/publish/collect_render_products.py b/openpype/hosts/houdini/plugins/publish/collect_render_products.py new file mode 100644 index 00000000000..d7163b43c0d --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/collect_render_products.py @@ -0,0 +1,133 @@ +import re +import os + +import hou +import pxr.UsdRender + +import pyblish.api + + +def get_var_changed(variable=None): + """Return changed variables and operators that use it. + + Note: `varchange` hscript states that it forces a recook of the nodes + that use Variables. That was tested in Houdini + 18.0.391. + + Args: + variable (str, Optional): A specific variable to query the operators + for. When None is provided it will return all variables that have + had recent changes and require a recook. Defaults to None. + + Returns: + dict: Variable that changed with the operators that use it. + + """ + cmd = "varchange -V" + if variable: + cmd += " {0}".format(variable) + output, _ = hou.hscript(cmd) + + changed = {} + for line in output.split("Variable: "): + if not line.strip(): + continue + + split = line.split() + var = split[0] + operators = split[1:] + changed[var] = operators + + return changed + + +class CollectRenderProducts(pyblish.api.InstancePlugin): + """Collect USD Render Products.""" + + label = "Collect Render Products" + order = pyblish.api.CollectorOrder + 0.4 + hosts = ["houdini"] + families = ["usdrender"] + + def process(self, instance): + + node = instance.data.get("output_node") + if not node: + rop_path = instance[0].path() + raise RuntimeError( + "No output node found. Make sure to connect an " + "input to the USD ROP: %s" % rop_path + ) + + # Workaround Houdini 18.0.391 bug where $HIPNAME doesn't automatically + # update after scene save. + if hou.applicationVersion() == (18, 0, 391): + self.log.debug( + "Checking for recook to workaround " "$HIPNAME refresh bug..." 
+ ) + changed = get_var_changed("HIPNAME").get("HIPNAME") + if changed: + self.log.debug("Recooking for $HIPNAME refresh bug...") + for operator in changed: + hou.node(operator).cook(force=True) + + # Make sure to recook any 'cache' nodes in the history chain + chain = [node] + chain.extend(node.inputAncestors()) + for input_node in chain: + if input_node.type().name() == "cache": + input_node.cook(force=True) + + stage = node.stage() + + filenames = [] + for prim in stage.Traverse(): + + if not prim.IsA(pxr.UsdRender.Product): + continue + + # Get Render Product Name + product = pxr.UsdRender.Product(prim) + + # We force taking it from any random time sample as opposed to + # "default" that the USD Api falls back to since that won't return + # time sampled values if they were set per time sample. + name = product.GetProductNameAttr().Get(time=0) + dirname = os.path.dirname(name) + basename = os.path.basename(name) + + dollarf_regex = r"(\$F([0-9]?))" + frame_regex = r"^(.+\.)([0-9]+)(\.[a-zA-Z]+)$" + if re.match(dollarf_regex, basename): + # TODO: Confirm this actually is allowed USD stages and HUSK + # Substitute $F + def replace(match): + """Replace $F4 with padded #.""" + padding = int(match.group(2)) if match.group(2) else 1 + return "#" * padding + + filename_base = re.sub(dollarf_regex, replace, basename) + filename = os.path.join(dirname, filename_base) + else: + # Substitute basename.0001.ext + def replace(match): + prefix, frame, ext = match.groups() + padding = "#" * len(frame) + return prefix + padding + ext + + filename_base = re.sub(frame_regex, replace, basename) + filename = os.path.join(dirname, filename_base) + filename = filename.replace("\\", "/") + + assert "#" in filename, ( + "Couldn't resolve render product name " + "with frame number: %s" % name + ) + + filenames.append(filename) + + prim_path = str(prim.GetPath()) + self.log.info("Collected %s name: %s" % (prim_path, filename)) + + # Filenames for Deadline + instance.data["files"] = filenames diff --git a/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py b/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py new file mode 100644 index 00000000000..66dfba64df4 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py @@ -0,0 +1,110 @@ +import pyblish.api + +from avalon import io +import openpype.lib.usdlib as usdlib + + +class CollectUsdBootstrap(pyblish.api.InstancePlugin): + """Collect special Asset/Shot bootstrap instances if those are needed. + + Some specific subsets are intended to be part of the default structure + of an "Asset" or "Shot" in our USD pipeline. For example, for an Asset + we layer a Model and Shade USD file over each other and expose that in + a Asset USD file, ready to use. + + On the first publish of any of the components of a Asset or Shot the + missing pieces are bootstrapped and generated in the pipeline too. This + means that on the very first publish of your model the Asset USD file + will exist too. + + """ + + order = pyblish.api.CollectorOrder + 0.35 + label = "Collect USD Bootstrap" + hosts = ["houdini"] + families = ["usd", "usd.layered"] + + def process(self, instance): + + # Detect whether the current subset is a subset in a pipeline + def get_bootstrap(instance): + instance_subset = instance.data["subset"] + for name, layers in usdlib.PIPELINE.items(): + if instance_subset in set(layers): + return name # e.g. 
"asset" + break + else: + return + + bootstrap = get_bootstrap(instance) + if bootstrap: + self.add_bootstrap(instance, bootstrap) + + # Check if any of the dependencies requires a bootstrap + for dependency in instance.data.get("publishDependencies", list()): + bootstrap = get_bootstrap(dependency) + if bootstrap: + self.add_bootstrap(dependency, bootstrap) + + def add_bootstrap(self, instance, bootstrap): + + self.log.debug("Add bootstrap for: %s" % bootstrap) + + asset = io.find_one({"name": instance.data["asset"], "type": "asset"}) + assert asset, "Asset must exist: %s" % asset + + # Check which are not about to be created and don't exist yet + required = {"shot": ["usdShot"], "asset": ["usdAsset"]}.get(bootstrap) + + require_all_layers = instance.data.get("requireAllLayers", False) + if require_all_layers: + # USD files load fine in usdview and Houdini even when layered or + # referenced files do not exist. So by default we don't require + # the layers to exist. + layers = usdlib.PIPELINE.get(bootstrap) + if layers: + required += list(layers) + + self.log.debug("Checking required bootstrap: %s" % required) + for subset in required: + if self._subset_exists(instance, subset, asset): + continue + + self.log.debug( + "Creating {0} USD bootstrap: {1} {2}".format( + bootstrap, asset["name"], subset + ) + ) + + new = instance.context.create_instance(subset) + new.data["subset"] = subset + new.data["label"] = "{0} ({1})".format(subset, asset["name"]) + new.data["family"] = "usd.bootstrap" + new.data["comment"] = "Automated bootstrap USD file." + new.data["publishFamilies"] = ["usd"] + + # Do not allow the user to toggle this instance + new.data["optional"] = False + + # Copy some data from the instance for which we bootstrap + for key in ["asset"]: + new.data[key] = instance.data[key] + + def _subset_exists(self, instance, subset, asset): + """Return whether subset exists in current context or in database.""" + # Allow it to be created during this publish session + context = instance.context + for inst in context: + if ( + inst.data["subset"] == subset + and inst.data["asset"] == asset["name"] + ): + return True + + # Or, if they already exist in the database we can + # skip them too. 
+ return bool( + io.find_one( + {"name": subset, "type": "subset", "parent": asset["_id"]} + ) + ) diff --git a/openpype/hosts/houdini/plugins/publish/collect_usd_layers.py b/openpype/hosts/houdini/plugins/publish/collect_usd_layers.py new file mode 100644 index 00000000000..8be6ead1b1d --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/collect_usd_layers.py @@ -0,0 +1,61 @@ +import os + +import pyblish.api +import openpype.hosts.houdini.api.usd as usdlib + + +class CollectUsdLayers(pyblish.api.InstancePlugin): + """Collect the USD Layers that have configured save paths.""" + + order = pyblish.api.CollectorOrder + 0.35 + label = "Collect USD Layers" + hosts = ["houdini"] + families = ["usd"] + + def process(self, instance): + + output = instance.data.get("output_node") + if not output: + self.log.debug("No output node found..") + return + + rop_node = instance[0] + + save_layers = [] + for layer in usdlib.get_configured_save_layers(rop_node): + + info = layer.rootPrims.get("HoudiniLayerInfo") + save_path = info.customData.get("HoudiniSavePath") + creator = info.customData.get("HoudiniCreatorNode") + + self.log.debug("Found configured save path: " + "%s -> %s" % (layer, save_path)) + + # Log node that configured this save path + if creator: + self.log.debug("Created by: %s" % creator) + + save_layers.append((layer, save_path)) + + # Store on the instance + instance.data["usdConfiguredSavePaths"] = save_layers + + # Create configured layer instances so User can disable updating + # specific configured layers for publishing. + context = instance.context + for layer, save_path in save_layers: + name = os.path.basename(save_path) + label = "{0} -> {1}".format(instance.data["name"], name) + layer_inst = context.create_instance(name) + + family = "colorbleed.usdlayer" + layer_inst.data["family"] = family + layer_inst.data["families"] = [family] + layer_inst.data["subset"] = "__stub__" + layer_inst.data["label"] = label + layer_inst.data["asset"] = instance.data["asset"] + layer_inst.append(instance[0]) # include same USD ROP + layer_inst.append((layer, save_path)) # include layer data + + # Allow this subset to be grouped into a USD Layer on creation + layer_inst.data["subsetGroup"] = "USD Layer" diff --git a/openpype/hosts/houdini/plugins/publish/collect_workscene_fps.py b/openpype/hosts/houdini/plugins/publish/collect_workscene_fps.py index c145eea5192..6f6cc978cd6 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_workscene_fps.py +++ b/openpype/hosts/houdini/plugins/publish/collect_workscene_fps.py @@ -3,7 +3,7 @@ class CollectWorksceneFPS(pyblish.api.ContextPlugin): - """Get the FPS of the work scene""" + """Get the FPS of the work scene.""" label = "Workscene FPS" order = pyblish.api.CollectorOrder diff --git a/openpype/hosts/houdini/plugins/publish/extract_alembic.py b/openpype/hosts/houdini/plugins/publish/extract_alembic.py index b251ebdc905..83b790407f1 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_alembic.py +++ b/openpype/hosts/houdini/plugins/publish/extract_alembic.py @@ -2,6 +2,7 @@ import pyblish.api import openpype.api +from openpype.hosts.houdini.api.lib import render_rop class ExtractAlembic(openpype.api.Extractor): @@ -13,29 +14,20 @@ class ExtractAlembic(openpype.api.Extractor): def process(self, instance): - import hou - ropnode = instance[0] # Get the filename from the filename parameter output = ropnode.evalParm("filename") staging_dir = os.path.dirname(output) - # instance.data["stagingDir"] = staging_dir + instance.data["stagingDir"] = 
staging_dir file_name = os.path.basename(output) # We run the render self.log.info("Writing alembic '%s' to '%s'" % (file_name, staging_dir)) - try: - ropnode.render() - except hou.Error as exc: - # The hou.Error is not inherited from a Python Exception class, - # so we explicitly capture the houdini error, otherwise pyblish - # will remain hanging. - import traceback - traceback.print_exc() - raise RuntimeError("Render failed: {0}".format(exc)) + + render_rop(ropnode) if "representations" not in instance.data: instance.data["representations"] = [] diff --git a/openpype/hosts/houdini/plugins/publish/extract_composite.py b/openpype/hosts/houdini/plugins/publish/extract_composite.py new file mode 100644 index 00000000000..f300b6d28d1 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/extract_composite.py @@ -0,0 +1,35 @@ +import os + +import pyblish.api +import openpype.api + +from openpype.hosts.houdini.api.lib import render_rop + + +class ExtractComposite(openpype.api.Extractor): + + order = pyblish.api.ExtractorOrder + label = "Extract Composite (Image Sequence)" + hosts = ["houdini"] + families = ["imagesequence"] + + def process(self, instance): + + ropnode = instance[0] + + # Get the filename from the copoutput parameter + # `.evalParm(parameter)` will make sure all tokens are resolved + output = ropnode.evalParm("copoutput") + staging_dir = os.path.dirname(output) + instance.data["stagingDir"] = staging_dir + file_name = os.path.basename(output) + + self.log.info("Writing comp '%s' to '%s'" % (file_name, staging_dir)) + + render_rop(ropnode) + + if "files" not in instance.data: + instance.data["files"] = [] + + frames = instance.data["frames"] + instance.data["files"].append(frames) diff --git a/openpype/hosts/houdini/plugins/publish/extract_usd.py b/openpype/hosts/houdini/plugins/publish/extract_usd.py new file mode 100644 index 00000000000..0fc26900fb7 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/extract_usd.py @@ -0,0 +1,42 @@ +import os + +import pyblish.api +import openpype.api +from openpype.hosts.houdini.api.lib import render_rop + + +class ExtractUSD(openpype.api.Extractor): + + order = pyblish.api.ExtractorOrder + label = "Extract USD" + hosts = ["houdini"] + families = ["usd", + "usdModel", + "usdSetDress"] + + def process(self, instance): + + ropnode = instance[0] + + # Get the filename from the filename parameter + output = ropnode.evalParm("lopoutput") + staging_dir = os.path.dirname(output) + instance.data["stagingDir"] = staging_dir + file_name = os.path.basename(output) + + self.log.info("Writing USD '%s' to '%s'" % (file_name, staging_dir)) + + render_rop(ropnode) + + assert os.path.exists(output), "Output does not exist: %s" % output + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': 'usd', + 'ext': 'usd', + 'files': file_name, + "stagingDir": staging_dir, + } + instance.data["representations"].append(representation) diff --git a/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py b/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py new file mode 100644 index 00000000000..645bd05d4b9 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py @@ -0,0 +1,315 @@ +import os +import contextlib +import hou +import sys +from collections import deque + +import pyblish.api +import openpype.api + +import openpype.hosts.houdini.api.usd as hou_usdlib +from openpype.hosts.houdini.api.lib import render_rop + + +class ExitStack(object): + """Context 
manager for dynamic management of a stack of exit callbacks. + + For example: + + with ExitStack() as stack: + files = [stack.enter_context(open(fname)) for fname in filenames] + # All opened files will automatically be closed at the end of + # the with statement, even if attempts to open files later + # in the list raise an exception + + """ + + def __init__(self): + self._exit_callbacks = deque() + + def pop_all(self): + """Preserve the context stack by transferring it to a new instance""" + new_stack = type(self)() + new_stack._exit_callbacks = self._exit_callbacks + self._exit_callbacks = deque() + return new_stack + + def _push_cm_exit(self, cm, cm_exit): + """Helper to correctly register callbacks to __exit__ methods""" + + def _exit_wrapper(*exc_details): + return cm_exit(cm, *exc_details) + + _exit_wrapper.__self__ = cm + self.push(_exit_wrapper) + + def push(self, exit): + """Registers a callback with the standard __exit__ method signature. + + Can suppress exceptions the same way __exit__ methods can. + + Also accepts any object with an __exit__ method (registering a call + to the method instead of the object itself) + + """ + # We use an unbound method rather than a bound method to follow + # the standard lookup behaviour for special methods + _cb_type = type(exit) + try: + exit_method = _cb_type.__exit__ + except AttributeError: + # Not a context manager, so assume its a callable + self._exit_callbacks.append(exit) + else: + self._push_cm_exit(exit, exit_method) + return exit # Allow use as a decorator + + def callback(self, callback, *args, **kwds): + """Registers an arbitrary callback and arguments. + + Cannot suppress exceptions. + """ + + def _exit_wrapper(exc_type, exc, tb): + callback(*args, **kwds) + + # We changed the signature, so using @wraps is not appropriate, but + # setting __wrapped__ may still help with introspection + _exit_wrapper.__wrapped__ = callback + self.push(_exit_wrapper) + return callback # Allow use as a decorator + + def enter_context(self, cm): + """Enters the supplied context manager + + If successful, also pushes its __exit__ method as a callback and + returns the result of the __enter__ method. 
+ """ + # We look up the special methods on the type to match the with + # statement + _cm_type = type(cm) + _exit = _cm_type.__exit__ + result = _cm_type.__enter__(cm) + self._push_cm_exit(cm, _exit) + return result + + def close(self): + """Immediately unwind the context stack""" + self.__exit__(None, None, None) + + def __enter__(self): + return self + + def __exit__(self, *exc_details): + # We manipulate the exception state so it behaves as though + # we were actually nesting multiple with statements + frame_exc = sys.exc_info()[1] + + def _fix_exception_context(new_exc, old_exc): + while 1: + exc_context = new_exc.__context__ + if exc_context in (None, frame_exc): + break + new_exc = exc_context + new_exc.__context__ = old_exc + + # Callbacks are invoked in LIFO order to match the behaviour of + # nested context managers + suppressed_exc = False + while self._exit_callbacks: + cb = self._exit_callbacks.pop() + try: + if cb(*exc_details): + suppressed_exc = True + exc_details = (None, None, None) + except Exception: + new_exc_details = sys.exc_info() + # simulate the stack of exceptions by setting the context + _fix_exception_context(new_exc_details[1], exc_details[1]) + if not self._exit_callbacks: + raise + exc_details = new_exc_details + return suppressed_exc + + +@contextlib.contextmanager +def parm_values(overrides): + """Override Parameter values during the context.""" + + originals = [] + try: + for parm, value in overrides: + originals.append((parm, parm.eval())) + parm.set(value) + yield + finally: + for parm, value in originals: + # Parameter might not exist anymore so first + # check whether it's still valid + if hou.parm(parm.path()): + parm.set(value) + + +class ExtractUSDLayered(openpype.api.Extractor): + + order = pyblish.api.ExtractorOrder + label = "Extract Layered USD" + hosts = ["houdini"] + families = ["usdLayered", "usdShade"] + + # Force Output Processors so it will always save any file + # into our unique staging directory with processed Avalon paths + output_processors = ["avalon_uri_processor", "stagingdir_processor"] + + def process(self, instance): + + self.log.info("Extracting: %s" % instance) + + staging_dir = self.staging_dir(instance) + fname = instance.data.get("usdFilename") + + # The individual rop nodes are collected as "publishDependencies" + dependencies = instance.data["publishDependencies"] + ropnodes = [dependency[0] for dependency in dependencies] + assert all( + node.type().name() in {"usd", "usd_rop"} for node in ropnodes + ) + + # Main ROP node, either a USD Rop or ROP network with + # multiple USD ROPs + node = instance[0] + + # Collect any output dependencies that have not been processed yet + # during extraction of other instances + outputs = [fname] + active_dependencies = [ + dep + for dep in dependencies + if dep.data.get("publish", True) + and not dep.data.get("_isExtracted", False) + ] + for dependency in active_dependencies: + outputs.append(dependency.data["usdFilename"]) + + pattern = r"*[/\]{0} {0}" + save_pattern = " ".join(pattern.format(fname) for fname in outputs) + + # Run a stack of context managers before we start the render to + # temporarily adjust USD ROP settings for our publish output. + rop_overrides = { + # This sets staging directory on the processor to force our + # output files to end up in the Staging Directory. + "stagingdiroutputprocessor_stagingDir": staging_dir, + # Force the Avalon URI Output Processor to refactor paths for + # references, payloads and layers to published paths. 
+ "avalonurioutputprocessor_use_publish_paths": True, + # Only write out specific USD files based on our outputs + "savepattern": save_pattern, + } + overrides = list() + with ExitStack() as stack: + + for ropnode in ropnodes: + manager = hou_usdlib.outputprocessors( + ropnode, + processors=self.output_processors, + disable_all_others=True, + ) + stack.enter_context(manager) + + # Some of these must be added after we enter the output + # processor context manager because those parameters only + # exist when the Output Processor is added to the ROP node. + for name, value in rop_overrides.items(): + parm = ropnode.parm(name) + assert parm, "Parm not found: %s.%s" % ( + ropnode.path(), + name, + ) + overrides.append((parm, value)) + + stack.enter_context(parm_values(overrides)) + + # Render the single ROP node or the full ROP network + render_rop(node) + + # Assert all output files in the Staging Directory + for output_fname in outputs: + path = os.path.join(staging_dir, output_fname) + assert os.path.exists(path), "Output file must exist: %s" % path + + # Set up the dependency for publish if they have new content + # compared to previous publishes + for dependency in active_dependencies: + dependency_fname = dependency.data["usdFilename"] + + filepath = os.path.join(staging_dir, dependency_fname) + similar = self._compare_with_latest_publish(dependency, filepath) + if similar: + # Deactivate this dependency + self.log.debug( + "Dependency matches previous publish version," + " deactivating %s for publish" % dependency + ) + dependency.data["publish"] = False + else: + self.log.debug("Extracted dependency: %s" % dependency) + # This dependency should be published + dependency.data["files"] = [dependency_fname] + dependency.data["stagingDir"] = staging_dir + dependency.data["_isExtracted"] = True + + # Store the created files on the instance + if "files" not in instance.data: + instance.data["files"] = [] + instance.data["files"].append(fname) + + def _compare_with_latest_publish(self, dependency, new_file): + + from avalon import api, io + import filecmp + + _, ext = os.path.splitext(new_file) + + # Compare this dependency with the latest published version + # to detect whether we should make this into a new publish + # version. If not, skip it. + asset = io.find_one( + {"name": dependency.data["asset"], "type": "asset"} + ) + subset = io.find_one( + { + "name": dependency.data["subset"], + "type": "subset", + "parent": asset["_id"], + } + ) + if not subset: + # Subset doesn't exist yet. 
Definitely new file
+            self.log.debug("No existing subset..")
+            return False
+
+        version = io.find_one(
+            {"type": "version", "parent": subset["_id"]},
+            sort=[("name", -1)]
+        )
+        if not version:
+            self.log.debug("No existing version..")
+            return False
+
+        representation = io.find_one(
+            {
+                "name": ext.lstrip("."),
+                "type": "representation",
+                "parent": version["_id"],
+            }
+        )
+        if not representation:
+            self.log.debug("No existing representation..")
+            return False
+
+        old_file = api.get_representation_path(representation)
+        if not os.path.exists(old_file):
+            return False
+
+        return filecmp.cmp(old_file, new_file)
diff --git a/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py b/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py
index f480fe62364..78794acc97c 100644
--- a/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py
+++ b/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py
@@ -2,6 +2,7 @@
 
 import pyblish.api
 import openpype.api
+from openpype.hosts.houdini.api.lib import render_rop
 
 
 class ExtractVDBCache(openpype.api.Extractor):
@@ -13,8 +14,6 @@ class ExtractVDBCache(openpype.api.Extractor):
 
     def process(self, instance):
 
-        import hou
-
         ropnode = instance[0]
 
         # Get the filename from the filename parameter
@@ -25,15 +24,8 @@ def process(self, instance):
         file_name = os.path.basename(sop_output)
 
         self.log.info("Writing VDB '%s' to '%s'" % (file_name, staging_dir))
-        try:
-            ropnode.render()
-        except hou.Error as exc:
-            # The hou.Error is not inherited from a Python Exception class,
-            # so we explicitly capture the houdini error, otherwise pyblish
-            # will remain hanging.
-            import traceback
-            traceback.print_exc()
-            raise RuntimeError("Render failed: {0}".format(exc))
+
+        render_rop(ropnode)
 
         output = instance.data["frames"]
 
@@ -41,9 +33,9 @@ def process(self, instance):
             instance.data["representations"] = []
 
         representation = {
-            'name': 'mov',
-            'ext': 'mov',
-            'files': output,
+            "name": "vdb",
+            "ext": "vdb",
+            "files": output,
             "stagingDir": staging_dir,
         }
         instance.data["representations"].append(representation)
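Aside: the extractors in this PR all hand their output to integration through the same minimal representation dict. A sketch of that shape, using only keys that appear above; the helper function itself is hypothetical:

def register_representation(instance, name, ext, files, staging_dir):
    # `files` is a single file name, or a list of names for sequences
    instance.data.setdefault("representations", []).append({
        "name": name,
        "ext": ext,
        "files": files,
        "stagingDir": staging_dir,
    })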
diff --git a/openpype/hosts/houdini/plugins/publish/increment_current_file.py b/openpype/hosts/houdini/plugins/publish/increment_current_file.py
new file mode 100644
index 00000000000..31c2954ee75
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/increment_current_file.py
@@ -0,0 +1,51 @@
+import pyblish.api
+import avalon.api
+
+from openpype.api import version_up
+from openpype.action import get_errored_plugins_from_data
+
+
+class IncrementCurrentFile(pyblish.api.InstancePlugin):
+    """Increment the current file.
+
+    Saves the current scene with an increased version number.
+
+    """
+
+    label = "Increment current file"
+    order = pyblish.api.IntegratorOrder + 9.0
+    hosts = ["houdini"]
+    families = ["colorbleed.usdrender", "redshift_rop"]
+    targets = ["local"]
+
+    def process(self, instance):
+
+        # This should be a ContextPlugin, but this is a workaround
+        # for a bug in pyblish to run once for a family: issue #250
+        context = instance.context
+        key = "__hasRun{}".format(self.__class__.__name__)
+        if context.data.get(key, False):
+            return
+        else:
+            context.data[key] = True
+
+        context = instance.context
+        errored_plugins = get_errored_plugins_from_data(context)
+        if any(
+            plugin.__name__ == "HoudiniSubmitPublishDeadline"
+            for plugin in errored_plugins
+        ):
+            raise RuntimeError(
+                "Skipping incrementing current file because "
+                "submission to deadline failed."
+            )
+
+        # Filename must not have changed since collecting
+        host = avalon.api.registered_host()
+        current_file = host.current_file()
+        assert (
+            context.data["currentFile"] == current_file
+        ), "Current scene file changed since collecting."
+
+        new_filepath = version_up(current_file)
+        host.save(new_filepath)
diff --git a/openpype/hosts/houdini/plugins/publish/increment_current_file_deadline.py b/openpype/hosts/houdini/plugins/publish/increment_current_file_deadline.py
new file mode 100644
index 00000000000..faa015f739f
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/increment_current_file_deadline.py
@@ -0,0 +1,35 @@
+import pyblish.api
+
+import hou
+from openpype.api import version_up
+from openpype.action import get_errored_plugins_from_data
+
+
+class IncrementCurrentFileDeadline(pyblish.api.ContextPlugin):
+    """Increment the current file.
+
+    Saves the current scene with an increased version number.
+
+    """
+
+    label = "Increment current file"
+    order = pyblish.api.IntegratorOrder + 9.0
+    hosts = ["houdini"]
+    targets = ["deadline"]
+
+    def process(self, context):
+
+        errored_plugins = get_errored_plugins_from_data(context)
+        if any(
+            plugin.__name__ == "HoudiniSubmitPublishDeadline"
+            for plugin in errored_plugins
+        ):
+            raise RuntimeError(
+                "Skipping incrementing current file because "
+                "submission to deadline failed."
+            )
+
+        current_filepath = context.data["currentFile"]
+        new_filepath = version_up(current_filepath)
+
+        hou.hipFile.save(file_name=new_filepath, save_to_recent_files=True)
diff --git a/openpype/hosts/houdini/plugins/publish/save_scene.py b/openpype/hosts/houdini/plugins/publish/save_scene.py
new file mode 100644
index 00000000000..1b12efa603b
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/save_scene.py
@@ -0,0 +1,37 @@
+import pyblish.api
+import avalon.api
+
+
+class SaveCurrentScene(pyblish.api.InstancePlugin):
+    """Save current scene"""
+
+    label = "Save current file"
+    order = pyblish.api.IntegratorOrder - 0.49
+    hosts = ["houdini"]
+    families = ["usdrender",
+                "redshift_rop"]
+    targets = ["local"]
+
+    def process(self, instance):
+
+        # This should be a ContextPlugin, but this is a workaround
+        # for a bug in pyblish to run once for a family: issue #250
+        context = instance.context
+        key = "__hasRun{}".format(self.__class__.__name__)
+        if context.data.get(key, False):
+            return
+        else:
+            context.data[key] = True
+
+        # Filename must not have changed since collecting
+        host = avalon.api.registered_host()
+        current_file = host.current_file()
+        assert context.data['currentFile'] == current_file, (
+            "Current scene file changed since collecting."
+        )
+
+        if host.has_unsaved_changes():
+            self.log.info("Saving current file..")
+            host.save_file(current_file)
+        else:
+            self.log.debug("No unsaved changes, skipping file save..")
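Aside: IncrementCurrentFile and SaveCurrentScene above both guard with a context key so the work runs once per publish even though they are InstancePlugins, the workaround for pyblish-base issue #250 referenced in their comments. The pattern in isolation, with a hypothetical plug-in name:

import pyblish.api

class RunOncePerContext(pyblish.api.InstancePlugin):
    order = pyblish.api.IntegratorOrder

    def process(self, instance):
        context = instance.context
        key = "__hasRun{}".format(self.__class__.__name__)
        if context.data.get(key, False):
            return  # a sibling instance already did the work
        context.data[key] = True
        # ...do the once-per-publish work here (e.g. save the scene)...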
diff --git a/openpype/hosts/houdini/plugins/publish/save_scene_deadline.py b/openpype/hosts/houdini/plugins/publish/save_scene_deadline.py
new file mode 100644
index 00000000000..a0efd0610cf
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/save_scene_deadline.py
@@ -0,0 +1,23 @@
+import pyblish.api
+
+
+class SaveCurrentSceneDeadline(pyblish.api.ContextPlugin):
+    """Save current scene"""
+
+    label = "Save current file"
+    order = pyblish.api.IntegratorOrder - 0.49
+    hosts = ["houdini"]
+    targets = ["deadline"]
+
+    def process(self, context):
+        import hou
+
+        assert (
+            context.data["currentFile"] == hou.hipFile.path()
+        ), "Current scene file changed since collecting."
+
+        if hou.hipFile.hasUnsavedChanges():
+            self.log.info("Saving current file..")
+            hou.hipFile.save(save_to_recent_files=True)
+        else:
+            self.log.debug("No unsaved changes, skipping file save..")
diff --git a/openpype/hosts/houdini/plugins/publish/valiate_vdb_input_node.py b/openpype/hosts/houdini/plugins/publish/valiate_vdb_input_node.py
index 7b23d73ac70..0ae1bc94eb3 100644
--- a/openpype/hosts/houdini/plugins/publish/valiate_vdb_input_node.py
+++ b/openpype/hosts/houdini/plugins/publish/valiate_vdb_input_node.py
@@ -3,7 +3,7 @@
 
 
 class ValidateVDBInputNode(pyblish.api.InstancePlugin):
-    """Validate that the node connected to the output node is of type VDB
+    """Validate that the node connected to the output node is of type VDB.
 
     Regardless of the amount of VDBs create the output will need to have an
     equal amount of VDBs, points, primitives and vertices
@@ -24,8 +24,9 @@ class ValidateVDBInputNode(pyblish.api.InstancePlugin):
     def process(self, instance):
         invalid = self.get_invalid(instance)
         if invalid:
-            raise RuntimeError("Node connected to the output node is not"
-                               "of type VDB!")
+            raise RuntimeError(
+                "Node connected to the output node is not " "of type VDB!"
+            )
 
     @classmethod
     def get_invalid(cls, instance):
diff --git a/openpype/hosts/houdini/plugins/publish/validate_abc_primitive_to_detail.py b/openpype/hosts/houdini/plugins/publish/validate_abc_primitive_to_detail.py
new file mode 100644
index 00000000000..8fe1b44b7a0
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_abc_primitive_to_detail.py
@@ -0,0 +1,132 @@
+import pyblish.api
+import openpype.api
+
+from collections import defaultdict
+
+
+class ValidateAbcPrimitiveToDetail(pyblish.api.InstancePlugin):
+    """Validate Alembic ROP Primitive to Detail attribute is consistent.
+
+    The Alembic ROP crashes Houdini whenever an attribute in the "Primitive to
+    Detail" parameter exists on only a part of the primitives that belong to
+    the same hierarchy path. Whenever it encounters inconsistent values,
+    specifically where some are empty while others are not, Houdini
+    crashes. (Tested in Houdini 17.5.229)
+
+    """
+
+    order = openpype.api.ValidateContentsOrder + 0.1
+    families = ["pointcache"]
+    hosts = ["houdini"]
+    label = "Validate Primitive to Detail (Abc)"
+
+    def process(self, instance):
+        invalid = self.get_invalid(instance)
+        if invalid:
+            raise RuntimeError(
+                "Primitives found with inconsistent primitive "
+                "to detail attributes. See log."
+            )
+
+    @classmethod
+    def get_invalid(cls, instance):
+
+        output = instance.data["output_node"]
+
+        rop = instance[0]
+        pattern = rop.parm("prim_to_detail_pattern").eval().strip()
+        if not pattern:
+            cls.log.debug(
+                "Alembic ROP has no 'Primitive to Detail' pattern. "
+                "Validation is ignored.."
+            )
+            return
+
+        build_from_path = rop.parm("build_from_path").eval()
+        if not build_from_path:
+            cls.log.debug(
+                "Alembic ROP has 'Build from Path' disabled. "
+                "Validation is ignored.."
+            )
+            return
+
+        path_attr = rop.parm("path_attrib").eval()
+        if not path_attr:
+            cls.log.error(
+                "The Alembic ROP node has no Path Attribute "
+                "value set, but 'Build Hierarchy from Attribute' "
+                "is enabled."
+            )
+            return [rop.path()]
+
+        # Let's assume each attribute is explicitly named for now and has no
+        # wildcards for Primitive to Detail. This simplifies the check.
+ cls.log.debug("Checking Primitive to Detail pattern: %s" % pattern) + cls.log.debug("Checking with path attribute: %s" % path_attr) + + # Check if the primitive attribute exists + frame = instance.data.get("startFrame", 0) + geo = output.geometryAtFrame(frame) + + # If there are no primitives on the start frame then it might be + # something that is emitted over time. As such we can't actually + # validate whether the attributes exist, because they won't exist + # yet. In that case, just warn the user and allow it. + if len(geo.iterPrims()) == 0: + cls.log.warning( + "No primitives found on current frame. Validation" + " for Primitive to Detail will be skipped." + ) + return + + attrib = geo.findPrimAttrib(path_attr) + if not attrib: + cls.log.info( + "Geometry Primitives are missing " + "path attribute: `%s`" % path_attr + ) + return [output.path()] + + # Ensure at least a single string value is present + if not attrib.strings(): + cls.log.info( + "Primitive path attribute has no " + "string values: %s" % path_attr + ) + return [output.path()] + + paths = None + for attr in pattern.split(" "): + if not attr.strip(): + # Ignore empty values + continue + + # Check if the primitive attribute exists + attrib = geo.findPrimAttrib(attr) + if not attrib: + # It is allowed to not have the attribute at all + continue + + # The issue can only happen if at least one string attribute is + # present. So we ignore cases with no values whatsoever. + if not attrib.strings(): + continue + + check = defaultdict(set) + values = geo.primStringAttribValues(attr) + if paths is None: + paths = geo.primStringAttribValues(path_attr) + + for path, value in zip(paths, values): + check[path].add(value) + + for path, values in check.items(): + # Whenever a single path has multiple values for the + # Primitive to Detail attribute then we consider it + # inconsistent and invalidate the ROP node's content. + if len(values) > 1: + cls.log.warning( + "Path has multiple values: %s (path: %s)" + % (list(values), path) + ) + return [output.path()] diff --git a/openpype/hosts/houdini/plugins/publish/validate_alembic_face_sets.py b/openpype/hosts/houdini/plugins/publish/validate_alembic_face_sets.py new file mode 100644 index 00000000000..e9126ffef0c --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/validate_alembic_face_sets.py @@ -0,0 +1,37 @@ +import pyblish.api +import openpype.api + + +class ValidateAlembicROPFaceSets(pyblish.api.InstancePlugin): + """Validate Face Sets are disabled for extraction to pointcache. + + When groups are saved as Face Sets with the Alembic these show up + as shadingEngine connections in Maya - however, with animated groups + these connections in Maya won't work as expected, it won't update per + frame. Additionally, it can break shader assignments in some cases + where it requires to first break this connection to allow a shader to + be assigned. + + It is allowed to include Face Sets, so only an issue is logged to + identify that it could introduce issues down the pipeline. + + """ + + order = openpype.api.ValidateContentsOrder + 0.1 + families = ["pointcache"] + hosts = ["houdini"] + label = "Validate Alembic ROP Face Sets" + + def process(self, instance): + + rop = instance[0] + facesets = rop.parm("facesets").eval() + + # 0 = No Face Sets + # 1 = Save Non-Empty Groups as Face Sets + # 2 = Save All Groups As Face Sets + if facesets != 0: + self.log.warning( + "Alembic ROP saves 'Face Sets' for Geometry. " + "Are you sure you want this?" 
+            )
diff --git a/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py b/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py
index e8596b739d0..17c9da837a7 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py
+++ b/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py
@@ -1,9 +1,9 @@
 import pyblish.api
 import openpype.api
 
 
 class ValidateAlembicInputNode(pyblish.api.InstancePlugin):
-    """Validate that the node connected to the output is correct
+    """Validate that the node connected to the output is correct.
 
     The connected node cannot be of the following types for Alembic:
     - VDB
@@ -11,7 +11,7 @@ class ValidateAlembicInputNode(pyblish.api.InstancePlugin):
 
     """
 
     order = openpype.api.ValidateContentsOrder + 0.1
     families = ["pointcache"]
     hosts = ["houdini"]
     label = "Validate Input Node (Abc)"
@@ -19,19 +19,35 @@ class ValidateAlembicInputNode(pyblish.api.InstancePlugin):
     def process(self, instance):
         invalid = self.get_invalid(instance)
         if invalid:
-            raise RuntimeError("Node connected to the output node incorrect")
+            raise RuntimeError(
+                "Primitive types found that are not supported "
+                "for Alembic output."
+            )
 
     @classmethod
     def get_invalid(cls, instance):
-        invalid_nodes = ["VDB", "Volume"]
+        invalid_prim_types = ["VDB", "Volume"]
         node = instance.data["output_node"]
-        prims = node.geometry().prims()
+        if not hasattr(node, "geometry"):
+            # In the case someone has explicitly set an Object
+            # node instead of a SOP node in Geometry context
+            # then for now we ignore - this allows us to also
+            # export object transforms.
+            cls.log.warning("No geometry output node found, skipping check..")
+            return
+
+        frame = instance.data.get("startFrame", 0)
+        geo = node.geometryAtFrame(frame)
+
+        invalid = False
+        for prim_type in invalid_prim_types:
+            if geo.countPrimType(prim_type) > 0:
+                cls.log.error(
+                    "Found a primitive which is of type '%s' !" % prim_type
+                )
+                invalid = True
 
-        for prim in prims:
-            prim_type = prim.type().name()
-            if prim_type in invalid_nodes:
-                cls.log.error("Found a primitive which is of type '%s' !"
-                              % prim_type)
-                return [instance]
+        if invalid:
+            return [instance]
diff --git a/openpype/hosts/houdini/plugins/publish/validate_animation_settings.py b/openpype/hosts/houdini/plugins/publish/validate_animation_settings.py
index a42c3696da0..5eb8f93d03e 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_animation_settings.py
+++ b/openpype/hosts/houdini/plugins/publish/validate_animation_settings.py
@@ -29,8 +29,9 @@
 
         invalid = self.get_invalid(instance)
         if invalid:
-            raise RuntimeError("Output settings do no match for '%s'" %
-                               instance)
+            raise RuntimeError(
+                "Output settings do not match for '%s'" % instance
+            )
 
     @classmethod
     def get_invalid(cls, instance):
diff --git a/openpype/hosts/houdini/plugins/publish/validate_bypass.py b/openpype/hosts/houdini/plugins/publish/validate_bypass.py
index 9118ae0e8ce..79c67c30086 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_bypass.py
+++ b/openpype/hosts/houdini/plugins/publish/validate_bypass.py
@@ -18,12 +18,17 @@ class ValidateBypassed(pyblish.api.InstancePlugin):
 
     def process(self, instance):
 
+        if len(instance) == 0:
+            # Ignore instances without any nodes
+            # e.g. in memory bootstrap instances
+            return
+
         invalid = self.get_invalid(instance)
         if invalid:
             rop = invalid[0]
             raise RuntimeError(
-                "ROP node %s is set to bypass, publishing cannot continue.." %
-                rop.path()
+                "ROP node %s is set to bypass, publishing cannot continue.."
+                % rop.path()
             )
 
     @classmethod
diff --git a/openpype/hosts/houdini/plugins/publish/validate_camera_rop.py b/openpype/hosts/houdini/plugins/publish/validate_camera_rop.py
index ca755792676..a0919e13238 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_camera_rop.py
+++ b/openpype/hosts/houdini/plugins/publish/validate_camera_rop.py
@@ -6,9 +6,9 @@ class ValidateCameraROP(pyblish.api.InstancePlugin):
     """Validate Camera ROP settings."""
 
     order = openpype.api.ValidateContentsOrder
-    families = ['camera']
-    hosts = ['houdini']
-    label = 'Camera ROP'
+    families = ["camera"]
+    hosts = ["houdini"]
+    label = "Camera ROP"
 
     def process(self, instance):
 
@@ -16,8 +16,10 @@ def process(self, instance):
         node = instance[0]
         if node.parm("use_sop_path").eval():
-            raise RuntimeError("Alembic ROP for Camera export should not be "
-                               "set to 'Use Sop Path'. Please disable.")
+            raise RuntimeError(
+                "Alembic ROP for Camera export should not be "
+                "set to 'Use Sop Path'. Please disable."
+            )
 
         # Get the root and objects parameter of the Alembic ROP node
         root = node.parm("root").eval()
@@ -34,8 +36,8 @@ def process(self, instance):
         if not camera:
             raise ValueError("Camera path does not exist: %s" % path)
 
-        if not camera.type().name() == "cam":
-            raise ValueError("Object set in Alembic ROP is not a camera: "
-                             "%s (type: %s)" % (camera, camera.type().name()))
-
-
+        if camera.type().name() != "cam":
+            raise ValueError(
+                "Object set in Alembic ROP is not a camera: "
+                "%s (type: %s)" % (camera, camera.type().name())
+            )
diff --git a/openpype/hosts/houdini/plugins/publish/validate_cop_output_node.py b/openpype/hosts/houdini/plugins/publish/validate_cop_output_node.py
new file mode 100644
index 00000000000..543539ffe3a
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_cop_output_node.py
@@ -0,0 +1,60 @@
+import pyblish.api
+
+
+class ValidateCopOutputNode(pyblish.api.InstancePlugin):
+    """Validate the instance COP Output Node.
+
+    This will ensure:
+        - The COP Path is set.
+        - The COP Path refers to an existing object.
+        - The COP Path node is a COP node.
+
+    """
+
+    order = pyblish.api.ValidatorOrder
+    families = ["imagesequence"]
+    hosts = ["houdini"]
+    label = "Validate COP Output Node"
+
+    def process(self, instance):
+
+        invalid = self.get_invalid(instance)
+        if invalid:
+            raise RuntimeError(
+                "Output node(s) `%s` are incorrect. "
+                "See plug-in log for details." % invalid
+            )
+
+    @classmethod
+    def get_invalid(cls, instance):
+
+        import hou
+
+        output_node = instance.data["output_node"]
+
+        if output_node is None:
+            node = instance[0]
+            cls.log.error(
+                "COP Output node in '%s' does not exist. "
+                "Ensure a valid COP output path is set." % node.path()
+            )
+
+            return [node.path()]
+
+        # Output node must be a COP node.
+        if not isinstance(output_node, hou.CopNode):
+            cls.log.error(
+                "Output node %s is not a COP node. "
+                "COP Path must point to a COP node, "
+                "instead found category type: %s"
+                % (output_node.path(), output_node.type().category().name())
+            )
+            return [output_node.path()]
+
+        # For the sake of completeness also assert the category type
+        # is Cop2 to avoid potential edge case scenarios even though
+        # the isinstance check above should be stricter than this category
+        assert output_node.type().category().name() == "Cop2", (
+            "Output node %s is not of category Cop2. This is a bug.."
+            % output_node.path()
+        )
diff --git a/openpype/hosts/houdini/plugins/publish/validate_file_extension.py b/openpype/hosts/houdini/plugins/publish/validate_file_extension.py
new file mode 100644
index 00000000000..b26d28a1e7b
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_file_extension.py
@@ -0,0 +1,59 @@
+import os
+import pyblish.api
+
+from openpype.hosts.houdini.api import lib
+
+
+class ValidateFileExtension(pyblish.api.InstancePlugin):
+    """Validate the output file extension fits the output family.
+
+    File extensions:
+        - Pointcache must be .abc
+        - Camera must be .abc
+        - VDB must be .vdb
+
+    """
+
+    order = pyblish.api.ValidatorOrder
+    families = ["pointcache", "camera", "vdbcache"]
+    hosts = ["houdini"]
+    label = "Output File Extension"
+
+    family_extensions = {
+        "pointcache": ".abc",
+        "camera": ".abc",
+        "vdbcache": ".vdb",
+    }
+
+    def process(self, instance):
+
+        invalid = self.get_invalid(instance)
+        if invalid:
+            raise RuntimeError(
+                "ROP node has incorrect file extension: %s" % invalid
+            )
+
+    @classmethod
+    def get_invalid(cls, instance):
+
+        # Get ROP node from instance
+        node = instance[0]
+
+        # Create lookup for current family in instance
+        families = []
+        family = instance.data.get("family", None)
+        if family:
+            families.append(family)
+        families = set(families)
+
+        # Perform extension check
+        output = lib.get_output_parameter(node).eval()
+        _, output_extension = os.path.splitext(output)
+
+        for family in families:
+            extension = cls.family_extensions.get(family, None)
+            if extension is None:
+                raise RuntimeError("Unsupported family: %s" % family)
+
+            if output_extension != extension:
+                return [node.path()]
diff --git a/openpype/hosts/houdini/plugins/publish/validate_frame_token.py b/openpype/hosts/houdini/plugins/publish/validate_frame_token.py
new file mode 100644
index 00000000000..76b5910576e
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_frame_token.py
@@ -0,0 +1,51 @@
+import pyblish.api
+
+from openpype.hosts.houdini.api import lib
+
+
+class ValidateFrameToken(pyblish.api.InstancePlugin):
+    """Validate if the unexpanded string contains the frame ('$F') token.
+
+    This validator will *only* check the output parameter of the node if
+    the Valid Frame Range is not set to 'Render Current Frame'.
+
+    Rules:
+        If you render out a frame range it is mandatory to have the
+        frame token - '$F4' or similar - to ensure that each frame gets
+        written. If this is not the case you will override the same file
+        every time a frame is written out.
+
+    Examples:
+        Good: 'my_vdb_cache.$F4.vdb'
+        Bad: 'my_vdb_cache.vdb'
+
+    """
+
+    order = pyblish.api.ValidatorOrder
+    label = "Validate Frame Token"
+    families = ["vdbcache"]
+
+    def process(self, instance):
+
+        invalid = self.get_invalid(instance)
+        if invalid:
+            raise RuntimeError(
+                "Output settings do not match for '%s'" % instance
+            )
+
+    @classmethod
+    def get_invalid(cls, instance):
+
+        node = instance[0]
+
+        # Check trange parm, 0 means Render Current Frame
+        frame_range = node.evalParm("trange")
+        if frame_range == 0:
+            return []
+
+        output_parm = lib.get_output_parameter(node)
+        unexpanded_str = output_parm.unexpandedString()
+
+        if "$F" not in unexpanded_str:
+            cls.log.error("No frame token found in '%s'" % node.path())
+            return [instance]
diff --git a/openpype/hosts/houdini/plugins/publish/validate_houdini_license_category.py b/openpype/hosts/houdini/plugins/publish/validate_houdini_license_category.py
new file mode 100644
index 00000000000..f5f03aa844b
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_houdini_license_category.py
@@ -0,0 +1,30 @@
+import pyblish.api
+
+
+class ValidateHoudiniCommercialLicense(pyblish.api.InstancePlugin):
+    """Validate the Houdini instance runs a Commercial license.
+
+    When extracting USD files from a non-commercial Houdini license, even
+    with a Houdini Indie license, the resulting files will get "scrambled"
+    with a license protection and get a special .usdnc or .usdlc suffix.
+
+    This currently breaks the Subset/representation pipeline so we disallow
+    any publish with those licenses. Only the commercial license is valid.
+
+    """
+
+    order = pyblish.api.ValidatorOrder
+    families = ["usd"]
+    hosts = ["houdini"]
+    label = "Houdini Commercial License"
+
+    def process(self, instance):
+
+        import hou
+
+        license = hou.licenseCategory()
+        if license != hou.licenseCategoryType.Commercial:
+            raise RuntimeError(
+                "USD Publishing requires a full Commercial "
+                "license. You are on: %s" % license
+            )
diff --git a/openpype/hosts/houdini/plugins/publish/validate_mkpaths_toggled.py b/openpype/hosts/houdini/plugins/publish/validate_mkpaths_toggled.py
index a735f4b64bd..cd72877949e 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_mkpaths_toggled.py
+++ b/openpype/hosts/houdini/plugins/publish/validate_mkpaths_toggled.py
@@ -6,18 +6,18 @@ class ValidateIntermediateDirectoriesChecked(pyblish.api.InstancePlugin):
     """Validate Create Intermediate Directories is enabled on ROP node."""
 
     order = openpype.api.ValidateContentsOrder
-    families = ['pointcache',
-                'camera',
-                'vdbcache']
-    hosts = ['houdini']
-    label = 'Create Intermediate Directories Checked'
+    families = ["pointcache", "camera", "vdbcache"]
+    hosts = ["houdini"]
+    label = "Create Intermediate Directories Checked"
 
     def process(self, instance):
 
         invalid = self.get_invalid(instance)
         if invalid:
-            raise RuntimeError("Found ROP node with Create Intermediate "
-                               "Directories turned off: %s" % invalid)
+            raise RuntimeError(
+                "Found ROP node with Create Intermediate "
+                "Directories turned off: %s" % invalid
+            )
 
     @classmethod
     def get_invalid(cls, instance):
diff --git a/openpype/hosts/houdini/plugins/publish/validate_no_errors.py b/openpype/hosts/houdini/plugins/publish/validate_no_errors.py
new file mode 100644
index 00000000000..f58e5f8d7db
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_no_errors.py
@@ -0,0 +1,65 @@
+import pyblish.api
+import openpype.api
+import hou
+
+
+def cook_in_range(node, start, end):
+    current = hou.intFrame()
+    if start <= current <= end:
+        # Allow cooking current frame since we're in frame range
+        node.cook(force=False)
+    else:
+        node.cook(force=False, frame_range=(start, start))
+
+
+def get_errors(node):
+    """Get cooking errors.
+
+    If node already has errors check whether it needs to recook.
+    If so, then recook first to see if that solves it.
+
+    """
+    if node.errors() and node.needsToCook():
+        node.cook()
+
+    return node.errors()
+
+
+class ValidateNoErrors(pyblish.api.InstancePlugin):
+    """Validate the Instance has no current cooking errors."""
+
+    order = openpype.api.ValidateContentsOrder
+    hosts = ["houdini"]
+    label = "Validate no errors"
+
+    def process(self, instance):
+
+        validate_nodes = []
+
+        if len(instance) > 0:
+            validate_nodes.append(instance[0])
+        output_node = instance.data.get("output_node")
+        if output_node:
+            validate_nodes.append(output_node)
+
+        for node in validate_nodes:
+            self.log.debug("Validating for errors: %s" % node.path())
+            errors = get_errors(node)
+
+            if errors:
+                # If there are current errors, then try an unforced cook
+                # to see whether the error will disappear.
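+                # For example, with frameStart=1001 and frameEnd=1100 and the
+                # playhead inside that range, cook_in_range() cooks the
+                # current frame in place; with the playhead outside of it,
+                # only the start frame is cooked to keep the recook cheap.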
+ self.log.debug( + "Recooking to revalidate error " + "is up to date for: %s" % node.path() + ) + current_frame = hou.intFrame() + start = instance.data.get("frameStart", current_frame) + end = instance.data.get("frameEnd", current_frame) + cook_in_range(node, start=start, end=end) + + # Check for errors again after the forced recook + errors = get_errors(node) + if errors: + self.log.error(errors) + raise RuntimeError("Node has errors: %s" % node.path()) diff --git a/openpype/hosts/houdini/plugins/publish/validate_outnode_exists.py b/openpype/hosts/houdini/plugins/publish/validate_outnode_exists.py deleted file mode 100644 index bfa2d38f1a9..00000000000 --- a/openpype/hosts/houdini/plugins/publish/validate_outnode_exists.py +++ /dev/null @@ -1,50 +0,0 @@ -import pyblish.api -import openpype.api - - -class ValidatOutputNodeExists(pyblish.api.InstancePlugin): - """Validate if node attribute Create intermediate Directories is turned on - - Rules: - * The node must have Create intermediate Directories turned on to - ensure the output file will be created - - """ - - order = openpype.api.ValidateContentsOrder - families = ["*"] - hosts = ['houdini'] - label = "Output Node Exists" - - def process(self, instance): - invalid = self.get_invalid(instance) - if invalid: - raise RuntimeError("Could not find output node(s)!") - - @classmethod - def get_invalid(cls, instance): - - import hou - - result = set() - - node = instance[0] - if node.type().name() == "alembic": - soppath_parm = "sop_path" - else: - # Fall back to geometry node - soppath_parm = "soppath" - - sop_path = node.parm(soppath_parm).eval() - output_node = hou.node(sop_path) - - if output_node is None: - cls.log.error("Node at '%s' does not exist" % sop_path) - result.add(node.path()) - - # Added cam as this is a legit output type (cameras can't - if output_node.type().name() not in ["output", "cam"]: - cls.log.error("SOP Path does not end path at output node") - result.add(node.path()) - - return result diff --git a/openpype/hosts/houdini/plugins/publish/validate_output_node.py b/openpype/hosts/houdini/plugins/publish/validate_output_node.py index 5e20ee40d6c..0b60ab5c48b 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_output_node.py +++ b/openpype/hosts/houdini/plugins/publish/validate_output_node.py @@ -14,8 +14,7 @@ class ValidateOutputNode(pyblish.api.InstancePlugin): """ order = pyblish.api.ValidatorOrder - families = ["pointcache", - "vdbcache"] + families = ["pointcache", "vdbcache"] hosts = ["houdini"] label = "Validate Output Node" @@ -23,8 +22,10 @@ def process(self, instance): invalid = self.get_invalid(instance) if invalid: - raise RuntimeError("Output node(s) `%s` are incorrect. " - "See plug-in log for details." % invalid) + raise RuntimeError( + "Output node(s) `%s` are incorrect. " + "See plug-in log for details." % invalid + ) @classmethod def get_invalid(cls, instance): @@ -35,39 +36,42 @@ def get_invalid(cls, instance): if output_node is None: node = instance[0] - cls.log.error("SOP Output node in '%s' does not exist. " - "Ensure a valid SOP output path is set." - % node.path()) + cls.log.error( + "SOP Output node in '%s' does not exist. " + "Ensure a valid SOP output path is set." % node.path() + ) return [node.path()] # Output node must be a Sop node. if not isinstance(output_node, hou.SopNode): - cls.log.error("Output node %s is not a SOP node. 
" - "SOP Path must point to a SOP node, " - "instead found category type: %s" % ( - output_node.path(), - output_node.type().category().name() - ) - ) + cls.log.error( + "Output node %s is not a SOP node. " + "SOP Path must point to a SOP node, " + "instead found category type: %s" + % (output_node.path(), output_node.type().category().name()) + ) return [output_node.path()] # For the sake of completeness also assert the category type # is Sop to avoid potential edge case scenarios even though # the isinstance check above should be stricter than this category assert output_node.type().category().name() == "Sop", ( - "Output node %s is not of category Sop. This is a bug.." % - output_node.path() + "Output node %s is not of category Sop. This is a bug.." + % output_node.path() ) # Check if output node has incoming connections if not output_node.inputConnections(): - cls.log.error("Output node `%s` has no incoming connections" - % output_node.path()) + cls.log.error( + "Output node `%s` has no incoming connections" + % output_node.path() + ) return [output_node.path()] # Ensure the output node has at least Geometry data if not output_node.geometry(): - cls.log.error("Output node `%s` has no geometry data." - % output_node.path()) + cls.log.error( + "Output node `%s` has no geometry data." % output_node.path() + ) return [output_node.path()] diff --git a/openpype/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py b/openpype/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py index 608e2361983..3c15532be8a 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py +++ b/openpype/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py @@ -19,8 +19,9 @@ class ValidatePrimitiveHierarchyPaths(pyblish.api.InstancePlugin): def process(self, instance): invalid = self.get_invalid(instance) if invalid: - raise RuntimeError("See log for details. " - "Invalid nodes: {0}".format(invalid)) + raise RuntimeError( + "See log for details. " "Invalid nodes: {0}".format(invalid) + ) @classmethod def get_invalid(cls, instance): @@ -28,48 +29,68 @@ def get_invalid(cls, instance): import hou output = instance.data["output_node"] - prims = output.geometry().prims() rop = instance[0] build_from_path = rop.parm("build_from_path").eval() if not build_from_path: - cls.log.debug("Alembic ROP has 'Build from Path' disabled. " - "Validation is ignored..") + cls.log.debug( + "Alembic ROP has 'Build from Path' disabled. " + "Validation is ignored.." + ) return path_attr = rop.parm("path_attrib").eval() if not path_attr: - cls.log.error("The Alembic ROP node has no Path Attribute" - "value set, but 'Build Hierarchy from Attribute'" - "is enabled.") + cls.log.error( + "The Alembic ROP node has no Path Attribute" + "value set, but 'Build Hierarchy from Attribute'" + "is enabled." + ) return [rop.path()] cls.log.debug("Checking for attribute: %s" % path_attr) - missing_attr = [] - invalid_attr = [] - for prim in prims: - - try: - path = prim.stringAttribValue(path_attr) - except hou.OperationFailed: - # Attribute does not exist. - missing_attr.append(prim) - continue - - if not path: - # Empty path value is invalid. - invalid_attr.append(prim) - continue + # Check if the primitive attribute exists + frame = instance.data.get("startFrame", 0) + geo = output.geometryAtFrame(frame) + + # If there are no primitives on the current frame then we can't + # check whether the path names are correct. 
So we'll just issue a + # warning that the check can't be done consistently and skip + # validation. + if len(geo.iterPrims()) == 0: + cls.log.warning( + "No primitives found on current frame. Validation" + " for primitive hierarchy paths will be skipped," + " thus can't be validated." + ) + return - if missing_attr: - cls.log.info("Prims are missing attribute `%s`" % path_attr) + # Check if there are any values for the primitives + attrib = geo.findPrimAttrib(path_attr) + if not attrib: + cls.log.info( + "Geometry Primitives are missing " + "path attribute: `%s`" % path_attr + ) + return [output.path()] - if invalid_attr: - cls.log.info("Prims have no value for attribute `%s` " - "(%s of %s prims)" % (path_attr, - len(invalid_attr), - len(prims))) + # Ensure at least a single string value is present + if not attrib.strings(): + cls.log.info( + "Primitive path attribute has no " + "string values: %s" % path_attr + ) + return [output.path()] - if missing_attr or invalid_attr: + paths = geo.primStringAttribValues(path_attr) + # Ensure all primitives are set to a valid path + # Collect all invalid primitive numbers + invalid_prims = [i for i, path in enumerate(paths) if not path] + if invalid_prims: + num_prims = len(geo.iterPrims()) # faster than len(geo.prims()) + cls.log.info( + "Prims have no value for attribute `%s` " + "(%s of %s prims)" % (path_attr, len(invalid_prims), num_prims) + ) return [output.path()] diff --git a/openpype/hosts/houdini/plugins/publish/validate_remote_publish.py b/openpype/hosts/houdini/plugins/publish/validate_remote_publish.py new file mode 100644 index 00000000000..95c66edff0a --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/validate_remote_publish.py @@ -0,0 +1,43 @@ +import pyblish.api +import openpype.api + +from openpype.hosts.houdini.api import lib + +import hou + + +class ValidateRemotePublishOutNode(pyblish.api.ContextPlugin): + """Validate the remote publish out node exists for Deadline to trigger.""" + + order = pyblish.api.ValidatorOrder - 0.4 + families = ["*"] + hosts = ["houdini"] + targets = ["deadline"] + label = "Remote Publish ROP node" + actions = [openpype.api.RepairContextAction] + + def process(self, context): + + cmd = "import colorbleed.lib; colorbleed.lib.publish_remote()" + + node = hou.node("/out/REMOTE_PUBLISH") + if not node: + raise RuntimeError("Missing REMOTE_PUBLISH node.") + + # We ensure it's a shell node and that it has the pre-render script + # set correctly. Plus the shell script it will trigger should be + # completely empty (doing nothing) + assert node.type().name() == "shell", "Must be shell ROP node" + assert node.parm("command").eval() == "", "Must have no command" + assert not node.parm("shellexec").eval(), "Must not execute in shell" + assert ( + node.parm("prerender").eval() == cmd + ), "REMOTE_PUBLISH node does not have correct prerender script." 
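+        # Taken together: a valid REMOTE_PUBLISH node is a shell ROP that
+        # executes nothing itself and only triggers the publish from its
+        # pre-render script, whose script type must be "python" (checked
+        # below).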
+ assert ( + node.parm("lprerender").eval() == "python" + ), "REMOTE_PUBLISH node prerender script type not set to 'python'" + + @classmethod + def repair(cls, context): + """(Re)create the node if it fails to pass validation.""" + lib.create_remote_publish_node(force=True) diff --git a/openpype/hosts/houdini/plugins/publish/validate_remote_publish_enabled.py b/openpype/hosts/houdini/plugins/publish/validate_remote_publish_enabled.py new file mode 100644 index 00000000000..b681fd0ee1d --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/validate_remote_publish_enabled.py @@ -0,0 +1,35 @@ +import pyblish.api +import openpype.api + +import hou + + +class ValidateRemotePublishEnabled(pyblish.api.ContextPlugin): + """Validate the remote publish node is *not* bypassed.""" + + order = pyblish.api.ValidatorOrder - 0.39 + families = ["*"] + hosts = ["houdini"] + targets = ["deadline"] + label = "Remote Publish ROP enabled" + actions = [openpype.api.RepairContextAction] + + def process(self, context): + + node = hou.node("/out/REMOTE_PUBLISH") + if not node: + raise RuntimeError("Missing REMOTE_PUBLISH node.") + + if node.isBypassed(): + raise RuntimeError("REMOTE_PUBLISH must not be bypassed.") + + @classmethod + def repair(cls, context): + """(Re)create the node if it fails to pass validation.""" + + node = hou.node("/out/REMOTE_PUBLISH") + if not node: + raise RuntimeError("Missing REMOTE_PUBLISH node.") + + cls.log.info("Disabling bypass on /out/REMOTE_PUBLISH") + node.bypass(False) diff --git a/openpype/hosts/houdini/plugins/publish/validate_sop_output_node.py b/openpype/hosts/houdini/plugins/publish/validate_sop_output_node.py new file mode 100644 index 00000000000..a5a07b1b1a2 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/validate_sop_output_node.py @@ -0,0 +1,80 @@ +import pyblish.api + + +class ValidateSopOutputNode(pyblish.api.InstancePlugin): + """Validate the instance SOP Output Node. + + This will ensure: + - The SOP Path is set. + - The SOP Path refers to an existing object. + - The SOP Path node is a SOP node. + - The SOP Path node has at least one input connection (has an input) + - The SOP Path has geometry data. + + """ + + order = pyblish.api.ValidatorOrder + families = ["pointcache", "vdbcache"] + hosts = ["houdini"] + label = "Validate Output Node" + + def process(self, instance): + + invalid = self.get_invalid(instance) + if invalid: + raise RuntimeError( + "Output node(s) `%s` are incorrect. " + "See plug-in log for details." % invalid + ) + + @classmethod + def get_invalid(cls, instance): + + import hou + + output_node = instance.data["output_node"] + + if output_node is None: + node = instance[0] + cls.log.error( + "SOP Output node in '%s' does not exist. " + "Ensure a valid SOP output path is set." % node.path() + ) + + return [node.path()] + + # Output node must be a Sop node. + if not isinstance(output_node, hou.SopNode): + cls.log.error( + "Output node %s is not a SOP node. " + "SOP Path must point to a SOP node, " + "instead found category type: %s" + % (output_node.path(), output_node.type().category().name()) + ) + return [output_node.path()] + + # For the sake of completeness also assert the category type + # is Sop to avoid potential edge case scenarios even though + # the isinstance check above should be stricter than this category + assert output_node.type().category().name() == "Sop", ( + "Output node %s is not of category Sop. This is a bug.." 
+ % output_node.path() + ) + + # Ensure the node is cooked and succeeds to cook so we can correctly + # check for its geometry data. + if output_node.needsToCook(): + cls.log.debug("Cooking node: %s" % output_node.path()) + try: + output_node.cook() + except hou.Error as exc: + cls.log.error("Cook failed: %s" % exc) + cls.log.error(output_node.errors()[0]) + return [output_node.path()] + + # Ensure the output node has at least Geometry data + if not output_node.geometry(): + cls.log.error( + "Output node `%s` has no geometry data." % output_node.path() + ) + return [output_node.path()] diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_layer_path_backslashes.py b/openpype/hosts/houdini/plugins/publish/validate_usd_layer_path_backslashes.py new file mode 100644 index 00000000000..ac0181aed29 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/validate_usd_layer_path_backslashes.py @@ -0,0 +1,50 @@ +import pyblish.api + +import openpype.hosts.houdini.api.usd as hou_usdlib + + +class ValidateUSDLayerPathBackslashes(pyblish.api.InstancePlugin): + """Validate USD loaded paths have no backslashes. + + This is a crucial validation for HUSK USD rendering as Houdini's + USD Render ROP will fail to write out a .usd file for rendering that + correctly preserves the backslashes, e.g. it will incorrectly convert a + '\t' to a TAB character disallowing HUSK to find those specific files. + + This validation is redundant for usdModel since that flattens the model + before write. As such it will never have any used layers with a path. + + """ + + order = pyblish.api.ValidatorOrder + families = ["usdSetDress", "usdShade", "usd", "usdrender"] + hosts = ["houdini"] + label = "USD Layer path backslashes" + optional = True + + def process(self, instance): + + rop = instance[0] + lop_path = hou_usdlib.get_usd_rop_loppath(rop) + stage = lop_path.stage(apply_viewport_overrides=False) + + invalid = [] + for layer in stage.GetUsedLayers(): + references = layer.externalReferences + + for ref in references: + + # Ignore anonymous layers + if ref.startswith("anon:"): + continue + + # If any backslashes in the path consider it invalid + if "\\" in ref: + self.log.error("Found invalid path: %s" % ref) + invalid.append(layer) + + if invalid: + raise RuntimeError( + "Loaded layers have backslashes. " + "This is invalid for HUSK USD rendering." + ) diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_model_and_shade.py b/openpype/hosts/houdini/plugins/publish/validate_usd_model_and_shade.py new file mode 100644 index 00000000000..2fd2f5eb9fa --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/validate_usd_model_and_shade.py @@ -0,0 +1,76 @@ +import pyblish.api + +import openpype.hosts.houdini.api.usd as hou_usdlib + + +from pxr import UsdShade, UsdRender, UsdLux + + +def fullname(o): + """Get fully qualified class name""" + module = o.__module__ + if module is None or module == str.__module__: + return o.__name__ + return module + "." + o.__name__ + + +class ValidateUsdModel(pyblish.api.InstancePlugin): + """Validate USD Model. + + Disallow Shaders, Render settings, products and vars and Lux lights. 
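+
+    For example, a stray shader prim left in a model layer would be logged
+    as "Disallowed UsdShade.Shader: /asset/mtl/surface" (illustrative prim
+    path) and fail the publish.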
+ + """ + + order = pyblish.api.ValidatorOrder + families = ["usdModel"] + hosts = ["houdini"] + label = "Validate USD Model" + optional = True + + disallowed = [ + UsdShade.Shader, + UsdRender.Settings, + UsdRender.Product, + UsdRender.Var, + UsdLux.Light, + ] + + def process(self, instance): + + rop = instance[0] + lop_path = hou_usdlib.get_usd_rop_loppath(rop) + stage = lop_path.stage(apply_viewport_overrides=False) + + invalid = [] + for prim in stage.Traverse(): + + for klass in self.disallowed: + if klass(prim): + # Get full class name without pxr. prefix + name = fullname(klass).split("pxr.", 1)[-1] + path = str(prim.GetPath()) + self.log.warning("Disallowed %s: %s" % (name, path)) + + invalid.append(prim) + + if invalid: + prim_paths = sorted([str(prim.GetPath()) for prim in invalid]) + raise RuntimeError("Found invalid primitives: %s" % prim_paths) + + +class ValidateUsdShade(ValidateUsdModel): + """Validate usdShade. + + Disallow Render settings, products, vars and Lux lights. + + """ + + families = ["usdShade"] + label = "Validate USD Shade" + + disallowed = [ + UsdRender.Settings, + UsdRender.Product, + UsdRender.Var, + UsdLux.Light, + ] diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_output_node.py b/openpype/hosts/houdini/plugins/publish/validate_usd_output_node.py new file mode 100644 index 00000000000..1f10fafdf4b --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/validate_usd_output_node.py @@ -0,0 +1,52 @@ +import pyblish.api + + +class ValidateUSDOutputNode(pyblish.api.InstancePlugin): + """Validate the instance USD LOPs Output Node. + + This will ensure: + - The LOP Path is set. + - The LOP Path refers to an existing object. + - The LOP Path node is a LOP node. + + """ + + order = pyblish.api.ValidatorOrder + families = ["usd"] + hosts = ["houdini"] + label = "Validate Output Node (USD)" + + def process(self, instance): + + invalid = self.get_invalid(instance) + if invalid: + raise RuntimeError( + "Output node(s) `%s` are incorrect. " + "See plug-in log for details." % invalid + ) + + @classmethod + def get_invalid(cls, instance): + + import hou + + output_node = instance.data["output_node"] + + if output_node is None: + node = instance[0] + cls.log.error( + "USD node '%s' LOP path does not exist. " + "Ensure a valid LOP path is set." % node.path() + ) + + return [node.path()] + + # Output node must be a Sop node. + if not isinstance(output_node, hou.LopNode): + cls.log.error( + "Output node %s is not a LOP node. 
" + "LOP Path must point to a LOP node, " + "instead found category type: %s" + % (output_node.path(), output_node.type().category().name()) + ) + return [output_node.path()] diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_render_product_names.py b/openpype/hosts/houdini/plugins/publish/validate_usd_render_product_names.py new file mode 100644 index 00000000000..36336a03ae0 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/validate_usd_render_product_names.py @@ -0,0 +1,31 @@ +import pyblish.api + +import os + + +class ValidateUSDRenderProductNames(pyblish.api.InstancePlugin): + """Validate USD Render Product names are correctly set absolute paths.""" + + order = pyblish.api.ValidatorOrder + families = ["usdrender"] + hosts = ["houdini"] + label = "Validate USD Render Product Names" + optional = True + + def process(self, instance): + + invalid = [] + for filepath in instance.data["files"]: + + if not filepath: + invalid.append("Detected empty output filepath.") + + if not os.path.isabs(filepath): + invalid.append( + "Output file path is not " "absolute path: %s" % filepath + ) + + if invalid: + for message in invalid: + self.log.error(message) + raise RuntimeError("USD Render Paths are invalid.") diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_setdress.py b/openpype/hosts/houdini/plugins/publish/validate_usd_setdress.py new file mode 100644 index 00000000000..fb1094e6b5a --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/validate_usd_setdress.py @@ -0,0 +1,54 @@ +import pyblish.api + +import openpype.hosts.houdini.api.usd as hou_usdlib + + +class ValidateUsdSetDress(pyblish.api.InstancePlugin): + """Validate USD Set Dress. + + Must only have references or payloads. May not generate new mesh or + flattened meshes. + + """ + + order = pyblish.api.ValidatorOrder + families = ["usdSetDress"] + hosts = ["houdini"] + label = "Validate USD Set Dress" + optional = True + + def process(self, instance): + + from pxr import UsdGeom + + rop = instance[0] + lop_path = hou_usdlib.get_usd_rop_loppath(rop) + stage = lop_path.stage(apply_viewport_overrides=False) + + invalid = [] + for node in stage.Traverse(): + + if UsdGeom.Mesh(node): + # This solely checks whether there is any USD involved + # in this Prim's Stack and doesn't accurately tell us + # whether it was generated locally or not. + # TODO: More accurately track whether the Prim was created + # in the local scene + stack = node.GetPrimStack() + for sdf in stack: + path = sdf.layer.realPath + if path: + break + else: + prim_path = node.GetPath() + self.log.error( + "%s is not referenced geometry." % prim_path + ) + invalid.append(node) + + if invalid: + raise RuntimeError( + "SetDress contains local geometry. " + "This is not allowed, it must be an assembly " + "of referenced assets." 
+            )
diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py b/openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py
new file mode 100644
index 00000000000..fcfbf6b22dc
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py
@@ -0,0 +1,41 @@
+import re
+
+import pyblish.api
+import openpype.api
+
+from avalon import io
+
+
+class ValidateUSDShadeModelExists(pyblish.api.InstancePlugin):
+    """Validate the usdModel subset for this usdShade subset exists."""
+
+    order = openpype.api.ValidateContentsOrder
+    hosts = ["houdini"]
+    families = ["usdShade"]
+    label = "USD Shade model exists"
+
+    def process(self, instance):
+
+        asset = instance.data["asset"]
+        subset = instance.data["subset"]
+
+        # Assume shading variation starts after a dot separator
+        shade_subset = subset.split(".", 1)[0]
+        model_subset = re.sub("^usdShade", "usdModel", shade_subset)
+
+        asset_doc = io.find_one({"name": asset, "type": "asset"})
+        if not asset_doc:
+            raise RuntimeError("Asset does not exist: %s" % asset)
+
+        subset_doc = io.find_one(
+            {
+                "name": model_subset,
+                "type": "subset",
+                "parent": asset_doc["_id"],
+            }
+        )
+        if not subset_doc:
+            raise RuntimeError(
+                "USD Model subset not found: "
+                "%s (%s)" % (model_subset, asset)
+            )
diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_shade_workspace.py b/openpype/hosts/houdini/plugins/publish/validate_usd_shade_workspace.py
new file mode 100644
index 00000000000..a77ca2f3cb1
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/validate_usd_shade_workspace.py
@@ -0,0 +1,63 @@
+import pyblish.api
+import openpype.api
+
+import hou
+
+
+class ValidateUsdShadeWorkspace(pyblish.api.InstancePlugin):
+    """Validate USD Shading Workspace is correct version.
+
+    There have been some issues with outdated/erroneous Shading Workspaces
+    so this is to confirm everything is set as it should.
+
+    """
+
+    order = openpype.api.ValidateContentsOrder
+    hosts = ["houdini"]
+    families = ["usdShade"]
+    label = "USD Shade Workspace"
+
+    def process(self, instance):
+
+        rop = instance[0]
+        workspace = rop.parent()
+
+        definition = workspace.type().definition()
+        name = definition.nodeType().name()
+        library = definition.libraryFilePath()
+
+        all_definitions = hou.hda.definitionsInFile(library)
+        node_type, version = name.rsplit(":", 1)
+        version = float(version)
+
+        highest = version
+        for other_definition in all_definitions:
+            other_name = other_definition.nodeType().name()
+            other_node_type, other_version = other_name.rsplit(":", 1)
+            other_version = float(other_version)
+
+            if node_type != other_node_type:
+                continue
+
+            # Get highest version
+            highest = max(highest, other_version)
+
+        if version != highest:
+            raise RuntimeError(
+                "Shading Workspace is not the latest version."
+                " Found %s. Latest is %s." % (version, highest)
+            )
+
+        # There were some issues with the editable node not having the right
+        # configured path. So for now let's ensure that is correct too.
+        value = (
+            'avalon://`chs("../asset_name")`/'
+            'usdShade`chs("../model_variantname1")`.usd'
+        )
+        rop_value = rop.parm("lopoutput").rawValue()
+        if rop_value != value:
+            raise RuntimeError(
+                "Shading Workspace has invalid 'lopoutput'"
+                " parameter value. The Shading Workspace"
+                " needs to be reset to its default values."
+ ) diff --git a/openpype/hosts/houdini/plugins/publish/validate_vdb_input_node.py b/openpype/hosts/houdini/plugins/publish/validate_vdb_input_node.py index 7b23d73ac70..0ae1bc94eb3 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_vdb_input_node.py +++ b/openpype/hosts/houdini/plugins/publish/validate_vdb_input_node.py @@ -3,7 +3,7 @@ class ValidateVDBInputNode(pyblish.api.InstancePlugin): - """Validate that the node connected to the output node is of type VDB + """Validate that the node connected to the output node is of type VDB. Regardless of the amount of VDBs create the output will need to have an equal amount of VDBs, points, primitives and vertices @@ -24,8 +24,9 @@ class ValidateVDBInputNode(pyblish.api.InstancePlugin): def process(self, instance): invalid = self.get_invalid(instance) if invalid: - raise RuntimeError("Node connected to the output node is not" - "of type VDB!") + raise RuntimeError( + "Node connected to the output node is not" "of type VDB!" + ) @classmethod def get_invalid(cls, instance): diff --git a/openpype/hosts/houdini/plugins/publish/validate_vdb_output_node.py b/openpype/hosts/houdini/plugins/publish/validate_vdb_output_node.py new file mode 100644 index 00000000000..1ba840b71d3 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/validate_vdb_output_node.py @@ -0,0 +1,73 @@ +import pyblish.api +import openpype.api +import hou + + +class ValidateVDBOutputNode(pyblish.api.InstancePlugin): + """Validate that the node connected to the output node is of type VDB. + + Regardless of the amount of VDBs create the output will need to have an + equal amount of VDBs, points, primitives and vertices + + A VDB is an inherited type of Prim, holds the following data: + - Primitives: 1 + - Points: 1 + - Vertices: 1 + - VDBs: 1 + + """ + + order = openpype.api.ValidateContentsOrder + 0.1 + families = ["vdbcache"] + hosts = ["houdini"] + label = "Validate Output Node (VDB)" + + def process(self, instance): + invalid = self.get_invalid(instance) + if invalid: + raise RuntimeError( + "Node connected to the output node is not" " of type VDB!" + ) + + @classmethod + def get_invalid(cls, instance): + + node = instance.data["output_node"] + if node is None: + cls.log.error( + "SOP path is not correctly set on " + "ROP node '%s'." % instance[0].path() + ) + return [instance] + + frame = instance.data.get("frameStart", 0) + geometry = node.geometryAtFrame(frame) + if geometry is None: + # No geometry data on this node, maybe the node hasn't cooked? + cls.log.error( + "SOP node has no geometry data. " + "Is it cooked? 
%s" % node.path() + ) + return [node] + + prims = geometry.prims() + nr_of_prims = len(prims) + + # All primitives must be hou.VDB + invalid_prim = False + for prim in prims: + if not isinstance(prim, hou.VDB): + cls.log.error("Found non-VDB primitive: %s" % prim) + invalid_prim = True + if invalid_prim: + return [instance] + + nr_of_points = len(geometry.points()) + if nr_of_points != nr_of_prims: + cls.log.error("The number of primitives and points do not match") + return [instance] + + for prim in prims: + if prim.numVertices() != 1: + cls.log.error("Found primitive with more than 1 vertex!") + return [instance] diff --git a/openpype/hosts/houdini/startup/scripts/123.py b/openpype/hosts/houdini/startup/scripts/123.py index 6d90b8352e0..4233d68c15e 100644 --- a/openpype/hosts/houdini/startup/scripts/123.py +++ b/openpype/hosts/houdini/startup/scripts/123.py @@ -1,5 +1,4 @@ from avalon import api, houdini -import hou def main(): diff --git a/openpype/hosts/houdini/vendor/husdoutputprocessors/__init__.py b/openpype/hosts/houdini/vendor/husdoutputprocessors/__init__.py new file mode 100644 index 00000000000..69e3be50dac --- /dev/null +++ b/openpype/hosts/houdini/vendor/husdoutputprocessors/__init__.py @@ -0,0 +1 @@ +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py b/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py new file mode 100644 index 00000000000..4071eb3e0c7 --- /dev/null +++ b/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py @@ -0,0 +1,168 @@ +import hou +import husdoutputprocessors.base as base +import os +import re +import logging + +import colorbleed.usdlib as usdlib + + +def _get_project_publish_template(): + """Return publish template from database for current project""" + from avalon import io + project = io.find_one({"type": "project"}, + projection={"config.template.publish": True}) + return project["config"]["template"]["publish"] + + +class AvalonURIOutputProcessor(base.OutputProcessorBase): + """Process Avalon URIs into their full path equivalents. + + """ + + _parameters = None + _param_prefix = 'avalonurioutputprocessor_' + _parms = { + "use_publish_paths": _param_prefix + "use_publish_paths" + } + + def __init__(self): + """ There is only one object of each output processor class that is + ever created in a Houdini session. Therefore be very careful + about what data gets put in this object. 
+ """ + self._template = None + self._use_publish_paths = False + self._cache = dict() + + def displayName(self): + return 'Avalon URI Output Processor' + + def parameters(self): + + if not self._parameters: + parameters = hou.ParmTemplateGroup() + use_publish_path = hou.ToggleParmTemplate( + name=self._parms["use_publish_paths"], + label='Resolve Reference paths to publish paths', + default_value=False, + help=("When enabled any paths for Layers, References or " + "Payloads are resolved to published master versions.\n" + "This is usually only used by the publishing pipeline, " + "but can be used for testing too.")) + parameters.append(use_publish_path) + self._parameters = parameters.asDialogScript() + + return self._parameters + + def beginSave(self, config_node, t): + self._template = _get_project_publish_template() + + parm = self._parms["use_publish_paths"] + self._use_publish_paths = config_node.parm(parm).evalAtTime(t) + self._cache.clear() + + def endSave(self): + self._template = None + self._use_publish_paths = None + self._cache.clear() + + def processAsset(self, + asset_path, + asset_path_for_save, + referencing_layer_path, + asset_is_layer, + for_save): + """ + Args: + asset_path (str): The incoming file path you want to alter or not. + asset_path_for_save (bool): Whether the current path is a + referenced path in the USD file. When True, return the path + you want inside USD file. + referencing_layer_path (str): ??? + asset_is_layer (bool): Whether this asset is a USD layer file. + If this is False, the asset is something else (for example, + a texture or volume file). + for_save (bool): Whether the asset path is for a file to be saved + out. If so, then return actual written filepath. + + Returns: + The refactored asset path. + + """ + + # Retrieve from cache if this query occurred before (optimization) + cache_key = (asset_path, asset_path_for_save, asset_is_layer, for_save) + if cache_key in self._cache: + return self._cache[cache_key] + + relative_template = "{asset}_{subset}.{ext}" + uri_data = usdlib.parse_avalon_uri(asset_path) + if uri_data: + + if for_save: + # Set save output path to a relative path so other + # processors can potentially manage it easily? + path = relative_template.format(**uri_data) + + print("Avalon URI Resolver: %s -> %s" % (asset_path, path)) + self._cache[cache_key] = path + return path + + if self._use_publish_paths: + # Resolve to an Avalon published asset for embedded paths + path = self._get_usd_master_path(**uri_data) + else: + path = relative_template.format(**uri_data) + + print("Avalon URI Resolver: %s -> %s" % (asset_path, path)) + self._cache[cache_key] = path + return path + + self._cache[cache_key] = asset_path + return asset_path + + def _get_usd_master_path(self, + asset, + subset, + ext): + """Get the filepath for a .usd file of a subset. + + This will return the path to an unversioned master file generated by + `usd_master_file.py`. 
+ + """ + + from avalon import api, io + + PROJECT = api.Session["AVALON_PROJECT"] + asset_doc = io.find_one({"name": asset, + "type": "asset"}) + if not asset_doc: + raise RuntimeError("Invalid asset name: '%s'" % asset) + + root = api.registered_root() + path = self._template.format(**{ + "root": root, + "project": PROJECT, + "silo": asset_doc["silo"], + "asset": asset_doc["name"], + "subset": subset, + "representation": ext, + "version": 0 # stub version zero + }) + + # Remove the version folder + subset_folder = os.path.dirname(os.path.dirname(path)) + master_folder = os.path.join(subset_folder, "master") + fname = "{0}.{1}".format(subset, ext) + + return os.path.join(master_folder, fname).replace("\\", "/") + + +output_processor = AvalonURIOutputProcessor() + + +def usdOutputProcessor(): + return output_processor + diff --git a/openpype/hosts/houdini/vendor/husdoutputprocessors/stagingdir_processor.py b/openpype/hosts/houdini/vendor/husdoutputprocessors/stagingdir_processor.py new file mode 100644 index 00000000000..d8e36d5aa81 --- /dev/null +++ b/openpype/hosts/houdini/vendor/husdoutputprocessors/stagingdir_processor.py @@ -0,0 +1,90 @@ +import hou +import husdoutputprocessors.base as base +import os + + +class StagingDirOutputProcessor(base.OutputProcessorBase): + """Output all USD Rop file nodes into the Staging Directory + + Ignore any folders and paths set in the Configured Layers + and USD Rop node, just take the filename and save into a + single directory. + + """ + theParameters = None + parameter_prefix = "stagingdiroutputprocessor_" + stagingdir_parm_name = parameter_prefix + "stagingDir" + + def __init__(self): + self.staging_dir = None + + def displayName(self): + return 'StagingDir Output Processor' + + def parameters(self): + if not self.theParameters: + parameters = hou.ParmTemplateGroup() + rootdirparm = hou.StringParmTemplate( + self.stagingdir_parm_name, + 'Staging Directory', 1, + string_type=hou.stringParmType.FileReference, + file_type=hou.fileType.Directory + ) + parameters.append(rootdirparm) + self.theParameters = parameters.asDialogScript() + return self.theParameters + + def beginSave(self, config_node, t): + + # Use the Root Directory parameter if it is set. + root_dir_parm = config_node.parm(self.stagingdir_parm_name) + if root_dir_parm: + self.staging_dir = root_dir_parm.evalAtTime(t) + + if not self.staging_dir: + out_file_parm = config_node.parm('lopoutput') + if out_file_parm: + self.staging_dir = out_file_parm.evalAtTime(t) + if self.staging_dir: + (self.staging_dir, filename) = os.path.split(self.staging_dir) + + def endSave(self): + self.staging_dir = None + + def processAsset(self, asset_path, + asset_path_for_save, + referencing_layer_path, + asset_is_layer, + for_save): + """ + Args: + asset_path (str): The incoming file path you want to alter or not. + asset_path_for_save (bool): Whether the current path is a + referenced path in the USD file. When True, return the path + you want inside USD file. + referencing_layer_path (str): ??? + asset_is_layer (bool): Whether this asset is a USD layer file. + If this is False, the asset is something else (for example, + a texture or volume file). + for_save (bool): Whether the asset path is for a file to be saved + out. If so, then return actual written filepath. + + Returns: + The refactored asset path. + + """ + + # Treat save paths as being relative to the output path. 
+        if for_save and self.staging_dir:
+            # Whenever we're processing a Save Path make sure to
+            # resolve it to the Staging Directory
+            filename = os.path.basename(asset_path)
+            return os.path.join(self.staging_dir, filename)
+
+        return asset_path
+
+
+output_processor = StagingDirOutputProcessor()
+
+
+def usdOutputProcessor():
+    return output_processor
diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py b/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py
index a9279bf6e0b..ad37a7a068c 100644
--- a/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py
+++ b/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py
@@ -15,6 +15,46 @@ class PointCacheAlembicLoader(api.Loader):
     icon = "cube"
     color = "orange"
 
+    def get_task(
+        self, filename, asset_dir, asset_name, replace,
+        frame_start=None, frame_end=None
+    ):
+        task = unreal.AssetImportTask()
+        options = unreal.AbcImportSettings()
+        gc_settings = unreal.AbcGeometryCacheSettings()
+        conversion_settings = unreal.AbcConversionSettings()
+        sampling_settings = unreal.AbcSamplingSettings()
+
+        task.set_editor_property('filename', filename)
+        task.set_editor_property('destination_path', asset_dir)
+        task.set_editor_property('destination_name', asset_name)
+        task.set_editor_property('replace_existing', replace)
+        task.set_editor_property('automated', True)
+        task.set_editor_property('save', True)
+
+        # set import options here
+        # Unreal 4.24 ignores the settings. It works with Unreal 4.26
+        options.set_editor_property(
+            'import_type', unreal.AlembicImportType.GEOMETRY_CACHE)
+
+        gc_settings.set_editor_property('flatten_tracks', False)
+
+        conversion_settings.set_editor_property('flip_u', False)
+        conversion_settings.set_editor_property('flip_v', True)
+        conversion_settings.set_editor_property(
+            'scale', unreal.Vector(x=100.0, y=100.0, z=100.0))
+        conversion_settings.set_editor_property(
+            'rotation', unreal.Vector(x=-90.0, y=0.0, z=180.0))
+
+        # The frame range is only known on first load; update() calls this
+        # without frame data, in which case Unreal's default sampling
+        # settings are kept.
+        if frame_start is not None and frame_end is not None:
+            sampling_settings.set_editor_property('frame_start', frame_start)
+            sampling_settings.set_editor_property('frame_end', frame_end)
+            options.sampling_settings = sampling_settings
+
+        options.geometry_cache_settings = gc_settings
+        options.conversion_settings = conversion_settings
+        task.options = options
+
+        return task
+
     def load(self, context, name, namespace, data):
         """
         Load and containerise representation into Content Browser.
@@ -55,25 +95,17 @@ def load(self, context, name, namespace, data):
 
         unreal.EditorAssetLibrary.make_directory(asset_dir)
 
-        task = unreal.AssetImportTask()
-
-        task.set_editor_property('filename', self.fname)
-        task.set_editor_property('destination_path', asset_dir)
-        task.set_editor_property('destination_name', asset_name)
-        task.set_editor_property('replace_existing', False)
-        task.set_editor_property('automated', True)
-        task.set_editor_property('save', True)
+        frame_start = context.get('asset').get('data').get('frameStart')
+        frame_end = context.get('asset').get('data').get('frameEnd')
 
-        # set import options here
-        # Unreal 4.24 ignores the settings. It works with Unreal 4.26
-        options = unreal.AbcImportSettings()
-        options.set_editor_property(
-            'import_type', unreal.AlembicImportType.GEOMETRY_CACHE)
+        # If frame start and end are the same, we increase the end frame by
+        # one; otherwise Unreal will not import it
+        if frame_start == frame_end:
+            frame_end += 1
 
-        options.geometry_cache_settings.set_editor_property(
-            'flatten_tracks', False)
+        task = self.get_task(
+            self.fname, asset_dir, asset_name, False, frame_start, frame_end)
 
-        task.options = options
         unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])  # noqa: E501
 
         # Create Asset Container
@@ -109,28 +141,11 @@ def update(self, container, representation):
         source_path = api.get_representation_path(representation)
         destination_path = container["namespace"]
 
-        task = unreal.AssetImportTask()
-
-        task.set_editor_property('filename', source_path)
-        task.set_editor_property('destination_path', destination_path)
-        # strip suffix
-        task.set_editor_property('destination_name', name)
-        task.set_editor_property('replace_existing', True)
-        task.set_editor_property('automated', True)
-        task.set_editor_property('save', True)
+        task = self.get_task(source_path, destination_path, name, True)
 
-        # set import options here
-        # Unreal 4.24 ignores the settings. It works with Unreal 4.26
-        options = unreal.AbcImportSettings()
-        options.set_editor_property(
-            'import_type', unreal.AlembicImportType.GEOMETRY_CACHE)
-
-        options.geometry_cache_settings.set_editor_property(
-            'flatten_tracks', False)
-
-        task.options = options
         # do import fbx and replace existing data
         unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])
+
         container_path = "{}/{}".format(container["namespace"],
                                         container["objectName"])
         # update metadata
diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py b/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py
index 12b9320f72e..ccec31b8329 100644
--- a/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py
+++ b/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py
@@ -15,6 +15,39 @@ class StaticMeshAlembicLoader(api.Loader):
     icon = "cube"
     color = "orange"
 
+    def get_task(self, filename, asset_dir, asset_name, replace):
+        task = unreal.AssetImportTask()
+        options = unreal.AbcImportSettings()
+        sm_settings = unreal.AbcStaticMeshSettings()
+        conversion_settings = unreal.AbcConversionSettings()
+
+        task.set_editor_property('filename', filename)
+        task.set_editor_property('destination_path', asset_dir)
+        task.set_editor_property('destination_name', asset_name)
+        task.set_editor_property('replace_existing', replace)
+        task.set_editor_property('automated', True)
+        task.set_editor_property('save', True)
+
+        # set import options here
+        # Unreal 4.24 ignores the settings. It works with Unreal 4.26
+        options.set_editor_property(
+            'import_type', unreal.AlembicImportType.STATIC_MESH)
+
+        sm_settings.set_editor_property('merge_meshes', True)
+
+        conversion_settings.set_editor_property('flip_u', False)
+        conversion_settings.set_editor_property('flip_v', True)
+        conversion_settings.set_editor_property(
+            'scale', unreal.Vector(x=100.0, y=100.0, z=100.0))
+        conversion_settings.set_editor_property(
+            'rotation', unreal.Vector(x=-90.0, y=0.0, z=180.0))
+
+        options.static_mesh_settings = sm_settings
+        options.conversion_settings = conversion_settings
+        task.options = options
+
+        return task
+
     def load(self, context, name, namespace, data):
         """
         Load and containerise representation into Content Browser.
@@ -55,22 +88,8 @@ def load(self, context, name, namespace, data): unreal.EditorAssetLibrary.make_directory(asset_dir) - task = unreal.AssetImportTask() + task = self.get_task(self.fname, asset_dir, asset_name, False) - task.set_editor_property('filename', self.fname) - task.set_editor_property('destination_path', asset_dir) - task.set_editor_property('destination_name', asset_name) - task.set_editor_property('replace_existing', False) - task.set_editor_property('automated', True) - task.set_editor_property('save', True) - - # set import options here - # Unreal 4.24 ignores the settings. It works with Unreal 4.26 - options = unreal.AbcImportSettings() - options.set_editor_property( - 'import_type', unreal.AlembicImportType.STATIC_MESH) - - task.options = options unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 # Create Asset Container @@ -106,25 +125,11 @@ def update(self, container, representation): source_path = api.get_representation_path(representation) destination_path = container["namespace"] - task = unreal.AssetImportTask() + task = self.get_task(source_path, destination_path, name, True) - task.set_editor_property('filename', source_path) - task.set_editor_property('destination_path', destination_path) - # strip suffix - task.set_editor_property('destination_name', name) - task.set_editor_property('replace_existing', True) - task.set_editor_property('automated', True) - task.set_editor_property('save', True) - - # set import options here - # Unreal 4.24 ignores the settings. It works with Unreal 4.26 - options = unreal.AbcImportSettings() - options.set_editor_property( - 'import_type', unreal.AlembicImportType.STATIC_MESH) - - task.options = options # do import fbx and replace existing data unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) + container_path = "{}/{}".format(container["namespace"], container["objectName"]) # update metadata diff --git a/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py b/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py index dcb566fa4c1..d25f84ea690 100644 --- a/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py +++ b/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py @@ -15,6 +15,31 @@ class StaticMeshFBXLoader(api.Loader): icon = "cube" color = "orange" + def get_task(self, filename, asset_dir, asset_name, replace): + task = unreal.AssetImportTask() + options = unreal.FbxImportUI() + import_data = unreal.FbxStaticMeshImportData() + + task.set_editor_property('filename', filename) + task.set_editor_property('destination_path', asset_dir) + task.set_editor_property('destination_name', asset_name) + task.set_editor_property('replace_existing', replace) + task.set_editor_property('automated', True) + task.set_editor_property('save', True) + + # set import options here + options.set_editor_property( + 'automated_import_should_detect_type', False) + options.set_editor_property('import_animations', False) + + import_data.set_editor_property('combine_meshes', True) + import_data.set_editor_property('remove_degenerates', False) + + options.static_mesh_import_data = import_data + task.options = options + + return task + def load(self, context, name, namespace, data): """ Load and containerise representation into Content Browser. 
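All three loaders in this patch (the two Alembic ones above and the FBX one here) now follow the same split: build an `AssetImportTask`, attach format-specific options, run it. If further deduplication were ever wanted, the shared boilerplate could move into one helper; a sketch under that assumption, not part of this PR:

```python
# Sketch only: the AssetImportTask boilerplate the three loaders share
# could be factored into a single helper. Uses only the editor properties
# already set in the loaders above.
import unreal


def make_import_task(filename, asset_dir, asset_name, replace, options):
    """Build a pre-configured AssetImportTask; `options` carries the
    format-specific settings (AbcImportSettings or FbxImportUI)."""
    task = unreal.AssetImportTask()
    task.set_editor_property('filename', filename)
    task.set_editor_property('destination_path', asset_dir)
    task.set_editor_property('destination_name', asset_name)
    task.set_editor_property('replace_existing', replace)
    task.set_editor_property('automated', True)
    task.set_editor_property('save', True)
    task.options = options
    return task
```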
@@ -55,22 +80,8 @@ def load(self, context, name, namespace, data): unreal.EditorAssetLibrary.make_directory(asset_dir) - task = unreal.AssetImportTask() - - task.set_editor_property('filename', self.fname) - task.set_editor_property('destination_path', asset_dir) - task.set_editor_property('destination_name', asset_name) - task.set_editor_property('replace_existing', False) - task.set_editor_property('automated', True) - task.set_editor_property('save', True) - - # set import options here - options = unreal.FbxImportUI() - options.set_editor_property( - 'automated_import_should_detect_type', False) - options.set_editor_property('import_animations', False) + task = self.get_task(self.fname, asset_dir, asset_name, False) - task.options = options unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 # Create Asset Container @@ -102,29 +113,15 @@ def load(self, context, name, namespace, data): return asset_content def update(self, container, representation): - name = container["name"] + name = container["asset_name"] source_path = api.get_representation_path(representation) destination_path = container["namespace"] - task = unreal.AssetImportTask() + task = self.get_task(source_path, destination_path, name, True) - task.set_editor_property('filename', source_path) - task.set_editor_property('destination_path', destination_path) - # strip suffix - task.set_editor_property('destination_name', name) - task.set_editor_property('replace_existing', True) - task.set_editor_property('automated', True) - task.set_editor_property('save', True) - - # set import options here - options = unreal.FbxImportUI() - options.set_editor_property( - 'automated_import_should_detect_type', False) - options.set_editor_property('import_animations', False) - - task.options = options # do import fbx and replace existing data unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) + container_path = "{}/{}".format(container["namespace"], container["objectName"]) # update metadata diff --git a/openpype/hosts/unreal/plugins/publish/extract_layout.py b/openpype/hosts/unreal/plugins/publish/extract_layout.py index 2d9f6eb3d14..a47187cf47b 100644 --- a/openpype/hosts/unreal/plugins/publish/extract_layout.py +++ b/openpype/hosts/unreal/plugins/publish/extract_layout.py @@ -83,7 +83,7 @@ def process(self, instance): "z": transform.translation.z }, "rotation": { - "x": math.radians(transform.rotation.euler().x + 90.0), + "x": math.radians(transform.rotation.euler().x), "y": math.radians(transform.rotation.euler().y), "z": math.radians(180.0 - transform.rotation.euler().z) }, diff --git a/openpype/lib/usdlib.py b/openpype/lib/usdlib.py new file mode 100644 index 00000000000..3ae7430c7b4 --- /dev/null +++ b/openpype/lib/usdlib.py @@ -0,0 +1,350 @@ +import os +import re +import logging + +try: + from pxr import Usd, UsdGeom, Sdf, Kind +except ImportError: + # Allow to fall back on Multiverse 6.3.0+ pxr usd library + from mvpxr import Usd, UsdGeom, Sdf, Kind + +from avalon import io, api + +log = logging.getLogger(__name__) + + +# The predefined steps order used for bootstrapping USD Shots and Assets. +# These are ordered in order from strongest to weakest opinions, like in USD. 
+PIPELINE = {
+    "shot": [
+        "usdLighting",
+        "usdFx",
+        "usdSimulation",
+        "usdAnimation",
+        "usdLayout",
+    ],
+    "asset": ["usdShade", "usdModel"],
+}
+
+
+def create_asset(
+    filepath, asset_name, reference_layers, kind=Kind.Tokens.component
+):
+    """
+    Creates an asset file that consists of a top level layer and sublayers
+    for shading and geometry.
+
+    Args:
+        filepath (str): Filepath where the asset.usd file will be saved.
+        asset_name (str): The name for the Asset identifier and default prim.
+        reference_layers (list): USD files to reference in the asset.
+            Note that the bottom layer (first file, like a model) would
+            be last in the list. The strongest layer will be the first
+            index.
+        kind (pxr.Kind): A USD Kind for the root asset.
+
+    """
+    # Also see create_asset.py in PixarAnimationStudios/USD endToEnd example
+
+    log.info("Creating asset at %s", filepath)
+
+    # Make the layer ascii - good for readability, plus the file is small
+    root_layer = Sdf.Layer.CreateNew(filepath, args={"format": "usda"})
+    stage = Usd.Stage.Open(root_layer)
+
+    # Define a prim for the asset and make it the default for the stage.
+    asset_prim = UsdGeom.Xform.Define(stage, "/%s" % asset_name).GetPrim()
+    stage.SetDefaultPrim(asset_prim)
+
+    # Let viewing applications know how to orient a free camera properly
+    UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y)
+
+    # Usually we will "loft up" the kind authored into the exported geometry
+    # layer rather than re-stamping here; we'll leave that for a later
+    # tutorial, and just be explicit here.
+    model = Usd.ModelAPI(asset_prim)
+    if kind:
+        model.SetKind(kind)
+
+    model.SetAssetName(asset_name)
+    model.SetAssetIdentifier("%s/%s.usd" % (asset_name, asset_name))
+
+    # Add references to the asset prim
+    references = asset_prim.GetReferences()
+    for reference_filepath in reference_layers:
+        references.AddReference(reference_filepath)
+
+    stage.GetRootLayer().Save()
+
+
+def create_shot(filepath, layers, create_layers=False):
+    """Create a shot with separate layers for departments.
+
+    Args:
+        filepath (str): Filepath where the shot.usd file will be saved.
+        layers (list): Layer paths to add verbatim to the stage's
+            subLayerPaths. When a provided layer path does not exist yet
+            it is generated using Sdf.Layer.CreateNew (see create_layers).
+        create_layers (bool): Whether to create the stub layers on disk if
+            they do not exist yet.
+
+    Returns:
+        str: The saved shot file path
+
+    """
+    # Also see create_shot.py in PixarAnimationStudios/USD endToEnd example
+
+    stage = Usd.Stage.CreateNew(filepath)
+    log.info("Creating shot at %s" % filepath)
+
+    for layer_path in layers:
+        if create_layers and not os.path.exists(layer_path):
+            # We use the Sdf API here to quickly create layers. Also, we're
+            # using it as a way to author the subLayerPaths as there is no
+            # way to do that directly in the Usd API.
+            layer_folder = os.path.dirname(layer_path)
+            if not os.path.exists(layer_folder):
+                os.makedirs(layer_folder)
+
+            Sdf.Layer.CreateNew(layer_path)
+
+        stage.GetRootLayer().subLayerPaths.append(layer_path)
+
+    # Let viewing applications know how to orient a free camera properly
+    UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y)
+    stage.GetRootLayer().Save()
+
+    return filepath
+
+
+def create_model(filename, asset, variant_subsets):
+    """Create a USD Model file.
+
+    For each of the variation paths it will payload the path and set its
+    relevant variation name.
+ + """ + + asset_doc = io.find_one({"name": asset, "type": "asset"}) + assert asset_doc, "Asset not found: %s" % asset + + variants = [] + for subset in variant_subsets: + prefix = "usdModel" + if subset.startswith(prefix): + # Strip off `usdModel_` + variant = subset[len(prefix):] + else: + raise ValueError( + "Model subsets must start " "with usdModel: %s" % subset + ) + + path = get_usd_master_path( + asset=asset_doc, subset=subset, representation="usd" + ) + variants.append((variant, path)) + + stage = _create_variants_file( + filename, + variants=variants, + variantset="model", + variant_prim="/root", + reference_prim="/root/geo", + as_payload=True, + ) + + UsdGeom.SetStageMetersPerUnit(stage, 1) + UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y) + + # modelAPI = Usd.ModelAPI(root_prim) + # modelAPI.SetKind(Kind.Tokens.component) + + # See http://openusd.org/docs/api/class_usd_model_a_p_i.html#details + # for more on assetInfo + # modelAPI.SetAssetName(asset) + # modelAPI.SetAssetIdentifier(asset) + + stage.GetRootLayer().Save() + + +def create_shade(filename, asset, variant_subsets): + """Create a master USD shade file for an asset. + + For each available model variation this should generate a reference + to a `usdShade_{modelVariant}` subset. + + """ + + asset_doc = io.find_one({"name": asset, "type": "asset"}) + assert asset_doc, "Asset not found: %s" % asset + + variants = [] + + for subset in variant_subsets: + prefix = "usdModel" + if subset.startswith(prefix): + # Strip off `usdModel_` + variant = subset[len(prefix):] + else: + raise ValueError( + "Model subsets must start " "with usdModel: %s" % subset + ) + + shade_subset = re.sub("^usdModel", "usdShade", subset) + path = get_usd_master_path( + asset=asset_doc, subset=shade_subset, representation="usd" + ) + variants.append((variant, path)) + + stage = _create_variants_file( + filename, variants=variants, variantset="model", variant_prim="/root" + ) + + stage.GetRootLayer().Save() + + +def create_shade_variation(filename, asset, model_variant, shade_variants): + """Create the master Shade file for a specific model variant. + + This should reference all shade variants for the specific model variant. 
+ + """ + + asset_doc = io.find_one({"name": asset, "type": "asset"}) + assert asset_doc, "Asset not found: %s" % asset + + variants = [] + for variant in shade_variants: + subset = "usdShade_{model}_{shade}".format( + model=model_variant, shade=variant + ) + path = get_usd_master_path( + asset=asset_doc, subset=subset, representation="usd" + ) + variants.append((variant, path)) + + stage = _create_variants_file( + filename, variants=variants, variantset="shade", variant_prim="/root" + ) + + stage.GetRootLayer().Save() + + +def _create_variants_file( + filename, + variants, + variantset, + default_variant=None, + variant_prim="/root", + reference_prim=None, + set_default_variant=True, + as_payload=False, + skip_variant_on_single_file=True, +): + + root_layer = Sdf.Layer.CreateNew(filename, args={"format": "usda"}) + stage = Usd.Stage.Open(root_layer) + + root_prim = stage.DefinePrim(variant_prim) + stage.SetDefaultPrim(root_prim) + + def _reference(path): + """Reference/Payload path depending on function arguments""" + + if reference_prim: + prim = stage.DefinePrim(reference_prim) + else: + prim = root_prim + + if as_payload: + # Payload + prim.GetPayloads().AddPayload(Sdf.Payload(path)) + else: + # Reference + prim.GetReferences().AddReference(Sdf.Reference(path)) + + assert variants, "Must have variants, got: %s" % variants + + log.info(filename) + + if skip_variant_on_single_file and len(variants) == 1: + # Reference directly, no variants + variant_path = variants[0][1] + _reference(variant_path) + + log.info("Non-variants..") + log.info("Path: %s" % variant_path) + + else: + # Variants + append = Usd.ListPositionBackOfAppendList + variant_set = root_prim.GetVariantSets().AddVariantSet( + variantset, append + ) + + for variant, variant_path in variants: + + if default_variant is None: + default_variant = variant + + variant_set.AddVariant(variant, append) + variant_set.SetVariantSelection(variant) + with variant_set.GetVariantEditContext(): + _reference(variant_path) + + log.info("Variants..") + log.info("Variant: %s" % variant) + log.info("Path: %s" % variant_path) + + if set_default_variant: + variant_set.SetVariantSelection(default_variant) + + return stage + + +def get_usd_master_path(asset, subset, representation): + """Get the filepath for a .usd file of a subset. + + This will return the path to an unversioned master file generated by + `usd_master_file.py`. 
+
+    """
+
+    project = io.find_one(
+        {"type": "project"}, projection={"config.template.publish": True}
+    )
+    template = project["config"]["template"]["publish"]
+
+    if isinstance(asset, dict) and "silo" in asset and "name" in asset:
+        # Allow explicitly passing asset document
+        asset_doc = asset
+    else:
+        asset_doc = io.find_one({"name": asset, "type": "asset"})
+
+    path = template.format(
+        **{
+            "root": api.registered_root(),
+            "project": api.Session["AVALON_PROJECT"],
+            "silo": asset_doc["silo"],
+            "asset": asset_doc["name"],
+            "subset": subset,
+            "representation": representation,
+            "version": 0,  # stub version zero
+        }
+    )
+
+    # Remove the version folder
+    subset_folder = os.path.dirname(os.path.dirname(path))
+    master_folder = os.path.join(subset_folder, "master")
+    fname = "{0}.{1}".format(subset, representation)
+
+    return os.path.join(master_folder, fname).replace("\\", "/")
+
+
+def parse_avalon_uri(uri):
+    # URI Pattern: avalon://{asset}/{subset}.{ext}
+    pattern = r"avalon://(?P<asset>[^/.]*)/(?P<subset>[^/]*)\.(?P<ext>.*)"
+    if uri.startswith("avalon://"):
+        match = re.match(pattern, uri)
+        if match:
+            return match.groupdict()
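Since the named groups (`asset`, `subset`, `ext`) feed straight into `groupdict()`, the parser can be sanity-checked in isolation; a minimal usage sketch with made-up values:

```python
# Illustrative round-trip checks for parse_avalon_uri (values made up).
assert parse_avalon_uri("avalon://hero/usdModelMain.usd") == {
    "asset": "hero", "subset": "usdModelMain", "ext": "usd"
}
# Non-avalon URIs fall through and return None.
assert parse_avalon_uri("/not/an/avalon/uri.usd") is None
```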
+ ) + return + + scene = context.data["currentFile"] + scenename = os.path.basename(scene) + + # Get project code + project = io.find_one({"type": "project"}) + code = project["data"].get("code", project["name"]) + + job_name = "{scene} [PUBLISH]".format(scene=scenename) + batch_name = "{code} - {scene}".format(code=code, scene=scenename) + deadline_user = "roy" # todo: get deadline user dynamically + + # Get only major.minor version of Houdini, ignore patch version + version = hou.applicationVersionString() + version = ".".join(version.split(".")[:2]) + + # Generate the payload for Deadline submission + payload = { + "JobInfo": { + "Plugin": "Houdini", + "Pool": "houdini", # todo: remove hardcoded pool + "BatchName": batch_name, + "Comment": context.data.get("comment", ""), + "Priority": 50, + "Frames": "1-1", # Always trigger a single frame + "IsFrameDependent": False, + "Name": job_name, + "UserName": deadline_user, + # "Comment": instance.context.data.get("comment", ""), + # "InitialStatus": state + }, + "PluginInfo": { + "Build": None, # Don't force build + "IgnoreInputs": True, + # Inputs + "SceneFile": scene, + "OutputDriver": "/out/REMOTE_PUBLISH", + # Mandatory for Deadline + "Version": version, + }, + # Mandatory for Deadline, may be empty + "AuxFiles": [], + } + + # Process submission per individual instance if the submission + # is set to publish each instance as a separate job. Else submit + # a single job to process all instances. + per_instance = context.data.get("separateJobPerInstance", False) + if per_instance: + # Submit a job per instance + job_name = payload["JobInfo"]["Name"] + for instance in instance_names: + # Clarify job name per submission (include instance name) + payload["JobInfo"]["Name"] = job_name + " - %s" % instance + self.submit_job( + payload, instances=[instance], deadline=AVALON_DEADLINE + ) + else: + # Submit a single job + self.submit_job( + payload, instances=instance_names, deadline=AVALON_DEADLINE + ) + + def submit_job(self, payload, instances, deadline): + + # Ensure we operate on a copy, a shallow copy is fine. + payload = payload.copy() + + # Include critical environment variables with submission + api.Session + keys = [ + # Submit along the current Avalon tool setup that we launched + # this application with so the Render Slave can build its own + # similar environment using it, e.g. "houdini17.5;pluginx2.3" + "AVALON_TOOLS", + ] + + environment = dict( + {key: os.environ[key] for key in keys if key in os.environ}, + **api.Session + ) + environment["PYBLISH_ACTIVE_INSTANCES"] = ",".join(instances) + + payload["JobInfo"].update( + { + "EnvironmentKeyValue%d" + % index: "{key}={value}".format( + key=key, value=environment[key] + ) + for index, key in enumerate(environment) + } + ) + + # Submit + self.log.info("Submitting..") + self.log.debug(json.dumps(payload, indent=4, sort_keys=True)) + + # E.g. 
http://192.168.0.1:8082/api/jobs + url = "{}/api/jobs".format(deadline) + response = requests.post(url, json=payload) + if not response.ok: + raise Exception(response.text) diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py new file mode 100644 index 00000000000..f471d788b66 --- /dev/null +++ b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py @@ -0,0 +1,158 @@ +import os +import json +import getpass + +from avalon import api +from avalon.vendor import requests + +import pyblish.api + +import hou + + +class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin): + """Submit Solaris USD Render ROPs to Deadline. + + Renders are submitted to a Deadline Web Service as + supplied via the environment variable AVALON_DEADLINE. + + Target "local": + Even though this does *not* render locally this is seen as + a 'local' submission as it is the regular way of submitting + a Houdini render locally. + + """ + + label = "Submit Render to Deadline" + order = pyblish.api.IntegratorOrder + hosts = ["houdini"] + families = ["usdrender", + "redshift_rop"] + targets = ["local"] + + def process(self, instance): + + context = instance.context + code = context.data["code"] + filepath = context.data["currentFile"] + filename = os.path.basename(filepath) + comment = context.data.get("comment", "") + deadline_user = context.data.get("deadlineUser", getpass.getuser()) + jobname = "%s - %s" % (filename, instance.name) + + # Support code prefix label for batch name + batch_name = filename + if code: + batch_name = "{0} - {1}".format(code, batch_name) + + # Output driver to render + driver = instance[0] + + # StartFrame to EndFrame by byFrameStep + frames = "{start}-{end}x{step}".format( + start=int(instance.data["startFrame"]), + end=int(instance.data["endFrame"]), + step=int(instance.data["byFrameStep"]), + ) + + # Documentation for keys available at: + # https://docs.thinkboxsoftware.com + # /products/deadline/8.0/1_User%20Manual/manual + # /manual-submission.html#job-info-file-options + payload = { + "JobInfo": { + # Top-level group name + "BatchName": batch_name, + + # Job name, as seen in Monitor + "Name": jobname, + + # Arbitrary username, for visualisation in Monitor + "UserName": deadline_user, + + "Plugin": "Houdini", + "Pool": "houdini_redshift", # todo: remove hardcoded pool + "Frames": frames, + + "ChunkSize": instance.data.get("chunkSize", 10), + + "Comment": comment + }, + "PluginInfo": { + # Input + "SceneFile": filepath, + "OutputDriver": driver.path(), + + # Mandatory for Deadline + # Houdini version without patch number + "Version": hou.applicationVersionString().rsplit(".", 1)[0], + + "IgnoreInputs": True + }, + + # Mandatory for Deadline, may be empty + "AuxFiles": [] + } + + # Include critical environment variables with submission + api.Session + keys = [ + # Submit along the current Avalon tool setup that we launched + # this application with so the Render Slave can build its own + # similar environment using it, e.g. 
"maya2018;vray4.x;yeti3.1.9" + "AVALON_TOOLS", + ] + environment = dict({key: os.environ[key] for key in keys + if key in os.environ}, **api.Session) + + payload["JobInfo"].update({ + "EnvironmentKeyValue%d" % index: "{key}={value}".format( + key=key, + value=environment[key] + ) for index, key in enumerate(environment) + }) + + # Include OutputFilename entries + # The first entry also enables double-click to preview rendered + # frames from Deadline Monitor + output_data = {} + for i, filepath in enumerate(instance.data["files"]): + dirname = os.path.dirname(filepath) + fname = os.path.basename(filepath) + output_data["OutputDirectory%d" % i] = dirname.replace("\\", "/") + output_data["OutputFilename%d" % i] = fname + + # For now ensure destination folder exists otherwise HUSK + # will fail to render the output image. This is supposedly fixed + # in new production builds of Houdini + # TODO Remove this workaround with Houdini 18.0.391+ + if not os.path.exists(dirname): + self.log.info("Ensuring output directory exists: %s" % + dirname) + os.makedirs(dirname) + + payload["JobInfo"].update(output_data) + + self.submit(instance, payload) + + def submit(self, instance, payload): + + AVALON_DEADLINE = api.Session.get("AVALON_DEADLINE", + "http://localhost:8082") + assert AVALON_DEADLINE, "Requires AVALON_DEADLINE" + + plugin = payload["JobInfo"]["Plugin"] + self.log.info("Using Render Plugin : {}".format(plugin)) + + self.log.info("Submitting..") + self.log.debug(json.dumps(payload, indent=4, sort_keys=True)) + + # E.g. http://192.168.0.1:8082/api/jobs + url = "{}/api/jobs".format(AVALON_DEADLINE) + response = requests.post(url, json=payload) + if not response.ok: + raise Exception(response.text) + + # Store output dir for unified publisher (filesequence) + output_dir = os.path.dirname(instance.data["files"][0]) + instance.data["outputDir"] = output_dir + instance.data["deadlineSubmissionJob"] = response.json() diff --git a/openpype/plugins/publish/collect_scene_version.py b/openpype/plugins/publish/collect_scene_version.py index 62969858c5b..ca12f2900c6 100644 --- a/openpype/plugins/publish/collect_scene_version.py +++ b/openpype/plugins/publish/collect_scene_version.py @@ -11,15 +11,22 @@ class CollectSceneVersion(pyblish.api.ContextPlugin): order = pyblish.api.CollectorOrder label = 'Collect Version' + hosts = [ + "aftereffects", + "blender", + "celaction", + "fusion", + "harmony", + "hiero", + "houdini", + "maya", + "nuke", + "photoshop", + "resolve", + "tvpaint" + ] def process(self, context): - if "standalonepublisher" in context.data.get("host", []): - return - - if "unreal" in pyblish.api.registered_hosts() or \ - "webpublisher" in pyblish.api.registered_hosts(): - return - assert context.data.get('currentFile'), "Cannot get current file" filename = os.path.basename(context.data.get('currentFile')) diff --git a/openpype/settings/defaults/project_settings/houdini.json b/openpype/settings/defaults/project_settings/houdini.json index 811a446e59a..809c732d6fa 100644 --- a/openpype/settings/defaults/project_settings/houdini.json +++ b/openpype/settings/defaults/project_settings/houdini.json @@ -1,4 +1,46 @@ { + "create": { + "CreateAlembicCamera": { + "enabled": true, + "defaults": [] + }, + "CreateCompositeSequence": { + "enabled": true, + "defaults": [] + }, + "CreatePointCache": { + "enabled": true, + "defaults": [] + }, + "CreateRedshiftROP": { + "enabled": true, + "defaults": [] + }, + "CreateRemotePublish": { + "enabled": true, + "defaults": [] + }, + "CreateVDBCache": { + "enabled": 
true,
+            "defaults": []
+        },
+        "CreateUSD": {
+            "enabled": false,
+            "defaults": []
+        },
+        "CreateUSDModel": {
+            "enabled": false,
+            "defaults": []
+        },
+        "USDCreateShadingWorkspace": {
+            "enabled": false,
+            "defaults": []
+        },
+        "CreateUSDRender": {
+            "enabled": false,
+            "defaults": []
+        }
+    },
     "publish": {
         "ValidateContainers": {
             "enabled": true,
diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_houdini.json b/openpype/settings/entities/schemas/projects_schema/schema_project_houdini.json
index c6de257a617..cad99dde22c 100644
--- a/openpype/settings/entities/schemas/projects_schema/schema_project_houdini.json
+++ b/openpype/settings/entities/schemas/projects_schema/schema_project_houdini.json
@@ -5,6 +5,10 @@
     "label": "Houdini",
     "is_file": true,
     "children": [
+        {
+            "type": "schema",
+            "name": "schema_houdini_create"
+        },
         {
             "type": "dict",
             "collapsible": true,
diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_houdini_create.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_houdini_create.json
new file mode 100644
index 00000000000..72b8032d4bd
--- /dev/null
+++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_houdini_create.json
@@ -0,0 +1,54 @@
+{
+    "type": "dict",
+    "collapsible": true,
+    "key": "create",
+    "label": "Creator plugins",
+    "children": [
+        {
+            "type": "schema_template",
+            "name": "template_create_plugin",
+            "template_data": [
+                {
+                    "key": "CreateAlembicCamera",
+                    "label": "Create Alembic Camera"
+                },
+                {
+                    "key": "CreateCompositeSequence",
+                    "label": "Create Composite (Image Sequence)"
+                },
+                {
+                    "key": "CreatePointCache",
+                    "label": "Create Point Cache"
+                },
+                {
+                    "key": "CreateRedshiftROP",
+                    "label": "Create Redshift ROP"
+                },
+                {
+                    "key": "CreateRemotePublish",
+                    "label": "Create Remote Publish"
+                },
+                {
+                    "key": "CreateVDBCache",
+                    "label": "Create VDB Cache"
+                },
+                {
+                    "key": "CreateUSD",
+                    "label": "Create USD"
+                },
+                {
+                    "key": "CreateUSDModel",
+                    "label": "Create USD Model"
+                },
+                {
+                    "key": "USDCreateShadingWorkspace",
+                    "label": "Create USD Shading Workspace"
+                },
+                {
+                    "key": "CreateUSDRender",
+                    "label": "Create USD Render"
+                }
+            ]
+        }
+    ]
+}
\ No newline at end of file
diff --git a/website/docs/artist_hosts_houdini.md b/website/docs/artist_hosts_houdini.md
new file mode 100644
index 00000000000..d2aadf05cbd
--- /dev/null
+++ b/website/docs/artist_hosts_houdini.md
@@ -0,0 +1,78 @@
+---
+id: artist_hosts_houdini
+title: Houdini
+sidebar_label: Houdini
+---
+
+## OpenPype global tools
+
+- [Work Files](artist_tools.md#workfiles)
+- [Create](artist_tools.md#creator)
+- [Load](artist_tools.md#loader)
+- [Manage (Inventory)](artist_tools.md#inventory)
+- [Publish](artist_tools.md#publisher)
+- [Library Loader](artist_tools.md#library-loader)
+
+## Publishing Alembic Cameras
+You can publish a baked camera in the Alembic format. Select your camera, go to **OpenPype -> Create** and select **Camera (abc)**.
+This will create an Alembic ROP in **out** with the path and frame range already set. This node will have the name you've
+assigned in the **Creator** menu. For example, if you name the subset `Default`, the output Alembic Driver will be named
+`cameraDefault`. After that, you can run **OpenPype -> Publish** and, after some validations, your camera will be published
+to an `abc` file.
+
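Under the hood the creator simply drops a standard Alembic ROP into `/out`; a rough `hou` sketch of an equivalent node. The node name, object paths and frame values here are illustrative, not what OpenPype generates verbatim:

```python
import hou

# Illustrative only: an Alembic ROP comparable to what the creator makes.
rop = hou.node("/out").createNode("alembic", node_name="cameraDefault")
rop.parm("filename").set("$HIP/pyblish/cameraDefault.abc")
rop.parm("root").set("/obj")                 # hierarchy root to export under
rop.parm("objects").set("/obj/cameraMain")   # the camera to bake (made up)
rop.parm("trange").set(1)                    # render a frame range
rop.parmTuple("f").deleteAllKeyframes()      # drop $FSTART/$FEND expressions
rop.parmTuple("f").set((1001, 1050, 1))
```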
+## Publishing Composites - Image Sequences
+You can publish an image sequence directly from Houdini, using any `cop` network you have and publishing the image
+sequence generated from it. For example, I've created a simple **cop** graph to generate some noise:
+![Noise COP](assets/houdini_imagesequence_cop.png)
+
+If I want to publish it, I'll select the node I like - in this case `radialblur1` - go to **OpenPype -> Create** and
+select **Composite (Image Sequence)**. This will create the `/out/imagesequenceNoise` Composite ROP (I've named my subset
+*Noise*) with the frame range set. When you hit **Publish**, it will render the image sequence from the selected node.
+
+## Publishing Point Caches (alembic)
+Publishing point caches in the Alembic format is pretty straightforward, but by default it enforces better compatibility
+with other DCCs, so the exported data needs to be prepared in a certain way. You need to add a `path` attribute so the
+objects in the Alembic file are better structured. When using an Alembic round trip in Houdini (loading alembics,
+modifying them and then publishing the modifications), `path` is automatically resolved by the Alembic nodes.
+
+In this example, I've created this node graph on the **sop** level, and I want to publish it as a point cache.
+
+![Pointcache setup](assets/houdini_pointcache_path.png)
+
+*Note: `connectivity` will add an index for each primitive and `primitivewrangle1` will add the `path` attribute, so each
+primitive (`sphere1` and `sphere2`) gets a path in the form Maya expects - `strange_GRP/strange0_GEO/strange0_GEOShape`.
+How you handle the `path` attribute is up to you; this is just an example.*
+
+Now select the `output0` node, go to **OpenPype -> Create** and select **Point Cache**. It will create the
+Alembic ROP `/out/pointcacheStrange`.
+
+
+## Redshift
+:::note Work in progress
+This part of the documentation is still a work in progress.
+:::
+
+## USD (experimental support)
+### Publishing USD
+You can publish your Solaris Stage as a USD file.
+![Solaris USD](assets/houdini_usd_stage.png)
+
+This is a very simple test stage. I've selected the `output` **lop** node and went to **OpenPype -> Create**, where I've
+selected **USD**. This created the `/out/usdDefault` USD ROP node.
+
+### Publishing USD render
+
+USD Render works in a similar manner as a USD file, except it will create a **USD Render** ROP node in **out** and will
+publish the images produced by it. If you have a node selected in the Solaris Stage, it will be added as the **lop path**
+on the ROP.
+
+## Publishing VDB
+
+Publishing VDB files works as with the other data types. In this example, I've created a simple PyroFX explosion from a
+sphere. In `pyro_import`, I've converted the volume to VDB:
+
+![VDB Setup](assets/houdini_vdb_setup.png)
+
+I've selected `vdb1`, went to **OpenPype -> Create** and selected **VDB Cache**. This will create a
+geometry ROP in `/out` and set its path to output VDB files. During the publishing process the
+whole DOP network is cooked.
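The VDB Cache creator similarly boils down to a Geometry ROP writing one `.vdb` file per frame; a rough `hou` sketch, where the node name, SOP path and frame range are made up for illustration:

```python
import hou

# Illustrative only: a Geometry ROP like the one the VDB Cache creator adds.
rop = hou.node("/out").createNode("geometry", node_name="vdbcacheDefault")
rop.parm("soppath").set("/obj/pyro_import/vdb1")   # SOP holding the VDB prims
rop.parm("sopoutput").set("$HIP/pyblish/vdbcacheDefault.$F4.vdb")
rop.parm("trange").set(1)                          # cook every frame
rop.parmTuple("f").deleteAllKeyframes()
rop.parmTuple("f").set((1001, 1050, 1))
```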
+ diff --git a/website/docs/assets/houdini_imagesequence_cop.png b/website/docs/assets/houdini_imagesequence_cop.png new file mode 100644 index 00000000000..54ed5977b90 Binary files /dev/null and b/website/docs/assets/houdini_imagesequence_cop.png differ diff --git a/website/docs/assets/houdini_pointcache_path.png b/website/docs/assets/houdini_pointcache_path.png new file mode 100644 index 00000000000..3687a9c0ddf Binary files /dev/null and b/website/docs/assets/houdini_pointcache_path.png differ diff --git a/website/docs/assets/houdini_usd_stage.png b/website/docs/assets/houdini_usd_stage.png new file mode 100644 index 00000000000..cba94286048 Binary files /dev/null and b/website/docs/assets/houdini_usd_stage.png differ diff --git a/website/docs/assets/houdini_vdb_setup.png b/website/docs/assets/houdini_vdb_setup.png new file mode 100644 index 00000000000..e27e0b6c368 Binary files /dev/null and b/website/docs/assets/houdini_vdb_setup.png differ diff --git a/website/sidebars.js b/website/sidebars.js index 488814a385e..3a4b933b9ac 100644 --- a/website/sidebars.js +++ b/website/sidebars.js @@ -22,6 +22,7 @@ module.exports = { "artist_hosts_maya", "artist_hosts_blender", "artist_hosts_harmony", + "artist_hosts_houdini", "artist_hosts_aftereffects", "artist_hosts_resolve", "artist_hosts_photoshop", diff --git a/website/yarn.lock b/website/yarn.lock index a63bf377316..88f3db082e6 100644 --- a/website/yarn.lock +++ b/website/yarn.lock @@ -6168,9 +6168,9 @@ path-key@^3.0.0, path-key@^3.1.0: integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q== path-parse@^1.0.6: - version "1.0.6" - resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.6.tgz#d62dbb5679405d72c4737ec58600e9ddcf06d24c" - integrity sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw== + version "1.0.7" + resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" + integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== path-to-regexp@0.1.7: version "0.1.7"