Merge pull request #9 from ynput/feature/houdini_cleanup_after_publishing

Houdini: Cleanup after publishing
MustafaJafar authored Jun 27, 2024
2 parents 552270d + 78ab03a commit eed7634
Showing 6 changed files with 195 additions and 104 deletions.
@@ -27,47 +27,35 @@ def process(self, instance):
+        # Why do we need this particular collector to collect the expected
+        # output files from a ROP node. Don't we have a dedicated collector
+        # for that yet?
+        # Answer: No, we don't have a generic expected-file collector,
+        #   because different product types need different logic,
+        #   e.g. check CollectMantraROPRenderProducts
+        #   and CollectKarmaROPRenderProducts.
         # Collect expected files
         ropnode = hou.node(instance.data["instance_node"])
         output_parm = lib.get_output_parameter(ropnode)
         expected_filepath = output_parm.eval()
         instance.data.setdefault("files", list())
         instance.data.setdefault("expectedFiles", list())
-        if instance.data.get("frames"):
-            files = self.get_files(instance, expected_filepath)
-            # list of files
-            instance.data["files"].extend(files)
-        else:
+
+        frames = instance.data.get("frames", "")
+        if isinstance(frames, str):
             # single file
-            instance.data["files"].append(output_parm.eval())
-        cache_files = {"_": instance.data["files"]}
-        # Convert instance family to pointcache if it is bgeo or abc
-        # because ???
-        self.log.debug(instance.data["families"])
+            instance.data["files"].append(expected_filepath)
+        else:
+            # list of files
+            staging_dir, _ = os.path.split(expected_filepath)
+            instance.data["files"].extend(
+                ["{}/{}".format(staging_dir, f) for f in frames]
+            )
+
+        cache_files = {"cache": instance.data["files"]}

         instance.data.update({
             "plugin": "Houdini",
             "publish": True
         })
         instance.data["families"].append("publish.hou")
         instance.data["expectedFiles"].append(cache_files)

-        self.log.debug("{}".format(instance.data))
-
-    def get_files(self, instance, output_parm):
-        """Get the files with the frame range data
-
-        Args:
-            instance (_type_): instance
-            output_parm (_type_): path of output parameter
-
-        Returns:
-            files: a list of files
-        """
-        directory = os.path.dirname(output_parm)
-
-        files = [
-            os.path.join(directory, frame).replace("\\", "/")
-            for frame in instance.data["frames"]
-        ]
-
-        return files
+        self.log.debug("Caching on farm expected files: {}".format(instance.data["expectedFiles"]))
@@ -0,0 +1,98 @@
+import os
+from typing import List
+
+import pyblish.api
+from ayon_core.pipeline import AYONPyblishPluginMixin
+from ayon_houdini.api import plugin
+
+
+class CollectFilesForCleaningUp(plugin.HoudiniInstancePlugin,
+                                AYONPyblishPluginMixin):
+    """Collect files for cleaning up.
+
+    This collector collects output files and adds them to the file
+    remove list.
+
+    CAUTION:
+        This collector registers exported files and the parent folder
+        for deletion in the `ExplicitCleanUp` plug-in.
+        Please refer to `ExplicitCleanUp`'s docstring for further info.
+
+    Notes:
+        Artists are free to change the file path in the ROP node.
+
+        Farm instances will be processed on the farm by other dedicated
+        plugins that live in the core addon, e.g. the `CollectRenderedFiles`
+        plugin. These dedicated plugins don't support tracking and removing
+        intermediate render files.
+
+        Local render instances don't track intermediate render files,
+        therefore this plugin doesn't support removing intermediate
+        render files either.
+
+        HDA is not added to this plugin's options in server settings.
+        Cleaning up HDA products would break the scene, as Houdini would
+        no longer be able to find the HDA file.
+        In addition, HDA plugins always save HDAs to external files.
+        Therefore, cleaning up HDA products would break the ability to go
+        back to the workfile and continue working on the HDA.
+    """
+
+    # It should run after CollectFrames and the Collect Render plugins,
+    # and before CollectLocalRenderInstances.
+    order = pyblish.api.CollectorOrder + 0.115
+
+    hosts = ["houdini"]
+    families = ["*"]
+    label = "Collect Files For Cleaning Up"
+
+    def process(self, instance):
+
+        if instance.data.get("farm"):
+            self.log.debug("Should be processed on farm, skipping.")
+            return
+
+        files: List[str] = []
+        staging_dirs: List[str] = []
+        expected_files = instance.data.get("expectedFiles", [])
+
+        # Prefer 'expectedFiles' over 'frames' because it usually contains
+        # more output files than just a single file or a single sequence
+        # of files.
+        if expected_files:
+            # Products with expected files.
+            # These can be render products or caches submitted to the farm.
+            for expected in expected_files:
+                # expected.values() is a list of lists
+                for output_files in expected.values():
+                    staging_dir, _ = os.path.split(output_files[0])
+                    if staging_dir not in staging_dirs:
+                        staging_dirs.append(staging_dir)
+                    files.extend(output_files)
+        else:
+            # Products with frames or a single file.
+            frames = instance.data.get("frames")
+            if frames is None:
+                self.log.warning(
+                    f"No frames data found on instance {instance}"
+                    ". Skipping collection for caching on farm..."
+                )
+                return
+
+            staging_dir = instance.data.get("stagingDir")
+            staging_dirs.append(staging_dir)
+
+            if isinstance(frames, str):
+                # single file
+                files.append(f"{staging_dir}/{frames}")
+            else:
+                # list of frames
+                files.extend(
+                    [f"{staging_dir}/{frame}" for frame in frames]
+                )
+
+        self.log.debug(
+            f"Add directories to 'cleanupEmptyDirs': {staging_dirs}")
+        instance.context.data["cleanupEmptyDirs"].extend(staging_dirs)
+
+        self.log.debug(f"Add files to 'cleanupFullPaths': {files}")
+        instance.context.data["cleanupFullPaths"].extend(files)
@@ -1,9 +1,8 @@
 # -*- coding: utf-8 -*-
 """Collector plugin for frames data on ROP instances."""
 import os
-import re

 import hou  # noqa
+import clique
 import pyblish.api
 from ayon_houdini.api import lib, plugin
@@ -16,86 +15,49 @@ class CollectFrames(plugin.HoudiniInstancePlugin):
     order = pyblish.api.CollectorOrder + 0.1
     label = "Collect Frames"
     families = ["camera", "vdbcache", "imagesequence", "ass",
-                "redshiftproxy", "review", "pointcache", "fbx"]
+                "redshiftproxy", "review", "pointcache", "fbx",
+                "model"]

     def process(self, instance):

-        ropnode = hou.node(instance.data["instance_node"])
-
-        start_frame = instance.data.get("frameStartHandle", None)
-        end_frame = instance.data.get("frameEndHandle", None)
+        # CollectRopFrameRange computes `start_frame` and `end_frame`
+        # depending on the trange value.
+        start_frame = instance.data["frameStartHandle"]
+        end_frame = instance.data["frameEndHandle"]

+        # Evaluate the file name at the first frame.
+        ropnode = hou.node(instance.data["instance_node"])
         output_parm = lib.get_output_parameter(ropnode)
-        if start_frame is not None:
-            # When rendering only a single frame still explicitly
-            # get the name for that particular frame instead of current frame
-            output = output_parm.evalAtFrame(start_frame)
-        else:
-            self.log.warning("Using current frame: {}".format(hou.frame()))
-            output = output_parm.eval()
-
-        _, ext = lib.splitext(
-            output, allowed_multidot_extensions=[
-                ".ass.gz", ".bgeo.sc", ".bgeo.gz",
-                ".bgeo.lzma", ".bgeo.bz2"])
+        output = output_parm.evalAtFrame(start_frame)
         file_name = os.path.basename(output)
-        result = file_name
-
-        # Get the filename pattern match from the output
-        # path, so we can compute all frames that would
-        # come out from rendering the ROP node if there
-        # is a frame pattern in the name
-        pattern = r"\w+\.(\d+)" + re.escape(ext)
-        match = re.match(pattern, file_name)
-
-        if match and start_frame is not None:
-
-            # Check if frames are bigger than 1 (file collection)
-            # override the result
-            if end_frame - start_frame > 0:
-                result = self.create_file_list(
-                    match, int(start_frame), int(end_frame)
-                )

         # todo: `frames` currently conflicts with "explicit frames" for
         # a custom frame list. So this should be refactored.
         instance.data.update({
-            "frames": result,
+            "frames": file_name,  # Set frames to the file name by default.
             "stagingDir": os.path.dirname(output)
         })

-    @staticmethod
-    def create_file_list(match, start_frame, end_frame):
-        """Collect files based on frame range and `regex.match`
-
-        Args:
-            match(re.match): match object
-            start_frame(int): start of the animation
-            end_frame(int): end of the animation
-
-        Returns:
-            list
-        """
-
-        # Get the padding length
-        frame = match.group(1)
-        padding = len(frame)
-
-        # Get the parts of the filename surrounding the frame number,
-        # so we can put our own frame numbers in.
-        span = match.span(1)
-        prefix = match.string[: span[0]]
-        suffix = match.string[span[1]:]
-
-        # Generate filenames for all frames
-        result = []
-        for i in range(start_frame, end_frame + 1):
-
-            # Format frame number by the padding amount
-            str_frame = "{number:0{width}d}".format(number=i, width=padding)
-
-            file_name = prefix + str_frame + suffix
-            result.append(file_name)
-
-        return result
+        # Skip unnecessary logic if start and end frames are equal.
+        if start_frame == end_frame:
+            return
+
+        # Create a collection using the frame pattern,
+        # e.g. 'pointcacheBgeoCache_AB010.1001.bgeo' will be
+        # <Collection "pointcacheBgeoCache_AB010.%d.bgeo [1001]">
+        frame_collection, _ = clique.assemble(
+            [file_name],
+            patterns=[clique.PATTERNS["frames"]],
+            minimum_items=1
+        )
+
+        # Return as no frame pattern was detected.
+        if not frame_collection:
+            return
+
+        # It's always expected to be one collection.
+        frame_collection = frame_collection[0]
+        frame_collection.indexes.clear()
+        frame_collection.indexes.update(list(range(start_frame, end_frame + 1)))
+        instance.data["frames"] = list(frame_collection)
2 changes: 1 addition & 1 deletion server_addon/houdini/client/ayon_houdini/version.py
@@ -1,3 +1,3 @@
 # -*- coding: utf-8 -*-
 """Package declaring AYON addon 'houdini' version."""
-__version__ = "0.3.7"
+__version__ = "0.3.8"
2 changes: 1 addition & 1 deletion server_addon/houdini/package.py
@@ -1,6 +1,6 @@
 name = "houdini"
 title = "Houdini"
-version = "0.3.7"
+version = "0.3.8"

 client_dir = "ayon_houdini"
43 changes: 43 additions & 0 deletions server_addon/houdini/server/settings/publish.py
@@ -45,6 +45,39 @@ class CollectLocalRenderInstancesModel(BaseSettingsModel):
     )


+def product_types_enum():
+    return [
+        {"value": "camera", "label": "Camera (Abc)"},
+        {"value": "pointcache", "label": "PointCache (Abc)/PointCache (Bgeo)"},
+        {"value": "review", "label": "Review"},
+        {"value": "staticMesh", "label": "Static Mesh (FBX)"},
+        {"value": "usd", "label": "USD (experimental)"},
+        {"value": "vdbcache", "label": "VDB Cache"},
+        {"value": "imagesequence", "label": "Composite (Image Sequence)"},
+        {"value": "ass", "label": "Arnold ASS"},
+        {"value": "arnold_rop", "label": "Arnold ROP"},
+        {"value": "mantra_rop", "label": "Mantra ROP"},
+        {"value": "redshiftproxy", "label": "Redshift Proxy"},
+        {"value": "redshift_rop", "label": "Redshift ROP"},
+        {"value": "karma_rop", "label": "Karma ROP"},
+        {"value": "vray_rop", "label": "VRay ROP"},
+        {"value": "model", "label": "Model"},
+    ]
+
+
+class CollectFilesForCleaningUpModel(BaseSettingsModel):
+    enabled: bool = SettingsField(title="Enabled")
+    optional: bool = SettingsField(title="Optional")
+    active: bool = SettingsField(title="Active")
+
+    families: list[str] = SettingsField(
+        default_factory=list,
+        enum_resolver=product_types_enum,
+        conditionalEnum=True,
+        title="Product Types"
+    )
+
+
 class ValidateWorkfilePathsModel(BaseSettingsModel):
     enabled: bool = SettingsField(title="Enabled")
     optional: bool = SettingsField(title="Optional")
@@ -74,6 +107,10 @@ class PublishPluginsModel(BaseSettingsModel):
         default_factory=CollectChunkSizeModel,
         title="Collect Chunk Size"
     )
+    CollectFilesForCleaningUp: CollectFilesForCleaningUpModel = SettingsField(
+        default_factory=CollectFilesForCleaningUpModel,
+        title="Collect Files For Cleaning Up"
+    )
     CollectLocalRenderInstances: CollectLocalRenderInstancesModel = SettingsField(
         default_factory=CollectLocalRenderInstancesModel,
         title="Collect Local Render Instances"
@@ -113,6 +150,12 @@ class PublishPluginsModel(BaseSettingsModel):
         "optional": True,
         "chunk_size": 999999
     },
+    "CollectFilesForCleaningUp": {
+        "enabled": False,
+        "optional": True,
+        "active": True,
+        "families": []
+    },
     "CollectLocalRenderInstances": {
         "use_deadline_aov_filter": False,
         "aov_filter": {
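
Since the plugin ships disabled by default, a project would opt in through settings overrides. A hedged example following the defaults above (the family values are illustrative):

    "CollectFilesForCleaningUp": {
        "enabled": True,
        "optional": True,
        "active": True,
        "families": ["pointcache", "vdbcache"]
    }
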
