diff --git a/openpype/hosts/aftereffects/plugins/load/load_file.py b/openpype/hosts/aftereffects/plugins/load/load_file.py
index 8d52aac5461..9204c6c1faf 100644
--- a/openpype/hosts/aftereffects/plugins/load/load_file.py
+++ b/openpype/hosts/aftereffects/plugins/load/load_file.py
@@ -17,7 +17,8 @@ class FileLoader(api.AfterEffectsLoader):
         "render",
         "prerender",
         "review",
-        "audio"]
+        "audio",
+        "workfile"]
 
     representations = ["*"]
 
     def load(self, context, name=None, namespace=None, data=None):
diff --git a/openpype/hosts/hiero/api/plugin.py b/openpype/hosts/hiero/api/plugin.py
index 52f96261b21..67b2978f58b 100644
--- a/openpype/hosts/hiero/api/plugin.py
+++ b/openpype/hosts/hiero/api/plugin.py
@@ -697,6 +697,9 @@ def __init__(self, cls, track_item, **kwargs):
         # adding ui inputs if any
         self.ui_inputs = kwargs.get("ui_inputs", {})
 
+        project_settings = get_current_project_settings()
+        self.symlink = project_settings["hiero"]["create"]["CreateShotClip"]["symlink"]  # noqa
+
         # populate default data before we get other attributes
         self._populate_track_item_default_data()
 
@@ -751,7 +754,8 @@ def _populate_track_item_default_data(self):
             "_track_": self.track_name,
             "_clip_": self.ti_name,
             "_trackIndex_": self.track_index,
-            "_clipIndex_": self.ti_index
+            "_clipIndex_": self.ti_index,
+            "_symlink_": self.symlink
         }
 
     def _populate_attributes(self):
@@ -775,6 +779,11 @@ def _populate_attributes(self):
         self.hierarchy_data = self.ui_inputs.get(
             "hierarchyData", {}).get("value") or \
             self.track_item_default_data.copy()
+
+        ui_symlink = self.ui_inputs.get(
+            "hierarchyData", {}).get("value").get("symlink").get("value")
+        self.hierarchy_data["symlink"].update({"value": str(ui_symlink)})
+
         self.count_from = self.ui_inputs.get(
             "countFrom", {}).get("value") or self.count_from_default
         self.count_steps = self.ui_inputs.get(
diff --git a/openpype/hosts/hiero/plugins/create/create_shot_clip.py b/openpype/hosts/hiero/plugins/create/create_shot_clip.py
index d0c81cffa26..f39aa9ee6c2 100644
--- a/openpype/hosts/hiero/plugins/create/create_shot_clip.py
+++ b/openpype/hosts/hiero/plugins/create/create_shot_clip.py
@@ -102,7 +102,14 @@ class CreateShotClip(phiero.Creator):
                 "label": "{shot}",
                 "target": "tag",
                 "toolTip": "Name of shot. `#` is converted to paded number. \nAlso could be used with usable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
-                "order": 4}
+                "order": 4},
+            "symlink": {
+                "value": False,
+                "type": "QCheckBox",
+                "label": "Publish using symlink",
+                "target": "tag",
+                "toolTip": "Publish symlinks, don't copy files",
+                "order": 5}
         }
     },
     "verticalSync": {
diff --git a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py
index 40b3419e73c..1662e4a14c3 100644
--- a/openpype/hosts/maya/api/lib.py
+++ b/openpype/hosts/maya/api/lib.py
@@ -439,11 +439,28 @@ def imprint(node, data):
             add_type = {"attributeType": "enum", "enumName": ":".join(value)}
             set_type = {"keyable": False, "channelBox": True}
             value = 0  # enum default
+        elif isinstance(value, dict):
+            for key, group_list in value.items():
+                cmds.addAttr(
+                    node,
+                    longName=key,
+                    numberOfChildren=len(group_list),
+                    attributeType="compound"
+                )
+                for group in group_list:
+                    cmds.addAttr(
+                        node,
+                        longName=group,
+                        attributeType="bool",
+                        parent=key
+                    )
+            continue
        else:
             raise TypeError("Unsupported type: %r" % type(value))
 
-        cmds.addAttr(node, longName=key, **add_type)
-        cmds.setAttr(node + "." + key, value, **set_type)
+        if not isinstance(value, dict):
+            cmds.addAttr(node, longName=key, **add_type)
+            cmds.setAttr(node + "." + key, value, **set_type)
 
 
 def lsattr(attr, value=None):
@@ -2284,6 +2301,7 @@ def get_frame_range(include_animation_range=False):
         "handleStart": handle_start,
         "handleEnd": handle_end
     }
+
     if include_animation_range:
         # The animation range values are only included to define whether
         # the Maya time slider should include the handles or not.
@@ -2307,13 +2325,15 @@ def get_frame_range(include_animation_range=False):
             animation_start -= int(handle_start)
             animation_end += int(handle_end)
 
+        frame_range["frameStart"] = animation_start
+        frame_range["frameEnd"] = animation_end
         frame_range["animationStart"] = animation_start
         frame_range["animationEnd"] = animation_end
 
     return frame_range
 
 
-def reset_frame_range(playback=True, render=True, fps=True):
+def reset_frame_range(playback=True, render=True, fps=True, instances=True):
     """Set frame range to current asset
 
     Args:
@@ -2322,6 +2342,8 @@
         render (bool, Optional): Whether to set the maya render frame range.
             Defaults to True.
         fps (bool, Optional): Whether to set scene FPS. Defaults to True.
+        instances (bool, Optional): Whether to update publishable instances.
+            Defaults to True.
     """
     if fps:
         set_scene_fps(get_fps_for_current_context())
@@ -2349,6 +2371,12 @@
         cmds.setAttr("defaultRenderGlobals.startFrame", animation_start)
         cmds.setAttr("defaultRenderGlobals.endFrame", animation_end)
 
+    if instances:
+        project_name = get_current_project_name()
+        settings = get_project_settings(project_name)
+        if settings["maya"]["update_publishable_frame_range"]["enabled"]:
+            update_instances_frame_range()
+
 
 def reset_scene_resolution():
     """Apply the scene resolution from the project definition
@@ -3169,31 +3197,63 @@ def remove_render_layer_observer():
         pass
 
 
-def update_content_on_context_change():
+def iter_publish_instances():
+    """Iterate over publishable instances (their objectSets).
     """
-    This will update scene content to match new asset on context change
+    for node in cmds.ls(
+        "*.id",
+        long=True,
+        type="objectSet",
+        recursive=True,
+        objectsOnly=True
+    ):
+        if cmds.getAttr("{}.id".format(node)) != "pyblish.avalon.instance":
+            continue
+        yield node
+
+
+def update_instances_asset_name():
+    """Update 'asset' attribute of publishable instances (their objectSets)
+    that got one.
     """
-    scene_sets = cmds.listSets(allSets=True)
-    asset_doc = get_current_project_asset()
-    new_asset = asset_doc["name"]
-    new_data = asset_doc["data"]
-    for s in scene_sets:
-        try:
-            if cmds.getAttr("{}.id".format(s)) == "pyblish.avalon.instance":
-                attr = cmds.listAttr(s)
-                print(s)
-                if "asset" in attr:
-                    print(" - setting asset to: [ {} ]".format(new_asset))
-                    cmds.setAttr("{}.asset".format(s),
-                                 new_asset, type="string")
-                if "frameStart" in attr:
-                    cmds.setAttr("{}.frameStart".format(s),
-                                 new_data["frameStart"])
-                if "frameEnd" in attr:
-                    cmds.setAttr("{}.frameEnd".format(s),
-                                 new_data["frameEnd"],)
-        except ValueError:
-            pass
+
+    for instance in iter_publish_instances():
+        if not cmds.attributeQuery("asset", node=instance, exists=True):
+            continue
+        attr = "{}.asset".format(instance)
+        cmds.setAttr(attr, get_current_asset_name(), type="string")
+
+
+def update_instances_frame_range():
+    """Update 'frameStart', 'frameEnd', 'handleStart', 'handleEnd' and 'fps'
+    attributes of publishable instances (their objectSets) that got one.
+ """ + + attributes = ["frameStart", "frameEnd", "handleStart", "handleEnd", "fps"] + + attrs_per_instance = {} + for instance in iter_publish_instances(): + instance_attrs = [ + attr for attr in attributes + if cmds.attributeQuery(attr, node=instance, exists=True) + ] + + if instance_attrs: + attrs_per_instance[instance] = instance_attrs + + if not attrs_per_instance: + # no instances with any frame related attributes + return + + fields = ["data.{}".format(key) for key in attributes] + asset_doc = get_current_project_asset(fields=fields) + asset_data = asset_doc["data"] + + for node, attrs in attrs_per_instance.items(): + for attr in attrs: + plug = "{}.{}".format(node, attr) + value = asset_data[attr] + cmds.setAttr(plug, value) def show_message(title, msg): diff --git a/openpype/hosts/maya/api/lib_rendersettings.py b/openpype/hosts/maya/api/lib_rendersettings.py index 42cf29d0a79..95b61573b38 100644 --- a/openpype/hosts/maya/api/lib_rendersettings.py +++ b/openpype/hosts/maya/api/lib_rendersettings.py @@ -158,7 +158,12 @@ def _set_arnold_settings(self, width, height): cmds.setAttr( "defaultArnoldDriver.mergeAOVs", multi_exr) self._additional_attribs_setter(additional_options) - reset_frame_range(playback=False, fps=False, render=True) + reset_frame_range( + playback=False, + fps=False, + render=True, + instances=False + ) def _set_redshift_settings(self, width, height): """Sets settings for Redshift.""" diff --git a/openpype/hosts/maya/api/pipeline.py b/openpype/hosts/maya/api/pipeline.py index 9ec192e9e08..8047ecbc436 100644 --- a/openpype/hosts/maya/api/pipeline.py +++ b/openpype/hosts/maya/api/pipeline.py @@ -662,7 +662,8 @@ def on_task_changed(): with lib.suspended_refresh(): lib.set_context_settings() - lib.update_content_on_context_change() + lib.update_instances_frame_range() + lib.update_instances_asset_name() def before_workfile_open(): diff --git a/openpype/hosts/maya/api/workfile_template_builder.py b/openpype/hosts/maya/api/workfile_template_builder.py index a46b0e46944..020f7f133a0 100644 --- a/openpype/hosts/maya/api/workfile_template_builder.py +++ b/openpype/hosts/maya/api/workfile_template_builder.py @@ -14,7 +14,14 @@ WorkfileBuildPlaceholderDialog, ) -from .lib import read, imprint, get_reference_node, get_main_window +from .lib import ( + read, + imprint, + get_reference_node, + get_main_window, + update_instances_frame_range, + update_instances_asset_name, +) PLACEHOLDER_SET = "PLACEHOLDERS_SET" @@ -254,6 +261,8 @@ def post_placeholder_process(self, placeholder, failed): cmds.sets(node, addElement=PLACEHOLDER_SET) cmds.hide(node) cmds.setAttr(node + ".hiddenInOutliner", True) + update_instances_frame_range() + update_instances_asset_name() def delete_placeholder(self, placeholder): """Remove placeholder if building was successful""" diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index 6266689af41..e4afad81018 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -1,108 +1,440 @@ # -*- coding: utf-8 -*- """Create ``Render`` instance in Maya.""" +import json +import os +import appdirs +import requests + +from maya import cmds +from maya.app.renderSetup.model import renderSetup + +from openpype.settings import ( + get_system_settings, + get_project_settings, +) +from openpype.lib import requests_get +from openpype.modules import ModulesManager +from openpype.pipeline import legacy_io from openpype.hosts.maya.api import ( + 
+    lib,
     lib_rendersettings,
     plugin
 )
-from openpype.pipeline import CreatorError
-from openpype.lib import (
-    BoolDef,
-    NumberDef,
-)
 
 
-class CreateRenderlayer(plugin.RenderlayerCreator):
-    """Create and manages renderlayer subset per renderLayer in workfile.
+class CreateRender(plugin.Creator):
+    """Create *render* instance.
+
+    Render instances are not actually published, they hold options for
+    collecting of render data. If a render instance is present, it will
+    trigger collection of render layers, AOVs, cameras for either direct
+    submission to render farm or export as various standalone formats (like
+    V-Ray ``vrscenes`` or Arnold ``ass`` files) and then submitting them to
+    render farm.
+
+    Instance has the following attributes::
+
+        primaryPool (list of str): Primary list of slave machine pool to use.
+        secondaryPool (list of str): Optional secondary list of slave pools.
+        suspendPublishJob (bool): Suspend the job after it is submitted.
+        extendFrames (bool): Use already existing frames from previous version
+            to extend current render.
+        overrideExistingFrame (bool): Overwrite already existing frames.
+        priority (int): Submitted job priority
+        framesPerTask (int): How many frames per task to render. This is
+            basically job division on render farm.
+        whitelist (list of str): White list of slave machines
+        machineList (list of str): Specific list of slave machines to use
+        useMayaBatch (bool): Use Maya batch mode to render as opposed to
+            Maya interactive mode. This consumes different licenses.
+        vrscene (bool): Submit as ``vrscene`` file for standalone V-Ray
+            renderer.
+        ass (bool): Submit as ``ass`` file for standalone Arnold renderer.
+        tileRendering (bool): Instance is set to tile rendering mode. We
+            won't submit actual render, but we'll make publish job to wait
+            for Tile Assembly job done and then publish.
+        strict_error_checking (bool): Enable/disable error checking on DL
 
-    This generates a single node in the scene which tells the Creator to if
-    it exists collect Maya rendersetup renderlayers as individual instances.
-    As such, triggering create doesn't actually create the instance node per
-    layer but only the node which tells the Creator it may now collect
-    the renderlayers.
+
+    See Also:
+        https://pype.club/docs/artist_hosts_maya#creating-basic-render-setup
+
     """
 
-    identifier = "io.openpype.creators.maya.renderlayer"
-    family = "renderlayer"
     label = "Render"
+    family = "rendering"
     icon = "eye"
+    _token = None
+    _user = None
+    _password = None
 
-    layer_instance_prefix = "render"
-    singleton_node_name = "renderingMain"
+    _project_settings = None
 
-    render_settings = {}
+    def __init__(self, *args, **kwargs):
+        """Constructor."""
+        super(CreateRender, self).__init__(*args, **kwargs)
 
-    @classmethod
-    def apply_settings(cls, project_settings):
-        cls.render_settings = project_settings["maya"]["RenderSettings"]
+        # Defaults
+        self._project_settings = get_project_settings(
+            legacy_io.Session["AVALON_PROJECT"])
+        if self._project_settings["maya"]["RenderSettings"]["apply_render_settings"]:  # noqa
+            lib_rendersettings.RenderSettings().set_default_renderer_settings()
 
-    def create(self, subset_name, instance_data, pre_create_data):
-        # Only allow a single render instance to exist
-        if self._get_singleton_node():
-            raise CreatorError("A Render instance already exists - only "
-                               "one can be configured.")
+        # Deadline-only
+        manager = ModulesManager()
+        deadline_settings = get_system_settings()["modules"]["deadline"]
+        if not deadline_settings["enabled"]:
+            self.deadline_servers = {}
+            return
+        self.deadline_module = manager.modules_by_name["deadline"]
+        try:
+            default_servers = deadline_settings["deadline_urls"]
+            project_servers = (
+                self._project_settings["deadline"]["deadline_servers"]
+            )
+            self.deadline_servers = {
+                k: default_servers[k]
+                for k in project_servers
+                if k in default_servers
+            }
 
-        # Apply default project render settings on create
-        if self.render_settings.get("apply_render_settings"):
-            lib_rendersettings.RenderSettings().set_default_renderer_settings()
+            if not self.deadline_servers:
+                self.deadline_servers = default_servers
 
-        super(CreateRenderlayer, self).create(subset_name,
-                                              instance_data,
-                                              pre_create_data)
+        except AttributeError:
+            # Handle situation where we had only one url for deadline.
+            # get default deadline webservice url from deadline module
+            self.deadline_servers = self.deadline_module.deadline_urls
 
-    def get_instance_attr_defs(self):
-        """Create instance settings."""
+    def process(self):
+        """Entry point."""
+        exists = cmds.ls(self.name)
+        if exists:
+            cmds.warning("%s already exists." % exists[0])
+            return
+
+        use_selection = self.options.get("useSelection")
+        with lib.undo_chunk():
+            self._create_render_settings()
+            self.instance = super(CreateRender, self).process()
+            # create namespace with instance
+            index = 1
+            namespace_name = "_{}".format(str(self.instance))
+            try:
+                cmds.namespace(rm=namespace_name)
+            except RuntimeError:
+                # namespace is not empty, so we leave it untouched
+                pass
+
+            while cmds.namespace(exists=namespace_name):
+                namespace_name = "_{}{}".format(str(self.instance), index)
+                index += 1
+
+            namespace = cmds.namespace(add=namespace_name)
 
-        return [
-            BoolDef("review",
-                    label="Review",
-                    tooltip="Mark as reviewable",
-                    default=True),
-            BoolDef("extendFrames",
-                    label="Extend Frames",
-                    tooltip="Extends the frames on top of the previous "
-                            "publish.\nIf the previous was 1001-1050 and you "
-                            "would now submit 1020-1070 only the new frames "
-                            "1051-1070 would be rendered and published "
-                            "together with the previously rendered frames.\n"
-                            "If 'overrideExistingFrame' is enabled it *will* "
-                            "render any existing frames.",
-                    default=False),
-            BoolDef("overrideExistingFrame",
-                    label="Override Existing Frame",
-                    tooltip="Override existing rendered frames "
-                            "(if they exist).",
-                    default=True),
-
-            # TODO: Should these move to submit_maya_deadline plugin?
-            # Tile rendering
-            BoolDef("tileRendering",
-                    label="Enable tiled rendering",
-                    default=False),
-            NumberDef("tilesX",
-                      label="Tiles X",
-                      default=2,
-                      minimum=1,
-                      decimals=0),
-            NumberDef("tilesY",
-                      label="Tiles Y",
-                      default=2,
-                      minimum=1,
-                      decimals=0),
-
-            # Additional settings
-            BoolDef("convertToScanline",
-                    label="Convert to Scanline",
-                    tooltip="Convert the output images to scanline images",
-                    default=False),
-            BoolDef("useReferencedAovs",
-                    label="Use Referenced AOVs",
-                    tooltip="Consider the AOVs from referenced scenes as well",
-                    default=False),
-
-            BoolDef("renderSetupIncludeLights",
-                    label="Render Setup Include Lights",
-                    default=self.render_settings.get("enable_all_lights",
-                                                     False))
+        # add Deadline server selection list
+        if self.deadline_servers:
+            cmds.scriptJob(
+                attributeChange=[
+                    "{}.deadlineServers".format(self.instance),
+                    self._deadline_webservice_changed
+                ])
+
+        cmds.setAttr("{}.machineList".format(self.instance), lock=True)
+        rs = renderSetup.instance()
+        layers = rs.getRenderLayers()
+        if use_selection:
+            self.log.info("Processing existing layers")
+            sets = []
+            for layer in layers:
+                self.log.info(" - creating set for {}:{}".format(
+                    namespace, layer.name()))
+                render_set = cmds.sets(
+                    n="{}:{}".format(namespace, layer.name()))
+                sets.append(render_set)
+            cmds.sets(sets, forceElement=self.instance)
+
+        # if no render layers are present, create default one with
+        # asterisk selector
+        if not layers:
+            render_layer = rs.createRenderLayer('Main')
+            collection = render_layer.createCollection("defaultCollection")
+            collection.getSelector().setPattern('*')
+
+        return self.instance
+
+    def _deadline_webservice_changed(self):
+        """Refresh Deadline server dependent options."""
+        # get selected server
+        webservice = self.deadline_servers[
+            self.server_aliases[
+                cmds.getAttr("{}.deadlineServers".format(self.instance))
+            ]
         ]
+        pools = self.deadline_module.get_deadline_pools(webservice, self.log)
+        cmds.deleteAttr("{}.primaryPool".format(self.instance))
+        cmds.deleteAttr("{}.secondaryPool".format(self.instance))
+
+        pool_setting = (self._project_settings["deadline"]
+                        ["publish"]
+                        ["CollectDeadlinePools"])
+
+        primary_pool = pool_setting["primary_pool"]
+        sorted_pools = self._set_default_pool(list(pools), primary_pool)
+        cmds.addAttr(
+            self.instance,
+            longName="primaryPool",
+            attributeType="enum",
+            enumName=":".join(sorted_pools)
+        )
+        cmds.setAttr(
+            "{}.primaryPool".format(self.instance),
+            0,
+            keyable=False,
+            channelBox=True
+        )
+
+        pools = ["-"] + pools
+        secondary_pool = pool_setting["secondary_pool"]
+        sorted_pools = self._set_default_pool(list(pools), secondary_pool)
+        cmds.addAttr(
+            self.instance,
+            longName="secondaryPool",
+            attributeType="enum",
+            enumName=":".join(sorted_pools)
+        )
+        cmds.setAttr(
+            "{}.secondaryPool".format(self.instance),
+            0,
+            keyable=False,
+            channelBox=True
+        )
+
+    def _create_render_settings(self):
+        """Create instance settings."""
+        # get pools (slave machines of the render farm)
+        pool_names = []
+        default_priority = 50
+
+        self.data["suspendPublishJob"] = False
+        self.data["review"] = True
+        self.data["extendFrames"] = False
+        self.data["overrideExistingFrame"] = True
+        # self.data["useLegacyRenderLayers"] = True
+        self.data["priority"] = default_priority
+        self.data["tile_priority"] = default_priority
+        self.data["framesPerTask"] = 1
+        self.data["machineLimit"] = self._project_settings.get(
+            "deadline").get(
+            "publish").get(
+            "MayaSubmitDeadline").get(
+            "jobInfo").get(
+            "machineLimit", 0)
+        self.data["whitelist"] = False
+        self.data["machineList"] = ""
+        self.data["useMayaBatch"] = False
+        self.data["tileRendering"] = False
+        self.data["tilesX"] = 2
+        self.data["tilesY"] = 2
+        self.data["convertToScanline"] = False
+        self.data["useReferencedAovs"] = False
+        self.data["renderSetupIncludeLights"] = (
+            self._project_settings.get(
+                "maya", {}).get(
+                "RenderSettings", {}).get(
+                "enable_all_lights", False)
+        )
+        # Disable for now as this feature is not working yet
+        # self.data["assScene"] = False
+
+        system_settings = get_system_settings()["modules"]
+
+        deadline_enabled = system_settings["deadline"]["enabled"]
+        muster_enabled = system_settings["muster"]["enabled"]
+        muster_url = system_settings["muster"]["MUSTER_REST_URL"]
+
+        if deadline_enabled and muster_enabled:
+            self.log.error(
+                "Both Deadline and Muster are enabled. " "Cannot support both."
+            )
+            raise RuntimeError("Both Deadline and Muster are enabled")
+
+        if deadline_enabled:
+            self.server_aliases = list(self.deadline_servers.keys())
+            self.data["deadlineServers"] = self.server_aliases
+
+            try:
+                deadline_url = self.deadline_servers["default"]
+            except KeyError:
+                # if 'default' server is not among selected,
+                # use first one for initial list of pools.
+                deadline_url = next(iter(self.deadline_servers.values()))
+            # Uses function to get pool machines from the assigned deadline
+            # url in settings
+            pool_names = self.deadline_module.get_deadline_pools(deadline_url,
                                                                  self.log)
+            maya_submit_dl = self._project_settings.get(
+                "deadline", {}).get(
+                "publish", {}).get(
+                "MayaSubmitDeadline", {})
+            priority = maya_submit_dl.get("priority", default_priority)
+            self.data["priority"] = priority
+
+            tile_priority = maya_submit_dl.get("tile_priority",
+                                               default_priority)
+            self.data["tile_priority"] = tile_priority
+
+            strict_error_checking = maya_submit_dl.get("strict_error_checking",
+                                                       True)
+            self.data["strict_error_checking"] = strict_error_checking
+
+            # Pool attributes should be last since they will be recreated when
+            # the deadline server changes.
+            pool_setting = (self._project_settings["deadline"]
+                            ["publish"]
+                            ["CollectDeadlinePools"])
+            primary_pool = pool_setting["primary_pool"]
+            self.data["primaryPool"] = self._set_default_pool(pool_names,
                                                               primary_pool)
+            # We add a string "-" to allow the user to not
+            # set any secondary pools
+            pool_names = ["-"] + pool_names
+            secondary_pool = pool_setting["secondary_pool"]
+            self.data["secondaryPool"] = self._set_default_pool(pool_names,
                                                                 secondary_pool)
+
+            requested_arguments = {"NamesOnly": True}
+            limit_groups = self.deadline_module.get_deadline_data(
+                deadline_url,
+                "limitgroups",
+                log=self.log,
+                **requested_arguments
+            )
+            self.data["limits"] = {"limits": limit_groups}
+
+        if muster_enabled:
+            self.log.info(">>> Loading Muster credentials ...")
+            self._load_credentials()
+            self.log.info(">>> Getting pools ...")
+            pools = []
+            try:
+                pools = self._get_muster_pools()
+            except requests.exceptions.HTTPError as e:
+                if str(e).startswith("401"):
+                    self.log.warning("access token expired")
+                    self._show_login()
+                    raise RuntimeError("Access token expired")
+            except requests.exceptions.ConnectionError:
+                self.log.error("Cannot connect to Muster API endpoint.")
+                raise RuntimeError("Cannot connect to {}".format(muster_url))
+            for pool in pools:
+                self.log.info(" - pool: {}".format(pool["name"]))
+                pool_names.append(pool["name"])
+
+        self.options = {"useSelection": False}  # Force no content
+
+    def _set_default_pool(self, pool_names, pool_value):
+        """Reorder pool names, default should come first"""
+        if pool_value and pool_value in pool_names:
+            pool_names.remove(pool_value)
+            pool_names = [pool_value] + pool_names
+        return pool_names
+
+    def _load_credentials(self):
+        """Load Muster credentials.
+
+        Load Muster credentials from file and set ``MUSTER_USER`` and
+        ``MUSTER_PASSWORD``; ``MUSTER_REST_URL`` is loaded from settings.
+
+        Raises:
+            RuntimeError: If loaded credentials are invalid.
+            AttributeError: If ``MUSTER_REST_URL`` is not set.
+
+        """
+        app_dir = os.path.normpath(appdirs.user_data_dir("pype-app", "pype"))
+        file_name = "muster_cred.json"
+        fpath = os.path.join(app_dir, file_name)
+        file = open(fpath, "r")
+        muster_json = json.load(file)
+        self._token = muster_json.get("token", None)
+        if not self._token:
+            self._show_login()
+            raise RuntimeError("Invalid access token for Muster")
+        file.close()
+        self.MUSTER_REST_URL = os.environ.get("MUSTER_REST_URL")
+        if not self.MUSTER_REST_URL:
+            raise AttributeError("Muster REST API url not set")
+
+    def _get_muster_pools(self):
+        """Get render pools from Muster.
+
+        Raises:
+            Exception: If pool list cannot be obtained from Muster.
+
+        """
+        params = {"authToken": self._token}
+        api_entry = "/api/pools/list"
+        response = requests_get(self.MUSTER_REST_URL + api_entry,
+                                params=params)
+        if response.status_code != 200:
+            if response.status_code == 401:
+                self.log.warning("Authentication token expired.")
+                self._show_login()
+            else:
+                self.log.error(
+                    ("Cannot get pools from "
+                     "Muster: {}").format(response.status_code)
+                )
+                raise Exception("Cannot get pools from Muster")
+        try:
+            pools = response.json()["ResponseData"]["pools"]
+        except ValueError as e:
+            self.log.error("Invalid response from Muster server {}".format(e))
+            raise Exception("Invalid response from Muster server")
+
+        return pools
+
+    def _show_login(self):
+        # authentication token expired so we need to login to Muster
+        # again to get it. We use Pype API call to show login window.
+ api_url = "{}/muster/show_login".format( + os.environ["OPENPYPE_WEBSERVER_URL"]) + self.log.debug(api_url) + login_response = requests_get(api_url, timeout=1) + if login_response.status_code != 200: + self.log.error("Cannot show login form to Muster") + raise Exception("Cannot show login form to Muster") + + def _requests_post(self, *args, **kwargs): + """Wrap request post method. + + Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment + variable is found. This is useful when Deadline or Muster server are + running with self-signed certificates and their certificate is not + added to trusted certificates on client machines. + + Warning: + Disabling SSL certificate validation is defeating one line + of defense SSL is providing and it is not recommended. + + """ + if "verify" not in kwargs: + kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) + return requests.post(*args, **kwargs) + + def _requests_get(self, *args, **kwargs): + """Wrap request get method. + + Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment + variable is found. This is useful when Deadline or Muster server are + running with self-signed certificates and their certificate is not + added to trusted certificates on client machines. + + Warning: + Disabling SSL certificate validation is defeating one line + of defense SSL is providing and it is not recommended. + + """ + if "verify" not in kwargs: + kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) + return requests.get(*args, **kwargs) diff --git a/openpype/hosts/maya/plugins/publish/collect_look.py b/openpype/hosts/maya/plugins/publish/collect_look.py index db042963c69..5e7f105c160 100644 --- a/openpype/hosts/maya/plugins/publish/collect_look.py +++ b/openpype/hosts/maya/plugins/publish/collect_look.py @@ -17,6 +17,11 @@ "visibleInRefractions", "doubleSided", "opposite"] + +RENDERER_NODE_TYPES = [ + # redshift + "RedshiftMeshParameters" +] SHAPE_ATTRS = set(SHAPE_ATTRS) @@ -31,13 +36,12 @@ def get_pxr_multitexture_file_attrs(node): FILE_NODES = { - # maya "file": "fileTextureName", - # arnold (mtoa) + "aiImage": "filename", - # redshift + "RedshiftNormalMap": "tex0", - # renderman + "PxrBump": "filename", "PxrNormalMap": "filename", "PxrMultiTexture": get_pxr_multitexture_file_attrs, @@ -45,22 +49,6 @@ def get_pxr_multitexture_file_attrs(node): "PxrTexture": "filename" } -# Keep only node types that actually exist -all_node_types = set(cmds.allNodeTypes()) -for node_type in list(FILE_NODES.keys()): - if node_type not in all_node_types: - FILE_NODES.pop(node_type) -del all_node_types - -# Cache pixar dependency node types so we can perform a type lookup against it -PXR_NODES = set() -if cmds.pluginInfo("RenderMan_for_Maya", query=True, loaded=True): - PXR_NODES = set( - cmds.pluginInfo("RenderMan_for_Maya", - query=True, - dependNode=True) - ) - def get_attributes(dictionary, attr, node=None): # type: (dict, str, str) -> list @@ -244,17 +232,20 @@ def get_file_node_files(node): """ paths = get_file_node_paths(node) - - # For sequences get all files and filter to only existing files - result = [] + sequences = [] + replaces = [] for index, path in enumerate(paths): if node_uses_image_sequence(node, path): glob_pattern = seq_to_glob(path) - result.extend(glob.glob(glob_pattern)) - elif os.path.exists(path): - result.append(path) + sequences.extend(glob.glob(glob_pattern)) + replaces.append(index) - return result + for index in replaces: + paths.pop(index) + + paths.extend(sequences) + + return [p for p 
in paths if os.path.exists(p)] class CollectLook(pyblish.api.InstancePlugin): @@ -269,7 +260,7 @@ class CollectLook(pyblish.api.InstancePlugin): membership relations. Collects: - lookAttributes (list): Nodes in instance with their altered attributes + lookAttribtutes (list): Nodes in instance with their altered attributes lookSetRelations (list): Sets and their memberships lookSets (list): List of set names included in the look @@ -294,41 +285,82 @@ def collect(self, instance): instance: Instance to collect. """ - self.log.debug("Looking for look associations " - "for %s" % instance.data['name']) - - # Lookup set (optimization) - instance_lookup = set(cmds.ls(instance, long=True)) + self.log.info("Looking for look associations " + "for %s" % instance.data['name']) # Discover related object sets - self.log.debug("Gathering sets ...") + self.log.info("Gathering sets ...") sets = self.collect_sets(instance) # Lookup set (optimization) instance_lookup = set(cmds.ls(instance, long=True)) - self.log.debug("Gathering set relations ...") - # Ensure iteration happen in a list to allow removing keys from the + self.log.info("Gathering set relations ...") + # Ensure iteration happen in a list so we can remove keys from the # dict within the loop + + # skipped types of attribute on render specific nodes + disabled_types = ["message", "TdataCompound"] + for obj_set in list(sets): self.log.debug("From {}".format(obj_set)) + + # if node is specified as renderer node type, it will be + # serialized with its attributes. + if cmds.nodeType(obj_set) in RENDERER_NODE_TYPES: + self.log.info("- {} is {}".format( + obj_set, cmds.nodeType(obj_set))) + + node_attrs = [] + + # serialize its attributes so they can be recreated on look + # load. + for attr in cmds.listAttr(obj_set): + # skip publishedNodeInfo attributes as they break + # getAttr() and we don't need them anyway + if attr.startswith("publishedNodeInfo"): + continue + + # skip attributes types defined in 'disabled_type' list + if cmds.getAttr("{}.{}".format(obj_set, attr), type=True) in disabled_types: # noqa + continue + + node_attrs.append(( + attr, + cmds.getAttr("{}.{}".format(obj_set, attr)), + cmds.getAttr( + "{}.{}".format(obj_set, attr), type=True) + )) + + for member in cmds.ls( + cmds.sets(obj_set, query=True), long=True): + member_data = self.collect_member_data(member, + instance_lookup) + if not member_data: + continue + + # Add information of the node to the members list + sets[obj_set]["members"].append(member_data) + # Get all nodes of the current objectSet (shadingEngine) for member in cmds.ls(cmds.sets(obj_set, query=True), long=True): member_data = self.collect_member_data(member, instance_lookup) - if member_data: - # Add information of the node to the members list - sets[obj_set]["members"].append(member_data) + if not member_data: + continue + + # Add information of the node to the members list + sets[obj_set]["members"].append(member_data) # Remove sets that didn't have any members assigned in the end # Thus the data will be limited to only what we need. 
+ self.log.info("obj_set {}".format(sets[obj_set])) if not sets[obj_set]["members"]: - self.log.debug( - "Removing redundant set information: {}".format(obj_set) - ) + self.log.info( + "Removing redundant set information: {}".format(obj_set)) sets.pop(obj_set, None) - self.log.debug("Gathering attribute changes to instance members..") + self.log.info("Gathering attribute changes to instance members..") attributes = self.collect_attributes_changed(instance) # Store data on the instance @@ -350,28 +382,35 @@ def collect(self, instance): "rman__displacement" ] if look_sets: - self.log.debug("Found look sets: {}".format(look_sets)) + materials = [] - # Get all material attrs for all look sets to retrieve their inputs - existing_attrs = [] for look in look_sets: - for attr in shader_attrs: - if cmds.attributeQuery(attr, node=look, exists=True): - existing_attrs.append("{}.{}".format(look, attr)) - materials = cmds.listConnections(existing_attrs, - source=True, - destination=False) or [] - - self.log.debug("Found materials:\n{}".format(materials)) - - self.log.debug("Found the following sets:\n{}".format(look_sets)) + for at in shader_attrs: + try: + con = cmds.listConnections("{}.{}".format(look, at)) + except ValueError: + # skip attributes that are invalid in current + # context. For example in the case where + # Arnold is not enabled. + continue + if con: + materials.extend(con) + + self.log.info("Found materials:\n{}".format(materials)) + + self.log.info("Found the following sets:\n{}".format(look_sets)) # Get the entire node chain of the look sets - # history = cmds.listHistory(look_sets, allConnections=True) - history = cmds.listHistory(materials, allConnections=True) + # history = cmds.listHistory(look_sets) + history = [] + for material in materials: + history.extend(cmds.listHistory(material, ac=True)) + + # handle VrayPluginNodeMtl node - see #1397 + vray_plugin_nodes = cmds.ls( + history, type="VRayPluginNodeMtl", long=True) + for vray_node in vray_plugin_nodes: + history.extend(cmds.listHistory(vray_node, ac=True)) - # Since we retrieved history only of the connected materials - # connected to the look sets above we now add direct history - # for some of the look sets directly # handling render attribute sets render_set_types = [ "VRayDisplacement", @@ -389,26 +428,20 @@ def collect(self, instance): or [] ) - # Ensure unique entries only - history = list(set(history)) - - files = cmds.ls(history, - # It's important only node types are passed that - # exist (e.g. for loaded plugins) because otherwise - # the result will turn back empty - type=list(FILE_NODES.keys()), - long=True) - - # Sort for log readability - files.sort() + all_supported_nodes = FILE_NODES.keys() + files = [] + for node_type in all_supported_nodes: + files.extend(cmds.ls(history, type=node_type, long=True)) - self.log.debug("Collected file nodes:\n{}".format(files)) + self.log.info("Collected file nodes:\n{}".format(files)) # Collect textures if any file nodes are found - resources = [] - for node in files: # sort for log readability - resources.extend(self.collect_resources(node)) - instance.data["resources"] = resources - self.log.debug("Collected resources: {}".format(resources)) + instance.data["resources"] = [] + for n in files: + for res in self.collect_resources(n): + instance.data["resources"].append(res) + + self.log.info("Collected resources: {}".format( + instance.data["resources"])) # Log warning when no relevant sets were retrieved for the look. 
if ( @@ -423,7 +456,7 @@ def collect(self, instance): instance.extend(shader for shader in look_sets if shader not in instance_lookup) - self.log.debug("Collected look for %s" % instance) + self.log.info("Collected look for %s" % instance) def collect_sets(self, instance): """Collect all objectSets which are of importance for publishing @@ -503,14 +536,14 @@ def collect_attributes_changed(self, instance): # Collect changes to "custom" attributes node_attrs = get_look_attrs(node) + self.log.info( + "Node \"{0}\" attributes: {1}".format(node, node_attrs) + ) + # Only include if there are any properties we care about if not node_attrs: continue - self.log.debug( - "Node \"{0}\" attributes: {1}".format(node, node_attrs) - ) - node_attributes = {} for attr in node_attrs: if not cmds.attributeQuery(attr, node=node, exists=True): @@ -521,7 +554,10 @@ def collect_attributes_changed(self, instance): self.log.warning("Attribute '{}' is mixed-type and is " "not supported yet.".format(attribute)) continue - if cmds.getAttr(attribute, type=True) == "message": + if cmds.getAttr(attribute, type=True) in [ + "message", + "TdataCompound" + ]: continue node_attributes[attr] = cmds.getAttr(attribute, asString=True) # Only include if there are any properties we care about @@ -541,14 +577,14 @@ def collect_resources(self, node): Returns: dict """ - if cmds.nodeType(node) not in FILE_NODES: + self.log.debug("processing: {}".format(node)) + all_supported_nodes = FILE_NODES.keys() + if cmds.nodeType(node) not in all_supported_nodes: self.log.error( "Unsupported file node: {}".format(cmds.nodeType(node))) raise AssertionError("Unsupported file node") - self.log.debug( - "Collecting resource: {} ({})".format(node, cmds.nodeType(node)) - ) + self.log.debug(" - got {}".format(cmds.nodeType(node))) attributes = get_attributes(FILE_NODES, cmds.nodeType(node), node) for attribute in attributes: @@ -556,34 +592,41 @@ def collect_resources(self, node): node, attribute )) + if not source: + continue + computed_attribute = "{}.{}".format(node, attribute) + if attribute == "fileTextureName": + computed_attribute = node + ".computedFileTextureNamePattern" - self.log.debug(" - file source: {}".format(source)) + self.log.info(" - file source: {}".format(source)) color_space_attr = "{}.colorSpace".format(node) try: color_space = cmds.getAttr(color_space_attr) except ValueError: # node doesn't have colorspace attribute color_space = "Raw" - # Compare with the computed file path, e.g. the one with # the pattern in it, to generate some logging information # about this difference - # Only for file nodes with `fileTextureName` attribute - if attribute == "fileTextureName": - computed_source = cmds.getAttr( - "{}.computedFileTextureNamePattern".format(node) - ) - if source != computed_source: - self.log.debug("Detected computed file pattern difference " - "from original pattern: {0} " - "({1} -> {2})".format(node, - source, - computed_source)) + computed_source = cmds.getAttr(computed_attribute) + if source != computed_source: + self.log.debug("Detected computed file pattern difference " + "from original pattern: {0} " + "({1} -> {2})".format(node, + source, + computed_source)) # renderman allows nodes to have filename attribute empty while # you can have another incoming connection from different node. 
- if not source and cmds.nodeType(node) in PXR_NODES: - self.log.debug("Renderman: source is empty, skipping...") + pxr_nodes = set() + if cmds.pluginInfo("RenderMan_for_Maya", query=True, loaded=True): + pxr_nodes = set( + cmds.pluginInfo("RenderMan_for_Maya", + query=True, + dependNode=True) + ) + if not source and cmds.nodeType(node) in pxr_nodes: + self.log.info("Renderman: source is empty, skipping...") continue # We replace backslashes with forward slashes because V-Ray # can't handle the UDIM files with the backslashes in the @@ -592,14 +635,14 @@ def collect_resources(self, node): files = get_file_node_files(node) if len(files) == 0: - self.log.debug("No valid files found from node `%s`" % node) - - self.log.debug("collection of resource done:") - self.log.debug(" - node: {}".format(node)) - self.log.debug(" - attribute: {}".format(attribute)) - self.log.debug(" - source: {}".format(source)) - self.log.debug(" - file: {}".format(files)) - self.log.debug(" - color space: {}".format(color_space)) + self.log.error("No valid files found from node `%s`" % node) + + self.log.info("collection of resource done:") + self.log.info(" - node: {}".format(node)) + self.log.info(" - attribute: {}".format(attribute)) + self.log.info(" - source: {}".format(source)) + self.log.info(" - file: {}".format(files)) + self.log.info(" - color space: {}".format(color_space)) # Define the resource yield { diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index 82392f67bd0..8a334232b6c 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -39,29 +39,29 @@ instance -> pixelAspect """ +import re import os import platform import json from maya import cmds +import maya.app.renderSetup.model.renderSetup as renderSetup import pyblish.api -from openpype.pipeline import KnownPublishError from openpype.lib import get_formatted_current_time -from openpype.hosts.maya.api.lib_renderproducts import ( - get as get_layer_render_products, - UnsupportedRendererException -) +from openpype.pipeline import legacy_io +from openpype.settings import get_system_settings, get_project_settings +from openpype.modules import ModulesManager +from openpype.hosts.maya.api.lib_renderproducts import get as get_layer_render_products # noqa: E501 from openpype.hosts.maya.api import lib -class CollectMayaRender(pyblish.api.InstancePlugin): +class CollectMayaRender(pyblish.api.ContextPlugin): """Gather all publishable render layers from renderSetup.""" order = pyblish.api.CollectorOrder + 0.01 hosts = ["maya"] - families = ["renderlayer"] label = "Collect Render Layers" sync_workfile_version = False @@ -71,258 +71,395 @@ class CollectMayaRender(pyblish.api.InstancePlugin): "underscore": "_" } - def process(self, instance): + def process(self, context): + """Entry point to collector.""" + render_instance = None - # TODO: Re-add force enable of workfile instance? 
- # TODO: Re-add legacy layer support with LAYER_ prefix but in Creator - # TODO: Set and collect active state of RenderLayer in Creator using - # renderlayer.isRenderable() - context = instance.context + for instance in context: + if "rendering" in instance.data["families"]: + render_instance = instance + render_instance.data["remove"] = True - layer = instance.data["transientData"]["layer"] - objset = instance.data.get("instance_node") + # make sure workfile instance publishing is enabled + if "workfile" in instance.data["families"]: + instance.data["publish"] = True + + if not render_instance: + self.log.info( + "No render instance found, skipping render " + "layer collection." + ) + return + + render_globals = render_instance + collected_render_layers = render_instance.data["setMembers"] filepath = context.data["currentFile"].replace("\\", "/") + asset = legacy_io.Session["AVALON_ASSET"] workspace = context.data["workspaceDir"] - # check if layer is renderable - if not layer.isRenderable(): - msg = "Render layer [ {} ] is not " "renderable".format( - layer.name() - ) - self.log.warning(msg) + # Retrieve render setup layers + rs = renderSetup.instance() + maya_render_layers = { + layer.name(): layer for layer in rs.getRenderLayers() + } - # detect if there are sets (subsets) to attach render to - sets = cmds.sets(objset, query=True) or [] - attach_to = [] - for s in sets: - if not cmds.attributeQuery("family", node=s, exists=True): + for layer in collected_render_layers: + if layer.startswith("LAYER_"): + # this is support for legacy mode where render layers + # started with `LAYER_` prefix. + layer_name_pattern = r"^LAYER_(.*)" + else: + # new way is to prefix render layer name with instance + # namespace. + layer_name_pattern = r"^.+:(.*)" + + # todo: We should have a more explicit way to link the renderlayer + match = re.match(layer_name_pattern, layer) + if not match: + msg = "Invalid layer name in set [ {} ]".format(layer) + self.log.warning(msg) continue - attach_to.append( - { - "version": None, # we need integrator for that - "subset": s, - "family": cmds.getAttr("{}.family".format(s)), - } - ) - self.log.debug(" -> attach render to: {}".format(s)) + expected_layer_name = match.group(1) + self.log.info("Processing '{}' as layer [ {} ]" + "".format(layer, expected_layer_name)) - layer_name = layer.name() + # check if layer is part of renderSetup + if expected_layer_name not in maya_render_layers: + msg = "Render layer [ {} ] is not in " "Render Setup".format( + expected_layer_name + ) + self.log.warning(msg) + continue - # collect all frames we are expecting to be rendered - # return all expected files for all cameras and aovs in given - # frame range - try: - layer_render_products = get_layer_render_products(layer.name()) - except UnsupportedRendererException as exc: - raise KnownPublishError(exc) - render_products = layer_render_products.layer_data.products - assert render_products, "no render products generated" - expected_files = [] - multipart = False - for product in render_products: - if product.multipart: - multipart = True - product_name = product.productName - if product.camera and layer_render_products.has_camera_token(): - product_name = "{}{}".format( - product.camera, - "_{}".format(product_name) if product_name else "") - expected_files.append( - { - product_name: layer_render_products.get_files( - product) - }) - - has_cameras = any(product.camera for product in render_products) - assert has_cameras, "No render cameras found." 
- - self.log.debug("multipart: {}".format( - multipart)) - assert expected_files, "no file names were generated, this is a bug" - self.log.debug( - "expected files: {}".format( - json.dumps(expected_files, indent=4, sort_keys=True) + # check if layer is renderable + if not maya_render_layers[expected_layer_name].isRenderable(): + msg = "Render layer [ {} ] is not " "renderable".format( + expected_layer_name + ) + self.log.warning(msg) + continue + + # detect if there are sets (subsets) to attach render to + sets = cmds.sets(layer, query=True) or [] + attach_to = [] + for s in sets: + if not cmds.attributeQuery("family", node=s, exists=True): + continue + + attach_to.append( + { + "version": None, # we need integrator for that + "subset": s, + "family": cmds.getAttr("{}.family".format(s)), + } + ) + self.log.info(" -> attach render to: {}".format(s)) + + layer_name = "rs_{}".format(expected_layer_name) + + # collect all frames we are expecting to be rendered + # return all expected files for all cameras and aovs in given + # frame range + layer_render_products = get_layer_render_products(layer_name) + render_products = layer_render_products.layer_data.products + assert render_products, "no render products generated" + exp_files = [] + multipart = False + for product in render_products: + if product.multipart: + multipart = True + product_name = product.productName + if product.camera and layer_render_products.has_camera_token(): + product_name = "{}{}".format( + product.camera, + "_" + product_name if product_name else "") + exp_files.append( + { + product_name: layer_render_products.get_files( + product) + }) + + has_cameras = any(product.camera for product in render_products) + assert has_cameras, "No render cameras found." + + self.log.info("multipart: {}".format( + multipart)) + assert exp_files, "no file names were generated, this is bug" + self.log.info( + "expected files: {}".format( + json.dumps(exp_files, indent=4, sort_keys=True) + ) ) - ) - # if we want to attach render to subset, check if we have AOV's - # in expectedFiles. If so, raise error as we cannot attach AOV - # (considered to be subset on its own) to another subset - if attach_to: - assert isinstance(expected_files, list), ( - "attaching multiple AOVs or renderable cameras to " - "subset is not supported" + # if we want to attach render to subset, check if we have AOV's + # in expectedFiles. If so, raise error as we cannot attach AOV + # (considered to be subset on its own) to another subset + if attach_to: + assert isinstance(exp_files, list), ( + "attaching multiple AOVs or renderable cameras to " + "subset is not supported" + ) + + # append full path + aov_dict = {} + default_render_file = context.data.get('project_settings')\ + .get('maya')\ + .get('RenderSettings')\ + .get('default_render_image_folder') or "" + # replace relative paths with absolute. Render products are + # returned as list of dictionaries. 
+ publish_meta_path = None + for aov in exp_files: + full_paths = [] + aov_first_key = list(aov.keys())[0] + for file in aov[aov_first_key]: + full_path = os.path.join(workspace, default_render_file, + file) + full_path = full_path.replace("\\", "/") + full_paths.append(full_path) + publish_meta_path = os.path.dirname(full_path) + aov_dict[aov_first_key] = full_paths + full_exp_files = [aov_dict] + + frame_start_render = int(self.get_render_attribute( + "startFrame", layer=layer_name)) + frame_end_render = int(self.get_render_attribute( + "endFrame", layer=layer_name)) + + if (int(context.data['frameStartHandle']) == frame_start_render + and int(context.data['frameEndHandle']) == frame_end_render): # noqa: W503, E501 + + handle_start = context.data['handleStart'] + handle_end = context.data['handleEnd'] + frame_start = context.data['frameStart'] + frame_end = context.data['frameEnd'] + frame_start_handle = context.data['frameStartHandle'] + frame_end_handle = context.data['frameEndHandle'] + else: + handle_start = 0 + handle_end = 0 + frame_start = frame_start_render + frame_end = frame_end_render + frame_start_handle = frame_start_render + frame_end_handle = frame_end_render + + # find common path to store metadata + # so if image prefix is branching to many directories + # metadata file will be located in top-most common + # directory. + # TODO: use `os.path.commonpath()` after switch to Python 3 + publish_meta_path = os.path.normpath(publish_meta_path) + common_publish_meta_path = os.path.splitdrive( + publish_meta_path)[0] + if common_publish_meta_path: + common_publish_meta_path += os.path.sep + for part in publish_meta_path.replace( + common_publish_meta_path, "").split(os.path.sep): + common_publish_meta_path = os.path.join( + common_publish_meta_path, part) + if part == expected_layer_name: + break + + # TODO: replace this terrible linux hotfix with real solution :) + if platform.system().lower() in ["linux", "darwin"]: + common_publish_meta_path = "/" + common_publish_meta_path + + self.log.info( + "Publish meta path: {}".format(common_publish_meta_path)) + + self.log.info(full_exp_files) + self.log.info("collecting layer: {}".format(layer_name)) + # Get layer specific settings, might be overrides + colorspace_data = lib.get_color_management_preferences() + data = { + "subset": expected_layer_name, + "attachTo": attach_to, + "setMembers": layer_name, + "multipartExr": multipart, + "review": render_instance.data.get("review") or False, + "publish": True, + + "handleStart": handle_start, + "handleEnd": handle_end, + "frameStart": frame_start, + "frameEnd": frame_end, + "frameStartHandle": frame_start_handle, + "frameEndHandle": frame_end_handle, + "byFrameStep": int( + self.get_render_attribute("byFrameStep", + layer=layer_name)), + "renderer": self.get_render_attribute( + "currentRenderer", layer=layer_name).lower(), + # instance subset + "family": "renderlayer", + "families": ["renderlayer"], + "asset": asset, + "time": get_formatted_current_time(), + "author": context.data["user"], + # Add source to allow tracing back to the scene from + # which was submitted originally + "source": filepath, + "expectedFiles": full_exp_files, + "publishRenderMetadataFolder": common_publish_meta_path, + "renderProducts": layer_render_products, + "resolutionWidth": lib.get_attr_in_layer( + "defaultResolution.width", layer=layer_name + ), + "resolutionHeight": lib.get_attr_in_layer( + "defaultResolution.height", layer=layer_name + ), + "pixelAspect": lib.get_attr_in_layer( + 
"defaultResolution.pixelAspect", layer=layer_name + ), + "tileRendering": render_instance.data.get("tileRendering", False), # noqa: E501 + "tilesX": render_instance.data.get("tilesX", 2), + "tilesY": render_instance.data.get("tilesY", 2), + "priority": render_instance.data.get("priority"), + "machineLimit": render_instance.data.get("machineLimit", 0), + "convertToScanline": render_instance.data.get( + "convertToScanline", False), + "useReferencedAovs": render_instance.data.get( + "useReferencedAovs") or render_instance.data.get( + "vrayUseReferencedAovs") or False, + "aovSeparator": layer_render_products.layer_data.aov_separator, # noqa: E501 + "renderSetupIncludeLights": render_instance.data.get( + "renderSetupIncludeLights" + ), + "colorspaceConfig": colorspace_data["config"], + "colorspaceDisplay": colorspace_data["display"], + "colorspaceView": colorspace_data["view"], + "strict_error_checking": render_instance.data.get( + "strict_error_checking", True + ) + } + + # Collect Deadline url if Deadline module is enabled + deadline_settings = ( + context.data["system_settings"]["modules"]["deadline"] + ) + if deadline_settings["enabled"]: + data["deadlineUrl"] = render_instance.data["deadlineUrl"] + + if self.sync_workfile_version: + data["version"] = context.data["version"] + + for instance in context: + if instance.data['family'] == "workfile": + instance.data["version"] = context.data["version"] + + # handle standalone renderers + if render_instance.data.get("vrayScene") is True: + data["families"].append("vrayscene_render") + + if render_instance.data.get("assScene") is True: + data["families"].append("assscene_render") + + # Include (optional) global settings + # Get global overrides and translate to Deadline values + overrides = self.parse_options(str(render_globals)) + data.update(**overrides) + + # get string values for pools + primary_pool = overrides["renderGlobals"]["Pool"] + secondary_pool = overrides["renderGlobals"].get("SecondaryPool") + data["primaryPool"] = primary_pool + data["secondaryPool"] = secondary_pool + + # Define nice label + label = "{0} ({1})".format(expected_layer_name, data["asset"]) + label += " [{0}-{1}]".format( + int(data["frameStartHandle"]), int(data["frameEndHandle"]) ) - # append full path - aov_dict = {} - default_render_folder = context.data.get("project_settings")\ - .get("maya")\ - .get("RenderSettings")\ - .get("default_render_image_folder") or "" - # replace relative paths with absolute. Render products are - # returned as list of dictionaries. - publish_meta_path = None - for aov in expected_files: - full_paths = [] - aov_first_key = list(aov.keys())[0] - for file in aov[aov_first_key]: - full_path = os.path.join(workspace, default_render_folder, - file) - full_path = full_path.replace("\\", "/") - full_paths.append(full_path) - publish_meta_path = os.path.dirname(full_path) - aov_dict[aov_first_key] = full_paths - full_exp_files = [aov_dict] - self.log.debug(full_exp_files) - - if publish_meta_path is None: - raise KnownPublishError("Unable to detect any expected output " - "images for: {}. Make sure you have a " - "renderable camera and a valid frame " - "range set for your renderlayer." 
- "".format(instance.name)) - - frame_start_render = int(self.get_render_attribute( - "startFrame", layer=layer_name)) - frame_end_render = int(self.get_render_attribute( - "endFrame", layer=layer_name)) - - if (int(context.data["frameStartHandle"]) == frame_start_render - and int(context.data["frameEndHandle"]) == frame_end_render): # noqa: W503, E501 - - handle_start = context.data["handleStart"] - handle_end = context.data["handleEnd"] - frame_start = context.data["frameStart"] - frame_end = context.data["frameEnd"] - frame_start_handle = context.data["frameStartHandle"] - frame_end_handle = context.data["frameEndHandle"] - else: - handle_start = 0 - handle_end = 0 - frame_start = frame_start_render - frame_end = frame_end_render - frame_start_handle = frame_start_render - frame_end_handle = frame_end_render - - # find common path to store metadata - # so if image prefix is branching to many directories - # metadata file will be located in top-most common - # directory. - # TODO: use `os.path.commonpath()` after switch to Python 3 - publish_meta_path = os.path.normpath(publish_meta_path) - common_publish_meta_path = os.path.splitdrive( - publish_meta_path)[0] - if common_publish_meta_path: - common_publish_meta_path += os.path.sep - for part in publish_meta_path.replace( - common_publish_meta_path, "").split(os.path.sep): - common_publish_meta_path = os.path.join( - common_publish_meta_path, part) - if part == layer_name: - break - - # TODO: replace this terrible linux hotfix with real solution :) - if platform.system().lower() in ["linux", "darwin"]: - common_publish_meta_path = "/" + common_publish_meta_path - - self.log.debug( - "Publish meta path: {}".format(common_publish_meta_path)) - - # Get layer specific settings, might be overrides - colorspace_data = lib.get_color_management_preferences() - data = { - "farm": True, - "attachTo": attach_to, - - "multipartExr": multipart, - "review": instance.data.get("review") or False, - - # Frame range - "handleStart": handle_start, - "handleEnd": handle_end, - "frameStart": frame_start, - "frameEnd": frame_end, - "frameStartHandle": frame_start_handle, - "frameEndHandle": frame_end_handle, - "byFrameStep": int( - self.get_render_attribute("byFrameStep", - layer=layer_name)), - - # Renderlayer - "renderer": self.get_render_attribute( - "currentRenderer", layer=layer_name).lower(), - "setMembers": layer._getLegacyNodeName(), # legacy renderlayer - "renderlayer": layer_name, - - # todo: is `time` and `author` still needed? 
- "time": get_formatted_current_time(), - "author": context.data["user"], - - # Add source to allow tracing back to the scene from - # which was submitted originally - "source": filepath, - "expectedFiles": full_exp_files, - "publishRenderMetadataFolder": common_publish_meta_path, - "renderProducts": layer_render_products, - "resolutionWidth": lib.get_attr_in_layer( - "defaultResolution.width", layer=layer_name - ), - "resolutionHeight": lib.get_attr_in_layer( - "defaultResolution.height", layer=layer_name - ), - "pixelAspect": lib.get_attr_in_layer( - "defaultResolution.pixelAspect", layer=layer_name - ), - - # todo: Following are likely not needed due to collecting from the - # instance itself if they are attribute definitions - "tileRendering": instance.data.get("tileRendering") or False, # noqa: E501 - "tilesX": instance.data.get("tilesX") or 2, - "tilesY": instance.data.get("tilesY") or 2, - "convertToScanline": instance.data.get( - "convertToScanline") or False, - "useReferencedAovs": instance.data.get( - "useReferencedAovs") or instance.data.get( - "vrayUseReferencedAovs") or False, - "aovSeparator": layer_render_products.layer_data.aov_separator, # noqa: E501 - "renderSetupIncludeLights": instance.data.get( - "renderSetupIncludeLights" - ), - "colorspaceConfig": colorspace_data["config"], - "colorspaceDisplay": colorspace_data["display"], - "colorspaceView": colorspace_data["view"], - } + instance = context.create_instance(expected_layer_name) + instance.data["label"] = label + instance.data["farm"] = True + instance.data.update(data) - rr_settings = ( - context.data["system_settings"]["modules"]["royalrender"] - ) - if rr_settings["enabled"]: - data["rrPathName"] = instance.data.get("rrPathName") - self.log.debug(data["rrPathName"]) - - if self.sync_workfile_version: - data["version"] = context.data["version"] - for _instance in context: - if _instance.data['family'] == "workfile": - _instance.data["version"] = context.data["version"] - - # Define nice label - label = "{0} ({1})".format(layer_name, instance.data["asset"]) - label += " [{0}-{1}]".format( - int(data["frameStartHandle"]), int(data["frameEndHandle"]) - ) - data["label"] = label + def parse_options(self, render_globals): + """Get all overrides with a value, skip those without. + + Here's the kicker. These globals override defaults in the submission + integrator, but an empty value means no overriding is made. + Otherwise, Frames would override the default frames set under globals. + + Args: + render_globals (str): collection of render globals + + Returns: + dict: only overrides with values + + """ + attributes = lib.read(render_globals) + + options = {"renderGlobals": {}} + options["renderGlobals"]["Priority"] = attributes["priority"] + + # Check for specific pools + pool_a, pool_b = self._discover_pools(attributes) + options["renderGlobals"].update({"Pool": pool_a}) + if pool_b: + options["renderGlobals"].update({"SecondaryPool": pool_b}) + + # Number of workers for the current job + options["machineLimit"] = attributes["machineLimit"] + + # Machine list + machine_list = attributes["machineList"] + if machine_list: + key = "Whitelist" if attributes["whitelist"] else "Blacklist" + options["renderGlobals"][key] = machine_list + + # Suspend publish job + state = "Suspended" if attributes["suspendPublishJob"] else "Active" + options["publishJobState"] = state + + chunksize = attributes.get("framesPerTask", 1) + options["renderGlobals"]["ChunkSize"] = chunksize # Override frames should be False if extendFrames is False. 
This is
# to ensure it doesn't go off doing crazy unpredictable things
-        extend_frames = instance.data.get("extendFrames", False)
-        if not extend_frames:
-            instance.data["overrideExistingFrame"] = False
+        override_frames = False
+        extend_frames = attributes.get("extendFrames", False)
+        if extend_frames:
+            override_frames = attributes.get("overrideExistingFrame", False)
+
+        options["extendFrames"] = extend_frames
+        options["overrideExistingFrame"] = override_frames
+
+        maya_render_plugin = "MayaBatch"
+
+        options["mayaRenderPlugin"] = maya_render_plugin
+
+        limits = self._get_checked_limit_groups(attributes)
+        options["limits"] = limits
+
+        return options
+
+    def _discover_pools(self, attributes):
+
+        pool_a = None
+        pool_b = None
+
+        # Check for specific pools
+        pool_b = []
+        if "primaryPool" in attributes:
+            pool_a = attributes["primaryPool"]
+            if "secondaryPool" in attributes:
+                pool_b = attributes["secondaryPool"]

-        # Update the instace
-        instance.data.update(data)
+        else:
+            # Backwards compatibility
+            pool_str = attributes.get("pools", None)
+            if pool_str:
+                pool_a, pool_b = pool_str.split(";")
+
+        # Ensure empty entry token is caught
+        if pool_b == "-":
+            pool_b = None
+
+        return pool_a, pool_b

     @staticmethod
     def get_render_attribute(attr, layer):
@@ -339,3 +476,47 @@ def get_render_attribute(attr, layer):
         return lib.get_attr_in_layer(
             "defaultRenderGlobals.{}".format(attr), layer=layer
         )
+
+    def _get_checked_limit_groups(self, attributes):
+        checked_limits = []
+        deadline_settings = get_system_settings()["modules"]["deadline"]
+
+        if not deadline_settings["enabled"]:
+            return checked_limits
+
+        manager = ModulesManager()
+        deadline_module = manager.modules_by_name["deadline"]
+
+        try:
+            default_servers = deadline_settings["deadline_urls"]
+            project_settings = get_project_settings(
+                legacy_io.Session["AVALON_PROJECT"]
+            )
+            project_servers = (
+                project_settings["deadline"]["deadline_servers"]
+            )
+            deadline_servers = {
+                k: default_servers[k]
+                for k in project_servers
+                if k in default_servers
+            }
+
+            if not deadline_servers:
+                deadline_servers = default_servers
+        except AttributeError:
+            # Handle situation where we had only one url for deadline.
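
Condensed, `_discover_pools` prefers the explicit `primaryPool`/`secondaryPool` attributes, falls back to the legacy `"poolA;poolB"` string, and treats `-` as the empty-secondary token. A standalone sketch of that contract (illustrative rewrite, not the plugin method itself):

```python
def discover_pools(attributes):
    """Resolve (primary, secondary) Deadline pools from instance attributes."""
    if "primaryPool" in attributes:
        pool_a = attributes["primaryPool"]
        pool_b = attributes.get("secondaryPool")
    else:
        # Backwards compatibility with the single "pools" string
        pool_a = pool_b = None
        pool_str = attributes.get("pools")
        if pool_str:
            pool_a, _, pool_b = pool_str.partition(";")

    # "-" is the UI token for "no secondary pool"
    if pool_b == "-":
        pool_b = None
    return pool_a, pool_b or None


assert discover_pools({"pools": "farm;-"}) == ("farm", None)
assert discover_pools({"primaryPool": "gpu", "secondaryPool": "cpu"}) == ("gpu", "cpu")
```
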
+ # get default deadline webservice url from deadline module + deadline_servers = deadline_module.deadline_urls + + requested_arguments = {"NamesOnly": True} + limit_groups = deadline_module.get_deadline_data( + deadline_settings['deadline_urls']["default"], + "limitgroups", + log=self.log, + **requested_arguments + ) + for group, value in zip(limit_groups, attributes['limits'][0]): + if value is True: + checked_limits.append(group) + + return checked_limits diff --git a/openpype/hosts/maya/plugins/publish/extract_camera_alembic.py b/openpype/hosts/maya/plugins/publish/extract_camera_alembic.py index 4ec1399df46..78c252fef1d 100644 --- a/openpype/hosts/maya/plugins/publish/extract_camera_alembic.py +++ b/openpype/hosts/maya/plugins/publish/extract_camera_alembic.py @@ -85,7 +85,9 @@ def process(self, instance): transform = cmds.listRelatives( member, parent=True, fullPath=True) transform = transform[0] if transform else member - job_str += ' -root {0}'.format(transform) + + if transform not in camera_root: + job_str += ' -root {0}'.format(transform) job_str += ' -file "{0}"'.format(path) diff --git a/openpype/hosts/maya/plugins/publish/extract_camera_mayaScene.py b/openpype/hosts/maya/plugins/publish/extract_camera_mayaScene.py index a50a8f0dfaf..391ba7d44ee 100644 --- a/openpype/hosts/maya/plugins/publish/extract_camera_mayaScene.py +++ b/openpype/hosts/maya/plugins/publish/extract_camera_mayaScene.py @@ -133,8 +133,7 @@ def process(self, instance): # get cameras members = cmds.ls(instance.data['setMembers'], leaf=True, shapes=True, long=True, dag=True) - cameras = cmds.ls(members, leaf=True, shapes=True, long=True, - dag=True, type="camera") + cameras = cmds.ls(members, type="camera") # validate required settings assert isinstance(step, float), "Step must be a float value" @@ -163,9 +162,6 @@ def process(self, instance): dag=True, shapes=True, long=True) - - members = members + baked_camera_shapes - members.remove(camera) else: baked_camera_shapes = cmds.ls(cameras, type="camera", @@ -187,7 +183,7 @@ def process(self, instance): cmds.setAttr(plug, value) self.log.debug("Performing extraction..") - cmds.select(cmds.ls(members, dag=True, + cmds.select(cmds.ls(baked, dag=True, shapes=True, long=True), noExpand=True) cmds.file(path, force=True, diff --git a/openpype/hosts/maya/plugins/publish/validate_node_ids_in_database.py b/openpype/hosts/maya/plugins/publish/validate_node_ids_in_database.py index f15aa2efa8f..e95f8e0e0c0 100644 --- a/openpype/hosts/maya/plugins/publish/validate_node_ids_in_database.py +++ b/openpype/hosts/maya/plugins/publish/validate_node_ids_in_database.py @@ -1,11 +1,11 @@ import pyblish.api -import openpype.hosts.maya.api.action from openpype.client import get_assets -from openpype.hosts.maya.api import lib from openpype.pipeline import legacy_io -from openpype.pipeline.publish import ( - PublishValidationError, ValidatePipelineOrder) +from openpype.pipeline.publish import ValidatePipelineOrder +import openpype.hosts.maya.api.action +from openpype.hosts.maya.api import lib +from openpype.client.entities import get_projects class ValidateNodeIdsInDatabase(pyblish.api.InstancePlugin): @@ -30,9 +30,9 @@ class ValidateNodeIdsInDatabase(pyblish.api.InstancePlugin): def process(self, instance): invalid = self.get_invalid(instance) if invalid: - raise PublishValidationError( - ("Found asset IDs which are not related to " - "current project in instance: `{}`").format(instance.name)) + raise RuntimeError("Found asset IDs which are not related to " + "current project in 
instance: " + "`%s`" % instance.name) @classmethod def get_invalid(cls, instance): @@ -44,12 +44,16 @@ def get_invalid(cls, instance): nodes=instance[:]) # check ids against database ids - project_name = legacy_io.active_project() - asset_docs = get_assets(project_name, fields=["_id"]) - db_asset_ids = { - str(asset_doc["_id"]) - for asset_doc in asset_docs - } + projects_list = [legacy_io.active_project()] + for project in get_projects(fields=["name", "data.library_project"]): + if project.get("data", {}).get("library_project", False): + projects_list.append(project["name"]) + + db_asset_ids = set() + for project_name in projects_list: + asset_docs = get_assets(project_name, fields=["_id"]) + assets_ids = set( str(asset_doc["_id"]) for asset_doc in asset_docs ) + db_asset_ids.update(assets_ids) # Get all asset IDs for node in id_required_nodes: @@ -65,3 +69,12 @@ def get_invalid(cls, instance): invalid.append(node) return invalid + + def get_library_project_names(self): + libraries = list() + + for project in get_projects(fields=["name", "data.library_project"]): + if project.get("data", {}).get("library_project", False): + libraries.append(project["name"]) + + return libraries diff --git a/openpype/hosts/maya/plugins/publish/validate_node_ids_unique.py b/openpype/hosts/maya/plugins/publish/validate_node_ids_unique.py index 61386fc9399..fe442a56263 100644 --- a/openpype/hosts/maya/plugins/publish/validate_node_ids_unique.py +++ b/openpype/hosts/maya/plugins/publish/validate_node_ids_unique.py @@ -20,7 +20,6 @@ class ValidateNodeIdsUnique(pyblish.api.InstancePlugin): hosts = ['maya'] families = ["model", "look", - "rig", "yetiRig"] actions = [openpype.hosts.maya.api.action.SelectInvalidAction, diff --git a/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py b/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py index 24fb36eb8b9..5cd41d12762 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py +++ b/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py @@ -9,7 +9,6 @@ from openpype.pipeline.publish import ( RepairAction, ValidateContentsOrder, - PublishValidationError ) @@ -35,7 +34,7 @@ class ValidateRigOutputIds(pyblish.api.InstancePlugin): def process(self, instance): invalid = self.get_invalid(instance, compute=True) if invalid: - raise PublishValidationError("Found nodes with mismatched IDs.") + raise RuntimeError("Found nodes with mismatched IDs.") @classmethod def get_invalid(cls, instance, compute=False): @@ -47,10 +46,7 @@ def get_invalid_matches(cls, instance, compute=False): invalid = {} if compute: - out_set = instance.data["rig_sets"].get("out_SET") - if not out_set: - instance.data["mismatched_output_ids"] = invalid - return invalid + out_set = next(x for x in instance if x.startswith("out_SET")) instance_nodes = cmds.sets(out_set, query=True, nodesOnly=True) instance_nodes = cmds.ls(instance_nodes, long=True) @@ -111,7 +107,7 @@ def repair(cls, instance): set_id(instance_node, id_to_set, overwrite=True) if multiple_ids_match: - raise PublishValidationError( + raise RuntimeError( "Multiple matched ids found. 
Please repair manually: " "{}".format(multiple_ids_match) ) diff --git a/openpype/hosts/photoshop/plugins/load/load_image.py b/openpype/hosts/photoshop/plugins/load/load_image.py index eb770bbd200..38b9d8c0b9c 100644 --- a/openpype/hosts/photoshop/plugins/load/load_image.py +++ b/openpype/hosts/photoshop/plugins/load/load_image.py @@ -11,7 +11,7 @@ class ImageLoader(photoshop.PhotoshopLoader): Stores the imported asset in a container named after the asset. """ - families = ["image", "render"] + families = ["image", "render", "workfile"] representations = ["*"] def load(self, context, name=None, namespace=None, data=None): diff --git a/openpype/hosts/tvpaint/api/pipeline.py b/openpype/hosts/tvpaint/api/pipeline.py index 58fbd095452..bce2dbafb43 100644 --- a/openpype/hosts/tvpaint/api/pipeline.py +++ b/openpype/hosts/tvpaint/api/pipeline.py @@ -164,7 +164,14 @@ def work_root(self, session): return session["AVALON_WORKDIR"] def get_current_workfile(self): - return execute_george("tv_GetProjectName") + # tvPaint return a '\' character when no scene is currently + # opened instead of a None value, which causes interferences + # in OpenPype's core code. So we check the returned value and + # send None if this character is retrieved. + current_workfile = execute_george("tv_GetProjectName") + if current_workfile == '\\': + current_workfile = None + return current_workfile def workfile_has_unsaved_changes(self): return None diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_render_instances.py b/openpype/hosts/tvpaint/plugins/publish/collect_render_instances.py index e89fbf78829..6655a6619b1 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_render_instances.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_render_instances.py @@ -20,11 +20,11 @@ def process(self, instance): elif creator_identifier == "render.pass": self._collect_data_for_render_pass(instance) - elif creator_identifier == "render.scene": + elif creator_identifier in ["render.scene", "render.playblast"]: self._collect_data_for_render_scene(instance) else: - if creator_identifier == "scene.review": + if creator_identifier in ["scene.review", "publish.sequence"]: self._collect_data_for_review(instance) return @@ -100,15 +100,41 @@ def _collect_data_for_render_scene(self, instance): instance.context.data["layersData"] ) - render_pass_name = ( - instance.data["creator_attributes"]["render_pass_name"] - ) - subset_name = instance.data["subset"] - instance.data["subset"] = subset_name.format( - **prepare_template_data({"renderpass": render_pass_name}) + if instance.data["creator_attributes"].get('render_pass_name'): + render_pass_name = ( + instance.data["creator_attributes"]["render_pass_name"] + ) + subset_name = instance.data["subset"] + instance.data["subset"] = subset_name.format( + **prepare_template_data({"renderpass": render_pass_name}) + ) + + def _get_ignore_transparency_option(self, instance): + ignore_transparency = instance.data["creator_attributes"].get( + "ignore_layers_transparency", None ) + if not ignore_transparency: + keep_transparency = instance.data["creator_attributes"].get( + "keep_layers_transparency", None + ) + return not keep_transparency + + else: + return ignore_transparency + + def _collect_data_for_review(self, instance): instance.data["layers"] = copy.deepcopy( instance.context.data["layersData"] ) + + ignore_transparency = self._get_ignore_transparency_option(instance) + if ignore_transparency: + instance.data["ignoreLayersTransparency"] = ( + ignore_transparency + ) + else: + 
instance.data["ignoreLayersTransparency"] = ( + self.ignore_render_pass_transparency + ) diff --git a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py index a13a91de469..32f9bb70386 100644 --- a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py +++ b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py @@ -108,9 +108,17 @@ def process(self, instance): "Files will be rendered to folder: {}".format(output_dir) ) - if instance.data["family"] == "review": + export_type = instance.data["creator_attributes"].get("export_type", "project") + is_review = instance.data["family"] == "review" + is_playblast = instance.data["creator_identifier"] == "render.playblast" + publish_sequence_with_transparency = ( + instance.data["creator_identifier"] == "publish.sequence" and \ + not ignore_layers_transparency + ) + + if is_review or is_playblast or publish_sequence_with_transparency: result = self.render_review( - output_dir, mark_in, mark_out, scene_bg_color + output_dir, export_type, mark_in, mark_out, scene_bg_color ) else: # Render output @@ -143,6 +151,8 @@ def process(self, instance): tags = [] if "review" in instance.data["families"]: tags.append("review") + else: + tags.append("sequence") # Sequence of one frame single_file = len(repre_files) == 1 @@ -201,7 +211,7 @@ def _rename_output_files( return repre_filenames def render_review( - self, output_dir, mark_in, mark_out, scene_bg_color + self, output_dir, export_type, mark_in, mark_out, scene_bg_color ): """ Export images from TVPaint using `tv_savesequence` command. @@ -233,12 +243,13 @@ def render_review( "export_path = \"{}\"".format( first_frame_filepath.replace("\\", "/") ), - "tv_savesequence '\"'export_path'\"' {} {}".format( - mark_in, mark_out + "tv_projectsavesequence '\"'export_path'\"' \"{}\" {} {}".format( + export_type, mark_in, mark_out ) ] + if scene_bg_color: - # Change bg color back to previous scene bg color + # Change bg color back to previous scene bg colorq _scene_bg_color = copy.deepcopy(scene_bg_color) bg_type = _scene_bg_color.pop(0) orig_color_command = [ diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py index f1eb564e5e5..5aa892b391d 100644 --- a/openpype/lib/__init__.py +++ b/openpype/lib/__init__.py @@ -149,6 +149,7 @@ format_file_size, collect_frames, create_hard_link, + create_symlink, version_up, get_version_from_path, get_last_version_from_path, @@ -264,6 +265,7 @@ "format_file_size", "collect_frames", "create_hard_link", + "create_symlink", "version_up", "get_version_from_path", "get_last_version_from_path", diff --git a/openpype/lib/file_transaction.py b/openpype/lib/file_transaction.py index 80f4e81f2c3..b2f3d103ff1 100644 --- a/openpype/lib/file_transaction.py +++ b/openpype/lib/file_transaction.py @@ -4,7 +4,7 @@ import errno import six -from openpype.lib import create_hard_link +from openpype.lib import create_hard_link, create_symlink # this is needed until speedcopy for linux is fixed if sys.platform == "win32": @@ -53,6 +53,7 @@ class FileTransaction(object): MODE_COPY = 0 MODE_HARDLINK = 1 + MODE_SYMLINK = 2 def __init__(self, log=None, allow_queue_replacements=False): if log is None: @@ -78,7 +79,7 @@ def add(self, src, dst, mode=MODE_COPY): Args: src (str): Source path. dst (str): Destination path. - mode (MODE_COPY, MODE_HARDLINK): Transfer mode. + mode (MODE_COPY, MODE_HARDLINK, MODE_SYMLINK): Transfer mode. 
""" opts = {"mode": mode} @@ -143,6 +144,10 @@ def process(self): self.log.debug("Hardlinking file ... {} -> {}".format( src, dst)) create_hard_link(src, dst) + elif opts["mode"] == self.MODE_SYMLINK: + self.log.debug("Symlinking file ... {} -> {}".format( + src, dst)) + create_symlink(src, dst) self._transferred.append(dst) diff --git a/openpype/lib/path_tools.py b/openpype/lib/path_tools.py index fec6a0c47dc..7538c7ef7c8 100644 --- a/openpype/lib/path_tools.py +++ b/openpype/lib/path_tools.py @@ -65,6 +65,41 @@ def create_hard_link(src_path, dst_path): ) +def create_symlink(src_path, dst_path): + """Create symlink of file. + Args: + src_path(str): Full path to a file which is used as source for + symlink. + dst_path(str): Full path to a file where a link of source will be + added. + """ + # Use `os.symlink` if is available + # - should be for all platforms with newer python versions + if hasattr(os, "symlink"): + os.symlink(src_path, dst_path) + return + + # Windows implementation of symlinks ( + # - for older versions of python + if platform.system().lower() == "windows": + import ctypes + from ctypes.wintypes import BOOL + CreateSymLink = ctypes.windll.kernel32.CreateSymbolicLinkW + CreateSymLink.argtypes = [ + ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_void_p + ] + CreateSymLink.restype = BOOL + + res = CreateSymLink(dst_path, src_path, None) + if res == 0: + raise ctypes.WinError() + return + # Raises not implemented error if gets here + raise NotImplementedError( + "Implementation of symlink for current environment is missing." + ) + + def collect_frames(files): """Returns dict of source path and its frame, if from sequence diff --git a/openpype/lib/transcoding.py b/openpype/lib/transcoding.py index 97c8dd41abd..771f670f899 100644 --- a/openpype/lib/transcoding.py +++ b/openpype/lib/transcoding.py @@ -11,8 +11,8 @@ from .execute import run_subprocess from .vendor_bin_utils import ( - get_ffmpeg_tool_args, - get_oiio_tool_args, + get_ffmpeg_tool_path, + get_oiio_tools_path, is_oiio_supported, ) @@ -83,11 +83,11 @@ def get_oiio_info_for_input(filepath, logger=None, subimages=False): Stdout should contain xml format string. """ - args = get_oiio_tool_args( - "oiiotool", + args = [ + get_oiio_tools_path(), "--info", "-v" - ) + ] if subimages: args.append("-a") @@ -315,92 +315,6 @@ def parse_oiio_xml_output(xml_string, logger=None): return output -def get_review_info_by_layer_name(channel_names): - """Get channels info grouped by layer name. - - Finds all layers in channel names and returns list of dictionaries with - information about channels in layer. - Example output (not real world example): - [ - { - "name": "Main", - "review_channels": { - "R": "Main.red", - "G": "Main.green", - "B": "Main.blue", - "A": None, - } - }, - { - "name": "Composed", - "review_channels": { - "R": "Composed.R", - "G": "Composed.G", - "B": "Composed.B", - "A": "Composed.A", - } - }, - ... - ] - - Args: - channel_names (list[str]): List of channel names. - - Returns: - list[dict]: List of channels information. - """ - - layer_names_order = [] - rgba_by_layer_name = collections.defaultdict(dict) - channels_by_layer_name = collections.defaultdict(dict) - - for channel_name in channel_names: - layer_name = "" - last_part = channel_name - if "." 
in channel_name: - layer_name, last_part = channel_name.rsplit(".", 1) - - channels_by_layer_name[layer_name][channel_name] = last_part - if last_part.lower() not in { - "r", "red", - "g", "green", - "b", "blue", - "a", "alpha" - }: - continue - - if layer_name not in layer_names_order: - layer_names_order.append(layer_name) - # R, G, B or A - channel = last_part[0].upper() - rgba_by_layer_name[layer_name][channel] = channel_name - - # Put empty layer to the beginning of the list - # - if input has R, G, B, A channels they should be used for review - if "" in layer_names_order: - layer_names_order.remove("") - layer_names_order.insert(0, "") - - output = [] - for layer_name in layer_names_order: - rgba_layer_info = rgba_by_layer_name[layer_name] - red = rgba_layer_info.get("R") - green = rgba_layer_info.get("G") - blue = rgba_layer_info.get("B") - if not red or not green or not blue: - continue - output.append({ - "name": layer_name, - "review_channels": { - "R": red, - "G": green, - "B": blue, - "A": rgba_layer_info.get("A"), - } - }) - return output - - def get_convert_rgb_channels(channel_names): """Get first available RGB(A) group from channels info. @@ -409,7 +323,7 @@ def get_convert_rgb_channels(channel_names): # Ideal situation channels_info: [ "R", "G", "B", "A" - ] + } ``` Result will be `("R", "G", "B", "A")` @@ -417,60 +331,50 @@ def get_convert_rgb_channels(channel_names): # Not ideal situation channels_info: [ "beauty.red", - "beauty.green", + "beuaty.green", "beauty.blue", "depth.Z" ] ``` Result will be `("beauty.red", "beauty.green", "beauty.blue", None)` - Args: - channel_names (list[str]): List of channel names. - Returns: - Union[NoneType, tuple[str, str, str, Union[str, None]]]: Tuple of - 4 channel names defying channel names for R, G, B, A or None - if there is not any layer with RGB combination. + NoneType: There is not channel combination that matches RGB + combination. + tuple: Tuple of 4 channel names defying channel names for R, G, B, A + where A can be None. """ + rgb_by_main_name = collections.defaultdict(dict) + main_name_order = [""] + for channel_name in channel_names: + name_parts = channel_name.split(".") + rgb_part = name_parts.pop(-1).lower() + main_name = ".".join(name_parts) + if rgb_part in ("r", "red"): + rgb_by_main_name[main_name]["R"] = channel_name + elif rgb_part in ("g", "green"): + rgb_by_main_name[main_name]["G"] = channel_name + elif rgb_part in ("b", "blue"): + rgb_by_main_name[main_name]["B"] = channel_name + elif rgb_part in ("a", "alpha"): + rgb_by_main_name[main_name]["A"] = channel_name + else: + continue + if main_name not in main_name_order: + main_name_order.append(main_name) + + output = None + for main_name in main_name_order: + colors = rgb_by_main_name.get(main_name) or {} + red = colors.get("R") + green = colors.get("G") + blue = colors.get("B") + alpha = colors.get("A") + if red is not None and green is not None and blue is not None: + output = (red, green, blue, alpha) + break - channels_info = get_review_info_by_layer_name(channel_names) - for item in channels_info: - review_channels = item["review_channels"] - return ( - review_channels["R"], - review_channels["G"], - review_channels["B"], - review_channels["A"] - ) - return None - - -def get_review_layer_name(src_filepath): - """Find layer name that could be used for review. - - Args: - src_filepath (str): Path to input file. - - Returns: - Union[str, None]: Layer name of None. 
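
For reference, the restored helper's contract in two asserts (usage sketch; the import path follows this changeset):

```python
from openpype.lib.transcoding import get_convert_rgb_channels

# Ideal case: plain R, G, B (and optional A) channels.
assert get_convert_rgb_channels(["R", "G", "B", "A"]) == ("R", "G", "B", "A")

# Layered EXR: the first layer with a full RGB set wins; alpha may be missing.
channels = ["beauty.red", "beauty.green", "beauty.blue", "depth.Z"]
assert get_convert_rgb_channels(channels) == (
    "beauty.red", "beauty.green", "beauty.blue", None
)
```
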
- """ - - ext = os.path.splitext(src_filepath)[-1].lower() - if ext != ".exr": - return None - - # Load info about file from oiio tool - input_info = get_oiio_info_for_input(src_filepath) - if not input_info: - return None - - channel_names = input_info["channelnames"] - channels_info = get_review_info_by_layer_name(channel_names) - for item in channels_info: - # Layer name can be '', when review channels are 'R', 'G', 'B' - # without layer - return item["name"] or None - return None + return output def should_convert_for_ffmpeg(src_filepath): @@ -491,7 +395,7 @@ def should_convert_for_ffmpeg(src_filepath): if not is_oiio_supported(): return None - # Load info about file from oiio tool + # Load info about info from oiio tool input_info = get_oiio_info_for_input(src_filepath) if not input_info: return None @@ -582,11 +486,12 @@ def convert_for_ffmpeg( compression = "none" # Prepare subprocess arguments - oiio_cmd = get_oiio_tool_args( - "oiiotool", + oiio_cmd = [ + get_oiio_tools_path(), + # Don't add any additional attributes "--nosoftwareattrib", - ) + ] # Add input compression if available if compression: oiio_cmd.extend(["--compression", compression]) @@ -751,11 +656,12 @@ def convert_input_paths_for_ffmpeg( for input_path in input_paths: # Prepare subprocess arguments - oiio_cmd = get_oiio_tool_args( - "oiiotool", + oiio_cmd = [ + get_oiio_tools_path(), + # Don't add any additional attributes "--nosoftwareattrib", - ) + ] # Add input compression if available if compression: oiio_cmd.extend(["--compression", compression]) @@ -820,11 +726,11 @@ def get_ffprobe_data(path_to_file, logger=None): """ if not logger: logger = logging.getLogger(__name__) - logger.debug( + logger.info( "Getting information about input \"{}\".".format(path_to_file) ) - ffprobe_args = get_ffmpeg_tool_args("ffprobe") - args = ffprobe_args + [ + args = [ + get_ffmpeg_tool_path("ffprobe"), "-hide_banner", "-loglevel", "fatal", "-show_error", @@ -1150,7 +1056,8 @@ def convert_colorspace( view=None, display=None, additional_command_args=None, - logger=None + logger=None, + input_args=None ): """Convert source file from one color space to another. @@ -1178,13 +1085,17 @@ def convert_colorspace( if logger is None: logger = logging.getLogger(__name__) - oiio_cmd = get_oiio_tool_args( - "oiiotool", + oiio_cmd = [get_oiio_tools_path()] + + if input_args: + oiio_cmd.extend(input_args) + + oiio_cmd.extend([ input_path, # Don't add any additional attributes "--nosoftwareattrib", "--colorconfig", config_path - ) + ]) if all([target_colorspace, view, display]): raise ValueError("Colorspace and both screen and display" diff --git a/openpype/modules/deadline/deadline_module.py b/openpype/modules/deadline/deadline_module.py index 9855f8c1b10..824e9887d9e 100644 --- a/openpype/modules/deadline/deadline_module.py +++ b/openpype/modules/deadline/deadline_module.py @@ -74,3 +74,50 @@ def get_deadline_pools(webservice, log=None): return [] return response.json() + + @staticmethod + def get_deadline_data(webservice, endpoint, log=None, **kwargs): + """Get Limits groups for Deadline + Args: + webservice (str): Server url + endpoint (str): Request endpoint + log (Logger) + kwargs (Any): Request payload content as key=value pairs + Returns: + Any: Returns the json-encoded content of a response, if any. + Throws: + RuntimeError: If Deadline webservice is unreachable. 
+ """ + if not log: + log = Logger.get_logger(__name__) + + request = "{}/api/{}".format( + webservice, + endpoint + ) + + # Construct the full request with arguments + arguments = [] + for key, value in kwargs.items(): + new_argument = "{}={}".format(key, value) + arguments.append(new_argument) + + if arguments: + arguments = "&".join(arguments) + request = "{}?{}".format(request, arguments) + + try: + response = requests_get(request) + except requests.exceptions.ConnectionError as exc: + msg = "Cannot connect to DL web service {}".format(webservice) + log.error(msg) + six.reraise( + DeadlineWebserviceError, + DeadlineWebserviceError("{} - {}".format(msg, exc)), + sys.exc_info()[2] + ) + if not response.ok: + log.warning("The data requested could not be retrieved") + return [] + + return response.json() diff --git a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py index 009375e87ee..e4405c91aae 100644 --- a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py @@ -11,6 +11,7 @@ from openpype.pipeline import legacy_io from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo +from openpype.modules.deadline.utils import set_custom_deadline_name from openpype.tests.lib import is_in_tests from openpype.lib import is_running_from_build @@ -50,12 +51,23 @@ def get_job_info(self): dln_job_info = DeadlineJobInfo(Plugin="AfterEffects") context = self._instance.context + filename = os.path.basename(self._instance.data["source"]) + + job_name = set_custom_deadline_name( + self._instance, + filename, + "deadline_job_name" + ) + batch_name = set_custom_deadline_name( + self._instance, + filename, + "deadline_batch_name" + ) - batch_name = os.path.basename(self._instance.data["source"]) if is_in_tests(): batch_name += datetime.now().strftime("%d%m%Y%H%M%S") - dln_job_info.Name = self._instance.data["name"] - dln_job_info.BatchName = batch_name + dln_job_info.Name = job_name + dln_job_info.BatchName = "Group: " + batch_name dln_job_info.Plugin = "AfterEffects" dln_job_info.UserName = context.data.get( "deadlineUser", getpass.getuser()) diff --git a/openpype/modules/deadline/plugins/publish/submit_celaction_deadline.py b/openpype/modules/deadline/plugins/publish/submit_celaction_deadline.py index 47a0a257558..730f5925c31 100644 --- a/openpype/modules/deadline/plugins/publish/submit_celaction_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_celaction_deadline.py @@ -5,6 +5,8 @@ import requests import pyblish.api +from openpype.modules.deadline.utils import set_custom_deadline_name + class CelactionSubmitDeadline(pyblish.api.InstancePlugin): """Submit CelAction2D scene to Deadline @@ -74,6 +76,12 @@ def payload_submit(self, render_path = os.path.normpath(render_path) script_name = os.path.basename(script_path) + batch_name = set_custom_deadline_name( + instance, + script_name, + "deadline_batch_name" + ) + for item in instance.context: if "workfile" in item.data["family"]: msg = "Workfile (scene) must be published along" @@ -93,7 +101,11 @@ def payload_submit(self, "Using published scene for render {}".format(script_path) ) - jobname = "%s - %s" % (script_name, instance.name) + jobname = set_custom_deadline_name( + instance, + script_name, + "deadline_job_name" + ) output_filename_0 = 
self.preview_fname(render_path)
@@ -136,7 +148,7 @@ def payload_submit,
             "Plugin": "CelAction",

             # Top-level group name
-            "BatchName": script_name,
+            "BatchName": "Group: " + batch_name,

             # Arbitrary username, for visualisation in Monitor
             "UserName": self._deadline_user,
diff --git a/openpype/modules/deadline/plugins/publish/submit_fusion_deadline.py b/openpype/modules/deadline/plugins/publish/submit_fusion_deadline.py
index 70aa12956d6..5051e0362e6 100644
--- a/openpype/modules/deadline/plugins/publish/submit_fusion_deadline.py
+++ b/openpype/modules/deadline/plugins/publish/submit_fusion_deadline.py
@@ -14,6 +14,7 @@
     BoolDef,
     NumberDef
 )
+from openpype.modules.deadline.utils import set_custom_deadline_name


 class FusionSubmitDeadline(
@@ -141,6 +142,16 @@ def process(self, instance):
         )

         filename = os.path.basename(script_path)
+        job_name = set_custom_deadline_name(
+            instance,
+            filename,
+            "deadline_job_name"
+        )
+        batch_name = set_custom_deadline_name(
+            instance,
+            filename,
+            "deadline_batch_name"
+        )

         # Documentation for keys available at:
         # https://docs.thinkboxsoftware.com
@@ -149,13 +160,13 @@ def process(self, instance):
         payload = {
             "JobInfo": {
                 # Top-level group name
-                "BatchName": filename,
+                "BatchName": "Group: " + batch_name,

                 # Asset dependency to wait for at least the scene file to sync.
                 "AssetDependency0": script_path,

                 # Job name, as seen in Monitor
-                "Name": filename,
+                "Name": job_name,

                 "Priority": attribute_values.get(
                     "priority", self.priority),
diff --git a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py
index 17e672334cf..1b4a6f17871 100644
--- a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py
+++ b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py
@@ -13,6 +13,7 @@
 from openpype.pipeline import legacy_io
 from openpype_modules.deadline import abstract_submit_deadline
 from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
+from openpype.modules.deadline.utils import set_custom_deadline_name
 from openpype.tests.lib import is_in_tests
 from openpype.lib import is_running_from_build
@@ -252,7 +253,6 @@ class HarmonySubmitDeadline(
     def get_job_info(self):
         job_info = DeadlineJobInfo("Harmony")
-        job_info.Name = self._instance.data["name"]
         job_info.Plugin = "HarmonyOpenPype"
         job_info.Frames = "{}-{}".format(
             self._instance.data["frameStartHandle"],
@@ -264,10 +264,22 @@ def get_job_info(self):
         job_info.Pool = self._instance.data.get("primaryPool")
         job_info.SecondaryPool = self._instance.data.get("secondaryPool")
         job_info.ChunkSize = self.chunk_size
-        batch_name = os.path.basename(self._instance.data["source"])
-        if is_in_tests():
+        filename = os.path.basename(self._instance.data["source"])
+        job_name = set_custom_deadline_name(
+            self._instance,
+            filename,
+            "deadline_job_name"
+        )
+
+        batch_name = set_custom_deadline_name(
+            self._instance,
+            filename,
+            "deadline_batch_name"
+        )
+        if is_in_tests():
             batch_name += datetime.now().strftime("%d%m%Y%H%M%S")
-        job_info.BatchName = batch_name
+        job_info.BatchName = "Group: " + batch_name
+        job_info.Name = job_name
         job_info.Department = self.department
         job_info.Group = self.group
@@ -299,8 +311,8 @@ def get_job_info(self):
         if value:
             job_info.EnvironmentKeyValue[key] = value

-        # to recognize render jobs
-        job_info.add_render_job_env_var()
+        # to recognize job from PYPE for turning Event On/Off
+        job_info.EnvironmentKeyValue["OPENPYPE_RENDER_JOB"] = "1"

         return job_info

@@ -369,7 
+381,7 @@ def get_plugin_info(self): # rendering, we need to unzip it. published_scene = Path( self.from_published_scene(False)) - self.log.debug(f"Processing {published_scene.as_posix()}") + self.log.info(f"Processing {published_scene.as_posix()}") xstage_path = self._unzip_scene_file(published_scene) render_path = xstage_path.parent / "renders" diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py b/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py index 39c0c3afe48..514603d86f0 100644 --- a/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py +++ b/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py @@ -76,7 +76,7 @@ def process(self, context): "JobInfo": { "Plugin": "Houdini", "Pool": "houdini", # todo: remove hardcoded pool - "BatchName": batch_name, + "BatchName": "Group: " + batch_name, "Comment": context.data.get("comment", ""), "Priority": 50, "Frames": "1-1", # Always trigger a single frame diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py index 8f21a920be5..e17c707cf92 100644 --- a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py @@ -10,6 +10,7 @@ from openpype.tests.lib import is_in_tests from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo +from openpype.modules.deadline.utils import set_custom_deadline_name from openpype.lib import is_running_from_build @@ -55,8 +56,19 @@ def get_job_info(self): filepath = context.data["currentFile"] filename = os.path.basename(filepath) - job_info.Name = "{} - {}".format(filename, instance.name) - job_info.BatchName = filename + job_name = set_custom_deadline_name( + instance, + filename, + "deadline_job_name" + ) + batch_name = set_custom_deadline_name( + instance, + filename, + "deadline_batch_name" + ) + + job_info.Name = job_name + job_info.BatchName = "Group: " + batch_name job_info.Plugin = "Houdini" job_info.UserName = context.data.get( "deadlineUser", getpass.getuser()) diff --git a/openpype/modules/deadline/plugins/publish/submit_max_deadline.py b/openpype/modules/deadline/plugins/publish/submit_max_deadline.py index 63c6e4a0c72..ac42a4cf652 100644 --- a/openpype/modules/deadline/plugins/publish/submit_max_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_max_deadline.py @@ -12,9 +12,7 @@ legacy_io, OpenPypePyblishPluginMixin ) -from openpype.pipeline.publish.lib import ( - replace_with_published_scene_path -) +from openpype.settings import get_project_settings from openpype.hosts.max.api.lib import ( get_current_renderer, get_multipass_setting @@ -22,7 +20,7 @@ from openpype.hosts.max.api.lib_rendersettings import RenderSettings from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo -from openpype.lib import is_running_from_build +from openpype.modules.deadline.utils import set_custom_deadline_name @attr.s @@ -77,8 +75,19 @@ def get_job_info(self): src_filepath = context.data["currentFile"] src_filename = os.path.basename(src_filepath) - job_info.Name = "%s - %s" % (src_filename, instance.name) - job_info.BatchName = src_filename + job_name = set_custom_deadline_name( + instance, + src_filename, + "deadline_job_name" + 
) + batch_name = set_custom_deadline_name( + instance, + src_filename, + "deadline_batch_name" + ) + + job_info.Name = job_name + job_info.BatchName = "Group: " + batch_name job_info.Plugin = instance.data["plugin"] job_info.UserName = context.data.get("deadlineUser", getpass.getuser()) job_info.EnableAutoTimeout = True @@ -113,13 +122,9 @@ def get_job_info(self): "AVALON_TASK", "AVALON_APP_NAME", "OPENPYPE_DEV", + "OPENPYPE_VERSION", "IS_TEST" ] - - # Add OpenPype version if we are running from build. - if is_running_from_build(): - keys.append("OPENPYPE_VERSION") - # Add mongo url if it's enabled if self._instance.context.data.get("deadlinePassMongoUrl"): keys.append("OPENPYPE_MONGO") @@ -133,8 +138,8 @@ def get_job_info(self): continue job_info.EnvironmentKeyValue[key] = value - # to recognize render jobs - job_info.add_render_job_env_var() + # to recognize job from PYPE for turning Event On/Off + job_info.EnvironmentKeyValue["OPENPYPE_RENDER_JOB"] = "1" job_info.EnvironmentKeyValue["OPENPYPE_LOG_NO_COLORS"] = "1" # Add list of expected files to job @@ -176,6 +181,7 @@ def process_submission(self): first_file = next(self._iter_expected_files(files)) output_dir = os.path.dirname(first_file) instance.data["outputDir"] = output_dir + instance.data["toBeRenderedOn"] = "deadline" filename = os.path.basename(filepath) @@ -185,18 +191,20 @@ def process_submission(self): } self.log.debug("Submitting 3dsMax render..") - project_settings = instance.context.data["project_settings"] - payload = self._use_published_name(payload_data, project_settings) + payload = self._use_published_name(payload_data) job_info, plugin_info = payload self.submit(self.assemble_payload(job_info, plugin_info)) - def _use_published_name(self, data, project_settings): + def _use_published_name(self, data): instance = self._instance job_info = copy.deepcopy(self.job_info) plugin_info = copy.deepcopy(self.plugin_info) plugin_data = {} + project_setting = get_project_settings( + legacy_io.Session["AVALON_PROJECT"] + ) - multipass = get_multipass_setting(project_settings) + multipass = get_multipass_setting(project_setting) if multipass: plugin_data["DisableMultipass"] = 0 else: @@ -237,10 +245,7 @@ def _use_published_name(self, data, project_settings): if renderer == "Redshift_Renderer": plugin_data["redshift_SeparateAovFiles"] = instance.data.get( "separateAovFiles") - if instance.data["cameras"]: - plugin_info["Camera0"] = None - plugin_info["Camera"] = instance.data["cameras"][0] - plugin_info["Camera1"] = instance.data["cameras"][0] + self.log.debug("plugin data:{}".format(plugin_data)) plugin_info.update(plugin_data) @@ -251,8 +256,7 @@ def from_published_scene(self, replace_in_path=True): if instance.data["renderer"] == "Redshift_Renderer": self.log.debug("Using Redshift...published scene wont be used..") replace_in_path = False - return replace_with_published_scene_path( - instance, replace_in_path) + return replace_in_path @staticmethod def _iter_expected_files(exp): diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py index 74ecdbe7bf0..7e112d1ce71 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py @@ -45,6 +45,7 @@ from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo +from openpype.modules.deadline.utils import set_custom_deadline_name 
from openpype.tests.lib import is_in_tests from openpype.lib import is_running_from_build from openpype.pipeline.farm.tools import iter_expected_files @@ -152,11 +153,22 @@ def get_job_info(self): src_filepath = context.data["currentFile"] src_filename = os.path.basename(src_filepath) + job_name = set_custom_deadline_name( + instance, + src_filename, + "deadline_job_name" + ) + batch_name = set_custom_deadline_name( + instance, + src_filename, + "deadline_batch_name" + ) + if is_in_tests(): - src_filename += datetime.now().strftime("%d%m%Y%H%M%S") + batch_name += datetime.now().strftime("%d%m%Y%H%M%S") - job_info.Name = "%s - %s" % (src_filename, instance.name) - job_info.BatchName = src_filename + job_info.Name = job_name + job_info.BatchName = "Group: " + batch_name job_info.Plugin = instance.data.get("mayaRenderPlugin", "MayaBatch") job_info.UserName = context.data.get("deadlineUser", getpass.getuser()) @@ -172,10 +184,13 @@ def get_job_info(self): job_info.SecondaryPool = instance.data.get("secondaryPool") job_info.Comment = context.data.get("comment") job_info.Priority = instance.data.get("priority", self.priority) + job_info.MachineLimit = instance.data.get("machineLimit", 0) if self.group != "none" and self.group: job_info.Group = self.group + self.limit = instance.data.get('limits') + if self.limit: job_info.LimitGroups = ",".join(self.limit) diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py index 0d23f44333b..ab9a597ea97 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py @@ -1,34 +1,19 @@ import os -import attr +import requests from datetime import datetime from maya import cmds -from openpype import AYON_SERVER_ENABLED from openpype.pipeline import legacy_io, PublishXmlValidationError +from openpype.settings import get_project_settings from openpype.tests.lib import is_in_tests from openpype.lib import is_running_from_build -from openpype_modules.deadline import abstract_submit_deadline -from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo +from openpype.modules.deadline.utils import set_custom_deadline_name import pyblish.api -@attr.s -class MayaPluginInfo(object): - Build = attr.ib(default=None) # Don't force build - StrictErrorChecking = attr.ib(default=True) - - SceneFile = attr.ib(default=None) # Input scene - Version = attr.ib(default=None) # Mandatory for Deadline - ProjectPath = attr.ib(default=None) - - ScriptJob = attr.ib(default=True) - ScriptFilename = attr.ib(default=None) - - -class MayaSubmitRemotePublishDeadline( - abstract_submit_deadline.AbstractSubmitDeadline): +class MayaSubmitRemotePublishDeadline(pyblish.api.InstancePlugin): """Submit Maya scene to perform a local publish in Deadline. Publishing in Deadline can be helpful for scenes that publish very slow. 
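
Without `AbstractSubmitDeadline`, this plugin now assembles and posts the raw submission payload itself. In outline, a hedged sketch of that flow built only from the calls visible in this changeset (values illustrative):

```python
import requests


def submit_to_deadline(deadline_url, job_info, plugin_info, env=None):
    """Minimal manual Deadline submission, as the rewritten plugin does."""
    # Deadline expects the job environment as indexed JobInfo entries
    for index, (key, value) in enumerate(sorted((env or {}).items())):
        job_info["EnvironmentKeyValue%d" % index] = "{}={}".format(key, value)

    payload = {
        "JobInfo": job_info,        # e.g. {"Plugin": "MayaBatch", "Name": ...}
        "PluginInfo": plugin_info,  # e.g. {"SceneFile": ..., "ScriptJob": True}
        "AuxFiles": [],             # mandatory for Deadline, may be empty
    }
    response = requests.post(
        "{}/api/jobs".format(deadline_url), json=payload, timeout=10)
    if not response.ok:
        raise RuntimeError(response.text)
    return response.json()
```
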
@@ -52,6 +37,13 @@ class MayaSubmitRemotePublishDeadline( targets = ["local"] def process(self, instance): + project_name = instance.context.data["projectName"] + # TODO settings can be received from 'context.data["project_settings"]' + settings = get_project_settings(project_name) + # use setting for publish job on farm, no reason to have it separately + deadline_publish_job_sett = (settings["deadline"] + ["publish"] + ["ProcessSubmittedJobOnFarm"]) # Ensure no errors so far if not (all(result["success"] @@ -63,39 +55,61 @@ def process(self, instance): "Skipping submission..") return - super(MayaSubmitRemotePublishDeadline, self).process(instance) - - def get_job_info(self): - instance = self._instance - context = instance.context - - project_name = instance.context.data["projectName"] scene = instance.context.data["currentFile"] scenename = os.path.basename(scene) - job_name = "{scene} [PUBLISH]".format(scene=scenename) - batch_name = "{code} - {scene}".format(code=project_name, - scene=scenename) - + job_name = set_custom_deadline_name( + instance, + scenename, + "deadline_job_name" + ) + batch_name = set_custom_deadline_name( + instance, + scenename, + "deadline_batch_name" + ) if is_in_tests(): batch_name += datetime.now().strftime("%d%m%Y%H%M%S") - job_info = DeadlineJobInfo(Plugin="MayaBatch") - job_info.BatchName = batch_name - job_info.Name = job_name - job_info.UserName = context.data.get("user") - job_info.Comment = context.data.get("comment", "") - - # use setting for publish job on farm, no reason to have it separately - project_settings = context.data["project_settings"] - deadline_publish_job_sett = project_settings["deadline"]["publish"]["ProcessSubmittedJobOnFarm"] # noqa - job_info.Department = deadline_publish_job_sett["deadline_department"] - job_info.ChunkSize = deadline_publish_job_sett["deadline_chunk_size"] - job_info.Priority = deadline_publish_job_sett["deadline_priority"] - job_info.Group = deadline_publish_job_sett["deadline_group"] - job_info.Pool = deadline_publish_job_sett["deadline_pool"] - - # Include critical environment variables with submission + Session + # Generate the payload for Deadline submission + payload = { + "JobInfo": { + "Plugin": "MayaBatch", + "BatchName": "Group: " + batch_name, + "Name": job_name, + "UserName": instance.context.data["user"], + "Comment": instance.context.data.get("comment", ""), + # "InitialStatus": state + "Department": deadline_publish_job_sett["deadline_department"], + "ChunkSize": deadline_publish_job_sett["deadline_chunk_size"], + "Priority": deadline_publish_job_sett["deadline_priority"], + "Group": deadline_publish_job_sett["deadline_group"], + "Pool": deadline_publish_job_sett["deadline_pool"], + }, + "PluginInfo": { + + "Build": None, # Don't force build + "StrictErrorChecking": True, + "ScriptJob": True, + + # Inputs + "SceneFile": scene, + "ScriptFilename": "{OPENPYPE_REPOS_ROOT}/openpype/scripts/remote_publish.py", # noqa + + # Mandatory for Deadline + "Version": cmds.about(version=True), + + # Resolve relative references + "ProjectPath": cmds.workspace(query=True, + rootDirectory=True), + + }, + + # Mandatory for Deadline, may be empty + "AuxFiles": [] + } + + # Include critical environment variables with submission + api.Session keys = [ "FTRACK_API_USER", "FTRACK_API_KEY", @@ -111,30 +125,29 @@ def get_job_info(self): # TODO replace legacy_io with context.data environment["AVALON_PROJECT"] = project_name - environment["AVALON_ASSET"] = instance.context.data["asset"] - environment["AVALON_TASK"] = 
instance.context.data["task"] + environment["AVALON_ASSET"] = legacy_io.Session["AVALON_ASSET"] + environment["AVALON_TASK"] = legacy_io.Session["AVALON_TASK"] environment["AVALON_APP_NAME"] = os.environ.get("AVALON_APP_NAME") environment["OPENPYPE_LOG_NO_COLORS"] = "1" + environment["OPENPYPE_REMOTE_JOB"] = "1" environment["OPENPYPE_USERNAME"] = instance.context.data["user"] environment["OPENPYPE_PUBLISH_SUBSET"] = instance.data["subset"] environment["OPENPYPE_REMOTE_PUBLISH"] = "1" - if AYON_SERVER_ENABLED: - environment["AYON_REMOTE_PUBLISH"] = "1" - else: - environment["OPENPYPE_REMOTE_PUBLISH"] = "1" - for key, value in environment.items(): - job_info.EnvironmentKeyValue[key] = value - - def get_plugin_info(self): - - scene = self._instance.context.data["currentFile"] - - plugin_info = MayaPluginInfo() - plugin_info.SceneFile = scene - plugin_info.ScriptFilename = "{OPENPYPE_REPOS_ROOT}/openpype/scripts/remote_publish.py" # noqa - plugin_info.Version = cmds.about(version=True) - plugin_info.ProjectPath = cmds.workspace(query=True, - rootDirectory=True) - - return attr.asdict(plugin_info) + payload["JobInfo"].update({ + "EnvironmentKeyValue%d" % index: "{key}={value}".format( + key=key, + value=environment[key] + ) for index, key in enumerate(environment) + }) + + self.log.info("Submitting Deadline job ...") + deadline_url = instance.context.data["defaultDeadline"] + # if custom one is set in instance, use that + if instance.data.get("deadlineUrl"): + deadline_url = instance.data.get("deadlineUrl") + assert deadline_url, "Requires Deadline Webservice URL" + url = "{}/api/jobs".format(deadline_url) + response = requests.post(url, json=payload, timeout=10) + if not response.ok: + raise Exception(response.text) diff --git a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py index 0295c2b7605..e0e4e77ca40 100644 --- a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py @@ -8,8 +8,6 @@ import pyblish.api import nuke - -from openpype import AYON_SERVER_ENABLED from openpype.pipeline import legacy_io from openpype.pipeline.publish import ( OpenPypePyblishPluginMixin @@ -20,6 +18,7 @@ BoolDef, NumberDef ) +from openpype.modules.deadline.utils import set_custom_deadline_name class NukeSubmitDeadline(pyblish.api.InstancePlugin, @@ -90,6 +89,7 @@ def process(self, instance): if not instance.data.get("farm"): self.log.debug("Skipping local instance.") return + instance.data["attributeValues"] = self.get_attr_values_from_data( instance.data) @@ -97,6 +97,7 @@ def process(self, instance): instance.data["suspend_publish"] = instance.data["attributeValues"][ "suspend_publish"] + instance.data["toBeRenderedOn"] = "deadline" families = instance.data["families"] node = instance.data["transientData"]["node"] @@ -121,10 +122,13 @@ def process(self, instance): render_path = instance.data['path'] script_path = context.data["currentFile"] - for item_ in context: - if "workfile" in item_.data["family"]: - template_data = item_.data.get("anatomyData") - rep = item_.data.get("representations")[0].get("name") + for item in context: + if "workfile" in item.data["families"]: + msg = "Workfile (scene) must be published along" + assert item.data["publish"] is True, msg + + template_data = item.data.get("anatomyData") + rep = item.data.get("representations")[0].get("name") template_data["representation"] = rep template_data["ext"] = rep 
template_data["comment"] = None @@ -136,24 +140,19 @@ def process(self, instance): "Using published scene for render {}".format(script_path) ) - # only add main rendering job if target is not frames_farm - r_job_response_json = None - if instance.data["render_target"] != "frames_farm": - r_job_response = self.payload_submit( - instance, - script_path, - render_path, - node.name(), - submit_frame_start, - submit_frame_end - ) - r_job_response_json = r_job_response.json() - instance.data["deadlineSubmissionJob"] = r_job_response_json - - # Store output dir for unified publisher (filesequence) - instance.data["outputDir"] = os.path.dirname( - render_path).replace("\\", "/") - instance.data["publishJobState"] = "Suspended" + response = self.payload_submit( + instance, + script_path, + render_path, + node.name(), + submit_frame_start, + submit_frame_end + ) + # Store output dir for unified publisher (filesequence) + instance.data["deadlineSubmissionJob"] = response.json() + instance.data["outputDir"] = os.path.dirname( + render_path).replace("\\", "/") + instance.data["publishJobState"] = "Suspended" if instance.data.get("bakingNukeScripts"): for baking_script in instance.data["bakingNukeScripts"]: @@ -161,20 +160,18 @@ def process(self, instance): script_path = baking_script["bakeScriptPath"] exe_node_name = baking_script["bakeWriteNodeName"] - b_job_response = self.payload_submit( + resp = self.payload_submit( instance, script_path, render_path, exe_node_name, submit_frame_start, submit_frame_end, - r_job_response_json, - baking_submission=True + response.json() ) # Store output dir for unified publisher (filesequence) - instance.data["deadlineSubmissionJob"] = b_job_response.json() - + instance.data["deadlineSubmissionJob"] = resp.json() instance.data["publishJobState"] = "Suspended" # add to list of job Id @@ -182,7 +179,7 @@ def process(self, instance): instance.data["bakingSubmissionJobs"] = [] instance.data["bakingSubmissionJobs"].append( - b_job_response.json()["_id"]) + resp.json()["_id"]) # redefinition of families if "render" in instance.data["family"]: @@ -201,32 +198,21 @@ def payload_submit( exe_node_name, start_frame, end_frame, - response_data=None, - baking_submission=False, + response_data=None ): - """Submit payload to Deadline - - Args: - instance (pyblish.api.Instance): pyblish instance - script_path (str): path to nuke script - render_path (str): path to rendered images - exe_node_name (str): name of the node to render - start_frame (int): start frame - end_frame (int): end frame - response_data Optional[dict]: response data from - previous submission - baking_submission Optional[bool]: if it's baking submission - - Returns: - requests.Response - """ render_dir = os.path.normpath(os.path.dirname(render_path)) + filename = os.path.basename(script_path) - # batch name - src_filepath = instance.context.data["currentFile"] - batch_name = os.path.basename(src_filepath) - job_name = os.path.basename(render_path) - + job_name = set_custom_deadline_name( + instance, + filename, + "deadline_job_name" + ) + batch_name = set_custom_deadline_name( + instance, + filename, + "deadline_batch_name" + ) if is_in_tests(): batch_name += datetime.now().strftime("%d%m%Y%H%M%S") @@ -243,12 +229,15 @@ def payload_submit( # resolve any limit groups limit_groups = self.get_limit_groups() - self.log.debug("Limit groups: `{}`".format(limit_groups)) + self.log.info("Limit groups: `{}`".format(limit_groups)) payload = { "JobInfo": { # Top-level group name - "BatchName": batch_name, + "BatchName": 
"Group: " + batch_name, + + # Asset dependency to wait for at least the scene file to sync. + # "AssetDependency0": script_path, # Job name, as seen in Monitor "Name": job_name, @@ -313,17 +302,12 @@ def payload_submit( "AuxFiles": [] } - # TODO: rewrite for baking with sequences - if baking_submission: - payload["JobInfo"].update({ - "JobType": "Normal", - "ChunkSize": 99999999 - }) - if response_data.get("_id"): payload["JobInfo"].update({ + "JobType": "Normal", "BatchName": response_data["Props"]["Batch"], "JobDependency0": response_data["_id"], + "ChunkSize": 99999999 }) # Include critical environment variables with submission @@ -363,14 +347,8 @@ def payload_submit( if _path.lower().startswith('openpype_'): environment[_path] = os.environ[_path] - # to recognize render jobs - if AYON_SERVER_ENABLED: - environment["AYON_BUNDLE_NAME"] = os.environ["AYON_BUNDLE_NAME"] - render_job_label = "AYON_RENDER_JOB" - else: - render_job_label = "OPENPYPE_RENDER_JOB" - - environment[render_job_label] = "1" + # to recognize job from PYPE for turning Event On/Off + environment["OPENPYPE_RENDER_JOB"] = "1" # finally search replace in values of any key if self.env_search_replace_values: @@ -386,10 +364,10 @@ def payload_submit( }) plugin = payload["JobInfo"]["Plugin"] - self.log.debug("using render plugin : {}".format(plugin)) + self.log.info("using render plugin : {}".format(plugin)) - self.log.debug("Submitting..") - self.log.debug(json.dumps(payload, indent=4, sort_keys=True)) + self.log.info("Submitting..") + self.log.info(json.dumps(payload, indent=4, sort_keys=True)) # adding expectied files to instance.data self.expected_files( diff --git a/openpype/modules/deadline/utils.py b/openpype/modules/deadline/utils.py new file mode 100644 index 00000000000..ee1720603fa --- /dev/null +++ b/openpype/modules/deadline/utils.py @@ -0,0 +1,56 @@ +import os +import re + +from openpype.settings import get_current_project_settings + + +class SafeDict(dict): + def __missing__(self, key): + return '{' + key + '}' + + +def set_custom_deadline_name(instance, filename, setting): + context = instance.context + basename, ext = os.path.splitext(filename) + subversion = basename.split("_")[-1] + version = "v" + str(instance.data.get("version")).zfill(3) + + if subversion == version: + subversion = "" + + anatomy_data = context.data.get("anatomyData") + + formatting_data = { + "asset": anatomy_data.get("asset"), + "task": anatomy_data.get("task"), + "subset": instance.data.get("subset"), + "version": version, + "project": anatomy_data.get("project"), + "family": instance.data.get("family"), + "comment": instance.data.get("comment"), + "subversion": subversion, + "inst_name": instance.data.get("name"), + "ext": ext[1:] + } + + custom_name_settings = get_current_project_settings()["deadline"][setting] # noqa + try: + custom_name = custom_name_settings.format_map( + SafeDict(**formatting_data) + ) + + for m in re.finditer("__", custom_name): + custom_name_list = list(custom_name) + custom_name_list.pop(m.start()) + custom_name = "".join(custom_name_list) + + if custom_name.endswith("_"): + custom_name = custom_name[:-1] + except Exception as e: + raise KeyError( + "OpenPype Studio Settings (Deadline section): Syntax issue(s) " + "in \"Job Name\" or \"Batch Name\" for the current project. 
" + "Error: {}".format(e) + ) + + return custom_name diff --git a/openpype/modules/ftrack/launch_hooks/post_ftrack_changes.py b/openpype/modules/ftrack/launch_hooks/post_ftrack_changes.py index ac4e499e417..450134f9e50 100644 --- a/openpype/modules/ftrack/launch_hooks/post_ftrack_changes.py +++ b/openpype/modules/ftrack/launch_hooks/post_ftrack_changes.py @@ -126,6 +126,21 @@ def ftrack_status_change(self, session, entity, project_name): ent_path = "/".join( [ent["name"] for ent in entity["link"]] ) + + change_statuses_settings = project_settings["ftrack"]["user_handlers"][ + "application_launch_statuses" + ] + ignored_statuses = [status.lower() for status in change_statuses_settings["ignored_statuses"]] # noqa + + if change_statuses_settings["enabled"] and actual_status in ignored_statuses: # noqa + self.log.debug( + "Ftrack status is '{}' for {}. " + "No status change.".format( + actual_status, ent_path + ) + ) + return + while True: next_status_name = None for key, value in status_mapping.items(): diff --git a/openpype/modules/kitsu/plugins/publish/integrate_kitsu_note.py b/openpype/modules/kitsu/plugins/publish/integrate_kitsu_note.py index b66e1f01e03..066c45bb5cd 100644 --- a/openpype/modules/kitsu/plugins/publish/integrate_kitsu_note.py +++ b/openpype/modules/kitsu/plugins/publish/integrate_kitsu_note.py @@ -48,6 +48,12 @@ def replace_missing_key(match): pattern = r"\{([^}]*)\}" return re.sub(pattern, replace_missing_key, template) + def _get_representations_with_sequence_tag(self, representations): + return [ + repr for repr in representations + if 'sequence' in repr.get("tags", []) + ] + def process(self, context): for instance in context: # Check if instance is a review by checking its family @@ -55,7 +61,12 @@ def process(self, context): families = set( [instance.data["family"]] + instance.data.get("families", []) ) - if "review" not in families: + representations = instance.data.get("representations", []) + + # Subset should have a review or a kitsureview tag + is_kitsu_review = self._get_representations_with_sequence_tag(representations) + is_review = "review" in families + if not is_review and not is_kitsu_review: continue kitsu_task = instance.data.get("kitsu_task") diff --git a/openpype/pipeline/workfile/workfile_template_builder.py b/openpype/pipeline/workfile/workfile_template_builder.py index 8e199665afe..93a0d14c1ec 100644 --- a/openpype/pipeline/workfile/workfile_template_builder.py +++ b/openpype/pipeline/workfile/workfile_template_builder.py @@ -24,11 +24,13 @@ get_linked_assets, get_representations, ) +from openpype.client.entities import get_projects from openpype.settings import ( get_project_settings, get_system_settings, ) -from openpype.host import IWorkfileHost, HostBase +from openpype.host import IWorkfileHost +from openpype.host import HostBase from openpype.lib import ( Logger, StringTemplate, @@ -36,16 +38,12 @@ attribute_definitions, ) from openpype.lib.attribute_definitions import get_attributes_keys -from openpype.pipeline import Anatomy +from openpype.pipeline import legacy_io, Anatomy from openpype.pipeline.load import ( get_loaders_by_name, get_contexts_for_repre_docs, load_with_repre_context, ) -from openpype.pipeline.action import ( - get_actions_by_name, - action_with_repre_context -) from openpype.pipeline.create import ( discover_legacy_creator_plugins, @@ -116,7 +114,6 @@ def __init__(self, host): # Where created objects of placeholder plugins will be stored self._placeholder_plugins = None self._loaders_by_name = None - self._actions_by_name = 
None self._creators_by_name = None self._create_context = None @@ -127,32 +124,23 @@ def __init__(self, host): self._linked_asset_docs = None self._task_type = None + self._project_name = legacy_io.active_project() + @property def project_name(self): - if isinstance(self._host, HostBase): - return self._host.get_current_project_name() - return os.getenv("AVALON_PROJECT") + return self._project_name + + @project_name.setter + def project_name(self, name): + self._project_name = name @property def current_asset_name(self): - if isinstance(self._host, HostBase): - return self._host.get_current_asset_name() - return os.getenv("AVALON_ASSET") + return legacy_io.Session["AVALON_ASSET"] @property def current_task_name(self): - if isinstance(self._host, HostBase): - return self._host.get_current_task_name() - return os.getenv("AVALON_TASK") - - def get_current_context(self): - if isinstance(self._host, HostBase): - return self._host.get_current_context() - return { - "project_name": self.project_name, - "asset_name": self.current_asset_name, - "task_name": self.current_task_name - } + return legacy_io.Session["AVALON_TASK"] @property def system_settings(self): @@ -252,7 +240,6 @@ def refresh(self): self._placeholder_plugins = None self._loaders_by_name = None - self._actions_by_name = None self._creators_by_name = None self._current_asset_doc = None @@ -270,11 +257,6 @@ def get_loaders_by_name(self): self._loaders_by_name = get_loaders_by_name() return self._loaders_by_name - def get_actions_by_name(self): - if self._actions_by_name is None: - self._actions_by_name = get_actions_by_name() - return self._actions_by_name - def _collect_legacy_creators(self): creators_by_name = {} for creator in discover_legacy_creator_plugins(): @@ -815,9 +797,10 @@ def get_template_preset(self): fill_data["root"] = anatomy.roots fill_data["project"] = { "name": project_name, - "code": anatomy.project_code, + "code": anatomy["attributes"]["code"] } + result = StringTemplate.format_template(path, fill_data) if result.solved: path = result.normalized() @@ -876,6 +859,7 @@ class PlaceholderPlugin(object): def __init__(self, builder): self._builder = builder + self._project_name = self.builder.project_name @property def builder(self): @@ -889,7 +873,11 @@ def builder(self): @property def project_name(self): - return self._builder.project_name + return self._project_name + + @project_name.setter + def project_name(self, name): + self._project_name = name @property def log(self): @@ -1272,6 +1260,14 @@ def get_load_plugin_options(self, options=None): ] loader_items = list(sorted(loader_items, key=lambda i: i["label"])) + libraries_project_items = [ + { + "label": "From Library : {}".format(project_name), + "value": project_name + } + for project_name in get_library_project_names() + ] + options = options or {} # Get families from all loaders excluding "*" @@ -1283,13 +1279,6 @@ def get_load_plugin_options(self, options=None): # Sort for readability families = list(sorted(families)) - actions_by_name = get_actions_by_name() - actions_items = [{"value": "", "label": ""}] - actions_items.extend( - {"value": action_name, "label": action.label or action_name} - for action_name, action in actions_by_name.items() - ) - return [ attribute_definitions.UISeparatorDef(), attribute_definitions.UILabelDef("Main attributes"), @@ -1297,13 +1286,13 @@ def get_load_plugin_options(self, options=None): attribute_definitions.EnumDef( "builder_type", - label="Asset Builder Type", + label="Asset Builder Source", 
default=options.get("builder_type"),
                 items=[
-                    {"label": "Current asset", "value": "context_asset"},
-                    {"label": "Linked assets", "value": "linked_asset"},
-                    {"label": "All assets", "value": "all_assets"},
-                ],
+                    {"label": "From Current asset", "value": "context_asset"},
+                    {"label": "From Linked assets", "value": "linked_asset"},
+                    {"label": "From Other assets", "value": "all_assets"},
+                ] + libraries_project_items,
                 tooltip=(
                     "Asset Builder Type\n"
                     "\nBuilder type describe what template loader will look"
@@ -1314,6 +1303,10 @@ def get_load_plugin_options(self, options=None):
                     " linked to current context asset."
                     "\nLinked asset are looked in database under"
                     " field \"inputLinks\""
+                    "\nAll assets : Template loader will look for all assets"
+                    " in database."
+                    "\nLibraries assets : Template loader will look for assets"
+                    " in libraries."
                 )
             ),
             attribute_definitions.EnumDef(
@@ -1341,17 +1334,6 @@ def get_load_plugin_options(self, options=None):
                     "\nField is case sensitive."
                 )
             ),
-            attribute_definitions.EnumDef(
-                "action",
-                label="Builder Action",
-                default=options.get("action"),
-                items=actions_items,
-                tooltip=(
-                    "Builder Action"
-                    "\nUsed to do actions before or after processing"
-                    " the placeholders."
-                ),
-            ),
             attribute_definitions.TextDef(
                 "loader_args",
                 label="Loader Arguments",
@@ -1457,7 +1439,7 @@ def _get_representations(self, placeholder):
             from placeholder data.
         """
 
-        project_name = self.builder.project_name
+        self.project_name = self.builder.project_name
         current_asset_doc = self.builder.current_asset_doc
         linked_asset_docs = self.builder.linked_asset_docs
 
@@ -1486,8 +1468,9 @@ def _get_representations(self, placeholder):
                 "representation": [placeholder.data["representation"]],
                 "family": [placeholder.data["family"]],
             }
-        else:
 
+        if builder_type != "all_assets":
+            self.project_name = builder_type
             context_filters = {
                 "asset": [re.compile(placeholder.data["asset"])],
                 "subset": [re.compile(placeholder.data["subset"])],
@@ -1497,7 +1480,7 @@ def _get_representations(self, placeholder):
             }
 
         return list(get_representations(
-            project_name,
+            self.project_name,
             context_filters=context_filters
         ))
@@ -1586,7 +1569,7 @@ def populate_load_placeholder(self, placeholder, ignore_repre_ids=None):
                 placeholder, representation
            )
             self.log.info(
                 "Loading {} from {} with loader {}\n"
                 "Loader arguments used : {}".format(
                     repre_context["subset"],
                     repre_context["asset"],
@@ -1608,10 +1591,6 @@ def populate_load_placeholder(self, placeholder, ignore_repre_ids=None):
             else:
                 failed = False
                 self.load_succeed(placeholder, container)
-            self.populate_action_placeholder(
-                placeholder,
-                repre_load_contexts
-            )
 
         self.post_placeholder_process(placeholder, failed)
 
         if failed:
             self.log.debug(
                 "Placeholder cleanup skipped due to failed placeholder "
                 "population."
) return - if not placeholder.data.get("keep_placeholder", True): self.delete_placeholder(placeholder) - self.cleanup_placeholder(placeholder, failed) - - - def populate_action_placeholder(self, placeholder, repre_load_contexts): - action_name = placeholder.data["action"] - - if not action_name: - return - - actions_by_name = self.builder.get_actions_by_name() - - for context in repre_load_contexts.values(): - try: - action_with_repre_context( - actions_by_name[action_name], - context - ) - except Exception as e: - self.log.warning(f"Action {action_name} failed: {e}") def load_failed(self, placeholder, representation): if hasattr(placeholder, "load_failed"): @@ -1665,7 +1624,7 @@ def post_placeholder_process(self, placeholder, failed): pass - def delete_placeholder(self, placeholder): + def delete_placeholder(self, placeholder, failed): """Called when all item population is done.""" self.log.debug("Clean up of placeholder is not implemented.") @@ -1771,10 +1730,9 @@ def populate_create_placeholder(self, placeholder, pre_create_data=None): creator_plugin = self.builder.get_creators_by_name()[creator_name] # create subset name - context = self._builder.get_current_context() - project_name = context["project_name"] - asset_name = context["asset_name"] - task_name = context["task_name"] + project_name = legacy_io.Session["AVALON_PROJECT"] + task_name = legacy_io.Session["AVALON_TASK"] + asset_name = legacy_io.Session["AVALON_ASSET"] if legacy_create: asset_doc = get_asset_by_name( @@ -1834,17 +1792,6 @@ def populate_create_placeholder(self, placeholder, pre_create_data=None): self.post_placeholder_process(placeholder, failed) - if failed: - self.log.debug( - "Placeholder cleanup skipped due to failed placeholder " - "population." - ) - return - - if not placeholder.data.get("keep_placeholder", True): - self.delete_placeholder(placeholder) - - def create_failed(self, placeholder, creator_data): if hasattr(placeholder, "create_failed"): placeholder.create_failed(creator_data) @@ -1864,11 +1811,8 @@ def post_placeholder_process(self, placeholder, failed): representation. failed (bool): Loading of representation failed. """ - pass - def delete_placeholder(self, placeholder): - """Called when all item population is done.""" - self.log.debug("Clean up of placeholder is not implemented.") + pass def _before_instance_create(self, placeholder): """Can be overriden. 
Is called before instance is created."""
 
@@ -1924,3 +1868,13 @@ def get_errors(self):
 
     def create_failed(self, creator_data):
         self._failed_created_publish_instances.append(creator_data)
+
+
+def get_library_project_names():
+    libraries = list()
+
+    for project in get_projects(fields=["name", "data.library_project"]):
+        if project.get("data", {}).get("library_project", False):
+            libraries.append(project["name"])
+
+    return libraries
diff --git a/openpype/plugins/publish/extract_color_transcode.py b/openpype/plugins/publish/extract_color_transcode.py
index dbf1b6c8a6b..ec3625e14e9 100644
--- a/openpype/plugins/publish/extract_color_transcode.py
+++ b/openpype/plugins/publish/extract_color_transcode.py
@@ -158,7 +158,8 @@ def process(self, instance):
                 view,
                 display,
                 additional_command_args,
-                self.log
+                self.log,
+                input_args=["-i:ch=R,G,B"]
             )
 
         # cleanup temporary transcoded files
diff --git a/openpype/plugins/publish/extract_thumbnail.py b/openpype/plugins/publish/extract_thumbnail.py
index de101ac7ace..d10e9480a4b 100644
--- a/openpype/plugins/publish/extract_thumbnail.py
+++ b/openpype/plugins/publish/extract_thumbnail.py
@@ -1,16 +1,16 @@
 import os
-import subprocess
 import tempfile
 
 import pyblish.api
 from openpype.lib import (
-    get_ffmpeg_tool_args,
-    get_oiio_tool_args,
+    get_ffmpeg_tool_path,
+    get_oiio_tools_path,
     is_oiio_supported,
     run_subprocess,
     path_to_subprocess_arg,
 )
+from openpype.lib.transcoding import convert_colorspace
 
 
 class ExtractThumbnail(pyblish.api.InstancePlugin):
@@ -43,12 +43,12 @@ def process(self, instance):
 
         # Skip if instance have 'review' key in data set to 'False'
         if not self._is_review_instance(instance):
-            self.log.debug("Skipping - no review set on instance.")
+            self.log.info("Skipping - no review set on instance.")
             return
 
         # Check if already has thumbnail created
         if self._already_has_thumbnail(instance_repres):
-            self.log.debug("Thumbnail representation already present.")
+            self.log.info("Thumbnail representation already present.")
             return
 
         # skip crypto passes.
@@ -58,15 +58,15 @@ def process(self, instance):
         # representation that can be determined much earlier and
         # with better precision.
         if "crypto" in subset_name.lower():
-            self.log.debug("Skipping crypto passes.")
+            self.log.info("Skipping crypto passes.")
             return
 
         filtered_repres = self._get_filtered_repres(instance)
         if not filtered_repres:
-            self.log.info(
-                "Instance doesn't have representations that can be used "
-                "as source for thumbnail. Skipping thumbnail extraction."
-            )
+            self.log.info((
+                "Instance doesn't have representations"
+                " that can be used as source for thumbnail. Skipping"
+            ))
             return
 
         # Create temp directory for thumbnail
@@ -99,18 +99,34 @@ def process(self, instance):
             self.log.debug("Trying to convert with OIIO")
             # If the input can read by OIIO then use OIIO method for
             # conversion otherwise use ffmpeg
-            thumbnail_created = self.create_thumbnail_oiio(
-                full_input_path, full_output_path
-            )
+            colorspace_data = repre["colorspaceData"]
+            source_colorspace = colorspace_data["colorspace"]
+            config_path = colorspace_data.get("config", {}).get("path")
+            display = colorspace_data.get("display")
+            view = colorspace_data.get("view")
+            try:
+                thumbnail_created = self.create_thumbnail_oiio(
+                    full_input_path,
+                    full_output_path,
+                    config_path,
+                    source_colorspace,
+                    display,
+                    view
+                )
+            except Exception as e:
+                self.log.debug((
+                    "Converting with OIIO failed "
+                    "with the following error: {}".format(e)
+                ))
 
         # Try to use FFMPEG if OIIO is not supported or for cases when
         # oiiotool isn't available
         if not thumbnail_created:
             if oiio_supported:
-                self.log.debug(
+                self.log.info((
                     "Converting with FFMPEG because input"
                     " can't be read by OIIO."
-                )
+                ))
                 thumbnail_created = self.create_thumbnail_ffmpeg(
                     full_input_path, full_output_path
@@ -165,56 +181,61 @@ def _get_filtered_repres(self, instance):
                 continue
 
             if not repre.get("files"):
-                self.log.debug((
+                self.log.info((
                     "Representation \"{}\" doesn't have files. Skipping"
                 ).format(repre["name"]))
                 continue
 
             filtered_repres.append(repre)
         return filtered_repres
 
-    def create_thumbnail_oiio(self, src_path, dst_path):
-        self.log.debug("Extracting thumbnail with OIIO: {}".format(dst_path))
-        oiio_cmd = get_oiio_tool_args(
-            "oiiotool",
-            "-a", src_path,
-            "-o", dst_path
+    def create_thumbnail_oiio(
+        self,
+        src_path,
+        dst_path,
+        config_path,
+        source_colorspace,
+        display,
+        view
+    ):
+        self.log.info("Extracting thumbnail {}".format(dst_path))
+
+        convert_colorspace(
+            src_path,
+            dst_path,
+            config_path,
+            source_colorspace,
+            view=view,
+            display=display,
+            input_args=["-i:ch=R,G,B"]
        )
-        self.log.debug("running: {}".format(" ".join(oiio_cmd)))
-        try:
-            run_subprocess(oiio_cmd, logger=self.log)
-            return True
-        except Exception:
-            self.log.warning(
-                "Failed to create thumbnail using oiiotool",
-                exc_info=True
-            )
-            return False
+
+        return dst_path
 
     def create_thumbnail_ffmpeg(self, src_path, dst_path):
-        self.log.debug("Extracting thumbnail with FFMPEG: {}".format(dst_path))
+        self.log.info("Outputting thumbnail {}".format(dst_path))
 
-        ffmpeg_path_args = get_ffmpeg_tool_args("ffmpeg")
+        ffmpeg_path = get_ffmpeg_tool_path("ffmpeg")
        ffmpeg_args = self.ffmpeg_args or {}
 
-        jpeg_items = [
-            subprocess.list2cmdline(ffmpeg_path_args)
-        ]
+        jpeg_items = []
+        jpeg_items.append(path_to_subprocess_arg(ffmpeg_path))
+        # override file if already exists
+        jpeg_items.append("-y")
         # flag for large file sizes
         max_int = 2147483647
-        jpeg_items.extend([
-            "-y",
-            "-analyzeduration", str(max_int),
-            "-probesize", str(max_int),
-        ])
+        jpeg_items.append("-analyzeduration {}".format(max_int))
+        jpeg_items.append("-probesize {}".format(max_int))
         # use same input args like with mov
         jpeg_items.extend(ffmpeg_args.get("input") or [])
         # input file
-        jpeg_items.extend(["-i", path_to_subprocess_arg(src_path)])
+        jpeg_items.append("-i {}".format(
+            path_to_subprocess_arg(src_path)
+        ))
         # output arguments from presets
         jpeg_items.extend(ffmpeg_args.get("output") or [])
         # we just want one frame from movie files
-        jpeg_items.extend(["-vframes", "1"])
+        jpeg_items.append("-vframes 1")
         # output file
         jpeg_items.append(path_to_subprocess_arg(dst_path))
 
         subprocess_command = " ".join(jpeg_items)
@@ -225,7 +246,7 @@ def create_thumbnail_ffmpeg(self, src_path, dst_path):
             return True
         except Exception:
             self.log.warning(
                 "Failed to create thumbnail using ffmpeg",
                 exc_info=True
             )
             return False
diff --git a/openpype/plugins/publish/extract_thumbnail_from_source.py b/openpype/plugins/publish/extract_thumbnail_from_source.py
index 401a5d615da..4646f0494dd 100644
--- a/openpype/plugins/publish/extract_thumbnail_from_source.py
+++ b/openpype/plugins/publish/extract_thumbnail_from_source.py
@@ -107,9 +107,15 @@ def _create_thumbnail(self, context, thumbnail_source):
         self.log.debug("Trying to convert with OIIO")
         # If the input can read by OIIO then use OIIO method for
         # conversion otherwise use ffmpeg
-        thumbnail_created = self.create_thumbnail_oiio(
-            thumbnail_source, full_output_path
-        )
+        try:
+            thumbnail_created = self.create_thumbnail_oiio(
+                thumbnail_source, full_output_path
+            )
+        except Exception as e:
+            self.log.debug((
+                "Converting with OIIO failed "
+                "with the following error: {}".format(e)
+            ))
 
         # Try to use FFMPEG if OIIO is not supported or for cases when
         # oiiotool isn't available
diff --git a/openpype/plugins/publish/integrate.py b/openpype/plugins/publish/integrate.py
index 7e48155b9e4..98867abbcbe 100644
--- a/openpype/plugins/publish/integrate.py
+++ b/openpype/plugins/publish/integrate.py
@@ -278,7 +278,8 @@ def register(self, instance, file_transactions, filtered_repres):
 
         for src, dst in prepared["transfers"]:
             # todo: add support for hardlink transfers
-            file_transactions.add(src, dst)
+            file_transaction_mode = self.get_file_transaction_mode(
+                instance, src)
+            file_transactions.add(src, dst, mode=file_transaction_mode)
 
         prepared_representations.append(prepared)
 
@@ -290,7 +291,8 @@ def register(self, instance, file_transactions, filtered_repres):
 
         file_copy_modes = [
             ("transfers", FileTransaction.MODE_COPY),
-            ("hardlinks", FileTransaction.MODE_HARDLINK)
+            ("hardlinks", FileTransaction.MODE_HARDLINK),
+            ("symlinks", FileTransaction.MODE_SYMLINK)
         ]
         for files_type, copy_mode in file_copy_modes:
             for src, dst in instance.data.get(files_type, []):
@@ -400,6 +402,29 @@ def register(self, instance, file_transactions, filtered_repres):
                 )
             )
 
+    @staticmethod
+    def get_file_transaction_mode(instance, src):
+        import re
+
+        # symlink mode is enabled per shot clip by the Hiero creator
+        is_symlink_mode_enabled = False
+        hierarchy_data = instance.data.get("hierarchyData")
+        if hierarchy_data:
+            is_symlink_mode_enabled = (
+                hierarchy_data.get("symlink") == "True")
+
+        if not is_symlink_mode_enabled:
+            return FileTransaction.MODE_COPY
+
+        # symlink only the files matching the configured regex pattern
+        publish_settings = instance.context.data[
+            "project_settings"]["global"]["tools"]["publish"]
+        pattern = publish_settings["symlink"]["file_regex_pattern"]
+        if not pattern or re.match(pattern, src):
+            return FileTransaction.MODE_SYMLINK
+
+        return FileTransaction.MODE_COPY
+
     def prepare_subset(self, instance, op_session, project_name):
         asset_doc = instance.data["assetEntity"]
         subset_name = instance.data["subset"]
diff --git a/openpype/settings/defaults/project_settings/deadline.json b/openpype/settings/defaults/project_settings/deadline.json
index 1b8c8397d76..586ebf0ccdf 100644
--- a/openpype/settings/defaults/project_settings/deadline.json
+++ b/openpype/settings/defaults/project_settings/deadline.json
@@ -1,5 +1,7 @@
 {
     "deadline_servers": [],
+    "deadline_batch_name": 
"{asset}_{task[name]}_{version}_{subversion}.{ext}", + "deadline_job_name": "{asset}_{task[name]}_{version}_{subversion}.{ext} - {inst_name}", "publish": { "CollectDefaultDeadlineServer": { "pass_mongo_url": true diff --git a/openpype/settings/defaults/project_settings/global.json b/openpype/settings/defaults/project_settings/global.json index 06a595d1c50..542f8a32ba6 100644 --- a/openpype/settings/defaults/project_settings/global.json +++ b/openpype/settings/defaults/project_settings/global.json @@ -561,7 +561,10 @@ "template_name": "simpleUnrealTextureHero" } ], - "custom_staging_dir_profiles": [] + "custom_staging_dir_profiles": [], + "symlink": { + "file_regex_pattern": "^[^\\/\\\\]*[\\/\\\\]prod[\\/\\\\].*$" + } } }, "project_folder_structure": "{\"__project_root__\": {\"prod\": {}, \"resources\": {\"footage\": {\"plates\": {}, \"offline\": {}}, \"audio\": {}, \"art_dept\": {}}, \"editorial\": {}, \"assets\": {\"characters\": {}, \"locations\": {}}, \"shots\": {}}}", diff --git a/openpype/settings/defaults/project_settings/hiero.json b/openpype/settings/defaults/project_settings/hiero.json index 9c83733b096..0c5048d19fd 100644 --- a/openpype/settings/defaults/project_settings/hiero.json +++ b/openpype/settings/defaults/project_settings/hiero.json @@ -41,6 +41,7 @@ "sequence": "sq01", "track": "{_track_}", "shot": "sh###", + "symlink": false, "vSyncOn": false, "workfileFrameStart": 1001, "handleStart": 10, diff --git a/openpype/settings/defaults/project_settings/maya.json b/openpype/settings/defaults/project_settings/maya.json index 38f14ec022c..c6709771d30 100644 --- a/openpype/settings/defaults/project_settings/maya.json +++ b/openpype/settings/defaults/project_settings/maya.json @@ -457,6 +457,9 @@ "include_handles_default": false, "per_task_type": [] }, + "update_publishable_frame_range": { + "enabled": true + }, "scriptsmenu": { "name": "OpenPype Tools", "definition": [ diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json b/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json index 6d59b5a92b4..1066e28d803 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json @@ -11,6 +11,16 @@ "label": "Deadline Webservice URLs", "multiselect": true }, + { + "type": "text", + "key": "deadline_batch_name", + "label": "Batch name" + }, + { + "type": "text", + "key": "deadline_job_name", + "label": "Job name" + }, { "type": "dict", "collapsible": true, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_hiero.json b/openpype/settings/entities/schemas/projects_schema/schema_project_hiero.json index d80edf902b2..259b42d94d6 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_hiero.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_hiero.json @@ -197,6 +197,11 @@ "type": "text", "key": "shot", "label": "{shot}" + }, + { + "type": "boolean", + "key": "symlink", + "label": "Publish using symlinks" } ] }, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json b/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json index dca955dab43..b6804533216 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json @@ -197,6 +197,24 @@ } ] }, + { + "type": "dict", + "collapsible": true, 
+ "key": "update_publishable_frame_range", + "label": "Update publishable instances on Reset Frame Range", + "checkbox_key": "enabled", + "children": [ + { + "type": "label", + "label": "If enabled, the frame range and the handles of all the publishable instances will be updated when using the 'Reset Frame Range' functionality" + }, + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] + }, { "type": "dict", "key": "include_handles", diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_tools.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_tools.json index 23fc7c9351b..ea886662b4c 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_tools.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_tools.json @@ -469,6 +469,20 @@ } ] } + }, + { + "type": "dict", + "collapsible": true, + "key": "symlink", + "label": "Symlink", + "is_group": true, + "children": [ + { + "type": "text", + "key": "file_regex_pattern", + "label": "File Regex Pattern" + } + ] } ] } diff --git a/openpype/tools/workfiles/save_as_dialog.py b/openpype/tools/workfiles/save_as_dialog.py index 7052eaed067..5b6580bd3f1 100644 --- a/openpype/tools/workfiles/save_as_dialog.py +++ b/openpype/tools/workfiles/save_as_dialog.py @@ -9,6 +9,12 @@ registered_host, legacy_io, ) +from openpype.pipeline.context_tools import ( + get_current_task_name, + get_current_asset_name, + get_current_context, + get_global_context +) from openpype.pipeline.workfile import get_last_workfile_with_version from openpype.pipeline.template_data import get_template_data_with_names from openpype.tools.utils import PlaceholderLineEdit @@ -243,7 +249,22 @@ def __init__( # Preview widget preview_label = QtWidgets.QLabel("Preview filename", inputs_widget) - + current_task_name = get_current_task_name() + target_task_name = self.data.get("task").get("name") + current_asset_name = get_current_asset_name() + target_asset_name = self.data.get("asset") + task_warning_label = QtWidgets.QLabel( + "Warning: You are saving to a different task " + "than the current one. " + "Current task: {}. Target task: {}" + "".format(current_task_name, target_task_name) + ) + asset_warning_label = QtWidgets.QLabel( + "Warning: You are saving to a different asset " + "than the current one. " + "Current asset: {}. 
Target asset: {}" + "".format(current_asset_name, target_asset_name) + ) # Subversion input subversion = SubversionLineEdit(inputs_widget) subversion.set_placeholder("Will be part of filename.") @@ -290,6 +311,10 @@ def __init__( subversion.setVisible(False) inputs_layout.addRow("Extension:", ext_combo) inputs_layout.addRow("Preview:", preview_label) + if current_asset_name != target_asset_name: + inputs_layout.addRow(asset_warning_label) + if current_task_name != target_task_name: + inputs_layout.addRow(task_warning_label) # Build layout main_layout = QtWidgets.QVBoxLayout(self) @@ -324,6 +349,8 @@ def __init__( self.last_version_check = last_version_check self.preview_label = preview_label + self.task_warning_label = task_warning_label + self.asset_warning_label = asset_warning_label self.subversion = subversion self.ext_combo = ext_combo self._ext_delegate = ext_delegate diff --git a/openpype/version.py b/openpype/version.py index c593f0f71f3..79d663c72d2 100644 --- a/openpype/version.py +++ b/openpype/version.py @@ -1,3 +1,3 @@ # -*- coding: utf-8 -*- """Package declaring Pype version.""" -__version__ = "3.16.7" +__version__ = "3.15.12-quad.3.13" diff --git a/website/docs/assets/deadline_batch_name.png b/website/docs/assets/deadline_batch_name.png new file mode 100644 index 00000000000..7928237d703 Binary files /dev/null and b/website/docs/assets/deadline_batch_name.png differ diff --git a/website/docs/module_deadline.md b/website/docs/module_deadline.md index bca2a839365..7ab74d9058e 100644 --- a/website/docs/module_deadline.md +++ b/website/docs/module_deadline.md @@ -25,6 +25,33 @@ For [AWS Thinkbox Deadline](https://www.awsthinkbox.com/deadline) support you ne Multiple different DL webservice could be configured. First set them in point 4., then they could be configured per project in `project_settings/deadline/deadline_servers`. Only single webservice could be a target of publish though. +You also can set the batch name you want in the settings: +![Deadline Batch Name](assets/deadline_batch_name.png) + +### Available template keys +Here's the keys you can use for your batch name + +
+
+
+| Context key | Description |
+| --- | --- |
+| `project[name]` | Project's full name |
+| `project[code]` | Project's code |
+| `asset` | Name of asset or shot |
+| `task[name]` | Name of task |
+| `task[type]` | Type of task |
+| `task[short]` | Short name of task type (e.g. 'Modeling' > 'mdl') |
+| `version` | Version number |
+| `family` | Main family name |
+| `ext` | File extension |
+| `subversion` | File name's subversion |
+| `comment` | Publish comment of the instance, if any |
+
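+To illustrate how these keys resolve, here is the default job name template shipped with the settings:
+
+```
+{asset}_{task[name]}_{version}_{subversion}.{ext} - {inst_name}
+```
+
+For a hypothetical publish this could produce a job named something like `sh010_compositing_v003_fx.ma - renderMain` (the actual values depend entirely on your project and instance). Any key that cannot be resolved from the publish context is kept as `{key}` in the result, double underscores left behind by empty keys are collapsed into single ones, and a trailing underscore is stripped.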
+
+ ## Configuration