diff --git a/client/ayon_core/plugins/publish/integrate_hero_version.py b/client/ayon_core/plugins/publish/integrate_hero_version.py index 8c36719b77..4fb8b886a9 100644 --- a/client/ayon_core/plugins/publish/integrate_hero_version.py +++ b/client/ayon_core/plugins/publish/integrate_hero_version.py @@ -87,7 +87,9 @@ class IntegrateHeroVersion( ] # QUESTION/TODO this process should happen on server if crashed due to # permissions error on files (files were used or user didn't have perms) - # *but all other plugins must be sucessfully completed + # *but all other plugins must be successfully completed + + use_hardlinks = False def process(self, instance): if not self.is_active(instance.data): @@ -617,24 +619,32 @@ def copy_file(self, src_path, dst_path): self.log.debug("Folder already exists: \"{}\"".format(dirname)) + if self.use_hardlinks: + # First try hardlink and copy if paths are cross drive + self.log.debug("Hardlinking file \"{}\" to \"{}\"".format( + src_path, dst_path + )) + try: + create_hard_link(src_path, dst_path) + # Return when successful + return + + except OSError as exc: + # re-raise exception if different than + # EXDEV - cross drive path + # EINVAL - wrong format, must be NTFS + self.log.debug( + "Hardlink failed with errno:'{}'".format(exc.errno)) + if exc.errno not in [errno.EXDEV, errno.EINVAL]: + raise + + self.log.debug( + "Hardlinking failed, falling back to regular copy...") + self.log.debug("Copying file \"{}\" to \"{}\"".format( src_path, dst_path )) - # First try hardlink and copy if paths are cross drive - try: - create_hard_link(src_path, dst_path) - # Return when successful - return - - except OSError as exc: - # re-raise exception if different than - # EXDEV - cross drive path - # EINVAL - wrong format, must be NTFS - self.log.debug("Hardlink failed with errno:'{}'".format(exc.errno)) - if exc.errno not in [errno.EXDEV, errno.EINVAL]: - raise - shutil.copy(src_path, dst_path) def version_from_representations(self, project_name, repres): diff --git a/server/settings/publish_plugins.py b/server/settings/publish_plugins.py index b37be1afe6..1b3d382f01 100644 --- a/server/settings/publish_plugins.py +++ b/server/settings/publish_plugins.py @@ -743,6 +743,14 @@ class IntegrateHeroVersionModel(BaseSettingsModel): optional: bool = SettingsField(False, title="Optional") active: bool = SettingsField(True, title="Active") families: list[str] = SettingsField(default_factory=list, title="Families") + use_hardlinks: bool = SettingsField( + False, title="Use Hardlinks", + description="When enabled first try to make a hardlink of the version " + "instead of a copy. 
This helps reduce disk usage, but may " + "create issues.\nFor example there are known issues on " + "Windows being unable to delete any of the hardlinks if " + "any of the links is in use creating issues with updating " + "hero versions.") class CleanUpModel(BaseSettingsModel): @@ -1136,7 +1144,8 @@ class PublishPuginsModel(BaseSettingsModel): "layout", "mayaScene", "simpleUnrealTexture" - ] + ], + "use_hardlinks": False }, "CleanUp": { "paterns": [], diff --git a/server_addon/celaction/client/ayon_celaction/__init__.py b/server_addon/celaction/client/ayon_celaction/__init__.py deleted file mode 100644 index 0df0224125..0000000000 --- a/server_addon/celaction/client/ayon_celaction/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from .version import __version__ -from .addon import ( - CELACTION_ROOT_DIR, - CelactionAddon, -) - - -__all__ = ( - "__version__", - - "CELACTION_ROOT_DIR", - "CelactionAddon", -) diff --git a/server_addon/celaction/client/ayon_celaction/addon.py b/server_addon/celaction/client/ayon_celaction/addon.py deleted file mode 100644 index ad04a54088..0000000000 --- a/server_addon/celaction/client/ayon_celaction/addon.py +++ /dev/null @@ -1,31 +0,0 @@ -import os -from ayon_core.addon import AYONAddon, IHostAddon - -from .version import __version__ - -CELACTION_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) - - -class CelactionAddon(AYONAddon, IHostAddon): - name = "celaction" - version = __version__ - host_name = "celaction" - - def get_launch_hook_paths(self, app): - if app.host_name != self.host_name: - return [] - return [ - os.path.join(CELACTION_ROOT_DIR, "hooks") - ] - - def add_implementation_envs(self, env, _app): - # Set default values if are not already set via settings - defaults = { - "LOGLEVEL": "DEBUG" - } - for key, value in defaults.items(): - if not env.get(key): - env[key] = value - - def get_workfile_extensions(self): - return [".scn"] diff --git a/server_addon/celaction/client/ayon_celaction/hooks/pre_celaction_setup.py b/server_addon/celaction/client/ayon_celaction/hooks/pre_celaction_setup.py deleted file mode 100644 index 52622d43b8..0000000000 --- a/server_addon/celaction/client/ayon_celaction/hooks/pre_celaction_setup.py +++ /dev/null @@ -1,152 +0,0 @@ -import os -import shutil -import winreg -import subprocess -from ayon_core.lib import get_ayon_launcher_args -from ayon_applications import PreLaunchHook, LaunchTypes -from ayon_celaction import CELACTION_ROOT_DIR - - -class CelactionPrelaunchHook(PreLaunchHook): - """Bootstrap celacion with AYON""" - app_groups = {"celaction"} - platforms = {"windows"} - launch_types = {LaunchTypes.local} - - def execute(self): - folder_attributes = self.data["folder_entity"]["attrib"] - width = folder_attributes["resolutionWidth"] - height = folder_attributes["resolutionHeight"] - - # Add workfile path to launch arguments - workfile_path = self.workfile_path() - if workfile_path: - self.launch_context.launch_args.append(workfile_path) - - # setting output parameters - path_user_settings = "\\".join([ - "Software", "CelAction", "CelAction2D", "User Settings" - ]) - winreg.CreateKey(winreg.HKEY_CURRENT_USER, path_user_settings) - hKey = winreg.OpenKey( - winreg.HKEY_CURRENT_USER, path_user_settings, 0, - winreg.KEY_ALL_ACCESS - ) - - path_to_cli = os.path.join( - CELACTION_ROOT_DIR, "scripts", "publish_cli.py" - ) - subprocess_args = get_ayon_launcher_args("run", path_to_cli) - executable = subprocess_args.pop(0) - workfile_settings = self.get_workfile_settings() - - winreg.SetValueEx( - hKey, - 
"SubmitAppTitle", - 0, - winreg.REG_SZ, - executable - ) - - # add required arguments for workfile path - parameters = subprocess_args + [ - "--currentFile", "*SCENE*" - ] - - # Add custom parameters from workfile settings - if "render_chunk" in workfile_settings["submission_overrides"]: - parameters += [ - "--chunk", "*CHUNK*" - ] - if "resolution" in workfile_settings["submission_overrides"]: - parameters += [ - "--resolutionWidth", "*X*", - "--resolutionHeight", "*Y*" - ] - if "frame_range" in workfile_settings["submission_overrides"]: - parameters += [ - "--frameStart", "*START*", - "--frameEnd", "*END*" - ] - - winreg.SetValueEx( - hKey, "SubmitParametersTitle", 0, winreg.REG_SZ, - subprocess.list2cmdline(parameters) - ) - - self.log.debug(f"__ parameters: \"{parameters}\"") - - # setting resolution parameters - path_submit = "\\".join([ - path_user_settings, "Dialogs", "SubmitOutput" - ]) - winreg.CreateKey(winreg.HKEY_CURRENT_USER, path_submit) - hKey = winreg.OpenKey( - winreg.HKEY_CURRENT_USER, path_submit, 0, - winreg.KEY_ALL_ACCESS - ) - winreg.SetValueEx(hKey, "SaveScene", 0, winreg.REG_DWORD, 1) - winreg.SetValueEx(hKey, "CustomX", 0, winreg.REG_DWORD, width) - winreg.SetValueEx(hKey, "CustomY", 0, winreg.REG_DWORD, height) - - # making sure message dialogs don't appear when overwriting - path_overwrite_scene = "\\".join([ - path_user_settings, "Messages", "OverwriteScene" - ]) - winreg.CreateKey(winreg.HKEY_CURRENT_USER, path_overwrite_scene) - hKey = winreg.OpenKey( - winreg.HKEY_CURRENT_USER, path_overwrite_scene, 0, - winreg.KEY_ALL_ACCESS - ) - winreg.SetValueEx(hKey, "Result", 0, winreg.REG_DWORD, 6) - winreg.SetValueEx(hKey, "Valid", 0, winreg.REG_DWORD, 1) - - # set scane as not saved - path_scene_saved = "\\".join([ - path_user_settings, "Messages", "SceneSaved" - ]) - winreg.CreateKey(winreg.HKEY_CURRENT_USER, path_scene_saved) - hKey = winreg.OpenKey( - winreg.HKEY_CURRENT_USER, path_scene_saved, 0, - winreg.KEY_ALL_ACCESS - ) - winreg.SetValueEx(hKey, "Result", 0, winreg.REG_DWORD, 1) - winreg.SetValueEx(hKey, "Valid", 0, winreg.REG_DWORD, 1) - - def workfile_path(self): - workfile_path = self.data["last_workfile_path"] - - # copy workfile from template if doesn't exist any on path - if not os.path.exists(workfile_path): - # TODO add ability to set different template workfile path via - # settings - template_path = os.path.join( - CELACTION_ROOT_DIR, - "resources", - "celaction_template_scene.scn" - ) - - if not os.path.exists(template_path): - self.log.warning( - "Couldn't find workfile template file in {}".format( - template_path - ) - ) - return - - self.log.info( - f"Creating workfile from template: \"{template_path}\"" - ) - - # Copy template workfile to new destinantion - shutil.copy2( - os.path.normpath(template_path), - os.path.normpath(workfile_path) - ) - - self.log.info(f"Workfile to open: \"{workfile_path}\"") - - return workfile_path - - def get_workfile_settings(self): - return self.data["project_settings"]["celaction"]["workfile"] diff --git a/server_addon/celaction/client/ayon_celaction/plugins/publish/collect_celaction_cli_kwargs.py b/server_addon/celaction/client/ayon_celaction/plugins/publish/collect_celaction_cli_kwargs.py deleted file mode 100644 index 1820569918..0000000000 --- a/server_addon/celaction/client/ayon_celaction/plugins/publish/collect_celaction_cli_kwargs.py +++ /dev/null @@ -1,60 +0,0 @@ -import pyblish.api -import sys -from pprint import pformat - - -class CollectCelactionCliKwargs(pyblish.api.ContextPlugin): - """ Collects all 
keyword arguments passed from the terminal """ - - label = "Collect Celaction Cli Kwargs" - order = pyblish.api.CollectorOrder - 0.1 - - def process(self, context): - args = list(sys.argv[1:]) - self.log.info(str(args)) - missing_kwargs = [] - passing_kwargs = {} - for key in ( - "chunk", - "frameStart", - "frameEnd", - "resolutionWidth", - "resolutionHeight", - "currentFile", - ): - arg_key = f"--{key}" - if arg_key not in args: - missing_kwargs.append(key) - continue - arg_idx = args.index(arg_key) - args.pop(arg_idx) - if key != "currentFile": - value = args.pop(arg_idx) - else: - path_parts = [] - while arg_idx < len(args): - path_parts.append(args.pop(arg_idx)) - value = " ".join(path_parts).strip('"') - - passing_kwargs[key] = value - - if missing_kwargs: - self.log.debug("Missing arguments {}".format( - ", ".join( - [f'"{key}"' for key in missing_kwargs] - ) - )) - - self.log.info("Storing kwargs ...") - self.log.debug("_ passing_kwargs: {}".format(pformat(passing_kwargs))) - - # set kwargs to context data - context.set_data("passingKwargs", passing_kwargs) - - # get kwargs onto context data as keys with values - for k, v in passing_kwargs.items(): - self.log.info(f"Setting `{k}` to instance.data with value: `{v}`") - if k in ["frameStart", "frameEnd"]: - context.data[k] = passing_kwargs[k] = int(v) - else: - context.data[k] = v diff --git a/server_addon/celaction/client/ayon_celaction/plugins/publish/collect_celaction_instances.py b/server_addon/celaction/client/ayon_celaction/plugins/publish/collect_celaction_instances.py deleted file mode 100644 index 7c22201e3e..0000000000 --- a/server_addon/celaction/client/ayon_celaction/plugins/publish/collect_celaction_instances.py +++ /dev/null @@ -1,96 +0,0 @@ -import os -import pyblish.api - - -class CollectCelactionInstances(pyblish.api.ContextPlugin): - """ Adds the celaction render instances """ - - label = "Collect Celaction Instances" - order = pyblish.api.CollectorOrder + 0.1 - - def process(self, context): - task = context.data["task"] - current_file = context.data["currentFile"] - staging_dir = os.path.dirname(current_file) - scene_file = os.path.basename(current_file) - version = context.data["version"] - - folder_entity = context.data["folderEntity"] - - folder_attributes = folder_entity["attrib"] - - shared_instance_data = { - "folderPath": folder_entity["path"], - "frameStart": folder_attributes["frameStart"], - "frameEnd": folder_attributes["frameEnd"], - "handleStart": folder_attributes["handleStart"], - "handleEnd": folder_attributes["handleEnd"], - "fps": folder_attributes["fps"], - "resolutionWidth": folder_attributes["resolutionWidth"], - "resolutionHeight": folder_attributes["resolutionHeight"], - "pixelAspect": 1, - "step": 1, - "version": version - } - - celaction_kwargs = context.data.get( - "passingKwargs", {}) - - if celaction_kwargs: - shared_instance_data.update(celaction_kwargs) - - # workfile instance - product_type = "workfile" - product_name = product_type + task.capitalize() - # Create instance - instance = context.create_instance(product_name) - - # creating instance data - instance.data.update({ - "label": scene_file, - "productName": product_name, - "productType": product_type, - "family": product_type, - "families": [product_type], - "representations": [] - }) - - # adding basic script data - instance.data.update(shared_instance_data) - - # creating representation - representation = { - 'name': 'scn', - 'ext': 'scn', - 'files': scene_file, - "stagingDir": staging_dir, - } - - 
instance.data["representations"].append(representation) - - self.log.info('Publishing Celaction workfile') - - # render instance - product_name = f"render{task}Main" - product_type = "render.farm" - instance = context.create_instance(name=product_name) - # getting instance state - instance.data["publish"] = True - - # add folderEntity data into instance - instance.data.update({ - "label": "{} - farm".format(product_name), - "productType": product_type, - "family": product_type, - "families": [product_type], - "productName": product_name - }) - - # adding basic script data - instance.data.update(shared_instance_data) - - self.log.info('Publishing Celaction render instance') - self.log.debug(f"Instance data: `{instance.data}`") - - for i in context: - self.log.debug(f"{i.data['families']}") diff --git a/server_addon/celaction/client/ayon_celaction/plugins/publish/collect_render_path.py b/server_addon/celaction/client/ayon_celaction/plugins/publish/collect_render_path.py deleted file mode 100644 index 3bcd1c69b3..0000000000 --- a/server_addon/celaction/client/ayon_celaction/plugins/publish/collect_render_path.py +++ /dev/null @@ -1,65 +0,0 @@ -import os -import copy -import pyblish.api - - -class CollectRenderPath(pyblish.api.InstancePlugin): - """Generate file and directory path where rendered images will be""" - - label = "Collect Render Path" - order = pyblish.api.CollectorOrder + 0.495 - families = ["render.farm"] - - settings_category = "celaction" - - # Presets - output_extension = "png" - anatomy_template_key_render_files = None - anatomy_template_key_metadata = None - - def process(self, instance): - anatomy = instance.context.data["anatomy"] - anatomy_data = copy.deepcopy(instance.data["anatomyData"]) - padding = anatomy.templates_obj.frame_padding - product_type = "render" - anatomy_data.update({ - "frame": f"%0{padding}d", - "family": product_type, - "representation": self.output_extension, - "ext": self.output_extension - }) - anatomy_data["product"]["type"] = product_type - - # get anatomy rendering keys - r_anatomy_key = self.anatomy_template_key_render_files - m_anatomy_key = self.anatomy_template_key_metadata - - # get folder and path for rendering images from celaction - r_template_item = anatomy.get_template_item("publish", r_anatomy_key) - render_dir = r_template_item["directory"].format_strict(anatomy_data) - render_path = r_template_item["path"].format_strict(anatomy_data) - self.log.debug("__ render_path: `{}`".format(render_path)) - - # create dir if it doesn't exists - try: - if not os.path.isdir(render_dir): - os.makedirs(render_dir, exist_ok=True) - except OSError: - # directory is not available - self.log.warning("Path is unreachable: `{}`".format(render_dir)) - - # add rendering path to instance data - instance.data["path"] = render_path - - # get anatomy for published renders folder path - m_template_item = anatomy.get_template_item( - "publish", m_anatomy_key, default=None - ) - if m_template_item is not None: - metadata_path = m_template_item["directory"].format_strict( - anatomy_data - ) - instance.data["publishRenderMetadataFolder"] = metadata_path - self.log.info("Metadata render path: `{}`".format(metadata_path)) - - self.log.info(f"Render output path set to: `{render_path}`") diff --git a/server_addon/celaction/client/ayon_celaction/plugins/publish/integrate_version_up.py b/server_addon/celaction/client/ayon_celaction/plugins/publish/integrate_version_up.py deleted file mode 100644 index c165b0c871..0000000000 --- 
a/server_addon/celaction/client/ayon_celaction/plugins/publish/integrate_version_up.py +++ /dev/null @@ -1,22 +0,0 @@ -import shutil - -import pyblish.api - -from ayon_core.lib import version_up - - -class VersionUpScene(pyblish.api.ContextPlugin): - order = pyblish.api.IntegratorOrder + 0.5 - label = 'Version Up Scene' - families = ['workfile'] - optional = True - active = True - - def process(self, context): - current_file = context.data.get('currentFile') - v_up = version_up(current_file) - self.log.debug('Current file is: {}'.format(current_file)) - self.log.debug('Version up: {}'.format(v_up)) - - shutil.copy2(current_file, v_up) - self.log.info('Scene saved into new version: {}'.format(v_up)) diff --git a/server_addon/celaction/client/ayon_celaction/resources/celaction_template_scene.scn b/server_addon/celaction/client/ayon_celaction/resources/celaction_template_scene.scn deleted file mode 100644 index 54e4497a31..0000000000 Binary files a/server_addon/celaction/client/ayon_celaction/resources/celaction_template_scene.scn and /dev/null differ diff --git a/server_addon/celaction/client/ayon_celaction/scripts/__init__.py b/server_addon/celaction/client/ayon_celaction/scripts/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/server_addon/celaction/client/ayon_celaction/scripts/publish_cli.py b/server_addon/celaction/client/ayon_celaction/scripts/publish_cli.py deleted file mode 100644 index 4e54aa253a..0000000000 --- a/server_addon/celaction/client/ayon_celaction/scripts/publish_cli.py +++ /dev/null @@ -1,36 +0,0 @@ -import os -import sys - -import pyblish.api -import pyblish.util - -from ayon_celaction import CELACTION_ROOT_DIR -from ayon_core.lib import Logger -from ayon_core.tools.utils import host_tools -from ayon_core.pipeline import install_ayon_plugins - - -log = Logger.get_logger("celaction") - -PUBLISH_HOST = "celaction" -PLUGINS_DIR = os.path.join(CELACTION_ROOT_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") - - -def main(): - # Registers global pyblish plugins - install_ayon_plugins() - - if os.path.exists(PUBLISH_PATH): - log.info(f"Registering path: {PUBLISH_PATH}") - pyblish.api.register_plugin_path(PUBLISH_PATH) - - pyblish.api.register_host(PUBLISH_HOST) - pyblish.api.register_target("local") - - return host_tools.show_publish() - - -if __name__ == "__main__": - result = main() - sys.exit(not bool(result)) diff --git a/server_addon/celaction/client/ayon_celaction/version.py b/server_addon/celaction/client/ayon_celaction/version.py deleted file mode 100644 index ceed47c3a0..0000000000 --- a/server_addon/celaction/client/ayon_celaction/version.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -"""Package declaring AYON addon 'celaction' version.""" -__version__ = "0.2.0" diff --git a/server_addon/celaction/package.py b/server_addon/celaction/package.py deleted file mode 100644 index 8b9069d019..0000000000 --- a/server_addon/celaction/package.py +++ /dev/null @@ -1,12 +0,0 @@ -name = "celaction" -title = "CelAction" -version = "0.2.0" - -client_dir = "ayon_celaction" - -ayon_required_addons = { - "core": ">0.3.2", -} -ayon_compatible_addons = { - "applications": ">=0.2.0", -} diff --git a/server_addon/celaction/server/__init__.py b/server_addon/celaction/server/__init__.py deleted file mode 100644 index e3769a4b7f..0000000000 --- a/server_addon/celaction/server/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from typing import Type - -from ayon_server.addons import BaseServerAddon - -from .settings import 
CelActionSettings, DEFAULT_VALUES - - -class CelActionAddon(BaseServerAddon): - settings_model: Type[CelActionSettings] = CelActionSettings - - async def get_default_settings(self): - settings_model_cls = self.get_settings_model() - return settings_model_cls(**DEFAULT_VALUES) diff --git a/server_addon/celaction/server/imageio.py b/server_addon/celaction/server/imageio.py deleted file mode 100644 index e0e685a244..0000000000 --- a/server_addon/celaction/server/imageio.py +++ /dev/null @@ -1,63 +0,0 @@ -from pydantic import validator -from ayon_server.settings import BaseSettingsModel, SettingsField -from ayon_server.settings.validators import ensure_unique_names - - -class ImageIOConfigModel(BaseSettingsModel): - """[DEPRECATED] Addon OCIO config settings. Please set the OCIO config - path in the Core addon profiles here - (ayon+settings://core/imageio/ocio_config_profiles). - """ - - override_global_config: bool = SettingsField( - False, - title="Override global OCIO config", - description=( - "DEPRECATED functionality. Please set the OCIO config path in the " - "Core addon profiles here (ayon+settings://core/imageio/" - "ocio_config_profiles)." - ), - ) - filepath: list[str] = SettingsField( - default_factory=list, - title="Config path", - description=( - "DEPRECATED functionality. Please set the OCIO config path in the " - "Core addon profiles here (ayon+settings://core/imageio/" - "ocio_config_profiles)." - ), - ) - - -class ImageIOFileRuleModel(BaseSettingsModel): - name: str = SettingsField("", title="Rule name") - pattern: str = SettingsField("", title="Regex pattern") - colorspace: str = SettingsField("", title="Colorspace name") - ext: str = SettingsField("", title="File extension") - - -class ImageIOFileRulesModel(BaseSettingsModel): - activate_host_rules: bool = SettingsField(False) - rules: list[ImageIOFileRuleModel] = SettingsField( - default_factory=list, - title="Rules" - ) - - @validator("rules") - def validate_unique_outputs(cls, value): - ensure_unique_names(value) - return value - - -class CelActionImageIOModel(BaseSettingsModel): - activate_host_color_management: bool = SettingsField( - True, title="Enable Color Management" - ) - ocio_config: ImageIOConfigModel = SettingsField( - default_factory=ImageIOConfigModel, - title="OCIO config" - ) - file_rules: ImageIOFileRulesModel = SettingsField( - default_factory=ImageIOFileRulesModel, - title="File Rules" - ) diff --git a/server_addon/celaction/server/settings.py b/server_addon/celaction/server/settings.py deleted file mode 100644 index afa9773477..0000000000 --- a/server_addon/celaction/server/settings.py +++ /dev/null @@ -1,91 +0,0 @@ -from ayon_server.settings import BaseSettingsModel, SettingsField -from .imageio import CelActionImageIOModel - - -class CollectRenderPathModel(BaseSettingsModel): - output_extension: str = SettingsField( - "", - title="Output render file extension" - ) - anatomy_template_key_render_files: str = SettingsField( - "", - title="Anatomy template key: render files" - ) - anatomy_template_key_metadata: str = SettingsField( - "", - title="Anatomy template key: metadata job file" - ) - - -def _workfile_submit_overrides(): - return [ - { - "value": "render_chunk", - "label": "Pass chunk size" - }, - { - "value": "frame_range", - "label": "Pass frame range" - }, - { - "value": "resolution", - "label": "Pass resolution" - } - ] - - -class WorkfileModel(BaseSettingsModel): - submission_overrides: list[str] = SettingsField( - default_factory=list, - title="Submission workfile overrides", - 
enum_resolver=_workfile_submit_overrides - ) - - -class PublishPluginsModel(BaseSettingsModel): - CollectRenderPath: CollectRenderPathModel = SettingsField( - default_factory=CollectRenderPathModel, - title="Collect Render Path" - ) - - -class CelActionSettings(BaseSettingsModel): - imageio: CelActionImageIOModel = SettingsField( - default_factory=CelActionImageIOModel, - title="Color Management (ImageIO)" - ) - workfile: WorkfileModel = SettingsField( - title="Workfile" - ) - publish: PublishPluginsModel = SettingsField( - default_factory=PublishPluginsModel, - title="Publish plugins", - ) - - -DEFAULT_VALUES = { - "imageio": { - "ocio_config": { - "enabled": False, - "filepath": [] - }, - "file_rules": { - "enabled": False, - "rules": [] - } - }, - "workfile": { - "submission_overrides": [ - "render_chunk", - "frame_range", - "resolution" - ] - }, - "publish": { - "CollectRenderPath": { - "output_extension": "png", - "anatomy_template_key_render_files": "render", - "anatomy_template_key_metadata": "render" - } - } -} diff --git a/server_addon/clockify/client/ayon_clockify/__init__.py b/server_addon/clockify/client/ayon_clockify/__init__.py deleted file mode 100644 index 75fb87494e..0000000000 --- a/server_addon/clockify/client/ayon_clockify/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .addon import ClockifyAddon - -__all__ = ( - "ClockifyAddon", -) diff --git a/server_addon/clockify/client/ayon_clockify/addon.py b/server_addon/clockify/client/ayon_clockify/addon.py deleted file mode 100644 index cf35e77ce4..0000000000 --- a/server_addon/clockify/client/ayon_clockify/addon.py +++ /dev/null @@ -1,290 +0,0 @@ -import os -import threading -import time - -from ayon_core.addon import AYONAddon, ITrayAddon, IPluginPaths - -from .version import __version__ -from .constants import CLOCKIFY_FTRACK_USER_PATH, CLOCKIFY_FTRACK_SERVER_PATH - - -class ClockifyAddon(AYONAddon, ITrayAddon, IPluginPaths): - name = "clockify" - version = __version__ - - def initialize(self, studio_settings): - enabled = self.name in studio_settings - workspace_name = None - if enabled: - clockify_settings = studio_settings[self.name] - workspace_name = clockify_settings["workspace_name"] - - if enabled and workspace_name: - self.log.warning("Clockify Workspace is not set in settings.") - enabled = False - self.enabled = enabled - self.workspace_name = workspace_name - - self.timer_manager = None - self.MessageWidgetClass = None - self.message_widget = None - self._clockify_api = None - - # TimersManager attributes - # - set `timers_manager_connector` only in `tray_init` - self.timers_manager_connector = None - self._timer_manager_addon = None - - @property - def clockify_api(self): - if self._clockify_api is None: - from .clockify_api import ClockifyAPI - - self._clockify_api = ClockifyAPI(master_parent=self) - return self._clockify_api - - def get_global_environments(self): - return {"CLOCKIFY_WORKSPACE": self.workspace_name} - - def tray_init(self): - from .widgets import ClockifySettings, MessageWidget - - self.MessageWidgetClass = MessageWidget - - self.message_widget = None - self.widget_settings = ClockifySettings(self.clockify_api) - self.widget_settings_required = None - - self.thread_timer_check = None - # Bools - self.bool_thread_check_running = False - self.bool_api_key_set = False - self.bool_workspace_set = False - self.bool_timer_run = False - self.bool_api_key_set = self.clockify_api.set_api() - - # Define itself as TimersManager connector - self.timers_manager_connector = self - - def tray_start(self): - 
if self.bool_api_key_set is False: - self.show_settings() - return - - self.bool_workspace_set = self.clockify_api.workspace_id is not None - if self.bool_workspace_set is False: - return - - self.start_timer_check() - self.set_menu_visibility() - - def tray_exit(self, *_a, **_kw): - return - - def get_plugin_paths(self): - """Implementation of IPluginPaths to get plugin paths.""" - actions_path = os.path.join( - os.path.dirname(os.path.abspath(__file__)), "launcher_actions" - ) - return {"actions": [actions_path]} - - def get_ftrack_event_handler_paths(self): - """Function for ftrack addon to add ftrack event handler paths.""" - return { - "user": [CLOCKIFY_FTRACK_USER_PATH], - "server": [CLOCKIFY_FTRACK_SERVER_PATH], - } - - def clockify_timer_stopped(self): - self.bool_timer_run = False - self.timer_stopped() - - def start_timer_check(self): - self.bool_thread_check_running = True - if self.thread_timer_check is None: - self.thread_timer_check = threading.Thread( - target=self.check_running - ) - self.thread_timer_check.daemon = True - self.thread_timer_check.start() - - def stop_timer_check(self): - self.bool_thread_check_running = True - if self.thread_timer_check is not None: - self.thread_timer_check.join() - self.thread_timer_check = None - - def check_running(self): - while self.bool_thread_check_running is True: - bool_timer_run = False - if self.clockify_api.get_in_progress() is not None: - bool_timer_run = True - - if self.bool_timer_run != bool_timer_run: - if self.bool_timer_run is True: - self.clockify_timer_stopped() - elif self.bool_timer_run is False: - current_timer = self.clockify_api.get_in_progress() - if current_timer is None: - continue - current_proj_id = current_timer.get("projectId") - if not current_proj_id: - continue - - project = self.clockify_api.get_project_by_id( - current_proj_id - ) - if project and project.get("code") == 501: - continue - - project_name = project.get("name") - - current_timer_hierarchy = current_timer.get("description") - if not current_timer_hierarchy: - continue - hierarchy_items = current_timer_hierarchy.split("/") - # Each pype timer must have at least 2 items! 
- if len(hierarchy_items) < 2: - continue - - task_name = hierarchy_items[-1] - hierarchy = hierarchy_items[:-1] - - data = { - "task_name": task_name, - "hierarchy": hierarchy, - "project_name": project_name, - } - self.timer_started(data) - - self.bool_timer_run = bool_timer_run - self.set_menu_visibility() - time.sleep(5) - - def signed_in(self): - if not self.timer_manager: - return - - if not self.timer_manager.last_task: - return - - if self.timer_manager.is_running: - self.start_timer_manager(self.timer_manager.last_task) - - def on_message_widget_close(self): - self.message_widget = None - - # Definition of Tray menu - def tray_menu(self, parent_menu): - # Menu for Tray App - from qtpy import QtWidgets - - menu = QtWidgets.QMenu("Clockify", parent_menu) - menu.setProperty("submenu", "on") - - # Actions - action_show_settings = QtWidgets.QAction("Settings", menu) - action_stop_timer = QtWidgets.QAction("Stop timer", menu) - - menu.addAction(action_show_settings) - menu.addAction(action_stop_timer) - - action_show_settings.triggered.connect(self.show_settings) - action_stop_timer.triggered.connect(self.stop_timer) - - self.action_stop_timer = action_stop_timer - - self.set_menu_visibility() - - parent_menu.addMenu(menu) - - def show_settings(self): - self.widget_settings.input_api_key.setText( - self.clockify_api.get_api_key() - ) - self.widget_settings.show() - - def set_menu_visibility(self): - self.action_stop_timer.setVisible(self.bool_timer_run) - - # --- TimersManager connection methods --- - def register_timers_manager(self, timer_manager_addon): - """Store TimersManager for future use.""" - self._timer_manager_addon = timer_manager_addon - - def timer_started(self, data): - """Tell TimersManager that timer started.""" - if self._timer_manager_addon is not None: - self._timer_manager_addon.timer_started(self.id, data) - - def timer_stopped(self): - """Tell TimersManager that timer stopped.""" - if self._timer_manager_addon is not None: - self._timer_manager_addon.timer_stopped(self.id) - - def stop_timer(self): - """Called from TimersManager to stop timer.""" - self.clockify_api.finish_time_entry() - - def _verify_project_exists(self, project_name): - project_id = self.clockify_api.get_project_id(project_name) - if not project_id: - self.log.warning( - 'Project "{}" was not found in Clockify. Timer won\'t start.' - ).format(project_name) - - if not self.MessageWidgetClass: - return - - msg = ( - 'Project "{}" is not' - ' in Clockify Workspace "{}".' - "
Please inform your Project Manager." - ).format(project_name, str(self.clockify_api.workspace_name)) - - self.message_widget = self.MessageWidgetClass( - msg, "Clockify - Info Message" - ) - self.message_widget.closed.connect(self.on_message_widget_close) - self.message_widget.show() - return False - return project_id - - def start_timer(self, input_data): - """Called from TimersManager to start timer.""" - # If not api key is not entered then skip - if not self.clockify_api.get_api_key(): - return - - project_name = input_data.get("project_name") - folder_path = input_data.get("folder_path") - task_name = input_data.get("task_name") - task_type = input_data.get("task_type") - if not all((project_name, folder_path, task_name, task_type)): - return - - # Concatenate hierarchy and task to get description - description = "/".join([folder_path.lstrip("/"), task_name]) - - # Check project existence - project_id = self._verify_project_exists(project_name) - if not project_id: - return - - # Setup timer tags - if not task_type: - self.log.info("No tag information found for the timer") - - tag_ids = [] - task_tag_id = self.clockify_api.get_tag_id(task_type) - if task_tag_id is not None: - tag_ids.append(task_tag_id) - - # Start timer - self.clockify_api.start_time_entry( - description, - project_id, - tag_ids=tag_ids, - workspace_id=self.clockify_api.workspace_id, - user_id=self.clockify_api.user_id, - ) diff --git a/server_addon/clockify/client/ayon_clockify/clockify_api.py b/server_addon/clockify/client/ayon_clockify/clockify_api.py deleted file mode 100644 index 38ca6cdb66..0000000000 --- a/server_addon/clockify/client/ayon_clockify/clockify_api.py +++ /dev/null @@ -1,447 +0,0 @@ -import os -import json -import datetime - -import requests - -from ayon_core.lib.local_settings import AYONSecureRegistry -from ayon_core.lib import Logger - -from .constants import ( - CLOCKIFY_ENDPOINT, - ADMIN_PERMISSION_NAMES, -) - - -class ClockifyAPI: - log = Logger.get_logger(__name__) - - def __init__(self, api_key=None, master_parent=None): - self.workspace_name = None - self.master_parent = master_parent - self.api_key = api_key - self._workspace_id = None - self._user_id = None - self._secure_registry = None - - @property - def secure_registry(self): - if self._secure_registry is None: - self._secure_registry = AYONSecureRegistry("clockify") - return self._secure_registry - - @property - def headers(self): - return {"x-api-key": self.api_key} - - @property - def workspace_id(self): - return self._workspace_id - - @property - def user_id(self): - return self._user_id - - def verify_api(self): - for key, value in self.headers.items(): - if value is None or value.strip() == "": - return False - return True - - def set_api(self, api_key=None): - if api_key is None: - api_key = self.get_api_key() - - if api_key is not None and self.validate_api_key(api_key) is True: - self.api_key = api_key - self.set_workspace() - self.set_user_id() - if self.master_parent: - self.master_parent.signed_in() - return True - return False - - def validate_api_key(self, api_key): - test_headers = {"x-api-key": api_key} - action_url = "user" - response = requests.get( - CLOCKIFY_ENDPOINT + action_url, headers=test_headers - ) - if response.status_code != 200: - return False - return True - - def validate_workspace_permissions(self, workspace_id=None, user_id=None): - if user_id is None: - self.log.info("No user_id found during validation") - return False - if workspace_id is None: - workspace_id = self.workspace_id - action_url = 
f"workspaces/{workspace_id}/users?includeRoles=1" - response = requests.get( - CLOCKIFY_ENDPOINT + action_url, headers=self.headers - ) - data = response.json() - for user in data: - if user.get("id") == user_id: - roles_data = user.get("roles") - for entities in roles_data: - if entities.get("role") in ADMIN_PERMISSION_NAMES: - return True - return False - - def get_user_id(self): - action_url = "user" - response = requests.get( - CLOCKIFY_ENDPOINT + action_url, headers=self.headers - ) - result = response.json() - user_id = result.get("id", None) - - return user_id - - def set_workspace(self, name=None): - if name is None: - name = os.environ.get("CLOCKIFY_WORKSPACE", None) - self.workspace_name = name - if self.workspace_name is None: - return - try: - result = self.validate_workspace() - except Exception: - result = False - if result is not False: - self._workspace_id = result - if self.master_parent is not None: - self.master_parent.start_timer_check() - return True - return False - - def validate_workspace(self, name=None): - if name is None: - name = self.workspace_name - all_workspaces = self.get_workspaces() - if name in all_workspaces: - return all_workspaces[name] - return False - - def set_user_id(self): - try: - user_id = self.get_user_id() - except Exception: - user_id = None - if user_id is not None: - self._user_id = user_id - - def get_api_key(self): - return self.secure_registry.get_item("api_key", None) - - def save_api_key(self, api_key): - self.secure_registry.set_item("api_key", api_key) - - def get_workspaces(self): - action_url = "workspaces/" - response = requests.get( - CLOCKIFY_ENDPOINT + action_url, headers=self.headers - ) - return { - workspace["name"]: workspace["id"] for workspace in response.json() - } - - def get_projects(self, workspace_id=None): - if workspace_id is None: - workspace_id = self.workspace_id - action_url = f"workspaces/{workspace_id}/projects" - response = requests.get( - CLOCKIFY_ENDPOINT + action_url, headers=self.headers - ) - if response.status_code != 403: - result = response.json() - return {project["name"]: project["id"] for project in result} - - def get_project_by_id(self, project_id, workspace_id=None): - if workspace_id is None: - workspace_id = self.workspace_id - action_url = "workspaces/{}/projects/{}".format( - workspace_id, project_id - ) - response = requests.get( - CLOCKIFY_ENDPOINT + action_url, headers=self.headers - ) - - return response.json() - - def get_tags(self, workspace_id=None): - if workspace_id is None: - workspace_id = self.workspace_id - action_url = "workspaces/{}/tags".format(workspace_id) - response = requests.get( - CLOCKIFY_ENDPOINT + action_url, headers=self.headers - ) - - return {tag["name"]: tag["id"] for tag in response.json()} - - def get_tasks(self, project_id, workspace_id=None): - if workspace_id is None: - workspace_id = self.workspace_id - action_url = "workspaces/{}/projects/{}/tasks".format( - workspace_id, project_id - ) - response = requests.get( - CLOCKIFY_ENDPOINT + action_url, headers=self.headers - ) - - return {task["name"]: task["id"] for task in response.json()} - - def get_workspace_id(self, workspace_name): - all_workspaces = self.get_workspaces() - if workspace_name not in all_workspaces: - return None - return all_workspaces[workspace_name] - - def get_project_id(self, project_name, workspace_id=None): - if workspace_id is None: - workspace_id = self.workspace_id - all_projects = self.get_projects(workspace_id) - if project_name not in all_projects: - return None - return 
all_projects[project_name] - - def get_tag_id(self, tag_name, workspace_id=None): - if workspace_id is None: - workspace_id = self.workspace_id - all_tasks = self.get_tags(workspace_id) - if tag_name not in all_tasks: - return None - return all_tasks[tag_name] - - def get_task_id(self, task_name, project_id, workspace_id=None): - if workspace_id is None: - workspace_id = self.workspace_id - all_tasks = self.get_tasks(project_id, workspace_id) - if task_name not in all_tasks: - return None - return all_tasks[task_name] - - def get_current_time(self): - return str(datetime.datetime.utcnow().isoformat()) + "Z" - - def start_time_entry( - self, - description, - project_id, - task_id=None, - tag_ids=None, - workspace_id=None, - user_id=None, - billable=True, - ): - # Workspace - if workspace_id is None: - workspace_id = self.workspace_id - # User ID - if user_id is None: - user_id = self._user_id - - # get running timer to check if we need to start it - current_timer = self.get_in_progress() - - # Check if is currently run another times and has same values - # DO not restart the timer, if it is already running for current task - if current_timer: - current_timer_hierarchy = current_timer.get("description") - current_project_id = current_timer.get("projectId") - current_task_id = current_timer.get("taskId") - if ( - description == current_timer_hierarchy - and project_id == current_project_id - and task_id == current_task_id - ): - self.log.info( - "Timer for the current project is already running" - ) - self.bool_timer_run = True - return self.bool_timer_run - self.finish_time_entry() - - # Convert billable to strings - if billable: - billable = "true" - else: - billable = "false" - # Rest API Action - action_url = "workspaces/{}/user/{}/time-entries".format( - workspace_id, user_id - ) - start = self.get_current_time() - body = { - "start": start, - "billable": billable, - "description": description, - "projectId": project_id, - "taskId": task_id, - "tagIds": tag_ids, - } - response = requests.post( - CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body - ) - if response.status_code < 300: - return True - return False - - def _get_current_timer_values(self, response): - if response is None: - return - try: - output = response.json() - except json.decoder.JSONDecodeError: - return None - if output and isinstance(output, list): - return output[0] - return None - - def get_in_progress(self, user_id=None, workspace_id=None): - if workspace_id is None: - workspace_id = self.workspace_id - if user_id is None: - user_id = self.user_id - - action_url = ( - f"workspaces/{workspace_id}/user/" - f"{user_id}/time-entries?in-progress=1" - ) - response = requests.get( - CLOCKIFY_ENDPOINT + action_url, headers=self.headers - ) - return self._get_current_timer_values(response) - - def finish_time_entry(self, workspace_id=None, user_id=None): - if workspace_id is None: - workspace_id = self.workspace_id - if user_id is None: - user_id = self.user_id - current_timer = self.get_in_progress() - if not current_timer: - return - action_url = "workspaces/{}/user/{}/time-entries".format( - workspace_id, user_id - ) - body = {"end": self.get_current_time()} - response = requests.patch( - CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body - ) - return response.json() - - def get_time_entries(self, workspace_id=None, user_id=None, quantity=10): - if workspace_id is None: - workspace_id = self.workspace_id - if user_id is None: - user_id = self.user_id - action_url = 
"workspaces/{}/user/{}/time-entries".format( - workspace_id, user_id - ) - response = requests.get( - CLOCKIFY_ENDPOINT + action_url, headers=self.headers - ) - return response.json()[:quantity] - - def remove_time_entry(self, tid, workspace_id=None, user_id=None): - if workspace_id is None: - workspace_id = self.workspace_id - action_url = "workspaces/{}/user/{}/time-entries/{}".format( - workspace_id, user_id, tid - ) - response = requests.delete( - CLOCKIFY_ENDPOINT + action_url, headers=self.headers - ) - return response.json() - - def add_project(self, name, workspace_id=None): - if workspace_id is None: - workspace_id = self.workspace_id - action_url = "workspaces/{}/projects".format(workspace_id) - body = { - "name": name, - "clientId": "", - "isPublic": "false", - "estimate": {"estimate": 0, "type": "AUTO"}, - "color": "#f44336", - "billable": "true", - } - response = requests.post( - CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body - ) - return response.json() - - def add_workspace(self, name): - action_url = "workspaces/" - body = {"name": name} - response = requests.post( - CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body - ) - return response.json() - - def add_task(self, name, project_id, workspace_id=None): - if workspace_id is None: - workspace_id = self.workspace_id - action_url = "workspaces/{}/projects/{}/tasks".format( - workspace_id, project_id - ) - body = {"name": name, "projectId": project_id} - response = requests.post( - CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body - ) - return response.json() - - def add_tag(self, name, workspace_id=None): - if workspace_id is None: - workspace_id = self.workspace_id - action_url = "workspaces/{}/tags".format(workspace_id) - body = {"name": name} - response = requests.post( - CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body - ) - return response.json() - - def delete_project(self, project_id, workspace_id=None): - if workspace_id is None: - workspace_id = self.workspace_id - action_url = "/workspaces/{}/projects/{}".format( - workspace_id, project_id - ) - response = requests.delete( - CLOCKIFY_ENDPOINT + action_url, - headers=self.headers, - ) - return response.json() - - def convert_input( - self, entity_id, entity_name, mode="Workspace", project_id=None - ): - if entity_id is None: - error = False - error_msg = 'Missing information "{}"' - if mode.lower() == "workspace": - if entity_id is None and entity_name is None: - if self.workspace_id is not None: - entity_id = self.workspace_id - else: - error = True - else: - entity_id = self.get_workspace_id(entity_name) - else: - if entity_id is None and entity_name is None: - error = True - elif mode.lower() == "project": - entity_id = self.get_project_id(entity_name) - elif mode.lower() == "task": - entity_id = self.get_task_id( - task_name=entity_name, project_id=project_id - ) - else: - raise TypeError("Unknown type") - # Raise error - if error: - raise ValueError(error_msg.format(mode)) - - return entity_id diff --git a/server_addon/clockify/client/ayon_clockify/constants.py b/server_addon/clockify/client/ayon_clockify/constants.py deleted file mode 100644 index 4574f91be1..0000000000 --- a/server_addon/clockify/client/ayon_clockify/constants.py +++ /dev/null @@ -1,12 +0,0 @@ -import os - - -CLOCKIFY_FTRACK_SERVER_PATH = os.path.join( - os.path.dirname(os.path.abspath(__file__)), "ftrack", "server" -) -CLOCKIFY_FTRACK_USER_PATH = os.path.join( - os.path.dirname(os.path.abspath(__file__)), "ftrack", "user" -) - 
-ADMIN_PERMISSION_NAMES = ["WORKSPACE_OWN", "WORKSPACE_ADMIN"] -CLOCKIFY_ENDPOINT = "https://api.clockify.me/api/v1/" diff --git a/server_addon/clockify/client/ayon_clockify/ftrack/server/action_clockify_sync_server.py b/server_addon/clockify/client/ayon_clockify/ftrack/server/action_clockify_sync_server.py deleted file mode 100644 index ed83fed287..0000000000 --- a/server_addon/clockify/client/ayon_clockify/ftrack/server/action_clockify_sync_server.py +++ /dev/null @@ -1,146 +0,0 @@ -import os -import json - -from ayon_clockify.clockify_api import ClockifyAPI - -from ayon_ftrack.lib import ServerAction - - -class SyncClockifyServer(ServerAction): - '''Synchronise project names and task types.''' - - identifier = "clockify.sync.server" - label = "Sync To Clockify (server)" - description = "Synchronise data to Clockify workspace" - - role_list = ["Administrator", "project Manager"] - - def __init__(self, *args, **kwargs): - super(SyncClockifyServer, self).__init__(*args, **kwargs) - - workspace_name = os.environ.get("CLOCKIFY_WORKSPACE") - api_key = os.environ.get("CLOCKIFY_API_KEY") - self.clockify_api = ClockifyAPI(api_key) - self.clockify_api.set_workspace(workspace_name) - if api_key is None: - modified_key = "None" - else: - str_len = int(len(api_key) / 2) - start_replace = int(len(api_key) / 4) - modified_key = "" - for idx in range(len(api_key)): - if idx >= start_replace and idx < start_replace + str_len: - replacement = "X" - else: - replacement = api_key[idx] - modified_key += replacement - - self.log.info( - "Clockify info. Workspace: \"{}\" API key: \"{}\"".format( - str(workspace_name), str(modified_key) - ) - ) - - def discover(self, session, entities, event): - if ( - len(entities) != 1 - or entities[0].entity_type.lower() != "project" - ): - return False - return True - - def launch(self, session, entities, event): - self.clockify_api.set_api() - if self.clockify_api.workspace_id is None: - return { - "success": False, - "message": "Clockify Workspace or API key are not set!" - } - - if not self.clockify_api.validate_workspace_permissions( - self.clockify_api.workspace_id, self.clockify_api.user_id - ): - return { - "success": False, - "message": "Missing permissions for this action!" - } - - # JOB SETTINGS - user_id = event["source"]["user"]["id"] - user = session.query("User where id is " + user_id).one() - - job = session.create("Job", { - "user": user, - "status": "running", - "data": json.dumps({"description": "Sync Ftrack to Clockify"}) - }) - session.commit() - - project_entity = entities[0] - if project_entity.entity_type.lower() != "project": - project_entity = self.get_project_from_entity(project_entity) - - project_name = project_entity["full_name"] - self.log.info( - "Synchronization of project \"{}\" to clockify begins.".format( - project_name - ) - ) - task_types = ( - project_entity["project_schema"]["_task_type_schema"]["types"] - ) - task_type_names = [ - task_type["name"] for task_type in task_types - ] - try: - clockify_projects = self.clockify_api.get_projects() - if project_name not in clockify_projects: - response = self.clockify_api.add_project(project_name) - if "id" not in response: - self.log.warning( - "Project \"{}\" can't be created. Response: {}".format( - project_name, response - ) - ) - return { - "success": False, - "message": ( - "Can't create clockify project \"{}\"." - " Unexpected error." 
- ).format(project_name) - } - - clockify_workspace_tags = self.clockify_api.get_tags() - for task_type_name in task_type_names: - if task_type_name in clockify_workspace_tags: - self.log.debug( - "Task \"{}\" already exist".format(task_type_name) - ) - continue - - response = self.clockify_api.add_tag(task_type_name) - if "id" not in response: - self.log.warning( - "Task \"{}\" can't be created. Response: {}".format( - task_type_name, response - ) - ) - - job["status"] = "done" - - except Exception: - self.log.warning( - "Synchronization to clockify failed.", - exc_info=True - ) - - finally: - if job["status"] != "done": - job["status"] = "failed" - session.commit() - - return True - - -def register(session, **kw): - SyncClockifyServer(session).register() diff --git a/server_addon/clockify/client/ayon_clockify/ftrack/user/action_clockify_sync_local.py b/server_addon/clockify/client/ayon_clockify/ftrack/user/action_clockify_sync_local.py deleted file mode 100644 index 05a94e56fd..0000000000 --- a/server_addon/clockify/client/ayon_clockify/ftrack/user/action_clockify_sync_local.py +++ /dev/null @@ -1,123 +0,0 @@ -import json -from ayon_clockify.clockify_api import ClockifyAPI -from ayon_ftrack.lib import BaseAction, statics_icon - - -class SyncClockifyLocal(BaseAction): - """Synchronise project names and task types.""" - - identifier = "clockify.sync.local" - label = "Sync To Clockify" - description = "Synchronise data to Clockify workspace" - role_list = ["Administrator", "project Manager"] - icon = statics_icon("app_icons", "clockify-white.png") - - def __init__(self, *args, **kwargs): - super(SyncClockifyLocal, self).__init__(*args, **kwargs) - - self.clockify_api = ClockifyAPI() - - def discover(self, session, entities, event): - if ( - len(entities) == 1 - and entities[0].entity_type.lower() == "project" - ): - return True - return False - - def launch(self, session, entities, event): - self.clockify_api.set_api() - if self.clockify_api.workspace_id is None: - return { - "success": False, - "message": "Clockify Workspace or API key are not set!" - } - - if ( - self.clockify_api.validate_workspace_permissions( - self.clockify_api.workspace_id, self.clockify_api.user_id) - is False - ): - return { - "success": False, - "message": "Missing permissions for this action!" - } - - # JOB SETTINGS - userId = event['source']['user']['id'] - user = session.query('User where id is ' + userId).one() - - job = session.create('Job', { - 'user': user, - 'status': 'running', - 'data': json.dumps({ - 'description': 'Sync ftrack to Clockify' - }) - }) - session.commit() - - project_entity = entities[0] - if project_entity.entity_type.lower() != "project": - project_entity = self.get_project_from_entity(project_entity) - - project_name = project_entity["full_name"] - self.log.info( - "Synchronization of project \"{}\" to clockify begins.".format( - project_name - ) - ) - task_types = ( - project_entity["project_schema"]["_task_type_schema"]["types"] - ) - task_type_names = [ - task_type["name"] for task_type in task_types - ] - try: - clockify_projects = self.clockify_api.get_projects() - if project_name not in clockify_projects: - response = self.clockify_api.add_project(project_name) - if "id" not in response: - self.log.warning( - "Project \"{}\" can't be created. Response: {}".format( - project_name, response - ) - ) - return { - "success": False, - "message": ( - "Can't create clockify project \"{}\"." - " Unexpected error." 
- ).format(project_name) - } - - clockify_workspace_tags = self.clockify_api.get_tags() - for task_type_name in task_type_names: - if task_type_name in clockify_workspace_tags: - self.log.debug( - "Task \"{}\" already exist".format(task_type_name) - ) - continue - - response = self.clockify_api.add_tag(task_type_name) - if "id" not in response: - self.log.warning( - "Task \"{}\" can't be created. Response: {}".format( - task_type_name, response - ) - ) - - job["status"] = "done" - - except Exception: - pass - - finally: - if job["status"] != "done": - job["status"] = "failed" - session.commit() - - return True - - -def register(session, **kw): - SyncClockifyLocal(session).register() diff --git a/server_addon/clockify/client/ayon_clockify/launcher_actions/ClockifyStart.py b/server_addon/clockify/client/ayon_clockify/launcher_actions/ClockifyStart.py deleted file mode 100644 index d69d0371c0..0000000000 --- a/server_addon/clockify/client/ayon_clockify/launcher_actions/ClockifyStart.py +++ /dev/null @@ -1,49 +0,0 @@ -import ayon_api - -from ayon_clockify.clockify_api import ClockifyAPI - -from ayon_core.pipeline import LauncherAction - - -class ClockifyStart(LauncherAction): - name = "clockify_start_timer" - label = "Clockify - Start Timer" - icon = "app_icons/clockify.png" - order = 500 - clockify_api = ClockifyAPI() - - def is_compatible(self, selection): - """Return whether the action is compatible with the session""" - return selection.is_task_selected - - def process(self, selection, **kwargs): - self.clockify_api.set_api() - user_id = self.clockify_api.user_id - workspace_id = self.clockify_api.workspace_id - project_name = selection.project_name - folder_path = selection.folder_path - task_name = selection.task_name - description = "/".join([folder_path.lstrip("/"), task_name]) - - # fetch folder entity - folder_entity = ayon_api.get_folder_by_path(project_name, folder_path) - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) - - # get task type to fill the timer tag - task_type = task_entity["taskType"] - - project_id = self.clockify_api.get_project_id( - project_name, workspace_id - ) - tag_ids = [] - tag_name = task_type - tag_ids.append(self.clockify_api.get_tag_id(tag_name, workspace_id)) - self.clockify_api.start_time_entry( - description, - project_id, - tag_ids=tag_ids, - workspace_id=workspace_id, - user_id=user_id, - ) diff --git a/server_addon/clockify/client/ayon_clockify/launcher_actions/ClockifySync.py b/server_addon/clockify/client/ayon_clockify/launcher_actions/ClockifySync.py deleted file mode 100644 index a32f2a8082..0000000000 --- a/server_addon/clockify/client/ayon_clockify/launcher_actions/ClockifySync.py +++ /dev/null @@ -1,81 +0,0 @@ -import ayon_api - -from ayon_clockify.clockify_api import ClockifyAPI -from ayon_core.pipeline import LauncherAction - - -class ClockifyPermissionsCheckFailed(Exception): - """Timer start failed due to user permissions check. - Message should be self explanatory as traceback won't be shown. 
- """ - - pass - - -class ClockifySync(LauncherAction): - name = "sync_to_clockify" - label = "Sync to Clockify" - icon = "app_icons/clockify-white.png" - order = 500 - clockify_api = ClockifyAPI() - - def is_compatible(self, selection): - """Check if there's some projects to sync""" - if selection.is_project_selected: - return True - - try: - next(ayon_api.get_projects()) - return True - except StopIteration: - return False - - def process(self, selection, **kwargs): - self.clockify_api.set_api() - workspace_id = self.clockify_api.workspace_id - user_id = self.clockify_api.user_id - if not self.clockify_api.validate_workspace_permissions( - workspace_id, user_id - ): - raise ClockifyPermissionsCheckFailed( - "Current CLockify user is missing permissions for this action!" - ) - - if selection.is_project_selected: - projects_to_sync = [selection.project_entity] - else: - projects_to_sync = ayon_api.get_projects() - - projects_info = { - project["name"]: { - task_type["name"] - for task_type in project["taskTypes"] - } - for project in projects_to_sync - } - - clockify_projects = self.clockify_api.get_projects(workspace_id) - for project_name, task_types in projects_info.items(): - if project_name in clockify_projects: - continue - - response = self.clockify_api.add_project( - project_name, workspace_id - ) - if "id" not in response: - self.log.error( - "Project {} can't be created".format(project_name) - ) - continue - - clockify_workspace_tags = self.clockify_api.get_tags(workspace_id) - for task_type in task_types: - if task_type not in clockify_workspace_tags: - response = self.clockify_api.add_tag( - task_type, workspace_id - ) - if "id" not in response: - self.log.error( - "Task {} can't be created".format(task_type) - ) - continue diff --git a/server_addon/clockify/client/ayon_clockify/version.py b/server_addon/clockify/client/ayon_clockify/version.py deleted file mode 100644 index 36bfd79364..0000000000 --- a/server_addon/clockify/client/ayon_clockify/version.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -"""Package declaring AYON addon 'clockify' version.""" -__version__ = "0.2.1" diff --git a/server_addon/clockify/client/ayon_clockify/widgets.py b/server_addon/clockify/client/ayon_clockify/widgets.py deleted file mode 100644 index e64b64601d..0000000000 --- a/server_addon/clockify/client/ayon_clockify/widgets.py +++ /dev/null @@ -1,207 +0,0 @@ -from qtpy import QtCore, QtGui, QtWidgets -from ayon_core import resources, style - - -class MessageWidget(QtWidgets.QWidget): - - SIZE_W = 300 - SIZE_H = 130 - - closed = QtCore.Signal() - - def __init__(self, messages, title): - super(MessageWidget, self).__init__() - - # Icon - icon = QtGui.QIcon(resources.get_ayon_icon_filepath()) - self.setWindowIcon(icon) - - self.setWindowFlags( - QtCore.Qt.WindowCloseButtonHint | - QtCore.Qt.WindowMinimizeButtonHint - ) - - # Size setting - self.resize(self.SIZE_W, self.SIZE_H) - self.setMinimumSize(QtCore.QSize(self.SIZE_W, self.SIZE_H)) - self.setMaximumSize(QtCore.QSize(self.SIZE_W+100, self.SIZE_H+100)) - - # Style - self.setStyleSheet(style.load_stylesheet()) - - self.setLayout(self._ui_layout(messages)) - self.setWindowTitle(title) - - def _ui_layout(self, messages): - if not messages: - messages = ["*Missing messages (This is a bug)*", ] - - elif not isinstance(messages, (tuple, list)): - messages = [messages, ] - - main_layout = QtWidgets.QVBoxLayout(self) - - labels = [] - for message in messages: - label = QtWidgets.QLabel(message) - 
label.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor)) - label.setTextFormat(QtCore.Qt.RichText) - label.setWordWrap(True) - - labels.append(label) - main_layout.addWidget(label) - - btn_close = QtWidgets.QPushButton("Close") - btn_close.setToolTip('Close this window') - btn_close.clicked.connect(self.on_close_clicked) - - btn_group = QtWidgets.QHBoxLayout() - btn_group.addStretch(1) - btn_group.addWidget(btn_close) - - main_layout.addLayout(btn_group) - - self.labels = labels - self.btn_group = btn_group - self.btn_close = btn_close - self.main_layout = main_layout - - return main_layout - - def on_close_clicked(self): - self.close() - - def close(self, *args, **kwargs): - self.closed.emit() - super(MessageWidget, self).close(*args, **kwargs) - - -class ClockifySettings(QtWidgets.QWidget): - SIZE_W = 500 - SIZE_H = 130 - - loginSignal = QtCore.Signal(object, object, object) - - def __init__(self, clockify_api, optional=True): - super(ClockifySettings, self).__init__() - - self.clockify_api = clockify_api - self.optional = optional - self.validated = False - - # Icon - icon = QtGui.QIcon(resources.get_ayon_icon_filepath()) - self.setWindowIcon(icon) - - self.setWindowTitle("Clockify settings") - self.setWindowFlags( - QtCore.Qt.WindowCloseButtonHint | - QtCore.Qt.WindowMinimizeButtonHint - ) - - # Size setting - self.resize(self.SIZE_W, self.SIZE_H) - self.setMinimumSize(QtCore.QSize(self.SIZE_W, self.SIZE_H)) - self.setMaximumSize(QtCore.QSize(self.SIZE_W+100, self.SIZE_H+100)) - self.setStyleSheet(style.load_stylesheet()) - - self._ui_init() - - def _ui_init(self): - label_api_key = QtWidgets.QLabel("Clockify API key:") - - input_api_key = QtWidgets.QLineEdit() - input_api_key.setFrame(True) - input_api_key.setPlaceholderText("e.g. XX1XxXX2x3x4xXxx") - - error_label = QtWidgets.QLabel("") - error_label.setTextFormat(QtCore.Qt.RichText) - error_label.setWordWrap(True) - error_label.hide() - - form_layout = QtWidgets.QFormLayout() - form_layout.setContentsMargins(10, 15, 10, 5) - form_layout.addRow(label_api_key, input_api_key) - form_layout.addRow(error_label) - - btn_ok = QtWidgets.QPushButton("Ok") - btn_ok.setToolTip('Sets Clockify API Key so can Start/Stop timer') - - btn_cancel = QtWidgets.QPushButton("Cancel") - cancel_tooltip = 'Application won\'t start' - if self.optional: - cancel_tooltip = 'Close this window' - btn_cancel.setToolTip(cancel_tooltip) - - btn_group = QtWidgets.QHBoxLayout() - btn_group.addStretch(1) - btn_group.addWidget(btn_ok) - btn_group.addWidget(btn_cancel) - - main_layout = QtWidgets.QVBoxLayout(self) - main_layout.addLayout(form_layout) - main_layout.addLayout(btn_group) - - btn_ok.clicked.connect(self.click_ok) - btn_cancel.clicked.connect(self._close_widget) - - self.label_api_key = label_api_key - self.input_api_key = input_api_key - self.error_label = error_label - - self.btn_ok = btn_ok - self.btn_cancel = btn_cancel - - def setError(self, msg): - self.error_label.setText(msg) - self.error_label.show() - - def invalid_input(self, entity): - entity.setStyleSheet("border: 1px solid red;") - - def click_ok(self): - api_key = self.input_api_key.text().strip() - if self.optional is True and api_key == '': - self.clockify_api.save_api_key(None) - self.clockify_api.set_api(api_key) - self.validated = False - self._close_widget() - return - - validation = self.clockify_api.validate_api_key(api_key) - - if validation: - self.clockify_api.save_api_key(api_key) - self.clockify_api.set_api(api_key) - self.validated = True - self._close_widget() - else: - 
self.invalid_input(self.input_api_key) - self.validated = False - self.setError( - "Entered invalid API key" - ) - - def showEvent(self, event): - super(ClockifySettings, self).showEvent(event) - - # Make btns same width - max_width = max( - self.btn_ok.sizeHint().width(), - self.btn_cancel.sizeHint().width() - ) - self.btn_ok.setMinimumWidth(max_width) - self.btn_cancel.setMinimumWidth(max_width) - - def closeEvent(self, event): - if self.optional is True: - event.ignore() - self._close_widget() - else: - self.validated = False - - def _close_widget(self): - if self.optional is True: - self.hide() - else: - self.close() diff --git a/server_addon/clockify/package.py b/server_addon/clockify/package.py deleted file mode 100644 index 3245e61ca1..0000000000 --- a/server_addon/clockify/package.py +++ /dev/null @@ -1,9 +0,0 @@ -name = "clockify" -title = "Clockify" -version = "0.2.1" -client_dir = "ayon_clockify" - -ayon_required_addons = { - "core": ">0.3.2", -} -ayon_compatible_addons = {} diff --git a/server_addon/clockify/server/__init__.py b/server_addon/clockify/server/__init__.py deleted file mode 100644 index 11bbfed261..0000000000 --- a/server_addon/clockify/server/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from typing import Type - -from ayon_server.addons import BaseServerAddon - -from .settings import ClockifySettings - - -class ClockifyAddon(BaseServerAddon): - settings_model: Type[ClockifySettings] = ClockifySettings diff --git a/server_addon/clockify/server/settings.py b/server_addon/clockify/server/settings.py deleted file mode 100644 index c01d4c1545..0000000000 --- a/server_addon/clockify/server/settings.py +++ /dev/null @@ -1,9 +0,0 @@ -from ayon_server.settings import BaseSettingsModel, SettingsField - - -class ClockifySettings(BaseSettingsModel): - workspace_name: str = SettingsField( - "", - title="Workspace name", - scope=["studio"] - ) diff --git a/server_addon/deadline/server/settings/publish_plugins.py b/server_addon/deadline/server/settings/publish_plugins.py index 85a93d49cd..1cf699db23 100644 --- a/server_addon/deadline/server/settings/publish_plugins.py +++ b/server_addon/deadline/server/settings/publish_plugins.py @@ -153,8 +153,8 @@ class FusionSubmitDeadlineModel(BaseSettingsModel): ) group: str = SettingsField("", title="Group Name") plugin: str = SettingsField("Fusion", - enum_resolver=fusion_deadline_plugin_enum, - title="Deadline Plugin") + enum_resolver=fusion_deadline_plugin_enum, + title="Deadline Plugin") class NukeSubmitDeadlineModel(BaseSettingsModel): @@ -375,11 +375,11 @@ class PublishPluginsModel(BaseSettingsModel): title="Nuke Submit to deadline") ProcessSubmittedCacheJobOnFarm: ProcessCacheJobFarmModel = SettingsField( default_factory=ProcessCacheJobFarmModel, - title="Process submitted cache Job on farm.", - section="Publish Jobs") + title="Process submitted cache Job on farm", + section="Publish Jobs") ProcessSubmittedJobOnFarm: ProcessSubmittedJobOnFarmModel = SettingsField( default_factory=ProcessSubmittedJobOnFarmModel, - title="Process submitted job on farm.") + title="Process submitted job on farm") DEFAULT_DEADLINE_PLUGINS_SETTINGS = { diff --git a/server_addon/flame/client/ayon_flame/__init__.py b/server_addon/flame/client/ayon_flame/__init__.py deleted file mode 100644 index d2d89bdb01..0000000000 --- a/server_addon/flame/client/ayon_flame/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from .version import __version__ -from .addon import ( - FLAME_ADDON_ROOT, - FlameAddon, -) - - -__all__ = ( - "__version__", - - "FLAME_ADDON_ROOT", - 
"FlameAddon", -) diff --git a/server_addon/flame/client/ayon_flame/addon.py b/server_addon/flame/client/ayon_flame/addon.py deleted file mode 100644 index 5a96a9332e..0000000000 --- a/server_addon/flame/client/ayon_flame/addon.py +++ /dev/null @@ -1,35 +0,0 @@ -import os -from ayon_core.addon import AYONAddon, IHostAddon - -from .version import __version__ - -FLAME_ADDON_ROOT = os.path.dirname(os.path.abspath(__file__)) - - -class FlameAddon(AYONAddon, IHostAddon): - name = "flame" - version = __version__ - host_name = "flame" - - def add_implementation_envs(self, env, _app): - # Add requirements to DL_PYTHON_HOOK_PATH - env["DL_PYTHON_HOOK_PATH"] = os.path.join(FLAME_ADDON_ROOT, "startup") - env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None) - - # Set default values if are not already set via settings - defaults = { - "LOGLEVEL": "DEBUG" - } - for key, value in defaults.items(): - if not env.get(key): - env[key] = value - - def get_launch_hook_paths(self, app): - if app.host_name != self.host_name: - return [] - return [ - os.path.join(FLAME_ADDON_ROOT, "hooks") - ] - - def get_workfile_extensions(self): - return [".otoc"] diff --git a/server_addon/flame/client/ayon_flame/api/__init__.py b/server_addon/flame/client/ayon_flame/api/__init__.py deleted file mode 100644 index 8fcf0c92b0..0000000000 --- a/server_addon/flame/client/ayon_flame/api/__init__.py +++ /dev/null @@ -1,159 +0,0 @@ -""" -AYON Autodesk Flame api -""" -from .constants import ( - COLOR_MAP, - MARKER_NAME, - MARKER_COLOR, - MARKER_DURATION, - MARKER_PUBLISH_DEFAULT -) -from .lib import ( - CTX, - FlameAppFramework, - get_current_project, - get_current_sequence, - create_segment_data_marker, - get_segment_data_marker, - set_segment_data_marker, - set_publish_attribute, - get_publish_attribute, - get_sequence_segments, - maintained_segment_selection, - reset_segment_selection, - get_segment_attributes, - get_clips_in_reels, - get_reformatted_filename, - get_frame_from_filename, - get_padding_from_filename, - maintained_object_duplication, - maintained_temp_file_path, - get_clip_segment, - get_batch_group_from_desktop, - MediaInfoFile, - TimeEffectMetadata -) -from .utils import ( - setup, - get_flame_version, - get_flame_install_root -) -from .pipeline import ( - install, - uninstall, - ls, - containerise, - update_container, - remove_instance, - list_instances, - imprint, - maintained_selection -) -from .menu import ( - FlameMenuProjectConnect, - FlameMenuTimeline, - FlameMenuUniversal -) -from .plugin import ( - Creator, - PublishableClip, - ClipLoader, - OpenClipSolver -) -from .workio import ( - open_file, - save_file, - current_file, - has_unsaved_changes, - file_extensions, - work_root -) -from .render_utils import ( - export_clip, - get_preset_path_by_xml_name, - modify_preset_file -) -from .batch_utils import ( - create_batch_group, - create_batch_group_conent -) - -__all__ = [ - # constants - "COLOR_MAP", - "MARKER_NAME", - "MARKER_COLOR", - "MARKER_DURATION", - "MARKER_PUBLISH_DEFAULT", - - # lib - "CTX", - "FlameAppFramework", - "get_current_project", - "get_current_sequence", - "create_segment_data_marker", - "get_segment_data_marker", - "set_segment_data_marker", - "set_publish_attribute", - "get_publish_attribute", - "get_sequence_segments", - "maintained_segment_selection", - "reset_segment_selection", - "get_segment_attributes", - "get_clips_in_reels", - "get_reformatted_filename", - "get_frame_from_filename", - "get_padding_from_filename", - "maintained_object_duplication", - "maintained_temp_file_path", - 
"get_clip_segment", - "get_batch_group_from_desktop", - "MediaInfoFile", - "TimeEffectMetadata", - - # pipeline - "install", - "uninstall", - "ls", - "containerise", - "update_container", - "reload_pipeline", - "maintained_selection", - "remove_instance", - "list_instances", - "imprint", - "maintained_selection", - - # utils - "setup", - "get_flame_version", - "get_flame_install_root", - - # menu - "FlameMenuProjectConnect", - "FlameMenuTimeline", - "FlameMenuUniversal", - - # plugin - "Creator", - "PublishableClip", - "ClipLoader", - "OpenClipSolver", - - # workio - "open_file", - "save_file", - "current_file", - "has_unsaved_changes", - "file_extensions", - "work_root", - - # render utils - "export_clip", - "get_preset_path_by_xml_name", - "modify_preset_file", - - # batch utils - "create_batch_group", - "create_batch_group_conent" -] diff --git a/server_addon/flame/client/ayon_flame/api/batch_utils.py b/server_addon/flame/client/ayon_flame/api/batch_utils.py deleted file mode 100644 index 9d419a4a90..0000000000 --- a/server_addon/flame/client/ayon_flame/api/batch_utils.py +++ /dev/null @@ -1,151 +0,0 @@ -import flame - - -def create_batch_group( - name, - frame_start, - frame_duration, - update_batch_group=None, - **kwargs -): - """Create Batch Group in active project's Desktop - - Args: - name (str): name of batch group to be created - frame_start (int): start frame of batch - frame_end (int): end frame of batch - update_batch_group (PyBatch)[optional]: batch group to update - - Return: - PyBatch: active flame batch group - """ - # make sure some batch obj is present - batch_group = update_batch_group or flame.batch - - schematic_reels = kwargs.get("shematic_reels") or ['LoadedReel1'] - shelf_reels = kwargs.get("shelf_reels") or ['ShelfReel1'] - - handle_start = kwargs.get("handleStart") or 0 - handle_end = kwargs.get("handleEnd") or 0 - - frame_start -= handle_start - frame_duration += handle_start + handle_end - - if not update_batch_group: - # Create batch group with name, start_frame value, duration value, - # set of schematic reel names, set of shelf reel names - batch_group = batch_group.create_batch_group( - name, - start_frame=frame_start, - duration=frame_duration, - reels=schematic_reels, - shelf_reels=shelf_reels - ) - else: - batch_group.name = name - batch_group.start_frame = frame_start - batch_group.duration = frame_duration - - # add reels to batch group - _add_reels_to_batch_group( - batch_group, schematic_reels, shelf_reels) - - # TODO: also update write node if there is any - # TODO: also update loaders to start from correct frameStart - - if kwargs.get("switch_batch_tab"): - # use this command to switch to the batch tab - batch_group.go_to() - - return batch_group - - -def _add_reels_to_batch_group(batch_group, reels, shelf_reels): - # update or create defined reels - # helper variables - reel_names = [ - r.name.get_value() - for r in batch_group.reels - ] - shelf_reel_names = [ - r.name.get_value() - for r in batch_group.shelf_reels - ] - # add schematic reels - for _r in reels: - if _r in reel_names: - continue - batch_group.create_reel(_r) - - # add shelf reels - for _sr in shelf_reels: - if _sr in shelf_reel_names: - continue - batch_group.create_shelf_reel(_sr) - - -def create_batch_group_conent(batch_nodes, batch_links, batch_group=None): - """Creating batch group with links - - Args: - batch_nodes (list of dict): each dict is node definition - batch_links (list of dict): each dict is link definition - batch_group (PyBatch, optional): batch group. 
Defaults to None.
-
-    Return:
-        dict: all batch nodes {name or id: PyNode}
-    """
-    # make sure some batch obj is present
-    batch_group = batch_group or flame.batch
-    all_batch_nodes = {
-        b.name.get_value(): b
-        for b in batch_group.nodes
-    }
-    for node in batch_nodes:
-        # NOTE: node_props ideally needs to be an OrderedDict type
-        node_id, node_type, node_props = (
-            node["id"], node["type"], node["properties"])
-
-        # get node name for checking if exists
-        node_name = node_props.pop("name", None) or node_id
-
-        if all_batch_nodes.get(node_name):
-            # update existing batch node
-            batch_node = all_batch_nodes[node_name]
-        else:
-            # create new batch node
-            batch_node = batch_group.create_node(node_type)
-
-        # set name
-        batch_node.name.set_value(node_name)
-
-        # set attributes found in node props
-        for key, value in node_props.items():
-            if not hasattr(batch_node, key):
-                continue
-            setattr(batch_node, key, value)
-
-        # add created node for possible linking
-        all_batch_nodes[node_id] = batch_node
-
-    # link nodes to each other
-    for link in batch_links:
-        _from_n, _to_n = link["from_node"], link["to_node"]
-
-        # check if all linking nodes are available
-        if not all([
-            all_batch_nodes.get(_from_n["id"]),
-            all_batch_nodes.get(_to_n["id"])
-        ]):
-            continue
-
-        # link nodes in defined link
-        batch_group.connect_nodes(
-            all_batch_nodes[_from_n["id"]], _from_n["connector"],
-            all_batch_nodes[_to_n["id"]], _to_n["connector"]
-        )
-
-    # sort batch nodes
-    batch_group.organize()
-
-    return all_batch_nodes
diff --git a/server_addon/flame/client/ayon_flame/api/constants.py b/server_addon/flame/client/ayon_flame/api/constants.py
deleted file mode 100644
index 04191c539d..0000000000
--- a/server_addon/flame/client/ayon_flame/api/constants.py
+++ /dev/null
@@ -1,24 +0,0 @@
-
-"""
-AYON Flame api constants
-"""
-# AYON marker workflow variables
-MARKER_NAME = "OpenPypeData"
-MARKER_DURATION = 0
-MARKER_COLOR = "cyan"
-MARKER_PUBLISH_DEFAULT = False
-
-# AYON color definitions
-COLOR_MAP = {
-    "red": (1.0, 0.0, 0.0),
-    "orange": (1.0, 0.5, 0.0),
-    "yellow": (1.0, 1.0, 0.0),
-    "pink": (1.0, 0.5, 1.0),
-    "white": (1.0, 1.0, 1.0),
-    "green": (0.0, 1.0, 0.0),
-    "cyan": (0.0, 1.0, 1.0),
-    "blue": (0.0, 0.0, 1.0),
-    "purple": (0.5, 0.0, 0.5),
-    "magenta": (0.5, 0.0, 1.0),
-    "black": (0.0, 0.0, 0.0)
-}
diff --git a/server_addon/flame/client/ayon_flame/api/lib.py b/server_addon/flame/client/ayon_flame/api/lib.py
deleted file mode 100644
index 8bfe6348ea..0000000000
--- a/server_addon/flame/client/ayon_flame/api/lib.py
+++ /dev/null
@@ -1,1272 +0,0 @@
-import sys
-import os
-import re
-import json
-import pickle
-import clique
-import tempfile
-import traceback
-import itertools
-import contextlib
-import xml.etree.cElementTree as cET
-from copy import deepcopy, copy
-from xml.etree import ElementTree as ET
-from pprint import pformat
-
-from ayon_core.lib import Logger, run_subprocess
-
-from .constants import (
-    MARKER_COLOR,
-    MARKER_DURATION,
-    MARKER_NAME,
-    COLOR_MAP,
-    MARKER_PUBLISH_DEFAULT
-)
-
-log = Logger.get_logger(__name__)
-
-FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]")
-
-
-class CTX:
-    # singleton used for passing data between api modules
-    app_framework = None
-    flame_apps = []
-    selection = None
-
-
-@contextlib.contextmanager
-def io_preferences_file(klass, filepath, write=False):
-    try:
-        flag = "w" if write else "r"
-        yield open(filepath, flag)
-
-    except IOError as _error:
-        klass.log.info("Unable to work with preferences `{}`: {}".format(
-            filepath, _error))
-
-
-class
FlameAppFramework(object): - # flameAppFramework class takes care of preferences - - class prefs_dict(dict): - - def __init__(self, master, name, **kwargs): - self.name = name - self.master = master - if not self.master.get(self.name): - self.master[self.name] = {} - self.master[self.name].__init__() - - def __getitem__(self, k): - return self.master[self.name].__getitem__(k) - - def __setitem__(self, k, v): - return self.master[self.name].__setitem__(k, v) - - def __delitem__(self, k): - return self.master[self.name].__delitem__(k) - - def get(self, k, default=None): - return self.master[self.name].get(k, default) - - def setdefault(self, k, default=None): - return self.master[self.name].setdefault(k, default) - - def pop(self, *args, **kwargs): - return self.master[self.name].pop(*args, **kwargs) - - def update(self, mapping=(), **kwargs): - self.master[self.name].update(mapping, **kwargs) - - def __contains__(self, k): - return self.master[self.name].__contains__(k) - - def copy(self): # don"t delegate w/ super - dict.copy() -> dict :( - return type(self)(self) - - def keys(self): - return self.master[self.name].keys() - - @classmethod - def fromkeys(cls, keys, v=None): - return cls.master[cls.name].fromkeys(keys, v) - - def __repr__(self): - return "{0}({1})".format( - type(self).__name__, self.master[self.name].__repr__()) - - def master_keys(self): - return self.master.keys() - - def __init__(self): - self.name = self.__class__.__name__ - self.bundle_name = "OpenPypeFlame" - # self.prefs scope is limited to flame project and user - self.prefs = {} - self.prefs_user = {} - self.prefs_global = {} - self.log = log - - try: - import flame - self.flame = flame - self.flame_project_name = self.flame.project.current_project.name - self.flame_user_name = flame.users.current_user.name - except Exception: - self.flame = None - self.flame_project_name = None - self.flame_user_name = None - - import socket - self.hostname = socket.gethostname() - - if sys.platform == "darwin": - self.prefs_folder = os.path.join( - os.path.expanduser("~"), - "Library", - "Caches", - "OpenPype", - self.bundle_name - ) - elif sys.platform.startswith("linux"): - self.prefs_folder = os.path.join( - os.path.expanduser("~"), - ".OpenPype", - self.bundle_name) - - self.prefs_folder = os.path.join( - self.prefs_folder, - self.hostname, - ) - - self.log.info("[{}] waking up".format(self.__class__.__name__)) - - try: - self.load_prefs() - except RuntimeError: - self.save_prefs() - - # menu auto-refresh defaults - if not self.prefs_global.get("menu_auto_refresh"): - self.prefs_global["menu_auto_refresh"] = { - "media_panel": True, - "batch": True, - "main_menu": True, - "timeline_menu": True - } - - self.apps = [] - - def get_pref_file_paths(self): - - prefix = self.prefs_folder + os.path.sep + self.bundle_name - prefs_file_path = "_".join([ - prefix, self.flame_user_name, - self.flame_project_name]) + ".prefs" - prefs_user_file_path = "_".join([ - prefix, self.flame_user_name]) + ".prefs" - prefs_global_file_path = prefix + ".prefs" - - return (prefs_file_path, prefs_user_file_path, prefs_global_file_path) - - def load_prefs(self): - - (proj_pref_path, user_pref_path, - glob_pref_path) = self.get_pref_file_paths() - - with io_preferences_file(self, proj_pref_path) as prefs_file: - self.prefs = pickle.load(prefs_file) - self.log.info( - "Project - preferences contents:\n{}".format( - pformat(self.prefs) - )) - - with io_preferences_file(self, user_pref_path) as prefs_file: - self.prefs_user = pickle.load(prefs_file) - 
self.log.info( - "User - preferences contents:\n{}".format( - pformat(self.prefs_user) - )) - - with io_preferences_file(self, glob_pref_path) as prefs_file: - self.prefs_global = pickle.load(prefs_file) - self.log.info( - "Global - preferences contents:\n{}".format( - pformat(self.prefs_global) - )) - - return True - - def save_prefs(self): - # make sure the preference folder is available - if not os.path.isdir(self.prefs_folder): - try: - os.makedirs(self.prefs_folder) - except Exception: - self.log.info("Unable to create folder {}".format( - self.prefs_folder)) - return False - - # get all pref file paths - (proj_pref_path, user_pref_path, - glob_pref_path) = self.get_pref_file_paths() - - with io_preferences_file(self, proj_pref_path, True) as prefs_file: - pickle.dump(self.prefs, prefs_file) - self.log.info( - "Project - preferences contents:\n{}".format( - pformat(self.prefs) - )) - - with io_preferences_file(self, user_pref_path, True) as prefs_file: - pickle.dump(self.prefs_user, prefs_file) - self.log.info( - "User - preferences contents:\n{}".format( - pformat(self.prefs_user) - )) - - with io_preferences_file(self, glob_pref_path, True) as prefs_file: - pickle.dump(self.prefs_global, prefs_file) - self.log.info( - "Global - preferences contents:\n{}".format( - pformat(self.prefs_global) - )) - - return True - - -def get_current_project(): - import flame - return flame.project.current_project - - -def get_current_sequence(selection): - import flame - - def segment_to_sequence(_segment): - track = _segment.parent - version = track.parent - return version.parent - - process_timeline = None - - if len(selection) == 1: - if isinstance(selection[0], flame.PySequence): - process_timeline = selection[0] - if isinstance(selection[0], flame.PySegment): - process_timeline = segment_to_sequence(selection[0]) - else: - for segment in selection: - if isinstance(segment, flame.PySegment): - process_timeline = segment_to_sequence(segment) - break - - return process_timeline - - -def rescan_hooks(): - import flame - try: - flame.execute_shortcut("Rescan Python Hooks") - except Exception: - pass - - -def get_metadata(project_name, _log=None): - # TODO: can be replaced by MediaInfoFile class method - from adsk.libwiretapPythonClientAPI import ( - WireTapClient, - WireTapServerHandle, - WireTapNodeHandle, - WireTapStr - ) - - class GetProjectColorPolicy(object): - def __init__(self, host_name=None, _log=None): - # Create a connection to the Backburner manager using the Wiretap - # python API. - # - self.log = _log or log - self.host_name = host_name or "localhost" - self._wiretap_client = WireTapClient() - if not self._wiretap_client.init(): - raise Exception("Could not initialize Wiretap Client") - self._server = WireTapServerHandle( - "{}:IFFFS".format(self.host_name)) - - def process(self, project_name): - policy_node_handle = WireTapNodeHandle( - self._server, - "/projects/{}/syncolor/policy".format(project_name) - ) - self.log.info(policy_node_handle) - - policy = WireTapStr() - if not policy_node_handle.getNodeTypeStr(policy): - self.log.warning( - "Could not retrieve policy of '%s': %s" % ( - policy_node_handle.getNodeId().id(), - policy_node_handle.lastError() - ) - ) - - return policy.c_str() - - policy_wiretap = GetProjectColorPolicy(_log=_log) - return policy_wiretap.process(project_name) - - -def get_segment_data_marker(segment, with_marker=None): - """ - Get openpype track item tag created by creator or loader plugin. 
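    Example (illustrative only; ``segment`` is a flame.PySegment that
    already carries an OpenPypeData marker):

        >>> get_segment_data_marker(segment)
        {'productName': 'productMain'}
        >>> get_segment_data_marker(segment, with_marker=True)
        (PyMarker, {'productName': 'productMain'})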
- - Attributes: - segment (flame.PySegment): flame api object - with_marker (bool)[optional]: if true it will return also marker object - - Returns: - dict: openpype tag data - - Returns(with_marker=True): - flame.PyMarker, dict - """ - for marker in segment.markers: - comment = marker.comment.get_value() - color = marker.colour.get_value() - name = marker.name.get_value() - - if (name == MARKER_NAME) and ( - color == COLOR_MAP[MARKER_COLOR]): - if not with_marker: - return json.loads(comment) - else: - return marker, json.loads(comment) - - -def set_segment_data_marker(segment, data=None): - """ - Set openpype track item tag to input segment. - - Attributes: - segment (flame.PySegment): flame api object - - Returns: - dict: json loaded data - """ - data = data or dict() - - marker_data = get_segment_data_marker(segment, True) - - if marker_data: - # get available openpype tag if any - marker, tag_data = marker_data - # update tag data with new data - tag_data.update(data) - # update marker with tag data - marker.comment = json.dumps(tag_data) - else: - # update tag data with new data - marker = create_segment_data_marker(segment) - # add tag data to marker's comment - marker.comment = json.dumps(data) - - -def set_publish_attribute(segment, value): - """ Set Publish attribute in input Tag object - - Attribute: - segment (flame.PySegment)): flame api object - value (bool): True or False - """ - tag_data = get_segment_data_marker(segment) - tag_data["publish"] = value - - # set data to the publish attribute - set_segment_data_marker(segment, tag_data) - - -def get_publish_attribute(segment): - """ Get Publish attribute from input Tag object - - Attribute: - segment (flame.PySegment)): flame api object - - Returns: - bool: True or False - """ - tag_data = get_segment_data_marker(segment) - - if not tag_data: - set_publish_attribute(segment, MARKER_PUBLISH_DEFAULT) - return MARKER_PUBLISH_DEFAULT - - return tag_data["publish"] - - -def create_segment_data_marker(segment): - """ Create openpype marker on a segment. - - Attributes: - segment (flame.PySegment): flame api object - - Returns: - flame.PyMarker: flame api object - """ - # get duration of segment - duration = segment.record_duration.relative_frame - # calculate start frame of the new marker - start_frame = int(segment.record_in.relative_frame) + int(duration / 2) - # create marker - marker = segment.create_marker(start_frame) - # set marker name - marker.name = MARKER_NAME - # set duration - marker.duration = MARKER_DURATION - # set colour - marker.colour = COLOR_MAP[MARKER_COLOR] # Red - - return marker - - -def get_sequence_segments(sequence, selected=False): - segments = [] - # loop versions in sequence - for ver in sequence.versions: - # loop track in versions - for track in ver.tracks: - # ignore all empty tracks and hidden too - if len(track.segments) == 0 and track.hidden: - continue - # loop all segment in remaining tracks - for segment in track.segments: - if segment.name.get_value() == "": - continue - if segment.hidden.get_value() is True: - continue - if ( - selected is True - and segment.selected.get_value() is not True - ): - continue - # add it to original selection - segments.append(segment) - return segments - - -@contextlib.contextmanager -def maintained_segment_selection(sequence): - """Maintain selection during context - - Attributes: - sequence (flame.PySequence): python api object - - Yield: - list of flame.PySegment - - Example: - >>> with maintained_segment_selection(sequence) as selected_segments: - ... 
for segment in selected_segments:
-        ...         segment.selected = False
-        >>> print(segment.selected)
-        True
-    """
-    selected_segments = get_sequence_segments(sequence, True)
-    try:
-        # do the operation on selected segments
-        yield selected_segments
-    finally:
-        # reset all selected clips
-        reset_segment_selection(sequence)
-        # select only original selection of segments
-        for segment in selected_segments:
-            segment.selected = True
-
-
-def reset_segment_selection(sequence):
-    """Deselect all selected segments
-    """
-    for ver in sequence.versions:
-        for track in ver.tracks:
-            if len(track.segments) == 0 and track.hidden:
-                continue
-            for segment in track.segments:
-                segment.selected = False
-
-
-def _get_shot_tokens_values(clip, tokens):
-    old_value = None
-    output = {}
-
-    if not clip.shot_name:
-        return output
-
-    old_value = clip.shot_name.get_value()
-
-    for token in tokens:
-        clip.shot_name.set_value(token)
-        _key = str(re.sub("[<>]", "", token)).replace(" ", "_")
-
-        try:
-            output[_key] = int(clip.shot_name.get_value())
-        except ValueError:
-            output[_key] = clip.shot_name.get_value()
-
-    clip.shot_name.set_value(old_value)
-
-    return output
-
-
-def get_segment_attributes(segment):
-    if segment.name.get_value() == "":
-        return None
-
-    # Add timeline segment to tree
-    clip_data = {
-        "shot_name": segment.shot_name.get_value(),
-        "segment_name": segment.name.get_value(),
-        "segment_comment": segment.comment.get_value(),
-        "tape_name": segment.tape_name,
-        "source_name": segment.source_name,
-        "fpath": segment.file_path,
-        "PySegment": segment
-    }
-
-    # head and tail with forward compatibility
-    if segment.head:
-        # `infinite` can be also returned
-        if isinstance(segment.head, str):
-            clip_data["segment_head"] = 0
-        else:
-            clip_data["segment_head"] = int(segment.head)
-    if segment.tail:
-        # `infinite` can be also returned
-        if isinstance(segment.tail, str):
-            clip_data["segment_tail"] = 0
-        else:
-            clip_data["segment_tail"] = int(segment.tail)
-
-    # add all available shot tokens
-    shot_tokens = _get_shot_tokens_values(segment, [
-        "<colour space>", "<width>", "<height>", "<depth>", "<segment>",
-        "<track>", "<track name>"
-    ])
-    clip_data.update(shot_tokens)
-
-    # populate shot source metadata
-    segment_attrs = [
-        "record_duration", "record_in", "record_out",
-        "source_duration", "source_in", "source_out"
-    ]
-    segment_attrs_data = {}
-    for attr_name in segment_attrs:
-        if not hasattr(segment, attr_name):
-            continue
-        attr = getattr(segment, attr_name)
-        segment_attrs_data[attr_name] = str(attr).replace("+", ":")
-
-        if attr_name in ["record_in", "record_out"]:
-            clip_data[attr_name] = attr.relative_frame
-        else:
-            clip_data[attr_name] = attr.frame
-
-    clip_data["segment_timecodes"] = segment_attrs_data
-
-    return clip_data
-
-
-def get_clips_in_reels(project):
-    output_clips = []
-    project_desktop = project.current_workspace.desktop
-
-    for reel_group in project_desktop.reel_groups:
-        for reel in reel_group.reels:
-            for clip in reel.clips:
-                clip_data = {
-                    "PyClip": clip,
-                    "fps": float(str(clip.frame_rate)[:-4])
-                }
-
-                attrs = [
-                    "name", "width", "height",
-                    "ratio", "sample_rate", "bit_depth"
-                ]
-
-                for attr in attrs:
-                    val = getattr(clip, attr)
-                    clip_data[attr] = val
-
-                version = clip.versions[-1]
-                track = version.tracks[-1]
-                for segment in track.segments:
-                    segment_data = get_segment_attributes(segment)
-                    clip_data.update(segment_data)
-
-                output_clips.append(clip_data)
-
-    return output_clips
-
-
-def get_reformatted_filename(filename, padded=True):
-    """
-    Return file name with frame number replaced by a format expression
-
-    Args:
-        filename (str): file name
-
-    Returns:
- type: string with reformatted path - - Example: - get_reformatted_filename("plate.1001.exr") > plate.%04d.exr - - """ - found = FRAME_PATTERN.search(filename) - - if not found: - log.info("File name is not sequence: {}".format(filename)) - return filename - - padding = get_padding_from_filename(filename) - - replacement = "%0{}d".format(padding) if padded else "%d" - start_idx, end_idx = found.span(1) - - return replacement.join( - [filename[:start_idx], filename[end_idx:]] - ) - - -def get_padding_from_filename(filename): - """ - Return padding number from Flame path style - - Args: - filename (str): file name - - Returns: - int: padding number - - Example: - get_padding_from_filename("plate.0001.exr") > 4 - - """ - found = get_frame_from_filename(filename) - - return len(found) if found else None - - -def get_frame_from_filename(filename): - """ - Return sequence number from Flame path style - - Args: - filename (str): file name - - Returns: - int: sequence frame number - - Example: - def get_frame_from_filename(path): - ("plate.0001.exr") > 0001 - - """ - - found = re.findall(FRAME_PATTERN, filename) - - return found.pop() if found else None - - -@contextlib.contextmanager -def maintained_object_duplication(item): - """Maintain input item duplication - - Attributes: - item (any flame.PyObject): python api object - - Yield: - duplicate input PyObject type - """ - import flame - # Duplicate the clip to avoid modifying the original clip - duplicate = flame.duplicate(item) - - try: - # do the operation on selected segments - yield duplicate - finally: - # delete the item at the end - flame.delete(duplicate) - - -@contextlib.contextmanager -def maintained_temp_file_path(suffix=None): - _suffix = suffix or "" - - try: - # Store dumped json to temporary file - temporary_file = tempfile.mktemp( - suffix=_suffix, prefix="flame_maintained_") - yield temporary_file.replace("\\", "/") - - except IOError as _error: - raise IOError( - "Not able to create temp json file: {}".format(_error)) - - finally: - # Remove the temporary json - os.remove(temporary_file) - - -def get_clip_segment(flame_clip): - name = flame_clip.name.get_value() - version = flame_clip.versions[0] - track = version.tracks[0] - segments = track.segments - - if len(segments) < 1: - raise ValueError("Clip `{}` has no segments!".format(name)) - - if len(segments) > 1: - raise ValueError("Clip `{}` has too many segments!".format(name)) - - return segments[0] - - -def get_batch_group_from_desktop(name): - project = get_current_project() - project_desktop = project.current_workspace.desktop - - for bgroup in project_desktop.batch_groups: - if bgroup.name.get_value() in name: - return bgroup - - -class MediaInfoFile(object): - """Class to get media info file clip data - - Raises: - IOError: MEDIA_SCRIPT_PATH path doesn't exists - TypeError: Not able to generate clip xml data file - ET.ParseError: Missing clip in xml clip data - IOError: Not able to save xml clip data to file - - Attributes: - str: `MEDIA_SCRIPT_PATH` path to flame binary - logging.Logger: `log` logger - - TODO: add method for getting metadata to dict - """ - MEDIA_SCRIPT_PATH = "/opt/Autodesk/mio/current/dl_get_media_info" - - log = log - - _clip_data = None - _start_frame = None - _fps = None - _drop_mode = None - _file_pattern = None - - def __init__(self, path, logger=None): - - # replace log if any - if logger: - self.log = logger - - # test if `dl_get_media_info` path exists - self._validate_media_script_path() - - # derivate other feed variables - feed_basename = 
os.path.basename(path)
-        feed_dir = os.path.dirname(path)
-        feed_ext = os.path.splitext(feed_basename)[1][1:].lower()
-
-        with maintained_temp_file_path(".clip") as tmp_path:
-            self.log.info("Temp File: {}".format(tmp_path))
-            self._generate_media_info_file(tmp_path, feed_ext, feed_dir)
-
-            # get collection containing feed_basename from path
-            self.file_pattern = self._get_collection(
-                feed_basename, feed_dir, feed_ext)
-
-            if (
-                not self.file_pattern
-                and os.path.exists(os.path.join(feed_dir, feed_basename))
-            ):
-                self.file_pattern = feed_basename
-
-            # get clip data and make it single if there
-            # are multiple clips' data
-            xml_data = self._make_single_clip_media_info(
-                tmp_path, feed_basename, self.file_pattern)
-            self.log.debug("xml_data: {}".format(xml_data))
-            self.log.debug("type: {}".format(type(xml_data)))
-
-            # get all time related data and assign them
-            self._get_time_info_from_origin(xml_data)
-            self.log.debug("start_frame: {}".format(self.start_frame))
-            self.log.debug("fps: {}".format(self.fps))
-            self.log.debug("drop frame: {}".format(self.drop_mode))
-            self.clip_data = xml_data
-
-    def _get_collection(self, feed_basename, feed_dir, feed_ext):
-        """ Get collection string
-
-        Args:
-            feed_basename (str): file base name
-            feed_dir (str): file's directory
-            feed_ext (str): file extension
-
-        Raises:
-            AttributeError: feed_ext does not match feed_basename
-
-        Returns:
-            str: collection basename with range of sequence
-        """
-        partialname = self._separate_file_head(feed_basename, feed_ext)
-        self.log.debug("__ partialname: {}".format(partialname))
-
-        # make sure partial input basename has the correct extension
-        if not partialname:
-            raise AttributeError(
-                "Wrong input attributes. Basename - {}, Ext - {}".format(
-                    feed_basename, feed_ext
-                )
-            )
-
-        # get all related files
-        files = [
-            f for f in os.listdir(feed_dir)
-            if partialname == self._separate_file_head(f, feed_ext)
-        ]
-
-        # ignore remainders as we don't need them
-        collections = clique.assemble(files)[0]
-
-        # in case no collection found return None
-        # it is probably just single file
-        if not collections:
-            return
-
-        # we expect only one collection
-        collection = collections[0]
-
-        self.log.debug("__ collection: {}".format(collection))
-
-        if collection.is_contiguous():
-            return self._format_collection(collection)
-
-        # add `[` in front to make sure it won't capture
-        # shot name with the same number
-        number_from_path = self._separate_number(feed_basename, feed_ext)
-        search_number_pattern = "[" + number_from_path
-        # convert to multiple collections
-        _continues_colls = collection.separate()
-        for _coll in _continues_colls:
-            coll_to_text = self._format_collection(
-                _coll, len(number_from_path))
-            self.log.debug("__ coll_to_text: {}".format(coll_to_text))
-            if search_number_pattern in coll_to_text:
-                return coll_to_text
-
-    @staticmethod
-    def _format_collection(collection, padding=None):
-        padding = padding or collection.padding
-        # if no holes then return collection
-        head = collection.format("{head}")
-        tail = collection.format("{tail}")
-        range_template = "[{{:0{0}d}}-{{:0{0}d}}]".format(
-            padding)
-        ranges = range_template.format(
-            min(collection.indexes),
-            max(collection.indexes)
-        )
-        # if no holes then return collection
-        return "{}{}{}".format(head, ranges, tail)
-
-    def _separate_file_head(self, basename, extension):
-        """ Get only head without sequence and extension
-
-        Args:
-            basename (str): file base name
-            extension (str): file extension
-
-        Returns:
-            str: file head
-        """
-        # in case sequence file
-        found = re.findall(
-            r"(.*)[._][\d]*(?=.{})".format(extension),
-            basename,
-        )
-        if found:
-            return found.pop()
-
-        # in case single file
-        name, ext = os.path.splitext(basename)
-
-        if extension == ext[1:]:
-            return name
-
-    def _separate_number(self, basename, extension):
-        """ Get only sequence number as string
-
-        Args:
-            basename (str): file base name
-            extension (str): file extension
-
-        Returns:
-            str: number with padding
-        """
-        # in case sequence file
-        found = re.findall(
-            r"[._]([\d]*)(?=.{})".format(extension),
-            basename,
-        )
-        if found:
-            return found.pop()
-
-    @property
-    def clip_data(self):
-        """Clip's xml clip data
-
-        Returns:
-            xml.etree.ElementTree: xml data
-        """
-        return self._clip_data
-
-    @clip_data.setter
-    def clip_data(self, data):
-        self._clip_data = data
-
-    @property
-    def start_frame(self):
-        """ Clip's starting frame found in timecode
-
-        Returns:
-            int: number of frames
-        """
-        return self._start_frame
-
-    @start_frame.setter
-    def start_frame(self, number):
-        self._start_frame = int(number)
-
-    @property
-    def fps(self):
-        """ Clip's frame rate
-
-        Returns:
-            float: frame rate
-        """
-        return self._fps
-
-    @fps.setter
-    def fps(self, fl_number):
-        self._fps = float(fl_number)
-
-    @property
-    def drop_mode(self):
-        """ Clip's drop frame mode
-
-        Returns:
-            str: drop frame flag
-        """
-        return self._drop_mode
-
-    @drop_mode.setter
-    def drop_mode(self, text):
-        self._drop_mode = str(text)
-
-    @property
-    def file_pattern(self):
-        """Clip's file pattern.
-
-        Returns:
-            str: file pattern. ex. file.[1-2].exr
-        """
-        return self._file_pattern
-
-    @file_pattern.setter
-    def file_pattern(self, fpattern):
-        self._file_pattern = fpattern
-
-    def _validate_media_script_path(self):
-        if not os.path.isfile(self.MEDIA_SCRIPT_PATH):
-            raise IOError("Media Script does not exist: `{}`".format(
-                self.MEDIA_SCRIPT_PATH))
-
-    def _generate_media_info_file(self, fpath, feed_ext, feed_dir):
-        """ Generate media info xml .clip file
-
-        Args:
-            fpath (str): .clip file path
-            feed_ext (str): file extension to be filtered
-            feed_dir (str): look up directory
-
-        Raises:
-            TypeError: Type error if it fails
-        """
-        # Create cmd arguments for getting the media info xml file
-        cmd_args = [
-            self.MEDIA_SCRIPT_PATH,
-            "-e", feed_ext,
-            "-o", fpath,
-            feed_dir
-        ]
-
-        try:
-            # execute creation of clip xml template data
-            run_subprocess(cmd_args)
-        except TypeError as error:
-            raise TypeError(
-                "Error creating `{}` due: {}".format(fpath, error))
-
-    def _make_single_clip_media_info(self, fpath, feed_basename, path_pattern):
-        """ Separate only the relevant clip object from the .clip file
-
-        Args:
-            fpath (str): clip file path
-            feed_basename (str): search basename
-            path_pattern (str): search file pattern (file.[1-2].exr)
-
-        Raises:
-            ET.ParseError: if nothing found
-
-        Returns:
-            ET.Element: xml element data of matching clip
-        """
-        with open(fpath) as f:
-            lines = f.readlines()
-            _added_root = itertools.chain(
-                "<root>", deepcopy(lines)[1:], "</root>")
-            new_root = ET.fromstringlist(_added_root)
-
-        # find the clip which is matching to my input name
-        xml_clips = new_root.findall("clip")
-        matching_clip = None
-        for xml_clip in xml_clips:
-            clip_name = xml_clip.find("name").text
-            self.log.debug("__ clip_name: `{}`".format(clip_name))
-            if clip_name not in feed_basename:
-                continue
-
-            # test path pattern
-            for out_track in xml_clip.iter("track"):
-                for out_feed in out_track.iter("feed"):
-                    for span in out_feed.iter("span"):
-                        # start frame
-                        span_path = span.find("path")
-                        self.log.debug(
-                            
"__ span_path.text: {}, path_pattern: {}".format( - span_path.text, path_pattern - ) - ) - if path_pattern in span_path.text: - matching_clip = xml_clip - - if matching_clip is None: - # return warning there is missing clip - raise ET.ParseError( - "Missing clip in `{}`. Available clips {}".format( - feed_basename, [ - xml_clip.find("name").text - for xml_clip in xml_clips - ] - )) - - return matching_clip - - def _get_time_info_from_origin(self, xml_data): - """Set time info to class attributes - - Args: - xml_data (ET.Element): clip data - """ - try: - for out_track in xml_data.iter("track"): - for out_feed in out_track.iter("feed"): - # start frame - out_feed_nb_ticks_obj = out_feed.find( - "startTimecode/nbTicks") - self.start_frame = out_feed_nb_ticks_obj.text - - # fps - out_feed_fps_obj = out_feed.find( - "startTimecode/rate") - self.fps = out_feed_fps_obj.text - - # drop frame mode - out_feed_drop_mode_obj = out_feed.find( - "startTimecode/dropMode") - self.drop_mode = out_feed_drop_mode_obj.text - break - except Exception as msg: - self.log.warning(msg) - - @staticmethod - def write_clip_data_to_file(fpath, xml_element_data): - """ Write xml element of clip data to file - - Args: - fpath (string): file path - xml_element_data (xml.etree.ElementTree.Element): xml data - - Raises: - IOError: If data could not be written to file - """ - try: - # save it as new file - tree = cET.ElementTree(xml_element_data) - tree.write( - fpath, xml_declaration=True, - method="xml", encoding="UTF-8" - ) - except IOError as error: - raise IOError( - "Not able to write data to file: {}".format(error)) - - -class TimeEffectMetadata(object): - log = log - _data = {} - _retime_modes = { - 0: "speed", - 1: "timewarp", - 2: "duration" - } - - def __init__(self, segment, logger=None): - if logger: - self.log = logger - - self._data = self._get_metadata(segment) - - @property - def data(self): - """ Returns timewarp effect data - - Returns: - dict: retime data - """ - return self._data - - def _get_metadata(self, segment): - effects = segment.effects or [] - for effect in effects: - if effect.type == "Timewarp": - with maintained_temp_file_path(".timewarp_node") as tmp_path: - self.log.info("Temp File: {}".format(tmp_path)) - effect.save_setup(tmp_path) - return self._get_attributes_from_xml(tmp_path) - - return {} - - def _get_attributes_from_xml(self, tmp_path): - with open(tmp_path, "r") as tw_setup_file: - tw_setup_string = tw_setup_file.read() - tw_setup_file.close() - - tw_setup_xml = ET.fromstring(tw_setup_string) - tw_setup = self._dictify(tw_setup_xml) - # pprint(tw_setup) - try: - tw_setup_state = tw_setup["Setup"]["State"][0] - mode = int( - tw_setup_state["TW_RetimerMode"][0]["_text"] - ) - r_data = { - "type": self._retime_modes[mode], - "effectStart": int( - tw_setup["Setup"]["Base"][0]["Range"][0]["Start"]), - "effectEnd": int( - tw_setup["Setup"]["Base"][0]["Range"][0]["End"]) - } - - if mode == 0: # speed - r_data[self._retime_modes[mode]] = float( - tw_setup_state["TW_Speed"] - [0]["Channel"][0]["Value"][0]["_text"] - ) / 100 - elif mode == 1: # timewarp - print("timing") - r_data[self._retime_modes[mode]] = self._get_anim_keys( - tw_setup_state["TW_Timing"] - ) - elif mode == 2: # duration - r_data[self._retime_modes[mode]] = { - "start": { - "source": int( - tw_setup_state["TW_DurationTiming"][0]["Channel"] - [0]["KFrames"][0]["Key"][0]["Value"][0]["_text"] - ), - "timeline": int( - tw_setup_state["TW_DurationTiming"][0]["Channel"] - [0]["KFrames"][0]["Key"][0]["Frame"][0]["_text"] - ) - 
}, - "end": { - "source": int( - tw_setup_state["TW_DurationTiming"][0]["Channel"] - [0]["KFrames"][0]["Key"][1]["Value"][0]["_text"] - ), - "timeline": int( - tw_setup_state["TW_DurationTiming"][0]["Channel"] - [0]["KFrames"][0]["Key"][1]["Frame"][0]["_text"] - ) - } - } - except Exception: - lines = traceback.format_exception(*sys.exc_info()) - self.log.error("\n".join(lines)) - return - - return r_data - - def _get_anim_keys(self, setup_cat, index=None): - return_data = { - "extrapolation": ( - setup_cat[0]["Channel"][0]["Extrap"][0]["_text"] - ), - "animKeys": [] - } - for key in setup_cat[0]["Channel"][0]["KFrames"][0]["Key"]: - if index and int(key["Index"]) != index: - continue - key_data = { - "source": float(key["Value"][0]["_text"]), - "timeline": float(key["Frame"][0]["_text"]), - "index": int(key["Index"]), - "curveMode": key["CurveMode"][0]["_text"], - "curveOrder": key["CurveOrder"][0]["_text"] - } - if key.get("TangentMode"): - key_data["tangentMode"] = key["TangentMode"][0]["_text"] - - return_data["animKeys"].append(key_data) - - return return_data - - def _dictify(self, xml_, root=True): - """ Convert xml object to dictionary - - Args: - xml_ (xml.etree.ElementTree.Element): xml data - root (bool, optional): is root available. Defaults to True. - - Returns: - dict: dictionarized xml - """ - - if root: - return {xml_.tag: self._dictify(xml_, False)} - - d = copy(xml_.attrib) - if xml_.text: - d["_text"] = xml_.text - - for x in xml_.findall("./*"): - if x.tag not in d: - d[x.tag] = [] - d[x.tag].append(self._dictify(x, False)) - return d diff --git a/server_addon/flame/client/ayon_flame/api/menu.py b/server_addon/flame/client/ayon_flame/api/menu.py deleted file mode 100644 index 83d75d18d3..0000000000 --- a/server_addon/flame/client/ayon_flame/api/menu.py +++ /dev/null @@ -1,256 +0,0 @@ -from copy import deepcopy -from pprint import pformat - -from qtpy import QtWidgets - -from ayon_core.pipeline import get_current_project_name -from ayon_core.tools.utils.host_tools import HostToolsHelper - -menu_group_name = 'OpenPype' - -default_flame_export_presets = { - 'Publish': { - 'PresetVisibility': 2, - 'PresetType': 0, - 'PresetFile': 'OpenEXR/OpenEXR (16-bit fp PIZ).xml' - }, - 'Preview': { - 'PresetVisibility': 3, - 'PresetType': 2, - 'PresetFile': 'Generate Preview.xml' - }, - 'Thumbnail': { - 'PresetVisibility': 3, - 'PresetType': 0, - 'PresetFile': 'Generate Thumbnail.xml' - } -} - - -def callback_selection(selection, function): - import ayon_flame.api as opfapi - opfapi.CTX.selection = selection - print("Hook Selection: \n\t{}".format( - pformat({ - index: (type(item), item.name) - for index, item in enumerate(opfapi.CTX.selection)}) - )) - function() - - -class _FlameMenuApp(object): - def __init__(self, framework): - self.name = self.__class__.__name__ - self.framework = framework - self.log = framework.log - self.menu_group_name = menu_group_name - self.dynamic_menu_data = {} - - # flame module is only available when a - # flame project is loaded and initialized - self.flame = None - try: - import flame - self.flame = flame - except ImportError: - self.flame = None - - self.flame_project_name = flame.project.current_project.name - self.prefs = self.framework.prefs_dict(self.framework.prefs, self.name) - self.prefs_user = self.framework.prefs_dict( - self.framework.prefs_user, self.name) - self.prefs_global = self.framework.prefs_dict( - self.framework.prefs_global, self.name) - - self.mbox = QtWidgets.QMessageBox() - project_name = get_current_project_name() - self.menu 
= { - "actions": [{ - 'name': project_name or "project", - 'isEnabled': False - }], - "name": self.menu_group_name - } - self.tools_helper = HostToolsHelper() - - def __getattr__(self, name): - def method(*args, **kwargs): - print('calling %s' % name) - return method - - def rescan(self, *args, **kwargs): - if not self.flame: - try: - import flame - self.flame = flame - except ImportError: - self.flame = None - - if self.flame: - self.flame.execute_shortcut('Rescan Python Hooks') - self.log.info('Rescan Python Hooks') - - -class FlameMenuProjectConnect(_FlameMenuApp): - - # flameMenuProjectconnect app takes care of the preferences dialog as well - - def __init__(self, framework): - _FlameMenuApp.__init__(self, framework) - - def __getattr__(self, name): - def method(*args, **kwargs): - project = self.dynamic_menu_data.get(name) - if project: - self.link_project(project) - return method - - def build_menu(self): - if not self.flame: - return [] - - menu = deepcopy(self.menu) - - menu['actions'].append({ - "name": "Workfiles...", - "execute": lambda x: self.tools_helper.show_workfiles() - }) - menu['actions'].append({ - "name": "Load...", - "execute": lambda x: self.tools_helper.show_loader() - }) - menu['actions'].append({ - "name": "Manage...", - "execute": lambda x: self.tools_helper.show_scene_inventory() - }) - menu['actions'].append({ - "name": "Library...", - "execute": lambda x: self.tools_helper.show_library_loader() - }) - return menu - - def refresh(self, *args, **kwargs): - self.rescan() - - def rescan(self, *args, **kwargs): - if not self.flame: - try: - import flame - self.flame = flame - except ImportError: - self.flame = None - - if self.flame: - self.flame.execute_shortcut('Rescan Python Hooks') - self.log.info('Rescan Python Hooks') - - -class FlameMenuTimeline(_FlameMenuApp): - - # flameMenuProjectconnect app takes care of the preferences dialog as well - - def __init__(self, framework): - _FlameMenuApp.__init__(self, framework) - - def __getattr__(self, name): - def method(*args, **kwargs): - project = self.dynamic_menu_data.get(name) - if project: - self.link_project(project) - return method - - def build_menu(self): - if not self.flame: - return [] - - menu = deepcopy(self.menu) - - menu['actions'].append({ - "name": "Create...", - "execute": lambda x: callback_selection( - x, self.tools_helper.show_creator) - }) - menu['actions'].append({ - "name": "Publish...", - "execute": lambda x: callback_selection( - x, self.tools_helper.show_publish) - }) - menu['actions'].append({ - "name": "Load...", - "execute": lambda x: self.tools_helper.show_loader() - }) - menu['actions'].append({ - "name": "Manage...", - "execute": lambda x: self.tools_helper.show_scene_inventory() - }) - menu['actions'].append({ - "name": "Library...", - "execute": lambda x: self.tools_helper.show_library_loader() - }) - return menu - - def refresh(self, *args, **kwargs): - self.rescan() - - def rescan(self, *args, **kwargs): - if not self.flame: - try: - import flame - self.flame = flame - except ImportError: - self.flame = None - - if self.flame: - self.flame.execute_shortcut('Rescan Python Hooks') - self.log.info('Rescan Python Hooks') - - -class FlameMenuUniversal(_FlameMenuApp): - - # flameMenuProjectconnect app takes care of the preferences dialog as well - - def __init__(self, framework): - _FlameMenuApp.__init__(self, framework) - - def __getattr__(self, name): - def method(*args, **kwargs): - project = self.dynamic_menu_data.get(name) - if project: - self.link_project(project) - return method - 
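The build_menu() implementations in these menu classes all return the same
plain-dict structure that Flame's Python hooks consume. A minimal, runnable
sketch of that contract (entry names and the print call are illustrative;
the real actions dispatch to HostToolsHelper):

menu = {
    "name": "OpenPype",
    "actions": [
        # disabled header entry showing the current project name
        {"name": "my_project", "isEnabled": False},
        # Flame calls "execute" with the current selection
        {"name": "Load...", "execute": lambda selection: print("show loader")},
    ],
}

# simulate how a menu action would be triggered
for action in menu["actions"]:
    if "execute" in action:
        action["execute"](None)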
- def build_menu(self): - if not self.flame: - return [] - - menu = deepcopy(self.menu) - - menu['actions'].append({ - "name": "Load...", - "execute": lambda x: callback_selection( - x, self.tools_helper.show_loader) - }) - menu['actions'].append({ - "name": "Manage...", - "execute": lambda x: self.tools_helper.show_scene_inventory() - }) - menu['actions'].append({ - "name": "Library...", - "execute": lambda x: self.tools_helper.show_library_loader() - }) - return menu - - def refresh(self, *args, **kwargs): - self.rescan() - - def rescan(self, *args, **kwargs): - if not self.flame: - try: - import flame - self.flame = flame - except ImportError: - self.flame = None - - if self.flame: - self.flame.execute_shortcut('Rescan Python Hooks') - self.log.info('Rescan Python Hooks') diff --git a/server_addon/flame/client/ayon_flame/api/pipeline.py b/server_addon/flame/client/ayon_flame/api/pipeline.py deleted file mode 100644 index 121b925920..0000000000 --- a/server_addon/flame/client/ayon_flame/api/pipeline.py +++ /dev/null @@ -1,174 +0,0 @@ -""" -Basic avalon integration -""" -import os -import contextlib -from pyblish import api as pyblish - -from ayon_core.lib import Logger -from ayon_core.pipeline import ( - register_loader_plugin_path, - register_creator_plugin_path, - deregister_loader_plugin_path, - deregister_creator_plugin_path, - AVALON_CONTAINER_ID, -) -from ayon_flame import FLAME_ADDON_ROOT -from .lib import ( - set_segment_data_marker, - set_publish_attribute, - maintained_segment_selection, - get_current_sequence, - reset_segment_selection -) - -PLUGINS_DIR = os.path.join(FLAME_ADDON_ROOT, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") - -AVALON_CONTAINERS = "AVALON_CONTAINERS" - -log = Logger.get_logger(__name__) - - -def install(): - pyblish.register_host("flame") - pyblish.register_plugin_path(PUBLISH_PATH) - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) - log.info("AYON Flame plug-ins registered ...") - - # register callback for switching publishable - pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled) - - log.info("AYON Flame host installed ...") - - -def uninstall(): - pyblish.deregister_host("flame") - - log.info("Deregistering Flame plug-ins..") - pyblish.deregister_plugin_path(PUBLISH_PATH) - deregister_loader_plugin_path(LOAD_PATH) - deregister_creator_plugin_path(CREATE_PATH) - - # register callback for switching publishable - pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled) - - log.info("AYON Flame host uninstalled ...") - - -def containerise(flame_clip_segment, - name, - namespace, - context, - loader=None, - data=None): - - data_imprint = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": str(name), - "namespace": str(namespace), - "loader": str(loader), - "representation": context["representation"]["id"], - } - - if data: - for k, v in data.items(): - data_imprint[k] = v - - log.debug("_ data_imprint: {}".format(data_imprint)) - - set_segment_data_marker(flame_clip_segment, data_imprint) - - return True - - -def ls(): - """List available containers. - """ - return [] - - -def parse_container(tl_segment, validate=True): - """Return container data from timeline_item's openpype tag. - """ - # TODO: parse_container - pass - - -def update_container(tl_segment, data=None): - """Update container data to input timeline_item's openpype tag. 
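    A minimal sketch of what an implementation could do once written
    (assuming the marker helpers imported from .lib above; this is not
    the final API):

        data = data or {}
        set_segment_data_marker(tl_segment, data)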
- """ - # TODO: update_container - pass - - -def on_pyblish_instance_toggled(instance, old_value, new_value): - """Toggle node passthrough states on instance toggles.""" - - log.info("instance toggle: {}, old_value: {}, new_value:{} ".format( - instance, old_value, new_value)) - - # # Whether instances should be passthrough based on new value - # timeline_item = instance.data["item"] - # set_publish_attribute(timeline_item, new_value) - - -def remove_instance(instance): - """Remove instance marker from track item.""" - # TODO: remove_instance - pass - - -def list_instances(): - """List all created instances from current workfile.""" - # TODO: list_instances - pass - - -def imprint(segment, data=None): - """ - Adding openpype data to Flame timeline segment. - - Also including publish attribute into tag. - - Arguments: - segment (flame.PySegment)): flame api object - data (dict): Any data which needst to be imprinted - - Examples: - data = { - 'asset': 'sq020sh0280', - 'productType': 'render', - 'productName': 'productMain' - } - """ - data = data or {} - - set_segment_data_marker(segment, data) - - # add publish attribute - set_publish_attribute(segment, True) - - -@contextlib.contextmanager -def maintained_selection(): - import flame - from .lib import CTX - - # check if segment is selected - if isinstance(CTX.selection[0], flame.PySegment): - sequence = get_current_sequence(CTX.selection) - - try: - with maintained_segment_selection(sequence) as selected: - yield - finally: - # reset all selected clips - reset_segment_selection(sequence) - # select only original selection of segments - for segment in selected: - segment.selected = True diff --git a/server_addon/flame/client/ayon_flame/api/plugin.py b/server_addon/flame/client/ayon_flame/api/plugin.py deleted file mode 100644 index e656f33052..0000000000 --- a/server_addon/flame/client/ayon_flame/api/plugin.py +++ /dev/null @@ -1,1089 +0,0 @@ -import os -import re -import shutil -from copy import deepcopy -from xml.etree import ElementTree as ET - -import qargparse -from qtpy import QtCore, QtWidgets - -from ayon_core import style -from ayon_core.lib import Logger, StringTemplate -from ayon_core.pipeline import LegacyCreator, LoaderPlugin -from ayon_core.pipeline.colorspace import get_remapped_colorspace_to_native -from ayon_core.settings import get_current_project_settings - -from . import constants -from . import lib as flib -from . 
import pipeline as fpipeline - -log = Logger.get_logger(__name__) - - -class CreatorWidget(QtWidgets.QDialog): - - # output items - items = dict() - _results_back = None - - def __init__(self, name, info, ui_inputs, parent=None): - super(CreatorWidget, self).__init__(parent) - - self.setObjectName(name) - - self.setWindowFlags( - QtCore.Qt.Window - | QtCore.Qt.CustomizeWindowHint - | QtCore.Qt.WindowTitleHint - | QtCore.Qt.WindowCloseButtonHint - | QtCore.Qt.WindowStaysOnTopHint - ) - self.setWindowTitle(name or "AYON Creator Input") - self.resize(500, 700) - - # Where inputs and labels are set - self.content_widget = [QtWidgets.QWidget(self)] - top_layout = QtWidgets.QFormLayout(self.content_widget[0]) - top_layout.setObjectName("ContentLayout") - top_layout.addWidget(Spacer(5, self)) - - # first add widget tag line - top_layout.addWidget(QtWidgets.QLabel(info)) - - # main dynamic layout - self.scroll_area = QtWidgets.QScrollArea(self, widgetResizable=True) - self.scroll_area.setVerticalScrollBarPolicy( - QtCore.Qt.ScrollBarAsNeeded) - self.scroll_area.setVerticalScrollBarPolicy( - QtCore.Qt.ScrollBarAlwaysOn) - self.scroll_area.setHorizontalScrollBarPolicy( - QtCore.Qt.ScrollBarAlwaysOff) - self.scroll_area.setWidgetResizable(True) - - self.content_widget.append(self.scroll_area) - - scroll_widget = QtWidgets.QWidget(self) - in_scroll_area = QtWidgets.QVBoxLayout(scroll_widget) - self.content_layout = [in_scroll_area] - - # add preset data into input widget layout - self.items = self.populate_widgets(ui_inputs) - self.scroll_area.setWidget(scroll_widget) - - # Confirmation buttons - btns_widget = QtWidgets.QWidget(self) - btns_layout = QtWidgets.QHBoxLayout(btns_widget) - - cancel_btn = QtWidgets.QPushButton("Cancel") - btns_layout.addWidget(cancel_btn) - - ok_btn = QtWidgets.QPushButton("Ok") - btns_layout.addWidget(ok_btn) - - # Main layout of the dialog - main_layout = QtWidgets.QVBoxLayout(self) - main_layout.setContentsMargins(10, 10, 10, 10) - main_layout.setSpacing(0) - - # adding content widget - for w in self.content_widget: - main_layout.addWidget(w) - - main_layout.addWidget(btns_widget) - - ok_btn.clicked.connect(self._on_ok_clicked) - cancel_btn.clicked.connect(self._on_cancel_clicked) - - self.setStyleSheet(style.load_stylesheet()) - - @classmethod - def set_results_back(cls, value): - cls._results_back = value - - @classmethod - def get_results_back(cls): - return cls._results_back - - def _on_ok_clicked(self): - log.debug("ok is clicked: {}".format(self.items)) - results_back = self._values(self.items) - self.set_results_back(results_back) - self.close() - - def _on_cancel_clicked(self): - self.set_results_back(None) - self.close() - - def showEvent(self, event): - self.set_results_back(None) - super(CreatorWidget, self).showEvent(event) - - def _values(self, data, new_data=None): - new_data = new_data or dict() - for k, v in data.items(): - new_data[k] = { - "target": None, - "value": None - } - if v["type"] == "dict": - new_data[k]["target"] = v["target"] - new_data[k]["value"] = self._values(v["value"]) - if v["type"] == "section": - new_data.pop(k) - new_data = self._values(v["value"], new_data) - elif getattr(v["value"], "currentText", None): - new_data[k]["target"] = v["target"] - new_data[k]["value"] = v["value"].currentText() - elif getattr(v["value"], "isChecked", None): - new_data[k]["target"] = v["target"] - new_data[k]["value"] = v["value"].isChecked() - elif getattr(v["value"], "value", None): - new_data[k]["target"] = v["target"] - new_data[k]["value"] = 
v["value"].value() - elif getattr(v["value"], "text", None): - new_data[k]["target"] = v["target"] - new_data[k]["value"] = v["value"].text() - - return new_data - - def camel_case_split(self, text): - matches = re.finditer( - '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', text) - return " ".join([str(m.group(0)).capitalize() for m in matches]) - - def create_row(self, layout, type_name, text, **kwargs): - # get type attribute from qwidgets - attr = getattr(QtWidgets, type_name) - - # convert label text to normal capitalized text with spaces - label_text = self.camel_case_split(text) - - # assign the new text to label widget - label = QtWidgets.QLabel(label_text) - label.setObjectName("LineLabel") - - # create attribute name text strip of spaces - attr_name = text.replace(" ", "") - - # create attribute and assign default values - setattr( - self, - attr_name, - attr(parent=self)) - - # assign the created attribute to variable - item = getattr(self, attr_name) - for func, val in kwargs.items(): - if getattr(item, func): - func_attr = getattr(item, func) - func_attr(val) - - # add to layout - layout.addRow(label, item) - - return item - - def populate_widgets(self, data, content_layout=None): - """ - Populate widget from input dict. - - Each plugin has its own set of widget rows defined in dictionary - each row values should have following keys: `type`, `target`, - `label`, `order`, `value` and optionally also `toolTip`. - - Args: - data (dict): widget rows or organized groups defined - by types `dict` or `section` - content_layout (QtWidgets.QFormLayout)[optional]: used when nesting - - Returns: - dict: redefined data dict updated with created widgets - - """ - - content_layout = content_layout or self.content_layout[-1] - # fix order of process by defined order value - ordered_keys = list(data.keys()) - for k, v in data.items(): - try: - # try removing a key from index which should - # be filled with new - ordered_keys.pop(v["order"]) - except IndexError: - pass - # add key into correct order - ordered_keys.insert(v["order"], k) - - # process ordered - for k in ordered_keys: - v = data[k] - tool_tip = v.get("toolTip", "") - if v["type"] == "dict": - self.content_layout.append(QtWidgets.QWidget(self)) - content_layout.addWidget(self.content_layout[-1]) - self.content_layout[-1].setObjectName("sectionHeadline") - - headline = QtWidgets.QVBoxLayout(self.content_layout[-1]) - headline.addWidget(Spacer(20, self)) - headline.addWidget(QtWidgets.QLabel(v["label"])) - - # adding nested layout with label - self.content_layout.append(QtWidgets.QWidget(self)) - self.content_layout[-1].setObjectName("sectionContent") - - nested_content_layout = QtWidgets.QFormLayout( - self.content_layout[-1]) - nested_content_layout.setObjectName("NestedContentLayout") - content_layout.addWidget(self.content_layout[-1]) - - # add nested key as label - data[k]["value"] = self.populate_widgets( - v["value"], nested_content_layout) - - if v["type"] == "section": - self.content_layout.append(QtWidgets.QWidget(self)) - content_layout.addWidget(self.content_layout[-1]) - self.content_layout[-1].setObjectName("sectionHeadline") - - headline = QtWidgets.QVBoxLayout(self.content_layout[-1]) - headline.addWidget(Spacer(20, self)) - headline.addWidget(QtWidgets.QLabel(v["label"])) - - # adding nested layout with label - self.content_layout.append(QtWidgets.QWidget(self)) - self.content_layout[-1].setObjectName("sectionContent") - - nested_content_layout = QtWidgets.QFormLayout( - self.content_layout[-1]) - 
nested_content_layout.setObjectName("NestedContentLayout") - content_layout.addWidget(self.content_layout[-1]) - - # add nested key as label - data[k]["value"] = self.populate_widgets( - v["value"], nested_content_layout) - - elif v["type"] == "QLineEdit": - data[k]["value"] = self.create_row( - content_layout, "QLineEdit", v["label"], - setText=v["value"], setToolTip=tool_tip) - elif v["type"] == "QComboBox": - data[k]["value"] = self.create_row( - content_layout, "QComboBox", v["label"], - addItems=v["value"], setToolTip=tool_tip) - elif v["type"] == "QCheckBox": - data[k]["value"] = self.create_row( - content_layout, "QCheckBox", v["label"], - setChecked=v["value"], setToolTip=tool_tip) - elif v["type"] == "QSpinBox": - data[k]["value"] = self.create_row( - content_layout, "QSpinBox", v["label"], - setValue=v["value"], setMinimum=0, - setMaximum=100000, setToolTip=tool_tip) - return data - - -class Spacer(QtWidgets.QWidget): - def __init__(self, height, *args, **kwargs): - super(self.__class__, self).__init__(*args, **kwargs) - - self.setFixedHeight(height) - - real_spacer = QtWidgets.QWidget(self) - real_spacer.setObjectName("Spacer") - real_spacer.setFixedHeight(height) - - layout = QtWidgets.QVBoxLayout(self) - layout.setContentsMargins(0, 0, 0, 0) - layout.addWidget(real_spacer) - - self.setLayout(layout) - - -class Creator(LegacyCreator): - """Creator class wrapper - """ - clip_color = constants.COLOR_MAP["purple"] - rename_index = None - - def __init__(self, *args, **kwargs): - super(Creator, self).__init__(*args, **kwargs) - self.presets = get_current_project_settings()[ - "flame"]["create"].get(self.__class__.__name__, {}) - - # adding basic current context flame objects - self.project = flib.get_current_project() - self.sequence = flib.get_current_sequence(flib.CTX.selection) - - if (self.options or {}).get("useSelection"): - self.selected = flib.get_sequence_segments(self.sequence, True) - else: - self.selected = flib.get_sequence_segments(self.sequence) - - def create_widget(self, *args, **kwargs): - widget = CreatorWidget(*args, **kwargs) - widget.exec_() - return widget.get_results_back() - - -class PublishableClip: - """ - Convert a segment to publishable instance - - Args: - segment (flame.PySegment): flame api object - kwargs (optional): additional data needed for rename=True (presets) - - Returns: - flame.PySegment: flame api object - """ - vertical_clip_match = {} - marker_data = {} - types = { - "shot": "shot", - "folder": "folder", - "episode": "episode", - "sequence": "sequence", - "track": "sequence", - } - - # parents search pattern - parents_search_pattern = r"\{([a-z]*?)\}" - - # default templates for non-ui use - rename_default = False - hierarchy_default = "{_folder_}/{_sequence_}/{_track_}" - clip_name_default = "shot_{_trackIndex_:0>3}_{_clipIndex_:0>4}" - review_track_default = "[ none ]" - base_product_name_default = "[ track name ]" - base_product_type_default = "plate" - count_from_default = 10 - count_steps_default = 10 - vertical_sync_default = False - driving_layer_default = "" - index_from_segment_default = False - use_shot_name_default = False - include_handles_default = False - retimed_handles_default = True - retimed_framerange_default = True - - def __init__(self, segment, **kwargs): - self.rename_index = kwargs["rename_index"] - self.product_type = kwargs["family"] - self.log = kwargs["log"] - - # get main parent objects - self.current_segment = segment - sequence_name = flib.get_current_sequence([segment]).name.get_value() - self.sequence_name 
= str(sequence_name).replace(" ", "_") - - self.clip_data = flib.get_segment_attributes(segment) - # segment (clip) main attributes - self.cs_name = self.clip_data["segment_name"] - self.cs_index = int(self.clip_data["segment"]) - self.shot_name = self.clip_data["shot_name"] - - # get track name and index - self.track_index = int(self.clip_data["track"]) - track_name = self.clip_data["track_name"] - self.track_name = str(track_name).replace(" ", "_").replace( - "*", "noname{}".format(self.track_index)) - - # adding tag.family into tag - if kwargs.get("avalon"): - self.marker_data.update(kwargs["avalon"]) - - # add publish attribute to marker data - self.marker_data.update({"publish": True}) - - # adding ui inputs if any - self.ui_inputs = kwargs.get("ui_inputs", {}) - - self.log.info("Inside of plugin: {}".format( - self.marker_data - )) - # populate default data before we get other attributes - self._populate_segment_default_data() - - # use all populated default data to create all important attributes - self._populate_attributes() - - # create parents with correct types - self._create_parents() - - def convert(self): - - # solve segment data and add them to marker data - self._convert_to_marker_data() - - # if track name is in review track name and also if driving track name - # is not in review track name: skip tag creation - if (self.track_name in self.review_layer) and ( - self.driving_layer not in self.review_layer): - return - - # deal with clip name - new_name = self.marker_data.pop("newClipName") - - if self.rename and not self.use_shot_name: - # rename segment - self.current_segment.name = str(new_name) - self.marker_data["asset"] = str(new_name) - elif self.use_shot_name: - self.marker_data["asset"] = self.shot_name - self.marker_data["hierarchyData"]["shot"] = self.shot_name - else: - self.marker_data["asset"] = self.cs_name - self.marker_data["hierarchyData"]["shot"] = self.cs_name - - if self.marker_data["heroTrack"] and self.review_layer: - self.marker_data["reviewTrack"] = self.review_layer - else: - self.marker_data["reviewTrack"] = None - - # create pype tag on track_item and add data - fpipeline.imprint(self.current_segment, self.marker_data) - - return self.current_segment - - def _populate_segment_default_data(self): - """ Populate default formatting data from segment. """ - - self.current_segment_default_data = { - "_folder_": "shots", - "_sequence_": self.sequence_name, - "_track_": self.track_name, - "_clip_": self.cs_name, - "_trackIndex_": self.track_index, - "_clipIndex_": self.cs_index - } - - def _populate_attributes(self): - """ Populate main object attributes. 
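To make the formatting keys concrete, here is how the default segment data resolves the class templates (illustrative values only, standalone snippet):

```python
# Illustrative only: default hierarchy/clip-name templates resolved
# against the per-segment data built in _populate_segment_default_data().
default_data = {
    "_folder_": "shots",
    "_sequence_": "sq001",
    "_track_": "main",
    "_clip_": "sh010",
    "_trackIndex_": 1,
    "_clipIndex_": 4,
}
print("{_folder_}/{_sequence_}/{_track_}".format(**default_data))
# shots/sq001/main
print("shot_{_trackIndex_:0>3}_{_clipIndex_:0>4}".format(**default_data))
# shot_001_0004
```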
""" - # segment frame range and parent track name for vertical sync check - self.clip_in = int(self.clip_data["record_in"]) - self.clip_out = int(self.clip_data["record_out"]) - - # define ui inputs if non gui mode was used - self.shot_num = self.cs_index - self.log.debug( - "____ self.shot_num: {}".format(self.shot_num)) - - # ui_inputs data or default values if gui was not used - self.rename = self.ui_inputs.get( - "clipRename", {}).get("value") or self.rename_default - self.use_shot_name = self.ui_inputs.get( - "useShotName", {}).get("value") or self.use_shot_name_default - self.clip_name = self.ui_inputs.get( - "clipName", {}).get("value") or self.clip_name_default - self.hierarchy = self.ui_inputs.get( - "hierarchy", {}).get("value") or self.hierarchy_default - self.hierarchy_data = self.ui_inputs.get( - "hierarchyData", {}).get("value") or \ - self.current_segment_default_data.copy() - self.index_from_segment = self.ui_inputs.get( - "segmentIndex", {}).get("value") or self.index_from_segment_default - self.count_from = self.ui_inputs.get( - "countFrom", {}).get("value") or self.count_from_default - self.count_steps = self.ui_inputs.get( - "countSteps", {}).get("value") or self.count_steps_default - self.base_product_name = self.ui_inputs.get( - "productName", {}).get("value") or self.base_product_name_default - self.base_product_type = self.ui_inputs.get( - "productType", {}).get("value") or self.base_product_type_default - self.vertical_sync = self.ui_inputs.get( - "vSyncOn", {}).get("value") or self.vertical_sync_default - self.driving_layer = self.ui_inputs.get( - "vSyncTrack", {}).get("value") or self.driving_layer_default - self.review_track = self.ui_inputs.get( - "reviewTrack", {}).get("value") or self.review_track_default - self.audio = self.ui_inputs.get( - "audio", {}).get("value") or False - self.include_handles = self.ui_inputs.get( - "includeHandles", {}).get("value") or self.include_handles_default - self.retimed_handles = ( - self.ui_inputs.get("retimedHandles", {}).get("value") - or self.retimed_handles_default - ) - self.retimed_framerange = ( - self.ui_inputs.get("retimedFramerange", {}).get("value") - or self.retimed_framerange_default - ) - - # build product name from layer name - if self.base_product_name == "[ track name ]": - self.base_product_name = self.track_name - - # create product for publishing - self.product_name = ( - self.base_product_type + self.base_product_name.capitalize() - ) - - def _replace_hash_to_expression(self, name, text): - """ Replace hash with number in correct padding. """ - _spl = text.split("#") - _len = (len(_spl) - 1) - _repl = "{{{0}:0>{1}}}".format(name, _len) - return text.replace(("#" * _len), _repl) - - def _convert_to_marker_data(self): - """ Convert internal data to marker data. 
- - Populating the marker data into internal variable self.marker_data - """ - # define vertical sync attributes - hero_track = True - self.review_layer = "" - if self.vertical_sync and self.track_name not in self.driving_layer: - # if it is not then define vertical sync as None - hero_track = False - - # increasing steps by index of rename iteration - if not self.index_from_segment: - self.count_steps *= self.rename_index - - hierarchy_formatting_data = {} - hierarchy_data = deepcopy(self.hierarchy_data) - _data = self.current_segment_default_data.copy() - if self.ui_inputs: - # adding tag metadata from ui - for _k, _v in self.ui_inputs.items(): - if _v["target"] == "tag": - self.marker_data[_k] = _v["value"] - - # driving layer is set as positive match - if hero_track or self.vertical_sync: - # mark review layer - if self.review_track and ( - self.review_track not in self.review_track_default): - # if review layer is defined and not the same as default - self.review_layer = self.review_track - - # shot num calculate - if self.index_from_segment: - # use clip index from timeline - self.shot_num = self.count_steps * self.cs_index - else: - if self.rename_index == 0: - self.shot_num = self.count_from - else: - self.shot_num = self.count_from + self.count_steps - - # clip name sequence number - _data.update({"shot": self.shot_num}) - - # solve # in test to pythonic expression - for _k, _v in hierarchy_data.items(): - if "#" not in _v["value"]: - continue - hierarchy_data[ - _k]["value"] = self._replace_hash_to_expression( - _k, _v["value"]) - - # fill up pythonic expresisons in hierarchy data - for k, _v in hierarchy_data.items(): - hierarchy_formatting_data[k] = _v["value"].format(**_data) - else: - # if no gui mode then just pass default data - hierarchy_formatting_data = hierarchy_data - - tag_hierarchy_data = self._solve_tag_hierarchy_data( - hierarchy_formatting_data - ) - - tag_hierarchy_data.update({"heroTrack": True}) - if hero_track and self.vertical_sync: - self.vertical_clip_match.update({ - (self.clip_in, self.clip_out): tag_hierarchy_data - }) - - if not hero_track and self.vertical_sync: - # driving layer is set as negative match - for (_in, _out), hero_data in self.vertical_clip_match.items(): - """ - Since only one instance of hero clip is expected in - `self.vertical_clip_match`, this will loop only once - until none hero clip will be matched with hero clip. - - `tag_hierarchy_data` will be set only once for every - clip which is not hero clip. - """ - _hero_data = deepcopy(hero_data) - _hero_data.update({"heroTrack": False}) - if _in <= self.clip_in and _out >= self.clip_out: - data_product_name = hero_data["productName"] - # add track index in case duplicity of names in hero data - if self.product_name in data_product_name: - _hero_data["productName"] = self.product_name + str( - self.track_index) - # in case track name and product name is the same then add - if self.base_product_name == self.track_name: - _hero_data["productName"] = self.product_name - # assign data to return hierarchy data to tag - tag_hierarchy_data = _hero_data - break - - # add data to return data dict - self.marker_data.update(tag_hierarchy_data) - - def _solve_tag_hierarchy_data(self, hierarchy_formatting_data): - """ Solve marker data from hierarchy data and templates. 
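The vertical-sync matching above boils down to a record-range containment test; a simplified illustration with made-up frame numbers:

```python
# Simplified illustration of the vertical-sync lookup: a non-hero
# segment inherits the hero data when its record range is contained
# in the hero segment's range (frame numbers invented).
vertical_clip_match = {
    (1001, 1050): {"productName": "plateMain", "heroTrack": True},
}
clip_in, clip_out = 1010, 1040

for (_in, _out), hero_data in vertical_clip_match.items():
    if _in <= clip_in and _out >= clip_out:
        matched = dict(hero_data, heroTrack=False)
        print(matched)  # {'productName': 'plateMain', 'heroTrack': False}
        break
```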
""" - # fill up clip name and hierarchy keys - hierarchy_filled = self.hierarchy.format(**hierarchy_formatting_data) - clip_name_filled = self.clip_name.format(**hierarchy_formatting_data) - - # remove shot from hierarchy data: is not needed anymore - hierarchy_formatting_data.pop("shot") - - return { - "newClipName": clip_name_filled, - "hierarchy": hierarchy_filled, - "parents": self.parents, - "hierarchyData": hierarchy_formatting_data, - "productName": self.product_name, - "productType": self.base_product_type, - "families": [self.base_product_type, self.product_type] - } - - def _convert_to_entity(self, src_type, template): - """ Converting input key to key with type. """ - # convert to entity type - folder_type = self.types.get(src_type, None) - - assert folder_type, "Missing folder type for `{}`".format( - src_type - ) - - # first collect formatting data to use for formatting template - formatting_data = {} - for _k, _v in self.hierarchy_data.items(): - value = _v["value"].format( - **self.current_segment_default_data) - formatting_data[_k] = value - - return { - "folder_type": folder_type, - "entity_name": template.format( - **formatting_data - ) - } - - def _create_parents(self): - """ Create parents and return it in list. """ - self.parents = [] - - pattern = re.compile(self.parents_search_pattern) - - par_split = [(pattern.findall(t).pop(), t) - for t in self.hierarchy.split("/")] - - for type, template in par_split: - parent = self._convert_to_entity(type, template) - self.parents.append(parent) - - -# Publishing plugin functions - -# Loader plugin functions -class ClipLoader(LoaderPlugin): - """A basic clip loader for Flame - - This will implement the basic behavior for a loader to inherit from that - will containerize the reference and will implement the `remove` and - `update` logic. - - """ - log = log - - options = [ - qargparse.Boolean( - "handles", - label="Set handles", - default=0, - help="Also set handles to clip as In/Out marks" - ) - ] - - _mapping = None - _host_settings = None - - def apply_settings(cls, project_settings): - - plugin_type_settings = ( - project_settings - .get("flame", {}) - .get("load", {}) - ) - - if not plugin_type_settings: - return - - plugin_name = cls.__name__ - - plugin_settings = None - # Look for plugin settings in host specific settings - if plugin_name in plugin_type_settings: - plugin_settings = plugin_type_settings[plugin_name] - - if not plugin_settings: - return - - print(">>> We have preset for {}".format(plugin_name)) - for option, value in plugin_settings.items(): - if option == "enabled" and value is False: - print(" - is disabled by preset") - elif option == "representations": - continue - else: - print(" - setting `{}`: `{}`".format(option, value)) - setattr(cls, option, value) - - def get_colorspace(self, context): - """Get colorspace name - - Look either to version data or representation data. - - Args: - context (dict): version context data - - Returns: - str: colorspace name or None - """ - version_entity = context["version"] - version_attributes = version_entity["attrib"] - colorspace = version_attributes.get("colorSpace") - - if ( - not colorspace - or colorspace == "Unknown" - ): - colorspace = context["representation"]["data"].get( - "colorspace") - - return colorspace - - @classmethod - def get_native_colorspace(cls, input_colorspace): - """Return native colorspace name. 
- - Args: - input_colorspace (str | None): colorspace name - - Returns: - str: native colorspace name defined in mapping or None - """ - # TODO: rewrite to support only pipeline's remapping - if not cls._host_settings: - cls._host_settings = get_current_project_settings()["flame"] - - # [Deprecated] way of remapping - if not cls._mapping: - mapping = ( - cls._host_settings["imageio"]["profilesMapping"]["inputs"]) - cls._mapping = { - input["ocioName"]: input["flameName"] - for input in mapping - } - - native_name = cls._mapping.get(input_colorspace) - - if not native_name: - native_name = get_remapped_colorspace_to_native( - input_colorspace, "flame", cls._host_settings["imageio"]) - - return native_name - - -class OpenClipSolver(flib.MediaInfoFile): - create_new_clip = False - - log = log - - def __init__(self, openclip_file_path, feed_data, logger=None): - self.out_file = openclip_file_path - - # replace log if any - if logger: - self.log = logger - - # new feed variables: - feed_path = feed_data.pop("path") - - # initialize parent class - super(OpenClipSolver, self).__init__( - feed_path, - logger=logger - ) - - # get other metadata - self.feed_version_name = feed_data["version"] - self.feed_colorspace = feed_data.get("colorspace") - self.log.debug("feed_version_name: {}".format(self.feed_version_name)) - - # layer rename variables - self.layer_rename_template = feed_data["layer_rename_template"] - self.layer_rename_patterns = feed_data["layer_rename_patterns"] - self.context_data = feed_data["context_data"] - - # derivate other feed variables - self.feed_basename = os.path.basename(feed_path) - self.feed_dir = os.path.dirname(feed_path) - self.feed_ext = os.path.splitext(self.feed_basename)[1][1:].lower() - self.log.debug("feed_ext: {}".format(self.feed_ext)) - self.log.debug("out_file: {}".format(self.out_file)) - if not self._is_valid_tmp_file(self.out_file): - self.create_new_clip = True - - def _is_valid_tmp_file(self, file): - # check if file exists - if os.path.isfile(file): - # test also if file is not empty - with open(file) as f: - lines = f.readlines() - - if len(lines) > 2: - return True - - # file is probably corrupted - os.remove(file) - return False - - def make(self): - - if self.create_new_clip: - # New openClip - self._create_new_open_clip() - else: - self._update_open_clip() - - def _clear_handler(self, xml_object): - for handler in xml_object.findall("./handler"): - self.log.info("Handler found") - xml_object.remove(handler) - - def _create_new_open_clip(self): - self.log.info("Building new openClip") - - for tmp_xml_track in self.clip_data.iter("track"): - # solve track (layer) name - self._rename_track_name(tmp_xml_track) - - tmp_xml_feeds = tmp_xml_track.find('feeds') - tmp_xml_feeds.set('currentVersion', self.feed_version_name) - - for tmp_feed in tmp_xml_track.iter("feed"): - tmp_feed.set('vuid', self.feed_version_name) - - # add colorspace if any is set - if self.feed_colorspace: - self._add_colorspace(tmp_feed, self.feed_colorspace) - - self._clear_handler(tmp_feed) - - tmp_xml_versions_obj = self.clip_data.find('versions') - tmp_xml_versions_obj.set('currentVersion', self.feed_version_name) - for xml_new_version in tmp_xml_versions_obj: - xml_new_version.set('uid', self.feed_version_name) - xml_new_version.set('type', 'version') - - self._clear_handler(self.clip_data) - self.log.info("Adding feed version: {}".format(self.feed_basename)) - - self.write_clip_data_to_file(self.out_file, self.clip_data) - - def _get_xml_track_obj_by_uid(self, xml_data, uid): - # 
loop all tracks of input xml data - for xml_track in xml_data.iter("track"): - track_uid = xml_track.get("uid") - self.log.debug( - ">> track_uid:uid: {}:{}".format(track_uid, uid)) - - # get matching uids - if uid == track_uid: - return xml_track - - def _rename_track_name(self, xml_track_data): - layer_uid = xml_track_data.get("uid") - name_obj = xml_track_data.find("name") - layer_name = name_obj.text - - if ( - self.layer_rename_patterns - and not any( - re.search(lp_.lower(), layer_name.lower()) - for lp_ in self.layer_rename_patterns - ) - ): - return - - formatting_data = self._update_formatting_data( - layerName=layer_name, - layerUID=layer_uid - ) - name_obj.text = StringTemplate( - self.layer_rename_template - ).format(formatting_data) - - def _update_formatting_data(self, **kwargs): - """ Updating formatting data for layer rename - - Attributes: - key=value (optional): will be included to formatting data - as {key: value} - Returns: - dict: anatomy context data for formatting - """ - self.log.debug(">> self.clip_data: {}".format(self.clip_data)) - clip_name_obj = self.clip_data.find("name") - data = { - "originalBasename": clip_name_obj.text - } - # include version context data - data.update(self.context_data) - # include input kwargs data - data.update(kwargs) - return data - - def _update_open_clip(self): - self.log.info("Updating openClip ..") - - out_xml = ET.parse(self.out_file) - out_xml = out_xml.getroot() - - self.log.debug(">> out_xml: {}".format(out_xml)) - # loop tmp tracks - updated_any = False - for tmp_xml_track in self.clip_data.iter("track"): - # solve track (layer) name - self._rename_track_name(tmp_xml_track) - - # get tmp track uid - tmp_track_uid = tmp_xml_track.get("uid") - self.log.debug(">> tmp_track_uid: {}".format(tmp_track_uid)) - - # get out data track by uid - out_track_element = self._get_xml_track_obj_by_uid( - out_xml, tmp_track_uid) - self.log.debug( - ">> out_track_element: {}".format(out_track_element)) - - # loop tmp feeds - for tmp_xml_feed in tmp_xml_track.iter("feed"): - new_path_obj = tmp_xml_feed.find( - "spans/span/path") - new_path = new_path_obj.text - - # check if feed path already exists in track's feeds - if ( - out_track_element is not None - and self._feed_exists(out_track_element, new_path) - ): - continue - - # rename versions on feeds - tmp_xml_feed.set('vuid', self.feed_version_name) - self._clear_handler(tmp_xml_feed) - - # update fps from MediaInfoFile class - if self.fps is not None: - tmp_feed_fps_obj = tmp_xml_feed.find( - "startTimecode/rate") - tmp_feed_fps_obj.text = str(self.fps) - - # update start_frame from MediaInfoFile class - if self.start_frame is not None: - tmp_feed_nb_ticks_obj = tmp_xml_feed.find( - "startTimecode/nbTicks") - tmp_feed_nb_ticks_obj.text = str(self.start_frame) - - # update drop_mode from MediaInfoFile class - if self.drop_mode is not None: - tmp_feed_drop_mode_obj = tmp_xml_feed.find( - "startTimecode/dropMode") - tmp_feed_drop_mode_obj.text = str(self.drop_mode) - - # add colorspace if any is set - if self.feed_colorspace is not None: - self._add_colorspace(tmp_xml_feed, self.feed_colorspace) - - # then append/update feed to correct track in output - if out_track_element: - self.log.debug("updating track element ..") - # update already present track - out_feeds = out_track_element.find('feeds') - out_feeds.set('currentVersion', self.feed_version_name) - out_feeds.append(tmp_xml_feed) - - self.log.info( - "Appending new feed: {}".format( - self.feed_version_name)) - else: - 
self.log.debug("adding new track element ..") - # create new track as it doesn't exist yet - # set current version to feeds on tmp - tmp_xml_feeds = tmp_xml_track.find('feeds') - tmp_xml_feeds.set('currentVersion', self.feed_version_name) - out_tracks = out_xml.find("tracks") - out_tracks.append(tmp_xml_track) - - updated_any = True - - if updated_any: - # Append vUID to versions - out_xml_versions_obj = out_xml.find('versions') - out_xml_versions_obj.set( - 'currentVersion', self.feed_version_name) - new_version_obj = ET.Element( - "version", {"type": "version", "uid": self.feed_version_name}) - out_xml_versions_obj.insert(0, new_version_obj) - - self._clear_handler(out_xml) - - # fist create backup - self._create_openclip_backup_file(self.out_file) - - self.log.info("Adding feed version: {}".format( - self.feed_version_name)) - - self.write_clip_data_to_file(self.out_file, out_xml) - - self.log.debug("OpenClip Updated: {}".format(self.out_file)) - - def _feed_exists(self, xml_data, path): - # loop all available feed paths and check if - # the path is not already in file - for src_path in xml_data.iter('path'): - if path == src_path.text: - self.log.warning( - "Not appending file as it already is in .clip file") - return True - - def _create_openclip_backup_file(self, file): - bck_file = "{}.bak".format(file) - # if backup does not exist - if not os.path.isfile(bck_file): - shutil.copy2(file, bck_file) - else: - # in case it exists and is already multiplied - created = False - for _i in range(1, 99): - bck_file = "{name}.bak.{idx:0>2}".format( - name=file, - idx=_i) - # create numbered backup file - if not os.path.isfile(bck_file): - shutil.copy2(file, bck_file) - created = True - break - # in case numbered does not exists - if not created: - bck_file = "{}.bak.last".format(file) - shutil.copy2(file, bck_file) - - def _add_colorspace(self, feed_obj, profile_name): - feed_storage_obj = feed_obj.find("storageFormat") - feed_clr_obj = feed_storage_obj.find("colourSpace") - if feed_clr_obj is not None: - feed_clr_obj = ET.Element( - "colourSpace", {"type": "string"}) - feed_clr_obj.text = profile_name - feed_storage_obj.append(feed_clr_obj) diff --git a/server_addon/flame/client/ayon_flame/api/render_utils.py b/server_addon/flame/client/ayon_flame/api/render_utils.py deleted file mode 100644 index a0c77cb155..0000000000 --- a/server_addon/flame/client/ayon_flame/api/render_utils.py +++ /dev/null @@ -1,185 +0,0 @@ -import os -from xml.etree import ElementTree as ET -from ayon_core.lib import Logger - -log = Logger.get_logger(__name__) - - -def export_clip(export_path, clip, preset_path, **kwargs): - """Flame exported wrapper - - Args: - export_path (str): exporting directory path - clip (PyClip): flame api object - preset_path (str): full export path to xml file - - Kwargs: - thumb_frame_number (int)[optional]: source frame number - in_mark (int)[optional]: cut in mark - out_mark (int)[optional]: cut out mark - - Raises: - KeyError: Missing input kwarg `thumb_frame_number` - in case `thumbnail` in `export_preset` - FileExistsError: Missing export preset in shared folder - """ - import flame - - in_mark = out_mark = None - - # Set exporter - exporter = flame.PyExporter() - exporter.foreground = True - exporter.export_between_marks = True - - if kwargs.get("thumb_frame_number"): - thumb_frame_number = kwargs["thumb_frame_number"] - # make sure it exists in kwargs - if not thumb_frame_number: - raise KeyError( - "Missing key `thumb_frame_number` in input kwargs") - - in_mark = 
int(thumb_frame_number) - out_mark = int(thumb_frame_number) + 1 - - elif kwargs.get("in_mark") and kwargs.get("out_mark"): - in_mark = int(kwargs["in_mark"]) - out_mark = int(kwargs["out_mark"]) - else: - exporter.export_between_marks = False - - try: - # set in and out marks if they are available - if in_mark and out_mark: - clip.in_mark = in_mark - clip.out_mark = out_mark - - # export with exporter - exporter.export(clip, preset_path, export_path) - finally: - print('Exported: {} at {}-{}'.format( - clip.name.get_value(), - clip.in_mark, - clip.out_mark - )) - - -def get_preset_path_by_xml_name(xml_preset_name): - def _search_path(root): - output = [] - for root, _dirs, files in os.walk(root): - for f in files: - if f != xml_preset_name: - continue - file_path = os.path.join(root, f) - output.append(file_path) - return output - - def _validate_results(results): - if results and len(results) == 1: - return results.pop() - elif results and len(results) > 1: - print(( - "More matching presets for `{}`: /n" - "{}").format(xml_preset_name, results)) - return results.pop() - else: - return None - - from .utils import ( - get_flame_install_root, - get_flame_version - ) - - # get actual flame version and install path - _version = get_flame_version()["full"] - _install_root = get_flame_install_root() - - # search path templates - shared_search_root = "{install_root}/shared/export/presets" - install_search_root = ( - "{install_root}/presets/{version}/export/presets/flame") - - # fill templates - shared_search_root = shared_search_root.format( - install_root=_install_root - ) - install_search_root = install_search_root.format( - install_root=_install_root, - version=_version - ) - - # get search results - shared_results = _search_path(shared_search_root) - installed_results = _search_path(install_search_root) - - # first try to return shared results - shared_preset_path = _validate_results(shared_results) - - if shared_preset_path: - return os.path.dirname(shared_preset_path) - - # then try installed results - installed_preset_path = _validate_results(installed_results) - - if installed_preset_path: - return os.path.dirname(installed_preset_path) - - # if nothing found then return False - return False - - -def modify_preset_file(xml_path, staging_dir, data): - """Modify xml preset with input data - - Args: - xml_path (str ): path for input xml preset - staging_dir (str): staging dir path - data (dict): data where key is xmlTag and value as string - - Returns: - str: _description_ - """ - # create temp path - dirname, basename = os.path.split(xml_path) - temp_path = os.path.join(staging_dir, basename) - - # change xml following data keys - with open(xml_path, "r") as datafile: - _root = ET.parse(datafile) - - for key, value in data.items(): - try: - if "/" in key: - if not key.startswith("./"): - key = ".//" + key - - split_key_path = key.split("/") - element_key = split_key_path[-1] - parent_obj_path = "/".join(split_key_path[:-1]) - - parent_obj = _root.find(parent_obj_path) - element_obj = parent_obj.find(element_key) - if not element_obj: - append_element(parent_obj, element_key, value) - else: - finds = _root.findall(".//{}".format(key)) - if not finds: - raise AttributeError - for element in finds: - element.text = str(value) - except AttributeError: - log.warning( - "Cannot create attribute: {}: {}. 
Skipping".format( - key, value - )) - _root.write(temp_path) - - return temp_path - - -def append_element(root_element_obj, key, value): - new_element_obj = ET.Element(key) - log.debug("__ new_element_obj: {}".format(new_element_obj)) - new_element_obj.text = str(value) - root_element_obj.insert(0, new_element_obj) diff --git a/server_addon/flame/client/ayon_flame/api/scripts/wiretap_com.py b/server_addon/flame/client/ayon_flame/api/scripts/wiretap_com.py deleted file mode 100644 index 42b9257cbe..0000000000 --- a/server_addon/flame/client/ayon_flame/api/scripts/wiretap_com.py +++ /dev/null @@ -1,504 +0,0 @@ -#!/usr/bin/env python2.7 -# -*- coding: utf-8 -*- - -from __future__ import absolute_import -import os -import sys -import subprocess -import json -import xml.dom.minidom as minidom -from copy import deepcopy -import datetime -from libwiretapPythonClientAPI import ( # noqa - WireTapClientInit, - WireTapClientUninit, - WireTapNodeHandle, - WireTapServerHandle, - WireTapInt, - WireTapStr -) - - -class WireTapCom(object): - """ - Comunicator class wrapper for talking to WireTap db. - - This way we are able to set new project with settings and - correct colorspace policy. Also we are able to create new user - or get actual user with similar name (users are usually cloning - their profiles and adding date stamp into suffix). - """ - - def __init__(self, host_name=None, volume_name=None, group_name=None): - """Initialisation of WireTap communication class - - Args: - host_name (str, optional): Name of host server. Defaults to None. - volume_name (str, optional): Name of volume. Defaults to None. - group_name (str, optional): Name of user group. Defaults to None. - """ - # set main attributes of server - # if there are none set the default installation - self.host_name = host_name or "localhost" - self.volume_name = volume_name or "stonefs" - self.group_name = group_name or "staff" - - # wiretap tools dir path - self.wiretap_tools_dir = os.getenv("AYON_WIRETAP_TOOLS") - - # initialize WireTap client - WireTapClientInit() - - # add the server to shared variable - self._server = WireTapServerHandle("{}:IFFFS".format(self.host_name)) - print("WireTap connected at '{}'...".format( - self.host_name)) - - def close(self): - self._server = None - WireTapClientUninit() - print("WireTap closed...") - - def get_launch_args( - self, project_name, project_data, user_name, *args, **kwargs): - """Forming launch arguments for AYON launcher. 
-
-        Args:
-            project_name (str): name of project
-            project_data (dict): Flame compatible project data
-            user_name (str): name of user
-
-        Returns:
-            list: arguments
-        """
-
-        workspace_name = kwargs.get("workspace_name")
-        color_policy = kwargs.get("color_policy")
-
-        project_exists = self._project_prep(project_name)
-        if not project_exists:
-            self._set_project_settings(project_name, project_data)
-            self._set_project_colorspace(project_name, color_policy)
-
-        user_name = self._user_prep(user_name)
-
-        if workspace_name is None:
-            # default workspace
-            print("Using a default workspace")
-            return [
-                "--start-project={}".format(project_name),
-                "--start-user={}".format(user_name),
-                "--create-workspace"
-            ]
-
-        else:
-            print(
-                "Using a custom workspace '{}'".format(workspace_name))
-
-            self._workspace_prep(project_name, workspace_name)
-            return [
-                "--start-project={}".format(project_name),
-                "--start-user={}".format(user_name),
-                "--create-workspace",
-                "--start-workspace={}".format(workspace_name)
-            ]
-
-    def _workspace_prep(self, project_name, workspace_name):
-        """Preparing a workspace
-
-        In case it does not exist it will be created
-
-        Args:
-            project_name (str): project name
-            workspace_name (str): workspace name
-
-        Raises:
-            AttributeError: unable to create workspace
-        """
-        workspace_exists = self._child_is_in_parent_path(
-            "/projects/{}".format(project_name), workspace_name, "WORKSPACE"
-        )
-        if not workspace_exists:
-            project = WireTapNodeHandle(
-                self._server, "/projects/{}".format(project_name))
-
-            workspace_node = WireTapNodeHandle()
-            created_workspace = project.createNode(
-                workspace_name, "WORKSPACE", workspace_node)
-
-            if not created_workspace:
-                raise AttributeError(
-                    "Cannot create workspace `{}` in "
-                    "project `{}`: `{}`".format(
                        workspace_name, project_name, project.lastError())
-                )
-
-            print(
-                "Workspace `{}` is successfully created".format(
-                    workspace_name))
-
-    def _project_prep(self, project_name):
-        """Preparing a project
-
-        In case it does not exist it will be created
-
-        Args:
-            project_name (str): project name
-
-        Raises:
-            AttributeError: unable to create project
-        """
-        # test if project exists
-        project_exists = self._child_is_in_parent_path(
-            "/projects", project_name, "PROJECT")
-
-        if not project_exists:
-            volumes = self._get_all_volumes()
-
-            if len(volumes) == 0:
-                raise AttributeError(
-                    "Not able to create new project. No Volumes existing"
-                )
-
-            # check if volume exists
-            if self.volume_name not in volumes:
-                raise AttributeError(
-                    ("Volume '{}' does not exist in '{}'").format(
-                        self.volume_name, volumes)
-                )
-
-            # form cmd arguments
-            project_create_cmd = [
-                os.path.join(
-                    self.wiretap_tools_dir,
-                    "wiretap_create_node"
-                ),
-                '-n',
-                os.path.join("/volumes", self.volume_name),
-                '-d',
-                project_name,
-                '-g',
-            ]
-            project_create_cmd.append(self.group_name)
-
-            print(project_create_cmd)
-
-            exit_code = subprocess.call(
-                project_create_cmd,
-                cwd=os.path.expanduser('~'),
-                preexec_fn=_subprocess_preexec_fn
-            )
-
-            if exit_code != 0:
-                # raise so a failed project creation does not pass silently
-                raise RuntimeError("Cannot create project in flame db")
-
-            print(
-                "A new project '{}' is created.".format(project_name))
-        return project_exists
-
-    def _get_all_volumes(self):
-        """Request all available volumes from WireTap
-
-        Returns:
-            list: all available volumes in server
-
-        Raises:
-            AttributeError: unable to get any volumes children from server
-        """
-        root = WireTapNodeHandle(self._server, "/volumes")
-        children_num = WireTapInt(0)
-
-        get_children_num = root.getNumChildren(children_num)
-        if not get_children_num:
-            raise AttributeError(
-                "Cannot get number of volumes: {}".format(root.lastError())
-            )
-
-        volumes = []
-
-        # go through all children and get volume names
-        child_obj = WireTapNodeHandle()
-        for child_idx in range(children_num):
-
-            # get a child
-            if not root.getChild(child_idx, child_obj):
-                raise AttributeError(
-                    "Unable to get child: {}".format(root.lastError()))
-
-            node_name = WireTapStr()
-            get_children_name = child_obj.getDisplayName(node_name)
-
-            if not get_children_name:
-                raise AttributeError(
-                    "Unable to get child name: {}".format(
-                        child_obj.lastError())
-                )
-
-            volumes.append(node_name.c_str())
-
-        return volumes
-
-    def _user_prep(self, user_name):
-        """Ensure the user exists in the users stack
-
-        Args:
-            user_name (str): name of a user
-
-        Raises:
-            AttributeError: unable to create user
-        """
-
-        # get all used usernames in db
-        used_names = self._get_usernames()
-        print(">> used_names: {}".format(used_names))
-
-        # filter only those which are sharing input user name
-        filtered_users = [user for user in used_names if user_name in user]
-
-        if filtered_users:
-            # TODO: need to find lastly created following regex pattern for
-            # date used in name
-            return filtered_users.pop()
-
-        # create new user name with date in suffix
-        now = datetime.datetime.now()  # current date and time
-        date = now.strftime("%Y%m%d")
-        new_user_name = "{}_{}".format(user_name, date)
-        print(new_user_name)
-
-        if not self._child_is_in_parent_path("/users", new_user_name, "USER"):
-            # Create the new user
-            users = WireTapNodeHandle(self._server, "/users")
-
-            user_node = WireTapNodeHandle()
-            created_user = users.createNode(new_user_name, "USER", user_node)
-            if not created_user:
-                raise AttributeError(
-                    "User {} cannot be created: {}".format(
-                        new_user_name, users.lastError())
-                )
-
-            print("User `{}` is created".format(new_user_name))
-        return new_user_name
-
-    def _get_usernames(self):
-        """Request all available users from WireTap
-
-        Returns:
-            list: all available user names
-
-        Raises:
-            AttributeError: there are no users in server
-        """
-        root = WireTapNodeHandle(self._server, "/users")
-        children_num = WireTapInt(0)
-
-        get_children_num = root.getNumChildren(children_num)
-        if not get_children_num:
-            raise AttributeError(
-                "Cannot get number of users: {}".format(root.lastError())
-            )
-
-        usernames = []
-
-        # go through all children and get 
user names
-        child_obj = WireTapNodeHandle()
-        for child_idx in range(children_num):
-
-            # get a child
-            if not root.getChild(child_idx, child_obj):
-                raise AttributeError(
-                    "Unable to get child: {}".format(root.lastError()))
-
-            node_name = WireTapStr()
-            get_children_name = child_obj.getDisplayName(node_name)
-
-            if not get_children_name:
-                raise AttributeError(
-                    "Unable to get child name: {}".format(
-                        child_obj.lastError())
-                )
-
-            usernames.append(node_name.c_str())
-
-        return usernames
-
-    def _child_is_in_parent_path(self, parent_path, child_name, child_type):
-        """Checking if a given child is in parent path.
-
-        Args:
-            parent_path (str): db path to parent
-            child_name (str): name of child
-            child_type (str): type of child
-
-        Raises:
-            AttributeError: Not able to get number of children
-            AttributeError: Not able to get children from parent
-            AttributeError: Not able to get children name
-            AttributeError: Not able to get children type
-
-        Returns:
-            bool: True if child is in parent path
-        """
-        parent = WireTapNodeHandle(self._server, parent_path)
-
-        # iterate number of children
-        children_num = WireTapInt(0)
-        requested = parent.getNumChildren(children_num)
-        if not requested:
-            raise AttributeError((
-                "Error: Cannot request number of "
-                "children from the node {}. Make sure your "
-                "wiretap service is running: {}").format(
-                    parent_path, parent.lastError())
-            )
-
-        # iterate children
-        child_obj = WireTapNodeHandle()
-        for child_idx in range(children_num):
-            if not parent.getChild(child_idx, child_obj):
-                raise AttributeError(
-                    "Cannot get child: {}".format(
-                        parent.lastError()))
-
-            node_name = WireTapStr()
-            node_type = WireTapStr()
-
-            if not child_obj.getDisplayName(node_name):
-                raise AttributeError(
-                    "Unable to get child name: %s" % child_obj.lastError()
-                )
-            if not child_obj.getNodeTypeStr(node_type):
-                raise AttributeError(
-                    "Unable to obtain child type: %s" % child_obj.lastError()
-                )
-
-            if (node_name.c_str() == child_name) and (
-                    node_type.c_str() == child_type):
-                return True
-
-        return False
-
-    def _set_project_settings(self, project_name, project_data):
-        """Setting project attributes.
-
-        Args:
-            project_name (str): name of project
-            project_data (dict): data with project attributes
-                (flame compatible)
-
-        Raises:
-            AttributeError: Not able to set project attributes
-        """
-        # generate xml from project_data dict
-        _xml = "<Project>"
-        for key, value in project_data.items():
-            _xml += "<{}>{}</{}>".format(key, value, key)
-        _xml += "</Project>"
-
-        pretty_xml = minidom.parseString(_xml).toprettyxml()
-        print("__ xml: {}".format(pretty_xml))
-
-        # set project data to wiretap
-        project_node = WireTapNodeHandle(
-            self._server, "/projects/{}".format(project_name))
-
-        if not project_node.setMetaData("XML", _xml):
-            raise AttributeError(
-                "Not able to set project attributes {}. Error: {}".format(
-                    project_name, project_node.lastError())
-            )
-
-        print("Project settings successfully set.")
-
-    def _set_project_colorspace(self, project_name, color_policy):
-        """Set project's colorspace policy.
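With the tag delimiters restored above, the payload `_set_project_settings` builds for Wiretap looks like this (key/value pairs invented for the example):

```python
# Illustrative: the <Project> XML built by _set_project_settings().
import xml.dom.minidom as minidom

project_data = {"Name": "my_project", "FrameWidth": 1920}
_xml = "<Project>"
for key, value in project_data.items():
    _xml += "<{}>{}</{}>".format(key, value, key)
_xml += "</Project>"
print(minidom.parseString(_xml).toprettyxml())
# prints the pretty-printed <Project> document
```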
-
-        Args:
-            project_name (str): name of project
-            color_policy (str): name of policy
-
-        Raises:
-            RuntimeError: Not able to set colorspace policy
-        """
-        color_policy = color_policy or "Legacy"
-
-        # check if the colour policy is in a custom dir
-        if "/" in color_policy:
-            # if a full path was passed, strip the prefix so it is
-            # not duplicated
-            color_policy = color_policy.replace("/syncolor/policies/", "")
-            # expecting input is `Shared/NameOfPolicy`
-            color_policy = "/syncolor/policies/{}".format(
-                color_policy)
-        else:
-            color_policy = "/syncolor/policies/Autodesk/{}".format(
-                color_policy)
-
-        # create arguments
-        project_colorspace_cmd = [
-            os.path.join(
-                self.wiretap_tools_dir,
-                "wiretap_duplicate_node"
-            ),
-            "-s",
-            color_policy,
-            "-n",
-            "/projects/{}/syncolor".format(project_name)
-        ]
-
-        print(project_colorspace_cmd)
-
-        exit_code = subprocess.call(
-            project_colorspace_cmd,
-            cwd=os.path.expanduser('~'),
-            preexec_fn=_subprocess_preexec_fn
-        )
-
-        if exit_code != 0:
-            # raise so a failed policy copy does not pass silently
-            raise RuntimeError("Cannot set colorspace {} on project {}".format(
-                color_policy, project_name
-            ))
-
-
-def _subprocess_preexec_fn():
-    """ Helper function
-
-    Setting permission mask to 0777
-    """
-    os.setpgrp()
-    os.umask(0o000)
-
-
-if __name__ == "__main__":
-    # get json exchange data
-    json_path = sys.argv[-1]
-    json_data = open(json_path).read()
-    in_data = json.loads(json_data)
-    out_data = deepcopy(in_data)
-
-    # get main server attributes
-    host_name = in_data.pop("host_name")
-    volume_name = in_data.pop("volume_name")
-    group_name = in_data.pop("group_name")
-
-    # initialize class
-    wiretap_handler = WireTapCom(host_name, volume_name, group_name)
-
-    try:
-        app_args = wiretap_handler.get_launch_args(
-            project_name=in_data.pop("project_name"),
-            project_data=in_data.pop("project_data"),
-            user_name=in_data.pop("user_name"),
-            **in_data
-        )
-    finally:
-        wiretap_handler.close()
-
-    # set returned args back to out data
-    out_data.update({
-        "app_args": app_args
-    })
-
-    # write it out back to the exchange json file
-    with open(json_path, "w") as file_stream:
-        json.dump(out_data, file_stream, indent=4)
diff --git a/server_addon/flame/client/ayon_flame/api/utils.py b/server_addon/flame/client/ayon_flame/api/utils.py
deleted file mode 100644
index 03a694c25c..0000000000
--- a/server_addon/flame/client/ayon_flame/api/utils.py
+++ /dev/null
@@ -1,143 +0,0 @@
-"""
-Flame utils for syncing scripts
-"""
-
-import os
-import shutil
-from ayon_core.lib import Logger
-from ayon_flame import FLAME_ADDON_ROOT
-
-log = Logger.get_logger(__name__)
-
-
-def _sync_utility_scripts(env=None):
-    """ Synchronizing basic utility scripts for flame.
-
-    To be able to start AYON within Flame we have to copy
-    all utility_scripts and additional FLAME_SCRIPT_DIRS dirs into
-    `/opt/Autodesk/shared/python`. Those folders are always
-    synchronized.
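The policy-path normalization in `_set_project_colorspace` above, restated as a standalone function to show the two branches:

```python
# Standalone restatement of the policy path resolution in
# _set_project_colorspace() above.
def resolve_color_policy(color_policy):
    color_policy = color_policy or "Legacy"
    if "/" in color_policy:
        # custom policy, e.g. "Shared/NameOfPolicy"; strip any full
        # prefix the caller may have passed so it is not duplicated
        color_policy = color_policy.replace("/syncolor/policies/", "")
        return "/syncolor/policies/{}".format(color_policy)
    return "/syncolor/policies/Autodesk/{}".format(color_policy)

print(resolve_color_policy(None))               # .../Autodesk/Legacy
print(resolve_color_policy("Shared/MyPolicy"))  # .../Shared/MyPolicy
```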
- """ - - env = env or os.environ - - # initiate inputs - scripts = {} - fsd_env = env.get("FLAME_SCRIPT_DIRS", "") - flame_shared_dir = "/opt/Autodesk/shared/python" - - fsd_paths = [os.path.join( - FLAME_ADDON_ROOT, - "api", - "utility_scripts" - )] - - # collect script dirs - log.info("FLAME_SCRIPT_DIRS: `{fsd_env}`".format(**locals())) - log.info("fsd_paths: `{fsd_paths}`".format(**locals())) - - # add application environment setting for FLAME_SCRIPT_DIR - # to script path search - for _dirpath in fsd_env.split(os.pathsep): - if not os.path.isdir(_dirpath): - log.warning("Path is not a valid dir: `{_dirpath}`".format( - **locals())) - continue - fsd_paths.append(_dirpath) - - # collect scripts from dirs - for path in fsd_paths: - scripts.update({path: os.listdir(path)}) - - remove_black_list = [] - for _k, s_list in scripts.items(): - remove_black_list += s_list - - log.info("remove_black_list: `{remove_black_list}`".format(**locals())) - log.info("Additional Flame script paths: `{fsd_paths}`".format(**locals())) - log.info("Flame Scripts: `{scripts}`".format(**locals())) - - # make sure no script file is in folder - if next(iter(os.listdir(flame_shared_dir)), None): - for _itm in os.listdir(flame_shared_dir): - skip = False - - # skip all scripts and folders which are not maintained - if _itm not in remove_black_list: - skip = True - - # do not skip if pyc in extension - if not os.path.isdir(_itm) and "pyc" in os.path.splitext(_itm)[-1]: - skip = False - - # continue if skip in true - if skip: - continue - - path = os.path.join(flame_shared_dir, _itm) - log.info("Removing `{path}`...".format(**locals())) - - try: - if os.path.isdir(path): - shutil.rmtree(path, onerror=None) - else: - os.remove(path) - except PermissionError as msg: - log.warning( - "Not able to remove: `{}`, Problem with: `{}`".format( - path, - msg - ) - ) - - # copy scripts into Resolve's utility scripts dir - for dirpath, scriptlist in scripts.items(): - # directory and scripts list - for _script in scriptlist: - # script in script list - src = os.path.join(dirpath, _script) - dst = os.path.join(flame_shared_dir, _script) - log.info("Copying `{src}` to `{dst}`...".format(**locals())) - - try: - if os.path.isdir(src): - shutil.copytree( - src, dst, symlinks=False, - ignore=None, ignore_dangling_symlinks=False - ) - else: - shutil.copy2(src, dst) - except (PermissionError, FileExistsError) as msg: - log.warning( - "Not able to copy to: `{}`, Problem with: `{}`".format( - dst, - msg - ) - ) - - -def setup(env=None): - """ Wrapper installer started from - `flame/hooks/pre_flame_setup.py` - """ - env = env or os.environ - - # synchronize resolve utility scripts - _sync_utility_scripts(env) - - log.info("Flame AYON wrapper has been installed") - - -def get_flame_version(): - import flame - - return { - "full": flame.get_version(), - "major": flame.get_version_major(), - "minor": flame.get_version_minor(), - "patch": flame.get_version_patch() - } - - -def get_flame_install_root(): - return "/opt/Autodesk" diff --git a/server_addon/flame/client/ayon_flame/api/workio.py b/server_addon/flame/client/ayon_flame/api/workio.py deleted file mode 100644 index eef10a4847..0000000000 --- a/server_addon/flame/client/ayon_flame/api/workio.py +++ /dev/null @@ -1,37 +0,0 @@ -"""Host API required Work Files tool""" - -import os -from ayon_core.lib import Logger -# from .. 
import ( -# get_project_manager, -# get_current_project -# ) - - -log = Logger.get_logger(__name__) - -exported_projet_ext = ".otoc" - - -def file_extensions(): - return [exported_projet_ext] - - -def has_unsaved_changes(): - pass - - -def save_file(filepath): - pass - - -def open_file(filepath): - pass - - -def current_file(): - pass - - -def work_root(session): - return os.path.normpath(session["AYON_WORKDIR"]).replace("\\", "/") diff --git a/server_addon/flame/client/ayon_flame/hooks/pre_flame_setup.py b/server_addon/flame/client/ayon_flame/hooks/pre_flame_setup.py deleted file mode 100644 index e9e9aca3f4..0000000000 --- a/server_addon/flame/client/ayon_flame/hooks/pre_flame_setup.py +++ /dev/null @@ -1,239 +0,0 @@ -import os -import json -import tempfile -import contextlib -import socket -from pprint import pformat - -from ayon_core.lib import ( - get_ayon_username, - run_subprocess, -) -from ayon_applications import PreLaunchHook, LaunchTypes -from ayon_flame import FLAME_ADDON_ROOT - - -class FlamePrelaunch(PreLaunchHook): - """ Flame prelaunch hook - - Will make sure flame_script_dirs are copied to user's folder defined - in environment var FLAME_SCRIPT_DIR. - """ - app_groups = {"flame"} - permissions = 0o777 - - wtc_script_path = os.path.join( - FLAME_ADDON_ROOT, "api", "scripts", "wiretap_com.py" - ) - launch_types = {LaunchTypes.local} - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.signature = "( {} )".format(self.__class__.__name__) - - def execute(self): - _env = self.launch_context.env - self.flame_python_exe = _env["AYON_FLAME_PYTHON_EXEC"] - self.flame_pythonpath = _env["AYON_FLAME_PYTHONPATH"] - - """Hook entry method.""" - project_entity = self.data["project_entity"] - project_name = project_entity["name"] - volume_name = _env.get("FLAME_WIRETAP_VOLUME") - - # get image io - project_settings = self.data["project_settings"] - - imageio_flame = project_settings["flame"]["imageio"] - - # Check whether 'enabled' key from host imageio settings exists - # so we can tell if host is using the new colormanagement framework. 
- # If the 'enabled' isn't found we want 'colormanaged' set to True - # because prior to the key existing we always did colormanagement for - # Flame - colormanaged = imageio_flame.get("enabled") - # if key was not found, set to True - # ensuring backward compatibility - if colormanaged is None: - colormanaged = True - - # get user name and host name - user_name = get_ayon_username() - user_name = user_name.replace(".", "_") - - hostname = socket.gethostname() # not returning wiretap host name - - self.log.debug("Collected user \"{}\"".format(user_name)) - self.log.info(pformat(project_entity)) - project_attribs = project_entity["attrib"] - width = project_attribs["resolutionWidth"] - height = project_attribs["resolutionHeight"] - fps = float(project_attribs["fps"]) - - project_data = { - "Name": project_entity["name"], - "Nickname": project_entity["code"], - "Description": "Created by AYON", - "SetupDir": project_entity["name"], - "FrameWidth": int(width), - "FrameHeight": int(height), - "AspectRatio": float( - (width / height) * project_attribs["pixelAspect"] - ), - "FrameRate": self._get_flame_fps(fps) - } - - data_to_script = { - # from settings - "host_name": _env.get("FLAME_WIRETAP_HOSTNAME") or hostname, - "volume_name": volume_name, - "group_name": _env.get("FLAME_WIRETAP_GROUP"), - - # from project - "project_name": project_name, - "user_name": user_name, - "project_data": project_data - } - - # add color management data - if colormanaged: - project_data.update({ - "FrameDepth": str(imageio_flame["project"]["frameDepth"]), - "FieldDominance": str( - imageio_flame["project"]["fieldDominance"]) - }) - data_to_script["color_policy"] = str( - imageio_flame["project"]["colourPolicy"]) - - self.log.info(pformat(dict(_env))) - self.log.info(pformat(data_to_script)) - - # add to python path from settings - self._add_pythonpath() - - app_arguments = self._get_launch_arguments(data_to_script) - - # fix project data permission issue - self._fix_permissions(project_name, volume_name) - - self.launch_context.launch_args.extend(app_arguments) - - def _fix_permissions(self, project_name, volume_name): - """Work around for project data permissions - - Reported issue: when project is created locally on one machine, - it is impossible to migrate it to other machine. Autodesk Flame - is crating some unmanagable files which needs to be opened to 0o777. - - Args: - project_name (str): project name - volume_name (str): studio volume - """ - dirs_to_modify = [ - "/usr/discreet/project/{}".format(project_name), - "/opt/Autodesk/clip/{}/{}.prj".format(volume_name, project_name), - "/usr/discreet/clip/{}/{}.prj".format(volume_name, project_name) - ] - - for dirtm in dirs_to_modify: - for root, dirs, files in os.walk(dirtm): - try: - for name in set(dirs) | set(files): - path = os.path.join(root, name) - st = os.stat(path) - if oct(st.st_mode) != self.permissions: - os.chmod(path, self.permissions) - - except OSError as exc: - self.log.warning("Not able to open files: {}".format(exc)) - - def _get_flame_fps(self, fps_num): - fps_table = { - float(23.976): "23.976 fps", - int(25): "25 fps", - int(24): "24 fps", - float(29.97): "29.97 fps DF", - int(30): "30 fps", - int(50): "50 fps", - float(59.94): "59.94 fps DF", - int(60): "60 fps" - } - - match_key = min(fps_table.keys(), key=lambda x: abs(x - fps_num)) - - try: - return fps_table[match_key] - except KeyError as msg: - raise KeyError(( - "Missing FPS key in conversion table. 
-
-    def _add_pythonpath(self):
-        # may be unset in the environment
-        pythonpath = self.launch_context.env.get("PYTHONPATH") or ""
-
-        # split both values on 'os.pathsep', the same separator
-        # the settings use, and merge them
-        new_pythonpath = self.flame_pythonpath.split(os.pathsep)
-        new_pythonpath += pythonpath.split(os.pathsep)
-
-        self.launch_context.env["PYTHONPATH"] = os.pathsep.join(new_pythonpath)
-
-    def _get_launch_arguments(self, script_data):
-        # Dump data to string
-        dumped_script_data = json.dumps(script_data)
-
-        with make_temp_file(dumped_script_data) as tmp_json_path:
-            # Prepare subprocess arguments
-            args = [
-                self.flame_python_exe.format(
-                    **self.launch_context.env
-                ),
-                self.wtc_script_path,
-                tmp_json_path
-            ]
-            self.log.info("Executing: {}".format(" ".join(args)))
-
-            process_kwargs = {
-                "logger": self.log,
-                "env": self.launch_context.env
-            }
-
-            run_subprocess(args, **process_kwargs)
-
-            # process returned json file to pass launch args
-            return_json_data = open(tmp_json_path).read()
-            returned_data = json.loads(return_json_data)
-            app_args = returned_data.get("app_args")
-            self.log.info("____ app_args: `{}`".format(app_args))
-
-            if not app_args:
-                raise RuntimeError("App arguments were not solved")
-
-        return app_args
-
-
-@contextlib.contextmanager
-def make_temp_file(data):
-    temporary_json_filepath = None
-    try:
-        # Store dumped json to temporary file
-        temporary_json_file = tempfile.NamedTemporaryFile(
-            mode="w", suffix=".json", delete=False
-        )
-        temporary_json_file.write(data)
-        temporary_json_file.close()
-        temporary_json_filepath = temporary_json_file.name.replace(
-            "\\", "/"
-        )
-
-        yield temporary_json_filepath
-
-    except IOError as _error:
-        raise IOError(
-            "Not able to create temp json file: {}".format(
                _error
-            )
-        )
-
-    finally:
-        # Remove the temporary json (it may not exist if creation failed)
-        if temporary_json_filepath:
-            os.remove(temporary_json_filepath)
diff --git a/server_addon/flame/client/ayon_flame/otio/__init__.py b/server_addon/flame/client/ayon_flame/otio/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/server_addon/flame/client/ayon_flame/otio/flame_export.py b/server_addon/flame/client/ayon_flame/otio/flame_export.py
deleted file mode 100644
index bebe9be1c1..0000000000
--- a/server_addon/flame/client/ayon_flame/otio/flame_export.py
+++ /dev/null
@@ -1,624 +0,0 @@
-"""Compatibility with OpenTimelineIO 0.12.0 and newer.
-"""
-
-import os
-import re
-import json
-import logging
-import opentimelineio as otio
-from . 
import utils - -import flame -from pprint import pformat - -log = logging.getLogger(__name__) - - -TRACK_TYPES = { - "video": otio.schema.TrackKind.Video, - "audio": otio.schema.TrackKind.Audio -} -MARKERS_COLOR_MAP = { - (1.0, 0.0, 0.0): otio.schema.MarkerColor.RED, - (1.0, 0.5, 0.0): otio.schema.MarkerColor.ORANGE, - (1.0, 1.0, 0.0): otio.schema.MarkerColor.YELLOW, - (1.0, 0.5, 1.0): otio.schema.MarkerColor.PINK, - (1.0, 1.0, 1.0): otio.schema.MarkerColor.WHITE, - (0.0, 1.0, 0.0): otio.schema.MarkerColor.GREEN, - (0.0, 1.0, 1.0): otio.schema.MarkerColor.CYAN, - (0.0, 0.0, 1.0): otio.schema.MarkerColor.BLUE, - (0.5, 0.0, 0.5): otio.schema.MarkerColor.PURPLE, - (0.5, 0.0, 1.0): otio.schema.MarkerColor.MAGENTA, - (0.0, 0.0, 0.0): otio.schema.MarkerColor.BLACK -} -MARKERS_INCLUDE = True - - -class CTX: - _fps = None - _tl_start_frame = None - project = None - clips = None - - @classmethod - def set_fps(cls, new_fps): - if not isinstance(new_fps, float): - raise TypeError("Invalid fps type {}".format(type(new_fps))) - if cls._fps != new_fps: - cls._fps = new_fps - - @classmethod - def get_fps(cls): - return cls._fps - - @classmethod - def set_tl_start_frame(cls, number): - if not isinstance(number, int): - raise TypeError("Invalid timeline start frame type {}".format( - type(number))) - if cls._tl_start_frame != number: - cls._tl_start_frame = number - - @classmethod - def get_tl_start_frame(cls): - return cls._tl_start_frame - - -def flatten(_list): - for item in _list: - if isinstance(item, (list, tuple)): - for sub_item in flatten(item): - yield sub_item - else: - yield item - - -def get_current_flame_project(): - project = flame.project.current_project - return project - - -def create_otio_rational_time(frame, fps): - return otio.opentime.RationalTime( - float(frame), - float(fps) - ) - - -def create_otio_time_range(start_frame, frame_duration, fps): - return otio.opentime.TimeRange( - start_time=create_otio_rational_time(start_frame, fps), - duration=create_otio_rational_time(frame_duration, fps) - ) - - -def _get_metadata(item): - if hasattr(item, 'metadata'): - return dict(item.metadata) if item.metadata else {} - return {} - - -def create_time_effects(otio_clip, speed): - otio_effect = None - - # retime on track item - if speed != 1.: - # make effect - otio_effect = otio.schema.LinearTimeWarp() - otio_effect.name = "Speed" - otio_effect.time_scalar = speed - otio_effect.metadata = {} - - # freeze frame effect - if speed == 0.: - otio_effect = otio.schema.FreezeFrame() - otio_effect.name = "FreezeFrame" - otio_effect.metadata = {} - - if otio_effect: - # add otio effect to clip effects - otio_clip.effects.append(otio_effect) - - -def _get_marker_color(flame_colour): - # clamp colors to closes half numbers - _flame_colour = [ - (lambda x: round(x * 2) / 2)(c) - for c in flame_colour] - - for color, otio_color_type in MARKERS_COLOR_MAP.items(): - if _flame_colour == list(color): - return otio_color_type - - return otio.schema.MarkerColor.RED - - -def _get_flame_markers(item): - output_markers = [] - - time_in = item.record_in.relative_frame - - for marker in item.markers: - log.debug(marker) - start_frame = marker.location.get_value().relative_frame - - start_frame = (start_frame - time_in) + 1 - - marker_data = { - "name": marker.name.get_value(), - "duration": marker.duration.get_value().relative_frame, - "comment": marker.comment.get_value(), - "start_frame": start_frame, - "colour": marker.colour.get_value() - } - - output_markers.append(marker_data) - - return output_markers - - 
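Review note: `_get_marker_color` above clamps each RGB channel to the nearest half step before comparing against `MARKERS_COLOR_MAP`. A condensed sketch of that rounding, assuming plain strings in place of the `otio.schema.MarkerColor` enum values and a direct tuple-keyed dict lookup instead of the original list-comparison loop:

```python
# Half-step clamping: round every channel to the closest multiple of 0.5,
# then look the clamped tuple up in a trimmed color table.
COLOR_MAP = {
    (1.0, 0.0, 0.0): "RED",
    (1.0, 0.5, 0.0): "ORANGE",
    (0.0, 1.0, 0.0): "GREEN",
}


def marker_color(flame_colour, default="RED"):
    clamped = tuple(round(c * 2) / 2 for c in flame_colour)
    return COLOR_MAP.get(clamped, default)


assert marker_color((0.98, 0.02, 0.01)) == "RED"
assert marker_color((0.9, 0.6, 0.1)) == "ORANGE"  # rounds to (1.0, 0.5, 0.0)
assert marker_color((0.2, 0.2, 0.2)) == "RED"     # not in this map -> default
```

Using the clamped tuple as a dict key gives the same result as the original element-by-element list comparison, just in a single O(1) lookup.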
-def create_otio_markers(otio_item, item): - markers = _get_flame_markers(item) - for marker in markers: - frame_rate = CTX.get_fps() - - marked_range = otio.opentime.TimeRange( - start_time=otio.opentime.RationalTime( - marker["start_frame"], - frame_rate - ), - duration=otio.opentime.RationalTime( - marker["duration"], - frame_rate - ) - ) - - # testing the comment if it is not containing json string - check_if_json = re.findall( - re.compile(r"[{:}]"), - marker["comment"] - ) - - # to identify this as json, at least 3 items in the list should - # be present ["{", ":", "}"] - metadata = {} - if len(check_if_json) >= 3: - # this is json string - try: - # capture exceptions which are related to strings only - metadata.update( - json.loads(marker["comment"]) - ) - except ValueError as msg: - log.error("Marker json conversion: {}".format(msg)) - else: - metadata["comment"] = marker["comment"] - - otio_marker = otio.schema.Marker( - name=marker["name"], - color=_get_marker_color( - marker["colour"]), - marked_range=marked_range, - metadata=metadata - ) - - otio_item.markers.append(otio_marker) - - -def create_otio_reference(clip_data, fps=None): - metadata = _get_metadata(clip_data) - duration = int(clip_data["source_duration"]) - - # get file info for path and start frame - frame_start = 0 - fps = fps or CTX.get_fps() - - path = clip_data["fpath"] - - file_name = os.path.basename(path) - file_head, extension = os.path.splitext(file_name) - - # get padding and other file infos - log.debug("_ path: {}".format(path)) - - otio_ex_ref_item = None - - is_sequence = frame_number = utils.get_frame_from_filename(file_name) - if is_sequence: - file_head = file_name.split(frame_number)[:-1] - frame_start = int(frame_number) - padding = len(frame_number) - - metadata.update({ - "isSequence": True, - "padding": padding - }) - - # if it is file sequence try to create `ImageSequenceReference` - # the OTIO might not be compatible so return nothing and do it old way - try: - dirname = os.path.dirname(path) - otio_ex_ref_item = otio.schema.ImageSequenceReference( - target_url_base=dirname + os.sep, - name_prefix=file_head, - name_suffix=extension, - start_frame=frame_start, - frame_zero_padding=padding, - rate=fps, - available_range=create_otio_time_range( - frame_start, - duration, - fps - ) - ) - except AttributeError: - pass - - if not otio_ex_ref_item: - dirname, file_name = os.path.split(path) - file_name = utils.get_reformatted_filename(file_name, padded=False) - reformated_path = os.path.join(dirname, file_name) - # in case old OTIO or video file create `ExternalReference` - otio_ex_ref_item = otio.schema.ExternalReference( - target_url=reformated_path, - available_range=create_otio_time_range( - frame_start, - duration, - fps - ) - ) - - # add metadata to otio item - add_otio_metadata(otio_ex_ref_item, clip_data, **metadata) - - return otio_ex_ref_item - - -def create_otio_clip(clip_data): - from ayon_flame.api import MediaInfoFile, TimeEffectMetadata - - segment = clip_data["PySegment"] - - # calculate source in - media_info = MediaInfoFile(clip_data["fpath"], logger=log) - media_timecode_start = media_info.start_frame - media_fps = media_info.fps - - # Timewarp metadata - tw_data = TimeEffectMetadata(segment, logger=log).data - log.debug("__ tw_data: {}".format(tw_data)) - - # define first frame - file_first_frame = utils.get_frame_from_filename( - clip_data["fpath"]) - if file_first_frame: - file_first_frame = int(file_first_frame) - - first_frame = media_timecode_start or file_first_frame or 0 - - 
_clip_source_in = int(clip_data["source_in"]) - _clip_source_out = int(clip_data["source_out"]) - _clip_record_in = clip_data["record_in"] - _clip_record_out = clip_data["record_out"] - _clip_record_duration = int(clip_data["record_duration"]) - - log.debug("_ file_first_frame: {}".format(file_first_frame)) - log.debug("_ first_frame: {}".format(first_frame)) - log.debug("_ _clip_source_in: {}".format(_clip_source_in)) - log.debug("_ _clip_source_out: {}".format(_clip_source_out)) - log.debug("_ _clip_record_in: {}".format(_clip_record_in)) - log.debug("_ _clip_record_out: {}".format(_clip_record_out)) - - # first solve if the reverse timing - speed = 1 - if clip_data["source_in"] > clip_data["source_out"]: - source_in = _clip_source_out - int(first_frame) - source_out = _clip_source_in - int(first_frame) - speed = -1 - else: - source_in = _clip_source_in - int(first_frame) - source_out = _clip_source_out - int(first_frame) - - log.debug("_ source_in: {}".format(source_in)) - log.debug("_ source_out: {}".format(source_out)) - - if file_first_frame: - log.debug("_ file_source_in: {}".format( - file_first_frame + source_in)) - log.debug("_ file_source_in: {}".format( - file_first_frame + source_out)) - - source_duration = (source_out - source_in + 1) - - # secondly check if any change of speed - if source_duration != _clip_record_duration: - retime_speed = float(source_duration) / float(_clip_record_duration) - log.debug("_ calculated speed: {}".format(retime_speed)) - speed *= retime_speed - - # get speed from metadata if available - if tw_data.get("speed"): - speed = tw_data["speed"] - log.debug("_ metadata speed: {}".format(speed)) - - log.debug("_ speed: {}".format(speed)) - log.debug("_ source_duration: {}".format(source_duration)) - log.debug("_ _clip_record_duration: {}".format(_clip_record_duration)) - - # create media reference - media_reference = create_otio_reference( - clip_data, media_fps) - - # creatae source range - source_range = create_otio_time_range( - source_in, - _clip_record_duration, - CTX.get_fps() - ) - - otio_clip = otio.schema.Clip( - name=clip_data["segment_name"], - source_range=source_range, - media_reference=media_reference - ) - - # Add markers - if MARKERS_INCLUDE: - create_otio_markers(otio_clip, segment) - - if speed != 1: - create_time_effects(otio_clip, speed) - - return otio_clip - - -def create_otio_gap(gap_start, clip_start, tl_start_frame, fps): - return otio.schema.Gap( - source_range=create_otio_time_range( - gap_start, - (clip_start - tl_start_frame) - gap_start, - fps - ) - ) - - -def _get_colourspace_policy(): - - output = {} - # get policies project path - policy_dir = "/opt/Autodesk/project/{}/synColor/policy".format( - CTX.project.name - ) - log.debug(policy_dir) - policy_fp = os.path.join(policy_dir, "policy.cfg") - - if not os.path.exists(policy_fp): - return output - - with open(policy_fp) as file: - dict_conf = dict(line.strip().split(' = ', 1) for line in file) - output.update( - {"openpype.flame.{}".format(k): v for k, v in dict_conf.items()} - ) - return output - - -def _create_otio_timeline(sequence): - - metadata = _get_metadata(sequence) - - # find colour policy files and add them to metadata - colorspace_policy = _get_colourspace_policy() - metadata.update(colorspace_policy) - - metadata.update({ - "openpype.timeline.width": int(sequence.width), - "openpype.timeline.height": int(sequence.height), - "openpype.timeline.pixelAspect": 1 - }) - - rt_start_time = create_otio_rational_time( - CTX.get_tl_start_frame(), CTX.get_fps()) - - 
return otio.schema.Timeline( - name=str(sequence.name)[1:-1], - global_start_time=rt_start_time, - metadata=metadata - ) - - -def create_otio_track(track_type, track_name): - return otio.schema.Track( - name=track_name, - kind=TRACK_TYPES[track_type] - ) - - -def add_otio_gap(clip_data, otio_track, prev_out): - gap_length = clip_data["record_in"] - prev_out - if prev_out != 0: - gap_length -= 1 - - gap = otio.opentime.TimeRange( - duration=otio.opentime.RationalTime( - gap_length, - CTX.get_fps() - ) - ) - otio_gap = otio.schema.Gap(source_range=gap) - otio_track.append(otio_gap) - - -def add_otio_metadata(otio_item, item, **kwargs): - metadata = _get_metadata(item) - - # add additional metadata from kwargs - if kwargs: - metadata.update(kwargs) - - # add metadata to otio item metadata - for key, value in metadata.items(): - otio_item.metadata.update({key: value}) - - -def _get_shot_tokens_values(clip, tokens): - old_value = None - output = {} - - old_value = clip.shot_name.get_value() - - for token in tokens: - clip.shot_name.set_value(token) - _key = re.sub("[ <>]", "", token) - - try: - output[_key] = int(clip.shot_name.get_value()) - except ValueError: - output[_key] = clip.shot_name.get_value() - - clip.shot_name.set_value(old_value) - - return output - - -def _get_segment_attributes(segment): - - log.debug("Segment name|hidden: {}|{}".format( - segment.name.get_value(), segment.hidden - )) - if ( - segment.name.get_value() == "" - or segment.hidden.get_value() - ): - return None - - # Add timeline segment to tree - clip_data = { - "segment_name": segment.name.get_value(), - "segment_comment": segment.comment.get_value(), - "shot_name": segment.shot_name.get_value(), - "tape_name": segment.tape_name, - "source_name": segment.source_name, - "fpath": segment.file_path, - "PySegment": segment - } - - # add all available shot tokens - shot_tokens = _get_shot_tokens_values( - segment, - ["", "", "", ""] - ) - clip_data.update(shot_tokens) - - # populate shot source metadata - segment_attrs = [ - "record_duration", "record_in", "record_out", - "source_duration", "source_in", "source_out" - ] - segment_attrs_data = {} - for attr in segment_attrs: - if not hasattr(segment, attr): - continue - _value = getattr(segment, attr) - segment_attrs_data[attr] = str(_value).replace("+", ":") - - if attr in ["record_in", "record_out"]: - clip_data[attr] = _value.relative_frame - else: - clip_data[attr] = _value.frame - - clip_data["segment_timecodes"] = segment_attrs_data - - return clip_data - - -def create_otio_timeline(sequence): - log.info(dir(sequence)) - log.info(sequence.attributes) - - CTX.project = get_current_flame_project() - - # get current timeline - CTX.set_fps( - float(str(sequence.frame_rate)[:-4])) - - tl_start_frame = utils.timecode_to_frames( - str(sequence.start_time).replace("+", ":"), - CTX.get_fps() - ) - CTX.set_tl_start_frame(tl_start_frame) - - # convert timeline to otio - otio_timeline = _create_otio_timeline(sequence) - - # create otio tracks and clips - for ver in sequence.versions: - for track in ver.tracks: - # avoid all empty tracks - # or hidden tracks - if ( - len(track.segments) == 0 - or track.hidden.get_value() - ): - continue - - # convert track to otio - otio_track = create_otio_track( - "video", str(track.name)[1:-1]) - - all_segments = [] - for segment in track.segments: - clip_data = _get_segment_attributes(segment) - if not clip_data: - continue - all_segments.append(clip_data) - - segments_ordered = dict(enumerate(all_segments)) - log.debug("_ 
segments_ordered: {}".format( - pformat(segments_ordered) - )) - if not segments_ordered: - continue - - for itemindex, segment_data in segments_ordered.items(): - log.debug("_ itemindex: {}".format(itemindex)) - - # Add Gap if needed - prev_item = ( - segment_data - if itemindex == 0 - else segments_ordered[itemindex - 1] - ) - log.debug("_ segment_data: {}".format(segment_data)) - - # calculate clip frame range difference from each other - clip_diff = segment_data["record_in"] - prev_item["record_out"] - - # add gap if first track item is not starting - # at first timeline frame - if itemindex == 0 and segment_data["record_in"] > 0: - add_otio_gap(segment_data, otio_track, 0) - - # or add gap if following track items are having - # frame range differences from each other - elif itemindex and clip_diff != 1: - add_otio_gap( - segment_data, otio_track, prev_item["record_out"]) - - # create otio clip and add it to track - otio_clip = create_otio_clip(segment_data) - otio_track.append(otio_clip) - - log.debug("_ otio_clip: {}".format(otio_clip)) - - # create otio marker - # create otio metadata - - # add track to otio timeline - otio_timeline.tracks.append(otio_track) - - return otio_timeline - - -def write_to_file(otio_timeline, path): - otio.adapters.write_to_file(otio_timeline, path) diff --git a/server_addon/flame/client/ayon_flame/otio/utils.py b/server_addon/flame/client/ayon_flame/otio/utils.py deleted file mode 100644 index 5a28263fc2..0000000000 --- a/server_addon/flame/client/ayon_flame/otio/utils.py +++ /dev/null @@ -1,91 +0,0 @@ -import re -import opentimelineio as otio -import logging -log = logging.getLogger(__name__) - -FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]") - - -def timecode_to_frames(timecode, framerate): - rt = otio.opentime.from_timecode(timecode, framerate) - return int(otio.opentime.to_frames(rt)) - - -def frames_to_timecode(frames, framerate): - rt = otio.opentime.from_frames(frames, framerate) - return otio.opentime.to_timecode(rt) - - -def frames_to_seconds(frames, framerate): - rt = otio.opentime.from_frames(frames, framerate) - return otio.opentime.to_seconds(rt) - - -def get_reformatted_filename(filename, padded=True): - """ - Return fixed python expression path - - Args: - filename (str): file name - - Returns: - type: string with reformatted path - - Example: - get_reformatted_filename("plate.1001.exr") > plate.%04d.exr - - """ - found = FRAME_PATTERN.search(filename) - - if not found: - log.info("File name is not sequence: {}".format(filename)) - return filename - - padding = get_padding_from_filename(filename) - - replacement = "%0{}d".format(padding) if padded else "%d" - start_idx, end_idx = found.span(1) - - return replacement.join( - [filename[:start_idx], filename[end_idx:]] - ) - - -def get_padding_from_filename(filename): - """ - Return padding number from Flame path style - - Args: - filename (str): file name - - Returns: - int: padding number - - Example: - get_padding_from_filename("plate.0001.exr") > 4 - - """ - found = get_frame_from_filename(filename) - - return len(found) if found else None - - -def get_frame_from_filename(filename): - """ - Return sequence number from Flame path style - - Args: - filename (str): file name - - Returns: - int: sequence frame number - - Example: - def get_frame_from_filename(path): - ("plate.0001.exr") > 0001 - - """ - - found = re.findall(FRAME_PATTERN, filename) - - return found.pop() if found else None diff --git a/server_addon/flame/client/ayon_flame/plugins/create/create_shot_clip.py 
diff --git a/server_addon/flame/client/ayon_flame/plugins/create/create_shot_clip.py b/server_addon/flame/client/ayon_flame/plugins/create/create_shot_clip.py
deleted file mode 100644
index 120c8c559d..0000000000
--- a/server_addon/flame/client/ayon_flame/plugins/create/create_shot_clip.py
+++ /dev/null
@@ -1,307 +0,0 @@
-from copy import deepcopy
-import ayon_flame.api as opfapi
-
-
-class CreateShotClip(opfapi.Creator):
-    """Publishable clip"""
-
-    label = "Create Publishable Clip"
-    product_type = "clip"
-    icon = "film"
-    defaults = ["Main"]
-
-    presets = None
-
-    def process(self):
-        # create a copy of the object attributes that are modified
-        # during `process`
-        presets = deepcopy(self.presets)
-        gui_inputs = self.get_gui_inputs()
-
-        # get key pairs from presets and match them against ui inputs
-        for k, v in gui_inputs.items():
-            if v["type"] in ("dict", "section"):
-                # nested dictionary (only one level allowed
-                # for sections and dict)
-                for _k, _v in v["value"].items():
-                    if presets.get(_k) is not None:
-                        gui_inputs[k][
-                            "value"][_k]["value"] = presets[_k]
-
-            if presets.get(k) is not None:
-                gui_inputs[k]["value"] = presets[k]
-
-        # open widget for plugins inputs
-        results_back = self.create_widget(
-            "AYON publish attributes creator",
-            "Define sequential rename and fill hierarchy data.",
-            gui_inputs
-        )
-
-        if len(self.selected) < 1:
-            return
-
-        if not results_back:
-            print("Operation aborted")
-            return
-
-        # get ui output for track name for vertical sync
-        v_sync_track = results_back["vSyncTrack"]["value"]
-
-        # sort selected track items by the vertical sync track
-        sorted_selected_segments = []
-        unsorted_selected_segments = []
-        for _segment in self.selected:
-            if _segment.parent.name.get_value() in v_sync_track:
-                sorted_selected_segments.append(_segment)
-            else:
-                unsorted_selected_segments.append(_segment)
-
-        sorted_selected_segments.extend(unsorted_selected_segments)
-
-        kwargs = {
-            "log": self.log,
-            "ui_inputs": results_back,
-            "avalon": self.data,
-            "product_type": self.data["productType"]
-        }
-
-        for i, segment in enumerate(sorted_selected_segments):
-            kwargs["rename_index"] = i
-            # convert the segment into a publishable clip
-            opfapi.PublishableClip(segment, **kwargs).convert()
-
-    def get_gui_inputs(self):
-        gui_tracks = self._get_video_track_names(
-            opfapi.get_current_sequence(opfapi.CTX.selection)
-        )
-        return deepcopy({
-            "renameHierarchy": {
-                "type": "section",
-                "label": "Shot Hierarchy And Rename Settings",
-                "target": "ui",
-                "order": 0,
-                "value": {
-                    "hierarchy": {
-                        "value": "{folder}/{sequence}",
-                        "type": "QLineEdit",
-                        "label": "Shot Parent Hierarchy",
-                        "target": "tag",
-                        "toolTip": "Parent folder for the shot root folder. Template is filled from the `Hierarchy Data` section",  # noqa
-                        "order": 0},
-                    "useShotName": {
-                        "value": True,
-                        "type": "QCheckBox",
-                        "label": "Use Shot Name",
-                        "target": "ui",
-                        "toolTip": "Use name from the Shot name clip attribute",  # noqa
-                        "order": 1},
-                    "clipRename": {
-                        "value": False,
-                        "type": "QCheckBox",
-                        "label": "Rename clips",
-                        "target": "ui",
-                        "toolTip": "Rename selected clips on the fly",  # noqa
-                        "order": 2},
-                    "clipName": {
-                        "value": "{sequence}{shot}",
-                        "type": "QLineEdit",
-                        "label": "Clip Name Template",
-                        "target": "ui",
-                        "toolTip": "Template for creating shot names, used for renaming (turn Rename clips on)",  # noqa
-                        "order": 3},
-                    "segmentIndex": {
-                        "value": True,
-                        "type": "QCheckBox",
-                        "label": "Segment index",
-                        "target": "ui",
-                        "toolTip": "Take number from segment index",  # noqa
-                        "order": 4},
-                    "countFrom": {
-                        "value": 10,
-                        "type": "QSpinBox",
-                        "label": "Count sequence from",
-                        "target": "ui",
-                        "toolTip": "Set where the sequence numbering starts from",  # noqa
-                        "order": 5},
                    "countSteps": {
-                        "value": 10,
-                        "type": "QSpinBox",
-                        "label": "Stepping number",
-                        "target": "ui",
-                        "toolTip": "How much the number increases with every new step",  # noqa
-                        "order": 6},
-                }
-            },
-            "hierarchyData": {
-                "type": "dict",
-                "label": "Shot Template Keywords",
-                "target": "tag",
-                "order": 1,
-                "value": {
-                    "folder": {
-                        "value": "shots",
-                        "type": "QLineEdit",
-                        "label": "{folder}",
-                        "target": "tag",
-                        "toolTip": "Name of folder used for root of generated shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
-                        "order": 0},
-                    "episode": {
-                        "value": "ep01",
-                        "type": "QLineEdit",
-                        "label": "{episode}",
-                        "target": "tag",
-                        "toolTip": "Name of episode.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
-                        "order": 1},
-                    "sequence": {
-                        "value": "sq01",
-                        "type": "QLineEdit",
-                        "label": "{sequence}",
-                        "target": "tag",
-                        "toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
-                        "order": 2},
-                    "track": {
-                        "value": "{_track_}",
-                        "type": "QLineEdit",
-                        "label": "{track}",
-                        "target": "tag",
-                        "toolTip": "Name of timeline track layer.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
-                        "order": 3},
-                    "shot": {
-                        "value": "sh###",
-                        "type": "QLineEdit",
-                        "label": "{shot}",
-                        "target": "tag",
-                        "toolTip": "Name of shot. `#` is converted to a padded number.\nAlso could be used with usable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
-                        "order": 4}
-                }
-            },
-            "verticalSync": {
-                "type": "section",
-                "label": "Vertical Synchronization Of Attributes",
-                "target": "ui",
-                "order": 2,
-                "value": {
-                    "vSyncOn": {
-                        "value": True,
-                        "type": "QCheckBox",
-                        "label": "Enable Vertical Sync",
-                        "target": "ui",
-                        "toolTip": "Switch on if you want clips above each other to share their attributes",  # noqa
-                        "order": 0},
-                    "vSyncTrack": {
-                        "value": gui_tracks,  # noqa
-                        "type": "QComboBox",
-                        "label": "Hero track",
-                        "target": "ui",
-                        "toolTip": "Select the driving track name which should be the hero for all others",  # noqa
-                        "order": 1}
-                }
-            },
-            "publishSettings": {
-                "type": "section",
-                "label": "Publish Settings",
-                "target": "ui",
-                "order": 3,
-                "value": {
-                    "productName": {
-                        "value": ["[ track name ]", "main", "bg", "fg",
                                  "animatic"],
-                        "type": "QComboBox",
-                        "label": "Product Name",
-                        "target": "ui",
-                        "toolTip": "Choose a product name pattern; if [ track name ] is selected, the name of the track layer will be used",  # noqa
-                        "order": 0},
-                    "productType": {
-                        "value": ["plate", "take"],
-                        "type": "QComboBox",
-                        "label": "Product Type",
-                        "target": "ui",
-                        "toolTip": "What this product will be used for",  # noqa
-                        "order": 1},
-                    "reviewTrack": {
-                        "value": ["< none >"] + gui_tracks,
-                        "type": "QComboBox",
-                        "label": "Use Review Track",
-                        "target": "ui",
-                        "toolTip": "Generate preview videos on the fly; if `< none >` is selected, nothing will be generated.",  # noqa
-                        "order": 2},
-                    "audio": {
-                        "value": False,
-                        "type": "QCheckBox",
-                        "label": "Include audio",
-                        "target": "tag",
-                        "toolTip": "Process products with corresponding audio",  # noqa
-                        "order": 3},
-                    
"sourceResolution": { - "value": False, - "type": "QCheckBox", - "label": "Source resolution", - "target": "tag", - "toolTip": "Is resolution taken from timeline or source?", # noqa - "order": 4}, - } - }, - "frameRangeAttr": { - "type": "section", - "label": "Shot Attributes", - "target": "ui", - "order": 4, - "value": { - "workfileFrameStart": { - "value": 1001, - "type": "QSpinBox", - "label": "Workfiles Start Frame", - "target": "tag", - "toolTip": "Set workfile starting frame number", # noqa - "order": 0 - }, - "handleStart": { - "value": 0, - "type": "QSpinBox", - "label": "Handle Start", - "target": "tag", - "toolTip": "Handle at start of clip", # noqa - "order": 1 - }, - "handleEnd": { - "value": 0, - "type": "QSpinBox", - "label": "Handle End", - "target": "tag", - "toolTip": "Handle at end of clip", # noqa - "order": 2 - }, - "includeHandles": { - "value": False, - "type": "QCheckBox", - "label": "Include handles", - "target": "tag", - "toolTip": "By default handles are excluded", # noqa - "order": 3 - }, - "retimedHandles": { - "value": True, - "type": "QCheckBox", - "label": "Retimed handles", - "target": "tag", - "toolTip": "By default handles are retimed.", # noqa - "order": 4 - }, - "retimedFramerange": { - "value": True, - "type": "QCheckBox", - "label": "Retimed framerange", - "target": "tag", - "toolTip": "By default framerange is retimed.", # noqa - "order": 5 - } - } - } - }) - - def _get_video_track_names(self, sequence): - track_names = [] - for ver in sequence.versions: - for track in ver.tracks: - track_names.append(track.name.get_value()) - - return track_names diff --git a/server_addon/flame/client/ayon_flame/plugins/load/load_clip.py b/server_addon/flame/client/ayon_flame/plugins/load/load_clip.py deleted file mode 100644 index c8ec7b36c9..0000000000 --- a/server_addon/flame/client/ayon_flame/plugins/load/load_clip.py +++ /dev/null @@ -1,274 +0,0 @@ -from copy import deepcopy -import os -import flame -from pprint import pformat -import ayon_flame.api as opfapi -from ayon_core.lib import StringTemplate -from ayon_core.lib.transcoding import ( - VIDEO_EXTENSIONS, - IMAGE_EXTENSIONS -) - - -class LoadClip(opfapi.ClipLoader): - """Load a product to timeline as clip - - Place clip to timeline on its asset origin timings collected - during conforming to project - """ - - product_types = {"render2d", "source", "plate", "render", "review"} - representations = {"*"} - extensions = set( - ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS) - ) - - label = "Load as clip" - order = -10 - icon = "code-fork" - color = "orange" - - # settings - reel_group_name = "OpenPype_Reels" - reel_name = "Loaded" - clip_name_template = "{folder[name]}_{product[name]}<_{output}>" - - """ Anatomy keys from version context data and dynamically added: - - {layerName} - original layer name token - - {layerUID} - original layer UID token - - {originalBasename} - original clip name taken from file - """ - layer_rename_template = "{folder[name]}_{product[name]}<_{output}>" - layer_rename_patterns = [] - - def load(self, context, name, namespace, options): - - # get flame objects - fproject = flame.project.current_project - self.fpd = fproject.current_workspace.desktop - - # load clip to timeline and get main variables - version_entity = context["version"] - version_attributes = version_entity["attrib"] - version_name = version_entity["version"] - colorspace = self.get_colorspace(context) - - # in case output is not in context replace key to representation - if not 
context["representation"]["context"].get("output"): - self.clip_name_template = self.clip_name_template.replace( - "output", "representation") - self.layer_rename_template = self.layer_rename_template.replace( - "output", "representation") - - formatting_data = deepcopy(context["representation"]["context"]) - clip_name = StringTemplate(self.clip_name_template).format( - formatting_data) - - # convert colorspace with ocio to flame mapping - # in imageio flame section - colorspace = self.get_native_colorspace(colorspace) - self.log.info("Loading with colorspace: `{}`".format(colorspace)) - - # create workfile path - workfile_dir = os.environ["AYON_WORKDIR"] - openclip_dir = os.path.join( - workfile_dir, clip_name - ) - openclip_path = os.path.join( - openclip_dir, clip_name + ".clip" - ) - if not os.path.exists(openclip_dir): - os.makedirs(openclip_dir) - - # prepare clip data from context ad send it to openClipLoader - path = self.filepath_from_context(context) - loading_context = { - "path": path.replace("\\", "/"), - "colorspace": colorspace, - "version": "v{:0>3}".format(version_name), - "layer_rename_template": self.layer_rename_template, - "layer_rename_patterns": self.layer_rename_patterns, - "context_data": formatting_data - } - self.log.debug(pformat( - loading_context - )) - self.log.debug(openclip_path) - - # make openpype clip file - opfapi.OpenClipSolver( - openclip_path, loading_context, logger=self.log).make() - - # prepare Reel group in actual desktop - opc = self._get_clip( - clip_name, - openclip_path - ) - - # add additional metadata from the version to imprint Avalon knob - add_keys = [ - "frameStart", "frameEnd", "source", "author", - "fps", "handleStart", "handleEnd" - ] - - # move all version data keys to tag data - data_imprint = { - key: version_attributes.get(key, str(None)) - for key in add_keys - } - - # add variables related to version context - data_imprint.update({ - "version": version_name, - "colorspace": colorspace, - "objectName": clip_name - }) - - # TODO: finish the containerisation - # opc_segment = opfapi.get_clip_segment(opc) - - # return opfapi.containerise( - # opc_segment, - # name, namespace, context, - # self.__class__.__name__, - # data_imprint) - - return opc - - def _get_clip(self, name, clip_path): - reel = self._get_reel() - # with maintained openclip as opc - matching_clip = [cl for cl in reel.clips - if cl.name.get_value() == name] - if matching_clip: - return matching_clip.pop() - else: - created_clips = flame.import_clips(str(clip_path), reel) - return created_clips.pop() - - def _get_reel(self): - - matching_rgroup = [ - rg for rg in self.fpd.reel_groups - if rg.name.get_value() == self.reel_group_name - ] - - if not matching_rgroup: - reel_group = self.fpd.create_reel_group(str(self.reel_group_name)) - for _r in reel_group.reels: - if "reel" not in _r.name.get_value().lower(): - continue - self.log.debug("Removing: {}".format(_r.name)) - flame.delete(_r) - else: - reel_group = matching_rgroup.pop() - - matching_reel = [ - re for re in reel_group.reels - if re.name.get_value() == self.reel_name - ] - - if not matching_reel: - reel_group = reel_group.create_reel(str(self.reel_name)) - else: - reel_group = matching_reel.pop() - - return reel_group - - def _get_segment_from_clip(self, clip): - # unwrapping segment from input clip - pass - - # def switch(self, container, context): - # self.update(container, context) - - # def update(self, container, context): - # """ Updating previously loaded clips - # """ - # # load clip to timeline and 
get main variables - # repre_entity = context['representation'] - # name = container['name'] - # namespace = container['namespace'] - # track_item = phiero.get_track_items( - # track_item_name=namespace) - # version = io.find_one({ - # "type": "version", - # "id": repre_entity["versionId"] - # }) - # version_data = version.get("data", {}) - # version_name = version.get("name", None) - # colorspace = version_data.get("colorSpace", None) - # object_name = "{}_{}".format(name, namespace) - # file = get_representation_path(repre_entity).replace("\\", "/") - # clip = track_item.source() - - # # reconnect media to new path - # clip.reconnectMedia(file) - - # # set colorspace - # if colorspace: - # clip.setSourceMediaColourTransform(colorspace) - - # # add additional metadata from the version to imprint Avalon knob - # add_keys = [ - # "frameStart", "frameEnd", "source", "author", - # "fps", "handleStart", "handleEnd" - # ] - - # # move all version data keys to tag data - # data_imprint = {} - # for key in add_keys: - # data_imprint.update({ - # key: version_data.get(key, str(None)) - # }) - - # # add variables related to version context - # data_imprint.update({ - # "representation": repre_entity["id"], - # "version": version_name, - # "colorspace": colorspace, - # "objectName": object_name - # }) - - # # update color of clip regarding the version order - # self.set_item_color(track_item, version) - - # return phiero.update_container(track_item, data_imprint) - - # def remove(self, container): - # """ Removing previously loaded clips - # """ - # # load clip to timeline and get main variables - # namespace = container['namespace'] - # track_item = phiero.get_track_items( - # track_item_name=namespace) - # track = track_item.parent() - - # # remove track item from track - # track.removeItem(track_item) - - # @classmethod - # def multiselection(cls, track_item): - # if not cls.track: - # cls.track = track_item.parent() - # cls.sequence = cls.track.parent() - - # @classmethod - # def set_item_color(cls, track_item, version): - - # clip = track_item.source() - # # define version name - # version_name = version.get("name", None) - # # get all versions in list - # versions = io.find({ - # "type": "version", - # "parent": version["parent"] - # }).distinct('name') - - # max_version = max(versions) - - # # set clip colour - # if version_name == max_version: - # clip.binItem().setColor(cls.clip_color_last) - # else: - # clip.binItem().setColor(cls.clip_color) diff --git a/server_addon/flame/client/ayon_flame/plugins/load/load_clip_batch.py b/server_addon/flame/client/ayon_flame/plugins/load/load_clip_batch.py deleted file mode 100644 index 0d7a125af7..0000000000 --- a/server_addon/flame/client/ayon_flame/plugins/load/load_clip_batch.py +++ /dev/null @@ -1,180 +0,0 @@ -from copy import deepcopy -import os -import flame -from pprint import pformat -import ayon_flame.api as opfapi -from ayon_core.lib import StringTemplate -from ayon_core.lib.transcoding import ( - VIDEO_EXTENSIONS, - IMAGE_EXTENSIONS -) - -class LoadClipBatch(opfapi.ClipLoader): - """Load a product to timeline as clip - - Place clip to timeline on its asset origin timings collected - during conforming to project - """ - - product_types = {"render2d", "source", "plate", "render", "review"} - representations = {"*"} - extensions = set( - ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS) - ) - - label = "Load as clip to current batch" - order = -10 - icon = "code-fork" - color = "orange" - - # settings - reel_name = 
"OP_LoadedReel" - clip_name_template = "{batch}_{folder[name]}_{product[name]}<_{output}>" - - """ Anatomy keys from version context data and dynamically added: - - {layerName} - original layer name token - - {layerUID} - original layer UID token - - {originalBasename} - original clip name taken from file - """ - layer_rename_template = "{folder[name]}_{product[name]}<_{output}>" - layer_rename_patterns = [] - - def load(self, context, name, namespace, options): - - # get flame objects - self.batch = options.get("batch") or flame.batch - - # load clip to timeline and get main variables - version_entity = context["version"] - version_attributes =version_entity["attrib"] - version_name = version_entity["version"] - colorspace = self.get_colorspace(context) - - clip_name_template = self.clip_name_template - layer_rename_template = self.layer_rename_template - # in case output is not in context replace key to representation - if not context["representation"]["context"].get("output"): - clip_name_template = clip_name_template.replace( - "output", "representation") - layer_rename_template = layer_rename_template.replace( - "output", "representation") - - folder_entity = context["folder"] - product_entity = context["product"] - formatting_data = deepcopy(context["representation"]["context"]) - formatting_data["batch"] = self.batch.name.get_value() - formatting_data.update({ - "asset": folder_entity["name"], - "folder": { - "name": folder_entity["name"], - }, - "subset": product_entity["name"], - "family": product_entity["productType"], - "product": { - "name": product_entity["name"], - "type": product_entity["productType"], - } - }) - - clip_name = StringTemplate(clip_name_template).format( - formatting_data) - - # convert colorspace with ocio to flame mapping - # in imageio flame section - colorspace = self.get_native_colorspace(colorspace) - self.log.info("Loading with colorspace: `{}`".format(colorspace)) - - # create workfile path - workfile_dir = options.get("workdir") or os.environ["AYON_WORKDIR"] - openclip_dir = os.path.join( - workfile_dir, clip_name - ) - openclip_path = os.path.join( - openclip_dir, clip_name + ".clip" - ) - - if not os.path.exists(openclip_dir): - os.makedirs(openclip_dir) - - # prepare clip data from context and send it to openClipLoader - path = self.filepath_from_context(context) - loading_context = { - "path": path.replace("\\", "/"), - "colorspace": colorspace, - "version": "v{:0>3}".format(version_name), - "layer_rename_template": layer_rename_template, - "layer_rename_patterns": self.layer_rename_patterns, - "context_data": formatting_data - } - self.log.debug(pformat( - loading_context - )) - self.log.debug(openclip_path) - - # make openpype clip file - opfapi.OpenClipSolver( - openclip_path, loading_context, logger=self.log).make() - - # prepare Reel group in actual desktop - opc = self._get_clip( - clip_name, - openclip_path - ) - - # add additional metadata from the version to imprint Avalon knob - add_keys = [ - "frameStart", "frameEnd", "source", "author", - "fps", "handleStart", "handleEnd" - ] - - # move all version data keys to tag data - data_imprint = { - key: version_attributes.get(key, str(None)) - for key in add_keys - } - # add variables related to version context - data_imprint.update({ - "version": version_name, - "colorspace": colorspace, - "objectName": clip_name - }) - - # TODO: finish the containerisation - # opc_segment = opfapi.get_clip_segment(opc) - - # return opfapi.containerise( - # opc_segment, - # name, namespace, context, - # 
self.__class__.__name__, - # data_imprint) - - return opc - - def _get_clip(self, name, clip_path): - reel = self._get_reel() - - # with maintained openclip as opc - matching_clip = None - for cl in reel.clips: - if cl.name.get_value() != name: - continue - matching_clip = cl - - if not matching_clip: - created_clips = flame.import_clips(str(clip_path), reel) - return created_clips.pop() - - return matching_clip - - def _get_reel(self): - - matching_reel = [ - rg for rg in self.batch.reels - if rg.name.get_value() == self.reel_name - ] - - return ( - matching_reel.pop() - if matching_reel - else self.batch.create_reel(str(self.reel_name)) - ) diff --git a/server_addon/flame/client/ayon_flame/plugins/publish/collect_test_selection.py b/server_addon/flame/client/ayon_flame/plugins/publish/collect_test_selection.py deleted file mode 100644 index dac2c862e6..0000000000 --- a/server_addon/flame/client/ayon_flame/plugins/publish/collect_test_selection.py +++ /dev/null @@ -1,64 +0,0 @@ -import os -import pyblish.api -import tempfile -import ayon_flame.api as opfapi -from ayon_flame.otio import flame_export as otio_export -import opentimelineio as otio -from pprint import pformat -reload(otio_export) # noqa - - -@pyblish.api.log -class CollectTestSelection(pyblish.api.ContextPlugin): - """testing selection sharing - """ - - order = pyblish.api.CollectorOrder - label = "test selection" - hosts = ["flame"] - active = False - - def process(self, context): - self.log.info( - "Active Selection: {}".format(opfapi.CTX.selection)) - - sequence = opfapi.get_current_sequence(opfapi.CTX.selection) - - self.test_imprint_data(sequence) - self.test_otio_export(sequence) - - def test_otio_export(self, sequence): - test_dir = os.path.normpath( - tempfile.mkdtemp(prefix="test_pyblish_tmp_") - ) - export_path = os.path.normpath( - os.path.join( - test_dir, "otio_timeline_export.otio" - ) - ) - otio_timeline = otio_export.create_otio_timeline(sequence) - otio_export.write_to_file( - otio_timeline, export_path - ) - read_timeline_otio = otio.adapters.read_from_file(export_path) - - if otio_timeline != read_timeline_otio: - raise Exception("Exported timeline is different from original") - - self.log.info(pformat(otio_timeline)) - self.log.info("Otio exported to: {}".format(export_path)) - - def test_imprint_data(self, sequence): - with opfapi.maintained_segment_selection(sequence) as sel_segments: - for segment in sel_segments: - if str(segment.name)[1:-1] == "": - continue - - self.log.debug("Segment with OpenPypeData: {}".format( - segment.name)) - - opfapi.imprint(segment, { - 'asset': segment.name.get_value(), - 'productType': 'render', - 'productName': 'productMain' - }) diff --git a/server_addon/flame/client/ayon_flame/plugins/publish/collect_timeline_instances.py b/server_addon/flame/client/ayon_flame/plugins/publish/collect_timeline_instances.py deleted file mode 100644 index 7680483db1..0000000000 --- a/server_addon/flame/client/ayon_flame/plugins/publish/collect_timeline_instances.py +++ /dev/null @@ -1,419 +0,0 @@ -import re -from types import NoneType -import pyblish -import ayon_flame.api as opfapi -from ayon_flame.otio import flame_export -from ayon_core.pipeline import AYON_INSTANCE_ID, AVALON_INSTANCE_ID -from ayon_core.pipeline.editorial import ( - is_overlapping_otio_ranges, - get_media_range_with_retimes -) - -# # developer reload modules -from pprint import pformat - -# constatns -NUM_PATERN = re.compile(r"([0-9\.]+)") -TXT_PATERN = re.compile(r"([a-zA-Z]+)") - - -class 
CollectTimelineInstances(pyblish.api.ContextPlugin): - """Collect all Timeline segment selection.""" - - order = pyblish.api.CollectorOrder - 0.09 - label = "Collect timeline Instances" - hosts = ["flame"] - - settings_category = "flame" - - audio_track_items = [] - - # settings - xml_preset_attrs_from_comments = [] - add_tasks = [] - - def process(self, context): - selected_segments = context.data["flameSelectedSegments"] - self.log.debug("__ selected_segments: {}".format(selected_segments)) - - self.otio_timeline = context.data["otioTimeline"] - self.fps = context.data["fps"] - - # process all selected - for segment in selected_segments: - # get openpype tag data - marker_data = opfapi.get_segment_data_marker(segment) - - self.log.debug("__ marker_data: {}".format( - pformat(marker_data))) - - if not marker_data: - continue - - if marker_data.get("id") not in { - AYON_INSTANCE_ID, AVALON_INSTANCE_ID - }: - continue - - self.log.debug("__ segment.name: {}".format( - segment.name - )) - - comment_attributes = self._get_comment_attributes(segment) - - self.log.debug("_ comment_attributes: {}".format( - pformat(comment_attributes))) - - clip_data = opfapi.get_segment_attributes(segment) - clip_name = clip_data["segment_name"] - self.log.debug("clip_name: {}".format(clip_name)) - - # get otio clip data - otio_data = self._get_otio_clip_instance_data(clip_data) or {} - self.log.debug("__ otio_data: {}".format(pformat(otio_data))) - - # get file path - file_path = clip_data["fpath"] - - first_frame = opfapi.get_frame_from_filename(file_path) or 0 - - head, tail = self._get_head_tail( - clip_data, - otio_data["otioClip"], - marker_data["handleStart"], - marker_data["handleEnd"] - ) - - # make sure there is not NoneType rather 0 - if isinstance(head, NoneType): - head = 0 - if isinstance(tail, NoneType): - tail = 0 - - # make sure value is absolute - if head != 0: - head = abs(head) - if tail != 0: - tail = abs(tail) - - # solve handles length - marker_data["handleStart"] = min( - marker_data["handleStart"], head) - marker_data["handleEnd"] = min( - marker_data["handleEnd"], tail) - - # Backward compatibility fix of 'entity_type' > 'folder_type' - if "parents" in marker_data: - for parent in marker_data["parents"]: - if "entity_type" in parent: - parent["folder_type"] = parent.pop("entity_type") - - workfile_start = self._set_workfile_start(marker_data) - - with_audio = bool(marker_data.pop("audio")) - - # add marker data to instance data - inst_data = dict(marker_data.items()) - - # add ocio_data to instance data - inst_data.update(otio_data) - - folder_path = marker_data["folderPath"] - folder_name = folder_path.rsplit("/")[-1] - product_name = marker_data["productName"] - - # insert product type into families - product_type = marker_data["productType"] - families = [str(f) for f in marker_data["families"]] - families.insert(0, str(product_type)) - - # form label - label = folder_name - if folder_name != clip_name: - label += " ({})".format(clip_name) - label += " {} [{}]".format(product_name, ", ".join(families)) - - inst_data.update({ - "name": "{}_{}".format(folder_name, product_name), - "label": label, - "folderPath": folder_path, - "item": segment, - "families": families, - "publish": marker_data["publish"], - "fps": self.fps, - "workfileFrameStart": workfile_start, - "sourceFirstFrame": int(first_frame), - "retimedHandles": marker_data.get("retimedHandles"), - "shotDurationFromSource": ( - not marker_data.get("retimedFramerange")), - "path": file_path, - "flameAddTasks": self.add_tasks, - 
"tasks": { - task["name"]: {"type": task["type"]} - for task in self.add_tasks}, - "representations": [], - "newHierarchyIntegration": True, - # Backwards compatible (Deprecated since 24/06/06) - "newAssetPublishing": True, - }) - self.log.debug("__ inst_data: {}".format(pformat(inst_data))) - - # add resolution - self._get_resolution_to_data(inst_data, context) - - # add comment attributes if any - inst_data.update(comment_attributes) - - # create instance - instance = context.create_instance(**inst_data) - - # add colorspace data - instance.data.update({ - "versionData": { - "colorspace": clip_data["colour_space"], - } - }) - - # create shot instance for shot attributes create/update - self._create_shot_instance(context, clip_name, **inst_data) - - self.log.info("Creating instance: {}".format(instance)) - self.log.info( - "_ instance.data: {}".format(pformat(instance.data))) - - if not with_audio: - continue - - # add audioReview attribute to plate instance data - # if reviewTrack is on - if marker_data.get("reviewTrack") is not None: - instance.data["reviewAudio"] = True - - @staticmethod - def _set_workfile_start(data): - include_handles = data.get("includeHandles") - workfile_start = data["workfileFrameStart"] - handle_start = data["handleStart"] - - if include_handles: - workfile_start += handle_start - - return workfile_start - - def _get_comment_attributes(self, segment): - comment = segment.comment.get_value() - - # try to find attributes - attributes = { - "xml_overrides": { - "pixelRatio": 1.00} - } - # search for `:` - for split in self._split_comments(comment): - # make sure we ignore if not `:` in key - if ":" not in split: - continue - - self._get_xml_preset_attrs( - attributes, split) - - # add xml overrides resolution to instance data - xml_overrides = attributes["xml_overrides"] - if xml_overrides.get("width"): - attributes.update({ - "resolutionWidth": xml_overrides["width"], - "resolutionHeight": xml_overrides["height"], - "pixelAspect": xml_overrides["pixelRatio"] - }) - - return attributes - - def _get_xml_preset_attrs(self, attributes, split): - - # split to key and value - key, value = split.split(":") - - for attr_data in self.xml_preset_attrs_from_comments: - a_name = attr_data["name"] - a_type = attr_data["type"] - - # exclude all not related attributes - if a_name.lower() not in key.lower(): - continue - - # get pattern defined by type - pattern = TXT_PATERN - if a_type in ("number", "float"): - pattern = NUM_PATERN - - res_goup = pattern.findall(value) - - # raise if nothing is found as it is not correctly defined - if not res_goup: - raise ValueError(( - "Value for `{}` attribute is not " - "set correctly: `{}`").format(a_name, split)) - - if "string" in a_type: - _value = res_goup[0] - if "float" in a_type: - _value = float(res_goup[0]) - if "number" in a_type: - _value = int(res_goup[0]) - - attributes["xml_overrides"][a_name] = _value - - # condition for resolution in key - if "resolution" in key.lower(): - res_goup = NUM_PATERN.findall(value) - # check if axpect was also defined - # 1920x1080x1.5 - aspect = res_goup[2] if len(res_goup) > 2 else 1 - - width = int(res_goup[0]) - height = int(res_goup[1]) - pixel_ratio = float(aspect) - attributes["xml_overrides"].update({ - "width": width, - "height": height, - "pixelRatio": pixel_ratio - }) - - def _split_comments(self, comment_string): - # first split comment by comma - split_comments = [] - if "," in comment_string: - split_comments.extend(comment_string.split(",")) - elif ";" in comment_string: - 
split_comments.extend(comment_string.split(";")) - else: - split_comments.append(comment_string) - - return split_comments - - def _get_head_tail(self, clip_data, otio_clip, handle_start, handle_end): - # calculate head and tail with forward compatibility - head = clip_data.get("segment_head") - tail = clip_data.get("segment_tail") - self.log.debug("__ head: `{}`".format(head)) - self.log.debug("__ tail: `{}`".format(tail)) - - # HACK: it is here to serve for versions below 2021.1 - if not any([head, tail]): - retimed_attributes = get_media_range_with_retimes( - otio_clip, handle_start, handle_end) - self.log.debug( - ">> retimed_attributes: {}".format(retimed_attributes)) - - # retimed head and tail - head = int(retimed_attributes["handleStart"]) - tail = int(retimed_attributes["handleEnd"]) - - return head, tail - - def _get_resolution_to_data(self, data, context): - assert data.get("otioClip"), "Missing `otioClip` data" - - # solve source resolution option - if data.get("sourceResolution", None): - otio_clip_metadata = data[ - "otioClip"].media_reference.metadata - data.update({ - "resolutionWidth": otio_clip_metadata[ - "openpype.source.width"], - "resolutionHeight": otio_clip_metadata[ - "openpype.source.height"], - "pixelAspect": otio_clip_metadata[ - "openpype.source.pixelAspect"] - }) - else: - otio_tl_metadata = context.data["otioTimeline"].metadata - data.update({ - "resolutionWidth": otio_tl_metadata["openpype.timeline.width"], - "resolutionHeight": otio_tl_metadata[ - "openpype.timeline.height"], - "pixelAspect": otio_tl_metadata[ - "openpype.timeline.pixelAspect"] - }) - - def _create_shot_instance(self, context, clip_name, **data): - master_layer = data.get("heroTrack") - hierarchy_data = data.get("hierarchyData") - - if not master_layer: - return - - if not hierarchy_data: - return - - folder_path = data["folderPath"] - folder_name = folder_path.rsplit("/")[-1] - product_name = "shotMain" - - # insert product type into families - product_type = "shot" - - # form label - label = folder_name - if folder_name != clip_name: - label += " ({}) ".format(clip_name) - label += " {}".format(product_name) - label += " [{}]".format(product_type) - - data.update({ - "name": "{}_{}".format(folder_name, product_name), - "label": label, - "productName": product_name, - "folderPath": folder_path, - "productType": product_type, - "family": product_type, - "families": [product_type] - }) - - instance = context.create_instance(**data) - self.log.info("Creating instance: {}".format(instance)) - self.log.debug( - "_ instance.data: {}".format(pformat(instance.data))) - - def _get_otio_clip_instance_data(self, clip_data): - """ - Return otio objects for timeline, track and clip - - Args: - timeline_item_data (dict): timeline_item_data from list returned by - resolve.get_current_timeline_items() - otio_timeline (otio.schema.Timeline): otio object - - Returns: - dict: otio clip object - - """ - segment = clip_data["PySegment"] - s_track_name = segment.parent.name.get_value() - timeline_range = self._create_otio_time_range_from_timeline_item_data( - clip_data) - - for otio_clip in self.otio_timeline.each_clip(): - track_name = otio_clip.parent().name - parent_range = otio_clip.range_in_parent() - if s_track_name not in track_name: - continue - if otio_clip.name not in segment.name.get_value(): - continue - if is_overlapping_otio_ranges( - parent_range, timeline_range, strict=True): - - # add pypedata marker to otio_clip metadata - for marker in otio_clip.markers: - if opfapi.MARKER_NAME in 
marker.name:
-                        otio_clip.metadata.update(marker.metadata)
-                return {"otioClip": otio_clip}
-
-        return None
-
-    def _create_otio_time_range_from_timeline_item_data(self, clip_data):
-        frame_start = int(clip_data["record_in"])
-        frame_duration = int(clip_data["record_duration"])
-
-        return flame_export.create_otio_time_range(
-            frame_start, frame_duration, self.fps)
diff --git a/server_addon/flame/client/ayon_flame/plugins/publish/collect_timeline_otio.py b/server_addon/flame/client/ayon_flame/plugins/publish/collect_timeline_otio.py
deleted file mode 100644
index 139ac5b875..0000000000
--- a/server_addon/flame/client/ayon_flame/plugins/publish/collect_timeline_otio.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import pyblish.api
-
-import ayon_flame.api as opfapi
-from ayon_flame.otio import flame_export
-from ayon_core.pipeline.create import get_product_name
-
-
-class CollectTimelineOTIO(pyblish.api.ContextPlugin):
-    """Inject the current working context into the publish context"""
-
-    label = "Collect Timeline OTIO"
-    order = pyblish.api.CollectorOrder - 0.099
-
-    def process(self, context):
-        # plugin defined
-        product_type = "workfile"
-        variant = "otioTimeline"
-
-        # main
-        folder_entity = context.data["folderEntity"]
-        project = opfapi.get_current_project()
-        sequence = opfapi.get_current_sequence(opfapi.CTX.selection)
-
-        # create product name
-        task_entity = context.data["taskEntity"]
-        task_name = task_type = None
-        if task_entity:
-            task_name = task_entity["name"]
-            task_type = task_entity["taskType"]
-        product_name = get_product_name(
-            context.data["projectName"],
-            task_name,
-            task_type,
-            context.data["hostName"],
-            product_type,
-            variant,
-            project_settings=context.data["project_settings"]
-        )
-
-        # adding otio timeline to context
-        with opfapi.maintained_segment_selection(sequence) as selected_seg:
-            otio_timeline = flame_export.create_otio_timeline(sequence)
-
-        instance_data = {
-            "name": product_name,
-            "folderPath": folder_entity["path"],
-            "productName": product_name,
-            "productType": product_type,
-            "family": product_type,
-            "families": [product_type]
-        }
-
-        # create instance with workfile
-        instance = context.create_instance(**instance_data)
-        self.log.info("Creating instance: {}".format(instance))
-
-        # update context with main project attributes
-        context.data.update({
-            "flameProject": project,
-            "flameSequence": sequence,
-            "otioTimeline": otio_timeline,
-            "currentFile": "Flame/{}/{}".format(
-                project.name, sequence.name
-            ),
-            "flameSelectedSegments": selected_seg,
-            "fps": float(str(sequence.frame_rate)[:-4])
-        })
diff --git a/server_addon/flame/client/ayon_flame/plugins/publish/extract_otio_file.py b/server_addon/flame/client/ayon_flame/plugins/publish/extract_otio_file.py
deleted file mode 100644
index 41ae981cba..0000000000
--- a/server_addon/flame/client/ayon_flame/plugins/publish/extract_otio_file.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import os
-import pyblish.api
-import opentimelineio as otio
-from ayon_core.pipeline import publish
-
-
-class ExtractOTIOFile(publish.Extractor):
-    """
-    Extractor exporting the OTIO file
-    """
-
-    label = "Extract OTIO file"
-    order = pyblish.api.ExtractorOrder - 0.45
-    families = ["workfile"]
-    hosts = ["flame"]
-
-    def process(self, instance):
-        # create representation data
-        if "representations" not in instance.data:
-            instance.data["representations"] = []
-
-        name = instance.data["name"]
-        staging_dir = self.staging_dir(instance)
-
-        otio_timeline = instance.context.data["otioTimeline"]
-        # create otio timeline representation
-        otio_file_name = name + ".otio"
-        otio_file_path = os.path.join(staging_dir, otio_file_name)
-
-        # export otio file to temp dir
-        otio.adapters.write_to_file(otio_timeline, otio_file_path)
-
-        representation_otio = {
-            'name': "otio",
-            'ext': "otio",
-            'files': otio_file_name,
-            "stagingDir": staging_dir,
-        }
-
-        instance.data["representations"].append(representation_otio)
-
-        self.log.info("Added OTIO file representation: {}".format(
-            representation_otio))
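Review note on `ExtractOTIOFile` above: the extractor writes the timeline through the standard `otio.adapters.write_to_file` and only then registers a representation dict that points at the staging directory; the integrator is expected to move the file into the publish location later. A minimal sketch of that hand-off, assuming a scratch temp dir in place of pyblish's staging machinery:

```python
import os
import tempfile

import opentimelineio as otio

# Build a trivial timeline and write it the same way the extractor does.
timeline = otio.schema.Timeline(name="example_timeline")
staging_dir = tempfile.mkdtemp(prefix="otio_extract_")
otio_file_name = timeline.name + ".otio"
otio.adapters.write_to_file(
    timeline, os.path.join(staging_dir, otio_file_name))

# The representation only references the file by name plus staging dir;
# nothing is copied here, that is the integrator's job.
representation = {
    "name": "otio",
    "ext": "otio",
    "files": otio_file_name,
    "stagingDir": staging_dir,
}
print(representation)
```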
otio_file_name = name + ".otio" - otio_file_path = os.path.join(staging_dir, otio_file_name) - - # export otio file to temp dir - otio.adapters.write_to_file(otio_timeline, otio_file_path) - - representation_otio = { - 'name': "otio", - 'ext': "otio", - 'files': otio_file_name, - "stagingDir": staging_dir, - } - - instance.data["representations"].append(representation_otio) - - self.log.info("Added OTIO file representation: {}".format( - representation_otio)) diff --git a/server_addon/flame/client/ayon_flame/plugins/publish/extract_subset_resources.py b/server_addon/flame/client/ayon_flame/plugins/publish/extract_subset_resources.py deleted file mode 100644 index 66c6181ffb..0000000000 --- a/server_addon/flame/client/ayon_flame/plugins/publish/extract_subset_resources.py +++ /dev/null @@ -1,560 +0,0 @@ -import os -import re -from copy import deepcopy - -import pyblish.api - -from ayon_core.pipeline import publish -from ayon_flame import api as opfapi -from ayon_flame.api import MediaInfoFile -from ayon_core.pipeline.editorial import ( - get_media_range_with_retimes -) - -import flame - - -class ExtractProductResources(publish.Extractor): - """ - Extractor for transcoding files from Flame clip - """ - - label = "Extract product resources" - order = pyblish.api.ExtractorOrder - families = ["clip"] - hosts = ["flame"] - - settings_category = "flame" - - # plugin defaults - keep_original_representation = False - - default_presets = { - "thumbnail": { - "active": True, - "ext": "jpg", - "xml_preset_file": "Jpeg (8-bit).xml", - "xml_preset_dir": "", - "export_type": "File Sequence", - "parsed_comment_attrs": False, - "colorspace_out": "Output - sRGB", - "representation_add_range": False, - "representation_tags": ["thumbnail"], - "path_regex": ".*" - } - } - - # hide publisher during exporting - hide_ui_on_process = True - - # settings - export_presets_mapping = [] - - def process(self, instance): - if not self.keep_original_representation: - # remove previeous representation if not needed - instance.data["representations"] = [] - - # flame objects - segment = instance.data["item"] - folder_path = instance.data["folderPath"] - segment_name = segment.name.get_value() - clip_path = instance.data["path"] - sequence_clip = instance.context.data["flameSequence"] - - # segment's parent track name - s_track_name = segment.parent.name.get_value() - - # get configured workfile frame start/end (handles excluded) - frame_start = instance.data["frameStart"] - # get media source first frame - source_first_frame = instance.data["sourceFirstFrame"] - - self.log.debug("_ frame_start: {}".format(frame_start)) - self.log.debug("_ source_first_frame: {}".format(source_first_frame)) - - # get timeline in/out of segment - clip_in = instance.data["clipIn"] - clip_out = instance.data["clipOut"] - - # get retimed attributres - retimed_data = self._get_retimed_attributes(instance) - - # get individual keys - retimed_handle_start = retimed_data["handle_start"] - retimed_handle_end = retimed_data["handle_end"] - retimed_source_duration = retimed_data["source_duration"] - retimed_speed = retimed_data["speed"] - - # get handles value - take only the max from both - handle_start = instance.data["handleStart"] - handle_end = instance.data["handleEnd"] - handles = max(handle_start, handle_end) - include_handles = instance.data.get("includeHandles") - retimed_handles = instance.data.get("retimedHandles") - - # get media source range with handles - source_start_handles = instance.data["sourceStartH"] - source_end_handles = 
instance.data["sourceEndH"] - - # retime if needed - if retimed_speed != 1.0: - if retimed_handles: - # handles are retimed - source_start_handles = ( - instance.data["sourceStart"] - retimed_handle_start) - source_end_handles = ( - source_start_handles - + (retimed_source_duration - 1) - + retimed_handle_start - + retimed_handle_end - ) - - else: - # handles are not retimed - source_end_handles = ( - source_start_handles - + (retimed_source_duration - 1) - + handle_start - + handle_end - ) - - # get frame range with handles for representation range - frame_start_handle = frame_start - handle_start - repre_frame_start = frame_start_handle - if include_handles: - if retimed_speed == 1.0 or not retimed_handles: - frame_start_handle = frame_start - else: - frame_start_handle = ( - frame_start - handle_start) + retimed_handle_start - - self.log.debug("_ frame_start_handle: {}".format( - frame_start_handle)) - self.log.debug("_ repre_frame_start: {}".format( - repre_frame_start)) - - # calculate duration with handles - source_duration_handles = ( - source_end_handles - source_start_handles) + 1 - - self.log.debug("_ source_duration_handles: {}".format( - source_duration_handles)) - - # create staging dir path - staging_dir = self.staging_dir(instance) - - # append staging dir for later cleanup - instance.context.data["cleanupFullPaths"].append(staging_dir) - - export_presets_mapping = {} - for preset_mapping in deepcopy(self.export_presets_mapping): - name = preset_mapping.pop("name") - export_presets_mapping[name] = preset_mapping - - # add default preset type for thumbnail and reviewable video - # update them with settings and override in case the same - # are found in there - _preset_keys = [k.split('_')[0] for k in export_presets_mapping] - export_presets = { - k: v - for k, v in deepcopy(self.default_presets).items() - if k not in _preset_keys - } - export_presets.update(export_presets_mapping) - - if not instance.data.get("versionData"): - instance.data["versionData"] = {} - - # set versiondata if any retime - version_data = retimed_data.get("version_data") - self.log.debug("_ version_data: {}".format(version_data)) - - if version_data: - instance.data["versionData"].update(version_data) - - # version data start frame - version_frame_start = frame_start - if include_handles: - version_frame_start = frame_start_handle - if retimed_speed != 1.0: - if retimed_handles: - instance.data["versionData"].update({ - "frameStart": version_frame_start, - "frameEnd": ( - (version_frame_start + source_duration_handles - 1) - - (retimed_handle_start + retimed_handle_end) - ) - }) - else: - instance.data["versionData"].update({ - "handleStart": handle_start, - "handleEnd": handle_end, - "frameStart": version_frame_start, - "frameEnd": ( - (version_frame_start + source_duration_handles - 1) - - (handle_start + handle_end) - ) - }) - self.log.debug("_ version_data: {}".format( - instance.data["versionData"] - )) - - # loop all preset names and - for unique_name, preset_config in export_presets.items(): - modify_xml_data = {} - - if self._should_skip(preset_config, clip_path, unique_name): - continue - - # get all presets attributes - extension = preset_config["ext"] - preset_file = preset_config["xml_preset_file"] - preset_dir = preset_config["xml_preset_dir"] - export_type = preset_config["export_type"] - repre_tags = preset_config["representation_tags"] - parsed_comment_attrs = preset_config["parsed_comment_attrs"] - color_out = preset_config["colorspace_out"] - - self.log.info( - "Processing `{}` as 
`{}` to `{}` type...".format( - preset_file, export_type, extension - ) - ) - - exporting_clip = None - name_patern_xml = "_{}.".format( - unique_name) - - if export_type == "Sequence Publish": - # change export clip to sequence - exporting_clip = flame.duplicate(sequence_clip) - - # only keep visible layer where instance segment is child - self.hide_others( - exporting_clip, segment_name, s_track_name) - - # change name pattern - name_patern_xml = ( - "__{}.").format( - unique_name) - - # only for h264 with baked retime - in_mark = clip_in - out_mark = clip_out + 1 - modify_xml_data.update({ - "exportHandles": True, - "nbHandles": handles - }) - else: - in_mark = (source_start_handles - source_first_frame) + 1 - out_mark = in_mark + source_duration_handles - exporting_clip = self.import_clip(clip_path) - exporting_clip.name.set_value("{}_{}".format( - folder_path, segment_name)) - - # add xml tags modifications - modify_xml_data.update({ - # enum position low start from 0 - "frameIndex": 0, - "startFrame": repre_frame_start, - "namePattern": name_patern_xml - }) - - if parsed_comment_attrs: - # add any xml overrides collected form segment.comment - modify_xml_data.update(instance.data["xml_overrides"]) - - self.log.debug("_ in_mark: {}".format(in_mark)) - self.log.debug("_ out_mark: {}".format(out_mark)) - - export_kwargs = {} - # validate xml preset file is filled - if preset_file == "": - raise ValueError( - ("Check Settings for {} preset: " - "`XML preset file` is not filled").format( - unique_name) - ) - - # resolve xml preset dir if not filled - if preset_dir == "": - preset_dir = opfapi.get_preset_path_by_xml_name( - preset_file) - - if not preset_dir: - raise ValueError( - ("Check Settings for {} preset: " - "`XML preset file` {} is not found").format( - unique_name, preset_file) - ) - - # create preset path - preset_orig_xml_path = str(os.path.join( - preset_dir, preset_file - )) - - # define kwargs based on preset type - if "thumbnail" in unique_name: - modify_xml_data.update({ - "video/posterFrame": True, - "video/useFrameAsPoster": 1, - "namePattern": "__thumbnail" - }) - thumb_frame_number = int(in_mark + ( - (out_mark - in_mark + 1) / 2)) - - self.log.debug("__ thumb_frame_number: {}".format( - thumb_frame_number - )) - - export_kwargs["thumb_frame_number"] = thumb_frame_number - else: - export_kwargs.update({ - "in_mark": in_mark, - "out_mark": out_mark - }) - - preset_path = opfapi.modify_preset_file( - preset_orig_xml_path, staging_dir, modify_xml_data) - - # get and make export dir paths - export_dir_path = str(os.path.join( - staging_dir, unique_name - )) - os.makedirs(export_dir_path) - - # export - opfapi.export_clip( - export_dir_path, exporting_clip, preset_path, **export_kwargs) - - repr_name = unique_name - # make sure only first segment is used if underscore in name - # HACK: `ftrackreview_withLUT` will result only in `ftrackreview` - if ( - "thumbnail" in unique_name - or "ftrackreview" in unique_name - ): - repr_name = unique_name.split("_")[0] - - # create representation data - representation_data = { - "name": repr_name, - "outputName": repr_name, - "ext": extension, - "stagingDir": export_dir_path, - "tags": repre_tags, - "data": { - "colorspace": color_out - }, - "load_to_batch_group": preset_config.get( - "load_to_batch_group"), - "batch_group_loader_name": preset_config.get( - "batch_group_loader_name") or None - } - - # collect all available content of export dir - files = os.listdir(export_dir_path) - - # make sure no nested folders inside - n_stage_dir, 
n_files = self._unfolds_nested_folders( - export_dir_path, files, extension) - - # fix representation in case of nested folders - if n_stage_dir: - representation_data["stagingDir"] = n_stage_dir - files = n_files - - # add files to representation but add - # imagesequence as list - if ( - # first check if path in files is not mov extension - [ - f for f in files - if os.path.splitext(f)[-1] == ".mov" - ] - # then try if thumbnail is not in unique name - or repr_name == "thumbnail" - ): - representation_data["files"] = files.pop() - else: - representation_data["files"] = files - - # add frame range - if preset_config["representation_add_range"]: - representation_data.update({ - "frameStart": repre_frame_start, - "frameEnd": ( - repre_frame_start + source_duration_handles) - 1, - "fps": instance.data["fps"] - }) - - instance.data["representations"].append(representation_data) - - # add review family if found in tags - if "review" in repre_tags: - instance.data["families"].append("review") - - self.log.info("Added representation: {}".format( - representation_data)) - - if export_type == "Sequence Publish": - # at the end remove the duplicated clip - flame.delete(exporting_clip) - - def _get_retimed_attributes(self, instance): - handle_start = instance.data["handleStart"] - handle_end = instance.data["handleEnd"] - - # get basic variables - otio_clip = instance.data["otioClip"] - - # get available range trimmed with processed retimes - retimed_attributes = get_media_range_with_retimes( - otio_clip, handle_start, handle_end) - self.log.debug( - ">> retimed_attributes: {}".format(retimed_attributes)) - - r_media_in = int(retimed_attributes["mediaIn"]) - r_media_out = int(retimed_attributes["mediaOut"]) - version_data = retimed_attributes.get("versionData") - - return { - "version_data": version_data, - "handle_start": int(retimed_attributes["handleStart"]), - "handle_end": int(retimed_attributes["handleEnd"]), - "source_duration": ( - (r_media_out - r_media_in) + 1 - ), - "speed": float(retimed_attributes["speed"]) - } - - def _should_skip(self, preset_config, clip_path, unique_name): - # get activating attributes - activated_preset = preset_config["active"] - filter_path_regex = preset_config.get("filter_path_regex") - - self.log.info( - "Preset `{}` is active `{}` with filter `{}`".format( - unique_name, activated_preset, filter_path_regex - ) - ) - - # skip if not activated presete - if not activated_preset: - return True - - # exclude by regex filter if any - if ( - filter_path_regex - and not re.search(filter_path_regex, clip_path) - ): - return True - - def _unfolds_nested_folders(self, stage_dir, files_list, ext): - """Unfolds nested folders - - Args: - stage_dir (str): path string with directory - files_list (list): list of file names - ext (str): extension (jpg)[without dot] - - Raises: - IOError: in case no files were collected form any directory - - Returns: - str, list: new staging dir path, new list of file names - or - None, None: In case single file in `files_list` - """ - # exclude single files which are having extension - # the same as input ext attr - if ( - # only one file in list - len(files_list) == 1 - # file is having extension as input - and ext in os.path.splitext(files_list[0])[-1] - ): - return None, None - elif ( - # more then one file in list - len(files_list) >= 1 - # extension is correct - and ext in os.path.splitext(files_list[0])[-1] - # test file exists - and os.path.exists( - os.path.join(stage_dir, files_list[0]) - ) - ): - return None, None - - new_stage_dir 
= None - new_files_list = [] - for file in files_list: - search_path = os.path.join(stage_dir, file) - if not os.path.isdir(search_path): - continue - for root, _dirs, files in os.walk(search_path): - for _file in files: - _fn, _ext = os.path.splitext(_file) - if ext.lower() != _ext[1:].lower(): - continue - new_files_list.append(_file) - if not new_stage_dir: - new_stage_dir = root - - if not new_stage_dir: - raise AssertionError( - "Files in `{}` are not correct! Check `{}`".format( - files_list, stage_dir) - ) - - return new_stage_dir, new_files_list - - def hide_others(self, sequence_clip, segment_name, track_name): - """Helper method used only if sequence clip is used - - Args: - sequence_clip (flame.Clip): sequence clip - segment_name (str): segment name - track_name (str): track name - """ - # create otio tracks and clips - for ver in sequence_clip.versions: - for track in ver.tracks: - if len(track.segments) == 0 and track.hidden.get_value(): - continue - - # hide tracks which are not parent track - if track.name.get_value() != track_name: - track.hidden = True - continue - - # hidde all other segments - for segment in track.segments: - if segment.name.get_value() != segment_name: - segment.hidden = True - - def import_clip(self, path): - """ - Import clip from path - """ - dir_path = os.path.dirname(path) - media_info = MediaInfoFile(path, logger=self.log) - file_pattern = media_info.file_pattern - self.log.debug("__ file_pattern: {}".format(file_pattern)) - - # rejoin the pattern to dir path - new_path = os.path.join(dir_path, file_pattern) - - clips = flame.import_clips(new_path) - self.log.info("Clips [{}] imported from `{}`".format(clips, path)) - - if not clips: - self.log.warning("Path `{}` is not having any clips".format(path)) - return None - elif len(clips) > 1: - self.log.warning( - "Path `{}` is containing more that one clip".format(path) - ) - return clips[0] diff --git a/server_addon/flame/client/ayon_flame/plugins/publish/integrate_batch_group.py b/server_addon/flame/client/ayon_flame/plugins/publish/integrate_batch_group.py deleted file mode 100644 index f77c9e9116..0000000000 --- a/server_addon/flame/client/ayon_flame/plugins/publish/integrate_batch_group.py +++ /dev/null @@ -1,339 +0,0 @@ -import os -import copy -from collections import OrderedDict -from pprint import pformat -import pyblish -import ayon_flame.api as opfapi -import ayon_core.pipeline as op_pipeline -from ayon_core.pipeline.workfile import get_workdir - - -class IntegrateBatchGroup(pyblish.api.InstancePlugin): - """Integrate published shot to batch group""" - - order = pyblish.api.IntegratorOrder + 0.45 - label = "Integrate Batch Groups" - hosts = ["flame"] - families = ["clip"] - - settings_category = "flame" - - # settings - default_loader = "LoadClip" - - def process(self, instance): - add_tasks = instance.data["flameAddTasks"] - - # iterate all tasks from settings - for task_data in add_tasks: - # exclude batch group - if not task_data["create_batch_group"]: - continue - - # create or get already created batch group - bgroup = self._get_batch_group(instance, task_data) - - # add batch group content - all_batch_nodes = self._add_nodes_to_batch_with_links( - instance, task_data, bgroup) - - for name, node in all_batch_nodes.items(): - self.log.debug("name: {}, dir: {}".format( - name, dir(node) - )) - self.log.debug("__ node.attributes: {}".format( - node.attributes - )) - - # load plate to batch group - self.log.info("Loading product `{}` into batch `{}`".format( - instance.data["productName"], 
bgroup.name.get_value() - )) - self._load_clip_to_context(instance, bgroup) - - def _add_nodes_to_batch_with_links(self, instance, task_data, batch_group): - # get write file node properties > OrederDict because order does matter - write_pref_data = self._get_write_prefs(instance, task_data) - - batch_nodes = [ - { - "type": "comp", - "properties": {}, - "id": "comp_node01" - }, - { - "type": "Write File", - "properties": write_pref_data, - "id": "write_file_node01" - } - ] - batch_links = [ - { - "from_node": { - "id": "comp_node01", - "connector": "Result" - }, - "to_node": { - "id": "write_file_node01", - "connector": "Front" - } - } - ] - - # add nodes into batch group - return opfapi.create_batch_group_conent( - batch_nodes, batch_links, batch_group) - - def _load_clip_to_context(self, instance, bgroup): - # get all loaders for host - loaders_by_name = { - loader.__name__: loader - for loader in op_pipeline.discover_loader_plugins() - } - - # get all published representations - published_representations = instance.data["published_representations"] - repres_db_id_by_name = { - repre_info["representation"]["name"]: repre_id - for repre_id, repre_info in published_representations.items() - } - - # get all loadable representations - repres_by_name = { - repre["name"]: repre for repre in instance.data["representations"] - } - - # get repre_id for the loadable representations - loader_name_by_repre_id = { - repres_db_id_by_name[repr_name]: { - "loader": repr_data["batch_group_loader_name"], - # add repre data for exception logging - "_repre_data": repr_data - } - for repr_name, repr_data in repres_by_name.items() - if repr_data.get("load_to_batch_group") - } - - self.log.debug("__ loader_name_by_repre_id: {}".format(pformat( - loader_name_by_repre_id))) - - # get representation context from the repre_id - repre_contexts = op_pipeline.load.get_repres_contexts( - loader_name_by_repre_id.keys()) - - self.log.debug("__ repre_contexts: {}".format(pformat( - repre_contexts))) - - # loop all returned repres from repre_context dict - for repre_id, repre_context in repre_contexts.items(): - self.log.debug("__ repre_id: {}".format(repre_id)) - # get loader name by representation id - loader_name = ( - loader_name_by_repre_id[repre_id]["loader"] - # if nothing was added to settings fallback to default - or self.default_loader - ) - - # get loader plugin - loader_plugin = loaders_by_name.get(loader_name) - if loader_plugin: - # load to flame by representation context - try: - op_pipeline.load.load_with_repre_context( - loader_plugin, repre_context, **{ - "data": { - "workdir": self.task_workdir, - "batch": bgroup - } - }) - except op_pipeline.load.IncompatibleLoaderError as msg: - self.log.error( - "Check allowed representations for Loader `{}` " - "in settings > error: {}".format( - loader_plugin.__name__, msg)) - self.log.error( - "Representaton context >>{}<< is not compatible " - "with loader `{}`".format( - pformat(repre_context), loader_plugin.__name__ - ) - ) - else: - self.log.warning( - "Something got wrong and there is not Loader found for " - "following data: {}".format( - pformat(loader_name_by_repre_id)) - ) - - def _get_batch_group(self, instance, task_data): - frame_start = instance.data["frameStart"] - frame_end = instance.data["frameEnd"] - handle_start = instance.data["handleStart"] - handle_end = instance.data["handleEnd"] - frame_duration = (frame_end - frame_start) + 1 - folder_path = instance.data["folderPath"] - - task_name = task_data["name"] - batchgroup_name = 
"{}_{}".format(folder_path, task_name) - - batch_data = { - "shematic_reels": [ - "OP_LoadedReel" - ], - "handleStart": handle_start, - "handleEnd": handle_end - } - self.log.debug( - "__ batch_data: {}".format(pformat(batch_data))) - - # check if the batch group already exists - bgroup = opfapi.get_batch_group_from_desktop(batchgroup_name) - - if not bgroup: - self.log.info( - "Creating new batch group: {}".format(batchgroup_name)) - # create batch with utils - bgroup = opfapi.create_batch_group( - batchgroup_name, - frame_start, - frame_duration, - **batch_data - ) - - else: - self.log.info( - "Updating batch group: {}".format(batchgroup_name)) - # update already created batch group - bgroup = opfapi.create_batch_group( - batchgroup_name, - frame_start, - frame_duration, - update_batch_group=bgroup, - **batch_data - ) - - return bgroup - - def _get_anamoty_data_with_current_task(self, instance, task_data): - anatomy_data = copy.deepcopy(instance.data["anatomyData"]) - task_name = task_data["name"] - task_type = task_data["type"] - anatomy_obj = instance.context.data["anatomy"] - - # update task data in anatomy data - project_task_types = anatomy_obj["tasks"] - task_code = project_task_types.get(task_type, {}).get("shortName") - anatomy_data.update({ - "task": { - "name": task_name, - "type": task_type, - "short": task_code - } - }) - return anatomy_data - - def _get_write_prefs(self, instance, task_data): - # update task in anatomy data - anatomy_data = self._get_anamoty_data_with_current_task( - instance, task_data) - - self.task_workdir = self._get_shot_task_dir_path( - instance, task_data) - self.log.debug("__ task_workdir: {}".format( - self.task_workdir)) - - # TODO: this might be done with template in settings - render_dir_path = os.path.join( - self.task_workdir, "render", "flame") - - if not os.path.exists(render_dir_path): - os.makedirs(render_dir_path, mode=0o777) - - # TODO: add most of these to `imageio/flame/batch/write_node` - name = "{project[code]}_{folder[name]}_{task[name]}".format( - **anatomy_data - ) - - # The path attribute where the rendered clip is exported - # /path/to/file.[0001-0010].exr - media_path = render_dir_path - # name of file represented by tokens - media_path_pattern = ( - "_v/_v.") - # The Create Open Clip attribute of the Write File node. \ - # Determines if an Open Clip is created by the Write File node. - create_clip = True - # The Include Setup attribute of the Write File node. - # Determines if a Batch Setup file is created by the Write File node. - include_setup = True - # The path attribute where the Open Clip file is exported by - # the Write File node. - create_clip_path = "" - # The path attribute where the Batch setup file - # is exported by the Write File node. - include_setup_path = "./_v" - # The file type for the files written by the Write File node. - # Setting this attribute also overwrites format_extension, - # bit_depth and compress_mode to match the defaults for - # this file type. - file_type = "OpenEXR" - # The file extension for the files written by the Write File node. - # This attribute resets to match file_type whenever file_type - # is set. If you require a specific extension, you must - # set format_extension after setting file_type. - format_extension = "exr" - # The bit depth for the files written by the Write File node. - # This attribute resets to match file_type whenever file_type is set. - bit_depth = "16" - # The compressing attribute for the files exported by the Write - # File node. 
Only relevant when file_type in 'OpenEXR', 'Sgi', 'Tiff' - compress = True - # The compression format attribute for the specific File Types - # export by the Write File node. You must set compress_mode - # after setting file_type. - compress_mode = "DWAB" - # The frame index mode attribute of the Write File node. - # Value range: `Use Timecode` or `Use Start Frame` - frame_index_mode = "Use Start Frame" - frame_padding = 6 - # The versioning mode of the Open Clip exported by the Write File node. - # Only available if create_clip = True. - version_mode = "Follow Iteration" - version_name = "v" - version_padding = 3 - - # need to make sure the order of keys is correct - return OrderedDict(( - ("name", name), - ("media_path", media_path), - ("media_path_pattern", media_path_pattern), - ("create_clip", create_clip), - ("include_setup", include_setup), - ("create_clip_path", create_clip_path), - ("include_setup_path", include_setup_path), - ("file_type", file_type), - ("format_extension", format_extension), - ("bit_depth", bit_depth), - ("compress", compress), - ("compress_mode", compress_mode), - ("frame_index_mode", frame_index_mode), - ("frame_padding", frame_padding), - ("version_mode", version_mode), - ("version_name", version_name), - ("version_padding", version_padding) - )) - - def _get_shot_task_dir_path(self, instance, task_data): - project_entity = instance.data["projectEntity"] - folder_entity = instance.data["folderEntity"] - task_entity = instance.data["taskEntity"] - anatomy = instance.context.data["anatomy"] - project_settings = instance.context.data["project_settings"] - - return get_workdir( - project_entity, - folder_entity, - task_entity, - "flame", - anatomy=anatomy, - project_settings=project_settings - ) diff --git a/server_addon/flame/client/ayon_flame/startup/openpype_babypublisher/export_preset/openpype_seg_thumbnails_jpg.xml b/server_addon/flame/client/ayon_flame/startup/openpype_babypublisher/export_preset/openpype_seg_thumbnails_jpg.xml deleted file mode 100644 index 44a7bd9770..0000000000 --- a/server_addon/flame/client/ayon_flame/startup/openpype_babypublisher/export_preset/openpype_seg_thumbnails_jpg.xml +++ /dev/null @@ -1,58 +0,0 @@ - - - sequence - Creates a 8-bit Jpeg file per segment. 
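<!-- [annotation] This deleted preset exports one 8-bit JPEG per
     sequence segment; the babypublisher attaches those frames as shot
     thumbnails via the ftrack "thumbnail" component. -->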
- - NONE - - <name> - True - True - - image - FX - NoChange - False - 10 - - True - False - - audio - FX - FlattenTracks - True - 10 - - - - - 4 - 1 - 2 - - diff --git a/server_addon/flame/client/ayon_flame/startup/openpype_babypublisher/export_preset/openpype_seg_video_h264.xml b/server_addon/flame/client/ayon_flame/startup/openpype_babypublisher/export_preset/openpype_seg_video_h264.xml deleted file mode 100644 index 1d2c5a28bb..0000000000 --- a/server_addon/flame/client/ayon_flame/startup/openpype_babypublisher/export_preset/openpype_seg_video_h264.xml +++ /dev/null @@ -1,72 +0,0 @@ - - - sequence - Create MOV H264 files per segment with thumbnail - - NONE - - <name> - True - True - - movie - FX - FlattenTracks - True - 5 - - True - False - - audio - Original - NoChange - True - 5 - - - - QuickTime - <shot name> - 0 - PCS_709 - None - Autodesk - Flame - 2021 - - - - 4 - 1 - 2 - - \ No newline at end of file diff --git a/server_addon/flame/client/ayon_flame/startup/openpype_babypublisher/modules/__init__.py b/server_addon/flame/client/ayon_flame/startup/openpype_babypublisher/modules/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/server_addon/flame/client/ayon_flame/startup/openpype_babypublisher/modules/app_utils.py b/server_addon/flame/client/ayon_flame/startup/openpype_babypublisher/modules/app_utils.py deleted file mode 100644 index e639c3f482..0000000000 --- a/server_addon/flame/client/ayon_flame/startup/openpype_babypublisher/modules/app_utils.py +++ /dev/null @@ -1,162 +0,0 @@ -import os -import io -import ConfigParser as CP -from xml.etree import ElementTree as ET -from contextlib import contextmanager - -PLUGIN_DIR = os.path.dirname(os.path.dirname(__file__)) -EXPORT_PRESETS_DIR = os.path.join(PLUGIN_DIR, "export_preset") - -CONFIG_DIR = os.path.join(os.path.expanduser( - "~/.openpype"), "openpype_babypublisher") - - -@contextmanager -def make_temp_dir(): - import tempfile - - try: - dirpath = tempfile.mkdtemp() - - yield dirpath - - except IOError as _error: - raise IOError("Not able to create temp dir file: {}".format(_error)) - - finally: - pass - - -@contextmanager -def get_config(section=None): - cfg_file_path = os.path.join(CONFIG_DIR, "settings.ini") - - # create config dir - if not os.path.exists(CONFIG_DIR): - print("making dirs at: `{}`".format(CONFIG_DIR)) - os.makedirs(CONFIG_DIR, mode=0o777) - - # write default data to settings.ini - if not os.path.exists(cfg_file_path): - default_cfg = cfg_default() - config = CP.RawConfigParser() - config.readfp(io.BytesIO(default_cfg)) - with open(cfg_file_path, 'wb') as cfg_file: - config.write(cfg_file) - - try: - config = CP.RawConfigParser() - config.read(cfg_file_path) - if section: - _cfg_data = { - k: v - for s in config.sections() - for k, v in config.items(s) - if s == section - } - else: - _cfg_data = {s: dict(config.items(s)) for s in config.sections()} - - yield _cfg_data - - except IOError as _error: - raise IOError('Not able to read settings.ini file: {}'.format(_error)) - - finally: - pass - - -def set_config(cfg_data, section=None): - cfg_file_path = os.path.join(CONFIG_DIR, "settings.ini") - - config = CP.RawConfigParser() - config.read(cfg_file_path) - - try: - if not section: - for section in cfg_data: - for key, value in cfg_data[section].items(): - config.set(section, key, value) - else: - for key, value in cfg_data.items(): - config.set(section, key, value) - - with open(cfg_file_path, 'wb') as cfg_file: - config.write(cfg_file) - - except IOError as _error: - raise 
IOError('Not able to write settings.ini file: {}'.format(_error)) - - -def cfg_default(): - return """ -[main] -workfile_start_frame = 1001 -shot_handles = 0 -shot_name_template = {sequence}_{shot} -hierarchy_template = shots[Folder]/{sequence}[Sequence] -create_task_type = Compositing -""" - - -def configure_preset(file_path, data): - split_fp = os.path.splitext(file_path) - new_file_path = split_fp[0] + "_tmp" + split_fp[-1] - with open(file_path, "r") as datafile: - tree = ET.parse(datafile) - for key, value in data.items(): - for element in tree.findall(".//{}".format(key)): - print(element) - element.text = str(value) - tree.write(new_file_path) - - return new_file_path - - -def export_thumbnail(sequence, tempdir_path, data): - import flame - export_preset = os.path.join( - EXPORT_PRESETS_DIR, - "openpype_seg_thumbnails_jpg.xml" - ) - new_path = configure_preset(export_preset, data) - poster_frame_exporter = flame.PyExporter() - poster_frame_exporter.foreground = True - poster_frame_exporter.export(sequence, new_path, tempdir_path) - - -def export_video(sequence, tempdir_path, data): - import flame - export_preset = os.path.join( - EXPORT_PRESETS_DIR, - "openpype_seg_video_h264.xml" - ) - new_path = configure_preset(export_preset, data) - poster_frame_exporter = flame.PyExporter() - poster_frame_exporter.foreground = True - poster_frame_exporter.export(sequence, new_path, tempdir_path) - - -def timecode_to_frames(timecode, framerate): - def _seconds(value): - if isinstance(value, str): - _zip_ft = zip((3600, 60, 1, 1 / framerate), value.split(':')) - return sum(f * float(t) for f, t in _zip_ft) - elif isinstance(value, (int, float)): - return value / framerate - return 0 - - def _frames(seconds): - return seconds * framerate - - def tc_to_frames(_timecode, start=None): - return _frames(_seconds(_timecode) - _seconds(start)) - - if '+' in timecode: - timecode = timecode.replace('+', ':') - elif '#' in timecode: - timecode = timecode.replace('#', ':') - - frames = int(round(tc_to_frames(timecode, start='00:00:00:00'))) - - return frames diff --git a/server_addon/flame/client/ayon_flame/startup/openpype_babypublisher/modules/ftrack_lib.py b/server_addon/flame/client/ayon_flame/startup/openpype_babypublisher/modules/ftrack_lib.py deleted file mode 100644 index a66980493e..0000000000 --- a/server_addon/flame/client/ayon_flame/startup/openpype_babypublisher/modules/ftrack_lib.py +++ /dev/null @@ -1,459 +0,0 @@ -import os -import sys -import six -import re -import json - -import app_utils - -# Fill following constants or set them via environment variable -FTRACK_MODULE_PATH = None -FTRACK_API_KEY = None -FTRACK_API_USER = None -FTRACK_SERVER = None - - -def import_ftrack_api(): - try: - import ftrack_api - return ftrack_api - except ImportError: - import sys - ftrk_m_p = FTRACK_MODULE_PATH or os.getenv("FTRACK_MODULE_PATH") - sys.path.append(ftrk_m_p) - import ftrack_api - return ftrack_api - - -def get_ftrack_session(): - import os - ftrack_api = import_ftrack_api() - - # fill your own credentials - url = FTRACK_SERVER or os.getenv("FTRACK_SERVER") or "" - user = FTRACK_API_USER or os.getenv("FTRACK_API_USER") or "" - api = FTRACK_API_KEY or os.getenv("FTRACK_API_KEY") or "" - - first_validation = True - if not user: - print('- Ftrack Username is not set') - first_validation = False - if not api: - print('- Ftrack API key is not set') - first_validation = False - if not first_validation: - return False - - try: - return ftrack_api.Session( - server_url=url, - api_user=user, - api_key=api - 
) - except Exception as _e: - print("Can't log into Ftrack with used credentials: {}".format(_e)) - ftrack_cred = { - 'Ftrack server': str(url), - 'Username': str(user), - 'API key': str(api), - } - - item_lens = [len(key) + 1 for key in ftrack_cred] - justify_len = max(*item_lens) - for key, value in ftrack_cred.items(): - print('{} {}'.format((key + ':').ljust(justify_len, ' '), value)) - return False - - -def get_project_task_types(project_entity): - tasks = {} - proj_template = project_entity['project_schema'] - temp_task_types = proj_template['_task_type_schema']['types'] - - for type in temp_task_types: - if type['name'] not in tasks: - tasks[type['name']] = type - - return tasks - - -class FtrackComponentCreator: - default_location = "ftrack.server" - ftrack_locations = {} - thumbnails = [] - videos = [] - temp_dir = None - - def __init__(self, session): - self.session = session - self._get_ftrack_location() - - def generate_temp_data(self, selection, change_preset_data): - with app_utils.make_temp_dir() as tempdir_path: - for seq in selection: - app_utils.export_thumbnail( - seq, tempdir_path, change_preset_data) - app_utils.export_video(seq, tempdir_path, change_preset_data) - - return tempdir_path - - def collect_generated_data(self, tempdir_path): - temp_files = os.listdir(tempdir_path) - self.thumbnails = [f for f in temp_files if "jpg" in f] - self.videos = [f for f in temp_files if "mov" in f] - self.temp_dir = tempdir_path - - def get_thumb_path(self, shot_name): - # get component files - thumb_f = next((f for f in self.thumbnails if shot_name in f), None) - return os.path.join(self.temp_dir, thumb_f) - - def get_video_path(self, shot_name): - # get component files - video_f = next((f for f in self.videos if shot_name in f), None) - return os.path.join(self.temp_dir, video_f) - - def close(self): - self.ftrack_locations = {} - self.session = None - - def create_comonent(self, shot_entity, data, assetversion_entity=None): - self.shot_entity = shot_entity - location = self._get_ftrack_location() - - file_path = data["file_path"] - - # get extension - file = os.path.basename(file_path) - _n, ext = os.path.splitext(file) - - name = "ftrackreview-mp4" if "mov" in ext else "thumbnail" - - component_data = { - "name": name, - "file_path": file_path, - "file_type": ext, - "location": location - } - - if name == "ftrackreview-mp4": - duration = data["duration"] - handles = data["handles"] - fps = data["fps"] - component_data["metadata"] = { - 'ftr_meta': json.dumps({ - 'frameIn': int(0), - 'frameOut': int(duration + (handles * 2)), - 'frameRate': float(fps) - }) - } - if not assetversion_entity: - # get assettype entity from session - assettype_entity = self._get_assettype({"short": "reference"}) - - # get or create asset entity from session - asset_entity = self._get_asset({ - "name": "plateReference", - "type": assettype_entity, - "parent": self.shot_entity - }) - - # get or create assetversion entity from session - assetversion_entity = self._get_assetversion({ - "version": 0, - "asset": asset_entity - }) - - # get or create component entity - self._set_component(component_data, { - "name": name, - "version": assetversion_entity, - }) - - return assetversion_entity - - def _overwrite_members(self, entity, data): - origin_location = self._get_ftrack_location("ftrack.origin") - location = data.pop("location") - - self._remove_component_from_location(entity, location) - - entity["file_type"] = data["file_type"] - - try: - origin_location.add_component( - entity, data["file_path"] - ) 
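# [annotation] ftrack components are registered in two steps: the
# special "ftrack.origin" location only records where the source file
# lives on disk, and the follow-up `location.add_component(entity,
# origin_location, recursive=True)` then transfers the payload into the
# target location (the "ftrack.server" default here). A minimal sketch
# of the same pattern, assuming a valid `session` and an existing
# `version` entity (both names are illustrative, not from this file):
#
#     origin = session.query(
#         'Location where name is "ftrack.origin"').one()
#     server = session.query(
#         'Location where name is "ftrack.server"').one()
#     component = version.create_component(
#         "/tmp/shot010.mov",
#         data={"name": "ftrackreview-mp4"},
#         location=None)
#     origin.add_component(component, "/tmp/shot010.mov")
#     server.add_component(component, origin, recursive=True)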
- # Add components to location. - location.add_component( - entity, origin_location, recursive=True) - except Exception as __e: - print("Error: {}".format(__e)) - self._remove_component_from_location(entity, origin_location) - origin_location.add_component( - entity, data["file_path"] - ) - # Add components to location. - location.add_component( - entity, origin_location, recursive=True) - - def _remove_component_from_location(self, entity, location): - print(location) - # Removing existing members from location - components = list(entity.get("members", [])) - components += [entity] - for component in components: - for loc in component.get("component_locations", []): - if location["id"] == loc["location_id"]: - print("<< Removing component: {}".format(component)) - location.remove_component( - component, recursive=False - ) - - # Deleting existing members on component entity - for member in entity.get("members", []): - self.session.delete(member) - print("<< Deleting member: {}".format(member)) - del(member) - - self._commit() - - # Reset members in memory - if "members" in entity.keys(): - entity["members"] = [] - - def _get_assettype(self, data): - return self.session.query( - self._query("AssetType", data)).first() - - def _set_component(self, comp_data, base_data): - component_metadata = comp_data.pop("metadata", {}) - - component_entity = self.session.query( - self._query("Component", base_data) - ).first() - - if component_entity: - # overwrite existing members in component entity - # - get data for member from `ftrack.origin` location - self._overwrite_members(component_entity, comp_data) - - # Adding metadata - existing_component_metadata = component_entity["metadata"] - existing_component_metadata.update(component_metadata) - component_entity["metadata"] = existing_component_metadata - return - - assetversion_entity = base_data["version"] - location = comp_data.pop("location") - - component_entity = assetversion_entity.create_component( - comp_data["file_path"], - data=comp_data, - location=location - ) - - # Adding metadata - existing_component_metadata = component_entity["metadata"] - existing_component_metadata.update(component_metadata) - component_entity["metadata"] = existing_component_metadata - - if comp_data["name"] == "thumbnail": - self.shot_entity["thumbnail_id"] = component_entity["id"] - assetversion_entity["thumbnail_id"] = component_entity["id"] - - self._commit() - - def _get_asset(self, data): - # first find already created - asset_entity = self.session.query( - self._query("Asset", data) - ).first() - - if asset_entity: - return asset_entity - - asset_entity = self.session.create("Asset", data) - - # _commit if created - self._commit() - - return asset_entity - - def _get_assetversion(self, data): - assetversion_entity = self.session.query( - self._query("AssetVersion", data) - ).first() - - if assetversion_entity: - return assetversion_entity - - assetversion_entity = self.session.create("AssetVersion", data) - - # _commit if created - self._commit() - - return assetversion_entity - - def _commit(self): - try: - self.session.commit() - except Exception: - tp, value, tb = sys.exc_info() - # self.session.rollback() - # self.session._configure_locations() - six.reraise(tp, value, tb) - - def _get_ftrack_location(self, name=None): - name = name or self.default_location - - if name in self.ftrack_locations: - return self.ftrack_locations[name] - - location = self.session.query( - 'Location where name is "{}"'.format(name) - ).one() - self.ftrack_locations[name] = 
location - return location - - def _query(self, entitytype, data): - """ Generate a query expression from data supplied. - - If a value is not a string, we'll add the id of the entity to the - query. - - Args: - entitytype (str): The type of entity to query. - data (dict): The data to identify the entity. - exclusions (list): All keys to exclude from the query. - - Returns: - str: String query to use with "session.query" - """ - queries = [] - if sys.version_info[0] < 3: - for key, value in data.items(): - if not isinstance(value, (str, int)): - print("value: {}".format(value)) - if "id" in value.keys(): - queries.append( - "{0}.id is \"{1}\"".format(key, value["id"]) - ) - else: - queries.append("{0} is \"{1}\"".format(key, value)) - else: - for key, value in data.items(): - if not isinstance(value, (str, int)): - print("value: {}".format(value)) - if "id" in value.keys(): - queries.append( - "{0}.id is \"{1}\"".format(key, value["id"]) - ) - else: - queries.append("{0} is \"{1}\"".format(key, value)) - - query = ( - "select id from " + entitytype + " where " + " and ".join(queries) - ) - print(query) - return query - - -class FtrackEntityOperator: - existing_tasks = [] - - def __init__(self, session, project_entity): - self.session = session - self.project_entity = project_entity - - def commit(self): - try: - self.session.commit() - except Exception: - tp, value, tb = sys.exc_info() - self.session.rollback() - self.session._configure_locations() - six.reraise(tp, value, tb) - - def create_ftrack_entity(self, session, type, name, parent=None): - parent = parent or self.project_entity - entity = session.create(type, { - 'name': name, - 'parent': parent - }) - try: - session.commit() - except Exception: - tp, value, tb = sys.exc_info() - session.rollback() - session._configure_locations() - six.reraise(tp, value, tb) - return entity - - def get_ftrack_entity(self, session, type, name, parent): - query = '{} where name is "{}" and project_id is "{}"'.format( - type, name, self.project_entity["id"]) - - entity = session.query(query).first() - - # if entity doesn't exist then create one - if not entity: - entity = self.create_ftrack_entity( - session, - type, - name, - parent - ) - - return entity - - def create_parents(self, template): - parents = [] - t_split = template.split("/") - replace_patern = re.compile(r"(\[.*\])") - type_patern = re.compile(r"\[(.*)\]") - - for t_s in t_split: - match_type = type_patern.findall(t_s) - if not match_type: - raise Exception(( - "Missing correct type flag in : {}" - "/n Example: name[Type]").format( - t_s) - ) - new_name = re.sub(replace_patern, "", t_s) - f_type = match_type.pop() - - parents.append((new_name, f_type)) - - return parents - - def create_task(self, task_type, task_types, parent): - _exising_tasks = [ - child for child in parent['children'] - if child.entity_type.lower() == 'task' - ] - - # add task into existing tasks if they are not already there - for _t in _exising_tasks: - if _t in self.existing_tasks: - continue - self.existing_tasks.append(_t) - - existing_task = [ - task for task in self.existing_tasks - if task['name'].lower() in task_type.lower() - if task['parent'] == parent - ] - - if existing_task: - return existing_task.pop() - - task = self.session.create('Task', { - "name": task_type.lower(), - "parent": parent - }) - task["type"] = task_types[task_type] - - self.existing_tasks.append(task) - return task diff --git a/server_addon/flame/client/ayon_flame/startup/openpype_babypublisher/modules/panel_app.py 
b/server_addon/flame/client/ayon_flame/startup/openpype_babypublisher/modules/panel_app.py deleted file mode 100644 index ce023a9e4d..0000000000 --- a/server_addon/flame/client/ayon_flame/startup/openpype_babypublisher/modules/panel_app.py +++ /dev/null @@ -1,529 +0,0 @@ -from qtpy import QtWidgets, QtCore - -import uiwidgets -import app_utils -import ftrack_lib - - -def clear_inner_modules(): - import sys - - if "ftrack_lib" in sys.modules.keys(): - del sys.modules["ftrack_lib"] - print("Ftrack Lib module removed from sys.modules") - - if "app_utils" in sys.modules.keys(): - del sys.modules["app_utils"] - print("app_utils module removed from sys.modules") - - if "uiwidgets" in sys.modules.keys(): - del sys.modules["uiwidgets"] - print("uiwidgets module removed from sys.modules") - - -class MainWindow(QtWidgets.QWidget): - - def __init__(self, klass, *args, **kwargs): - super(MainWindow, self).__init__(*args, **kwargs) - self.panel_class = klass - - def closeEvent(self, event): - # clear all temp data - print("Removing temp data") - self.panel_class.clear_temp_data() - self.panel_class.close() - clear_inner_modules() - ftrack_lib.FtrackEntityOperator.existing_tasks = [] - # now the panel can be closed - event.accept() - - -class FlameBabyPublisherPanel(object): - session = None - temp_data_dir = None - processed_components = [] - project_entity = None - task_types = {} - all_task_types = {} - - # TreeWidget - columns = { - "Sequence name": { - "columnWidth": 200, - "order": 0 - }, - "Shot name": { - "columnWidth": 200, - "order": 1 - }, - "Clip duration": { - "columnWidth": 100, - "order": 2 - }, - "Shot description": { - "columnWidth": 500, - "order": 3 - }, - "Task description": { - "columnWidth": 500, - "order": 4 - }, - } - - def __init__(self, selection): - print(selection) - - self.session = ftrack_lib.get_ftrack_session() - self.selection = selection - self.window = MainWindow(self) - - # creating ui - self.window.setMinimumSize(1500, 600) - self.window.setWindowTitle('AYON: Baby-publisher') - self.window.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) - self.window.setAttribute(QtCore.Qt.WA_DeleteOnClose) - self.window.setFocusPolicy(QtCore.Qt.StrongFocus) - self.window.setStyleSheet('background-color: #313131') - - self._create_project_widget() - self._create_tree_widget() - self._set_sequence_params() - self._generate_widgets() - self._generate_layouts() - self._timeline_info() - self._fix_resolution() - - self.window.show() - - def _generate_widgets(self): - with app_utils.get_config("main") as cfg_data: - cfg_d = cfg_data - - self._create_task_type_widget(cfg_d) - - # input fields - self.shot_name_label = uiwidgets.FlameLabel( - 'Shot name template', 'normal', self.window) - self.shot_name_template_input = uiwidgets.FlameLineEdit( - cfg_d["shot_name_template"], self.window) - - self.hierarchy_label = uiwidgets.FlameLabel( - 'Parents template', 'normal', self.window) - self.hierarchy_template_input = uiwidgets.FlameLineEdit( - cfg_d["hierarchy_template"], self.window) - - self.start_frame_label = uiwidgets.FlameLabel( - 'Workfile start frame', 'normal', self.window) - self.start_frame_input = uiwidgets.FlameLineEdit( - cfg_d["workfile_start_frame"], self.window) - - self.handles_label = uiwidgets.FlameLabel( - 'Shot handles', 'normal', self.window) - self.handles_input = uiwidgets.FlameLineEdit( - cfg_d["shot_handles"], self.window) - - self.width_label = uiwidgets.FlameLabel( - 'Sequence width', 'normal', self.window) - self.width_input = uiwidgets.FlameLineEdit( - 
str(self.seq_width), self.window) - - self.height_label = uiwidgets.FlameLabel( - 'Sequence height', 'normal', self.window) - self.height_input = uiwidgets.FlameLineEdit( - str(self.seq_height), self.window) - - self.pixel_aspect_label = uiwidgets.FlameLabel( - 'Pixel aspect ratio', 'normal', self.window) - self.pixel_aspect_input = uiwidgets.FlameLineEdit( - str(1.00), self.window) - - self.fps_label = uiwidgets.FlameLabel( - 'Frame rate', 'normal', self.window) - self.fps_input = uiwidgets.FlameLineEdit( - str(self.fps), self.window) - - # Button - self.select_all_btn = uiwidgets.FlameButton( - 'Select All', self.select_all, self.window) - - self.remove_temp_data_btn = uiwidgets.FlameButton( - 'Remove temp data', self.clear_temp_data, self.window) - - self.ftrack_send_btn = uiwidgets.FlameButton( - 'Send to Ftrack', self._send_to_ftrack, self.window) - - def _generate_layouts(self): - # left props - v_shift = 0 - prop_layout_l = QtWidgets.QGridLayout() - prop_layout_l.setHorizontalSpacing(30) - if self.project_selector_enabled: - prop_layout_l.addWidget(self.project_select_label, v_shift, 0) - prop_layout_l.addWidget(self.project_select_input, v_shift, 1) - v_shift += 1 - prop_layout_l.addWidget(self.shot_name_label, (v_shift + 0), 0) - prop_layout_l.addWidget( - self.shot_name_template_input, (v_shift + 0), 1) - prop_layout_l.addWidget(self.hierarchy_label, (v_shift + 1), 0) - prop_layout_l.addWidget( - self.hierarchy_template_input, (v_shift + 1), 1) - prop_layout_l.addWidget(self.start_frame_label, (v_shift + 2), 0) - prop_layout_l.addWidget(self.start_frame_input, (v_shift + 2), 1) - prop_layout_l.addWidget(self.handles_label, (v_shift + 3), 0) - prop_layout_l.addWidget(self.handles_input, (v_shift + 3), 1) - prop_layout_l.addWidget(self.task_type_label, (v_shift + 4), 0) - prop_layout_l.addWidget( - self.task_type_input, (v_shift + 4), 1) - - # right props - prop_widget_r = QtWidgets.QWidget(self.window) - prop_layout_r = QtWidgets.QGridLayout(prop_widget_r) - prop_layout_r.setHorizontalSpacing(30) - prop_layout_r.setAlignment( - QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop) - prop_layout_r.setContentsMargins(0, 0, 0, 0) - prop_layout_r.addWidget(self.width_label, 1, 0) - prop_layout_r.addWidget(self.width_input, 1, 1) - prop_layout_r.addWidget(self.height_label, 2, 0) - prop_layout_r.addWidget(self.height_input, 2, 1) - prop_layout_r.addWidget(self.pixel_aspect_label, 3, 0) - prop_layout_r.addWidget(self.pixel_aspect_input, 3, 1) - prop_layout_r.addWidget(self.fps_label, 4, 0) - prop_layout_r.addWidget(self.fps_input, 4, 1) - - # prop layout - prop_main_layout = QtWidgets.QHBoxLayout() - prop_main_layout.addLayout(prop_layout_l, 1) - prop_main_layout.addSpacing(20) - prop_main_layout.addWidget(prop_widget_r, 1) - - # buttons layout - hbox = QtWidgets.QHBoxLayout() - hbox.addWidget(self.remove_temp_data_btn) - hbox.addWidget(self.select_all_btn) - hbox.addWidget(self.ftrack_send_btn) - - # put all layouts together - main_frame = QtWidgets.QVBoxLayout(self.window) - main_frame.setMargin(20) - main_frame.addLayout(prop_main_layout) - main_frame.addWidget(self.tree) - main_frame.addLayout(hbox) - - def _set_sequence_params(self): - for select in self.selection: - self.seq_height = select.height - self.seq_width = select.width - self.fps = float(str(select.frame_rate)[:-4]) - break - - def _create_task_type_widget(self, cfg_d): - print(self.project_entity) - self.task_types = ftrack_lib.get_project_task_types( - self.project_entity) - - self.task_type_label = uiwidgets.FlameLabel( - 'Create 
Task (type)', 'normal', self.window) - self.task_type_input = uiwidgets.FlamePushButtonMenu( - cfg_d["create_task_type"], self.task_types.keys(), self.window) - - def _create_project_widget(self): - import flame - # get project name from flame current project - self.project_name = flame.project.current_project.name - - # get project from ftrack - - # ftrack project name has to be the same as flame project! - query = 'Project where full_name is "{}"'.format(self.project_name) - - # globally used variables - self.project_entity = self.session.query(query).first() - - self.project_selector_enabled = bool(not self.project_entity) - - if self.project_selector_enabled: - self.all_projects = self.session.query( - "Project where status is active").all() - self.project_entity = self.all_projects[0] - project_names = [p["full_name"] for p in self.all_projects] - self.all_task_types = { - p["full_name"]: ftrack_lib.get_project_task_types(p).keys() - for p in self.all_projects - } - self.project_select_label = uiwidgets.FlameLabel( - 'Select Ftrack project', 'normal', self.window) - self.project_select_input = uiwidgets.FlamePushButtonMenu( - self.project_entity["full_name"], project_names, self.window) - self.project_select_input.selection_changed.connect( - self._on_project_changed) - - def _create_tree_widget(self): - ordered_column_labels = self.columns.keys() - for _name, _value in self.columns.items(): - ordered_column_labels.pop(_value["order"]) - ordered_column_labels.insert(_value["order"], _name) - - self.tree = uiwidgets.FlameTreeWidget( - ordered_column_labels, self.window) - - # Allow multiple items in tree to be selected - self.tree.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection) - - # Set tree column width - for _name, _val in self.columns.items(): - self.tree.setColumnWidth( - _val["order"], - _val["columnWidth"] - ) - - # Prevent weird characters when shrinking tree columns - self.tree.setTextElideMode(QtCore.Qt.ElideNone) - - def _resolve_project_entity(self): - if self.project_selector_enabled: - selected_project_name = self.project_select_input.text() - self.project_entity = next( - (p for p in self.all_projects - if p["full_name"] in selected_project_name), - None - ) - - def _save_ui_state_to_cfg(self): - _cfg_data_back = { - "shot_name_template": self.shot_name_template_input.text(), - "workfile_start_frame": self.start_frame_input.text(), - "shot_handles": self.handles_input.text(), - "hierarchy_template": self.hierarchy_template_input.text(), - "create_task_type": self.task_type_input.text() - } - - # add cfg data back to settings.ini - app_utils.set_config(_cfg_data_back, "main") - - def _send_to_ftrack(self): - # resolve active project and add it to self.project_entity - self._resolve_project_entity() - self._save_ui_state_to_cfg() - - # get handles from gui input - handles = self.handles_input.text() - - # get frame start from gui input - frame_start = int(self.start_frame_input.text()) - - # get task type from gui input - task_type = self.task_type_input.text() - - # get resolution from gui inputs - fps = self.fps_input.text() - - entity_operator = ftrack_lib.FtrackEntityOperator( - self.session, self.project_entity) - component_creator = ftrack_lib.FtrackComponentCreator(self.session) - - if not self.temp_data_dir: - self.window.hide() - self.temp_data_dir = component_creator.generate_temp_data( - self.selection, - { - "nbHandles": handles - } - ) - self.window.show() - - # collect generated files to list data for farther use - 
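# [annotation] The exporter drops roughly one "<shot>.jpg" and one
# "<shot>.mov" per segment into the temp dir; collect_generated_data()
# only splits that directory listing by extension so get_thumb_path()
# and get_video_path() can later match a file to its shot by name
# substring.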
component_creator.collect_generated_data(self.temp_data_dir) - - # Get all selected items from treewidget - for item in self.tree.selectedItems(): - # frame ranges - frame_duration = int(item.text(2)) - frame_end = frame_start + frame_duration - - # description - shot_description = item.text(3) - task_description = item.text(4) - - # other - sequence_name = item.text(0) - shot_name = item.text(1) - - thumb_fp = component_creator.get_thumb_path(shot_name) - video_fp = component_creator.get_video_path(shot_name) - - print("processed comps: {}".format(self.processed_components)) - print("processed thumb_fp: {}".format(thumb_fp)) - - processed = False - if thumb_fp not in self.processed_components: - self.processed_components.append(thumb_fp) - else: - processed = True - - print("processed: {}".format(processed)) - - # populate full shot info - shot_attributes = { - "sequence": sequence_name, - "shot": shot_name, - "task": task_type - } - - # format shot name template - _shot_name = self.shot_name_template_input.text().format( - **shot_attributes) - - # format hierarchy template - _hierarchy_text = self.hierarchy_template_input.text().format( - **shot_attributes) - print(_hierarchy_text) - - # solve parents - parents = entity_operator.create_parents(_hierarchy_text) - print(parents) - - # obtain shot parents entities - _parent = None - for _name, _type in parents: - p_entity = entity_operator.get_ftrack_entity( - self.session, - _type, - _name, - _parent - ) - print(p_entity) - _parent = p_entity - - # obtain shot ftrack entity - f_s_entity = entity_operator.get_ftrack_entity( - self.session, - "Shot", - _shot_name, - _parent - ) - print("Shot entity is: {}".format(f_s_entity)) - - if not processed: - # first create thumbnail and get version entity - assetversion_entity = component_creator.create_comonent( - f_s_entity, { - "file_path": thumb_fp - } - ) - - # secondly add video to version entity - component_creator.create_comonent( - f_s_entity, { - "file_path": video_fp, - "duration": frame_duration, - "handles": int(handles), - "fps": float(fps) - }, assetversion_entity - ) - - # create custom attributtes - custom_attrs = { - "frameStart": frame_start, - "frameEnd": frame_end, - "handleStart": int(handles), - "handleEnd": int(handles), - "resolutionWidth": int(self.width_input.text()), - "resolutionHeight": int(self.height_input.text()), - "pixelAspect": float(self.pixel_aspect_input.text()), - "fps": float(fps) - } - - # update custom attributes on shot entity - for key in custom_attrs: - f_s_entity['custom_attributes'][key] = custom_attrs[key] - - task_entity = entity_operator.create_task( - task_type, self.task_types, f_s_entity) - - # Create notes. 
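# [annotation] ftrack notes require an explicit author entity, so the
# API username from the session is resolved to a User record first; both
# Shot and Task entities expose `create_note(content, author=...)` as
# part of the stock ftrack_api interface.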
-            user = self.session.query(
-                "User where username is \"{}\"".format(self.session.api_user)
-            ).first()
-
-            f_s_entity.create_note(shot_description, author=user)
-
-            if task_description:
-                task_entity.create_note(task_description, user)
-
-        entity_operator.commit()
-
-        component_creator.close()
-
-    def _fix_resolution(self):
-        # Center window in Linux
-        resolution = QtWidgets.QDesktopWidget().screenGeometry()
-        self.window.move(
-            (resolution.width() / 2) - (self.window.frameSize().width() / 2),
-            (resolution.height() / 2) - (self.window.frameSize().height() / 2))
-
-    def _on_project_changed(self):
-        task_types = self.all_task_types[self.project_name]
-        self.task_type_input.set_menu_options(task_types)
-
-    def _timeline_info(self):
-        # identify the segments' information in the timeline
-        for sequence in self.selection:
-            frame_rate = float(str(sequence.frame_rate)[:-4])
-            for ver in sequence.versions:
-                for track in ver.tracks:
-                    if len(track.segments) == 0 and track.hidden:
-                        continue
-                    for segment in track.segments:
-                        print(segment.attributes)
-                        if segment.name.get_value() == "":
-                            continue
-                        if segment.hidden.get_value() is True:
-                            continue
-                        # get clip frame duration
-                        record_duration = str(segment.record_duration)[1:-1]
-                        clip_duration = app_utils.timecode_to_frames(
-                            record_duration, frame_rate)
-
-                        # populate shot source metadata
-                        shot_description = ""
-                        for attr in ["tape_name", "source_name", "head",
-                                     "tail", "file_path"]:
-                            if not hasattr(segment, attr):
-                                continue
-                            _value = getattr(segment, attr)
-                            _label = attr.replace("_", " ").capitalize()
-                            row = "{}: {}\n".format(_label, _value)
-                            shot_description += row
-
-                        # Add timeline segment to tree
-                        QtWidgets.QTreeWidgetItem(self.tree, [
-                            sequence.name.get_value(),  # seq name
-                            segment.shot_name.get_value(),  # shot name
-                            str(clip_duration),  # clip duration
-                            shot_description,  # shot description
-                            segment.comment.get_value()  # task description
-                        ]).setFlags(
-                            QtCore.Qt.ItemIsEditable
-                            | QtCore.Qt.ItemIsEnabled
-                            | QtCore.Qt.ItemIsSelectable
-                        )
-
-        # Select top item in tree
-        self.tree.setCurrentItem(self.tree.topLevelItem(0))
-
-    def select_all(self):
-        self.tree.selectAll()
-
-    def clear_temp_data(self):
-        import shutil
-
-        self.processed_components = []
-
-        if self.temp_data_dir:
-            shutil.rmtree(self.temp_data_dir)
-            self.temp_data_dir = None
-            print("All temp data was destroyed ...")
-
-    def close(self):
-        self._save_ui_state_to_cfg()
-        self.session.close()
diff --git a/server_addon/flame/client/ayon_flame/startup/openpype_babypublisher/modules/uiwidgets.py b/server_addon/flame/client/ayon_flame/startup/openpype_babypublisher/modules/uiwidgets.py
deleted file mode 100644
index 5498a49197..0000000000
--- a/server_addon/flame/client/ayon_flame/startup/openpype_babypublisher/modules/uiwidgets.py
+++ /dev/null
@@ -1,212 +0,0 @@
-from qtpy import QtWidgets, QtCore
-
-
-class FlameLabel(QtWidgets.QLabel):
-    """
-    Custom Qt Flame Label Widget
-
-    For different label looks set label_type as:
-    'normal', 'background', or 'outline'
-
-    To use:
-
-    label = FlameLabel('Label Name', 'normal', window)
-    """
-
-    def __init__(self, label_name, label_type, parent_window, *args, **kwargs):
-        super(FlameLabel, self).__init__(*args, **kwargs)
-
-        self.setText(label_name)
-        self.setParent(parent_window)
-        self.setMinimumSize(130, 28)
-        self.setMaximumHeight(28)
-        self.setFocusPolicy(QtCore.Qt.NoFocus)
-
-        # Set label stylesheet based on label_type
-
-        if label_type == 'normal':
-            self.setStyleSheet(
-                'QLabel {color: #9a9a9a;
border-bottom: 1px inset #282828; font: 14px "Discreet"}' # noqa - 'QLabel:disabled {color: #6a6a6a}' - ) - elif label_type == 'background': - self.setAlignment(QtCore.Qt.AlignCenter) - self.setStyleSheet( - 'color: #9a9a9a; background-color: #393939; font: 14px "Discreet"' # noqa - ) - elif label_type == 'outline': - self.setAlignment(QtCore.Qt.AlignCenter) - self.setStyleSheet( - 'color: #9a9a9a; background-color: #212121; border: 1px solid #404040; font: 14px "Discreet"' # noqa - ) - - -class FlameLineEdit(QtWidgets.QLineEdit): - """ - Custom Qt Flame Line Edit Widget - - Main window should include this: - window.setFocusPolicy(QtCore.Qt.StrongFocus) - - To use: - - line_edit = FlameLineEdit('Some text here', window) - """ - - def __init__(self, text, parent_window, *args, **kwargs): - super(FlameLineEdit, self).__init__(*args, **kwargs) - - self.setText(text) - self.setParent(parent_window) - self.setMinimumHeight(28) - self.setMinimumWidth(110) - self.setStyleSheet( - 'QLineEdit {color: #9a9a9a; background-color: #373e47; selection-color: #262626; selection-background-color: #b8b1a7; font: 14px "Discreet"}' # noqa - 'QLineEdit:focus {background-color: #474e58}' # noqa - 'QLineEdit:disabled {color: #6a6a6a; background-color: #373737}' - ) - - -class FlameTreeWidget(QtWidgets.QTreeWidget): - """ - Custom Qt Flame Tree Widget - - To use: - - tree_headers = ['Header1', 'Header2', 'Header3', 'Header4'] - tree = FlameTreeWidget(tree_headers, window) - """ - - def __init__(self, tree_headers, parent_window, *args, **kwargs): - super(FlameTreeWidget, self).__init__(*args, **kwargs) - - self.setMinimumWidth(1000) - self.setMinimumHeight(300) - self.setSortingEnabled(True) - self.sortByColumn(0, QtCore.Qt.AscendingOrder) - self.setAlternatingRowColors(True) - self.setFocusPolicy(QtCore.Qt.NoFocus) - self.setStyleSheet( - 'QTreeWidget {color: #9a9a9a; background-color: #2a2a2a; alternate-background-color: #2d2d2d; font: 14px "Discreet"}' # noqa - 'QTreeWidget::item:selected {color: #d9d9d9; background-color: #474747; border: 1px solid #111111}' # noqa - 'QHeaderView {color: #9a9a9a; background-color: #393939; font: 14px "Discreet"}' # noqa - 'QTreeWidget::item:selected {selection-background-color: #111111}' - 'QMenu {color: #9a9a9a; background-color: #24303d; font: 14px "Discreet"}' # noqa - 'QMenu::item:selected {color: #d9d9d9; background-color: #3a4551}' - ) - self.verticalScrollBar().setStyleSheet('color: #818181') - self.horizontalScrollBar().setStyleSheet('color: #818181') - self.setHeaderLabels(tree_headers) - - -class FlameButton(QtWidgets.QPushButton): - """ - Custom Qt Flame Button Widget - - To use: - - button = FlameButton('Button Name', do_this_when_pressed, window) - """ - - def __init__(self, button_name, do_when_pressed, parent_window, - *args, **kwargs): - super(FlameButton, self).__init__(*args, **kwargs) - - self.setText(button_name) - self.setParent(parent_window) - self.setMinimumSize(QtCore.QSize(110, 28)) - self.setMaximumSize(QtCore.QSize(110, 28)) - self.setFocusPolicy(QtCore.Qt.NoFocus) - self.clicked.connect(do_when_pressed) - self.setStyleSheet( - 'QPushButton {color: #9a9a9a; background-color: #424142; border-top: 1px inset #555555; border-bottom: 1px inset black; font: 14px "Discreet"}' # noqa - 'QPushButton:pressed {color: #d9d9d9; background-color: #4f4f4f; border-top: 1px inset #666666; font: italic}' # noqa - 'QPushButton:disabled {color: #747474; background-color: #353535; border-top: 1px solid #444444; border-bottom: 1px solid #242424}' # noqa - ) - - -class 
FlamePushButton(QtWidgets.QPushButton): - """ - Custom Qt Flame Push Button Widget - - To use: - - pushbutton = FlamePushButton(' Button Name', True_or_False, window) - """ - - def __init__(self, button_name, button_checked, parent_window, - *args, **kwargs): - super(FlamePushButton, self).__init__(*args, **kwargs) - - self.setText(button_name) - self.setParent(parent_window) - self.setCheckable(True) - self.setChecked(button_checked) - self.setMinimumSize(155, 28) - self.setMaximumSize(155, 28) - self.setFocusPolicy(QtCore.Qt.NoFocus) - self.setStyleSheet( - 'QPushButton {color: #9a9a9a; background-color: qlineargradient(x1: 0, y1: 0, x2: 1, y2: 0, stop: .93 #424142, stop: .94 #2e3b48); text-align: left; border-top: 1px inset #555555; border-bottom: 1px inset black; font: 14px "Discreet"}' # noqa - 'QPushButton:checked {color: #d9d9d9; background-color: qlineargradient(x1: 0, y1: 0, x2: 1, y2: 0, stop: .93 #4f4f4f, stop: .94 #5a7fb4); font: italic; border: 1px inset black; border-bottom: 1px inset #404040; border-right: 1px inset #404040}' # noqa - 'QPushButton:disabled {color: #6a6a6a; background-color: qlineargradient(x1: 0, y1: 0, x2: 1, y2: 0, stop: .93 #383838, stop: .94 #353535); font: light; border-top: 1px solid #575757; border-bottom: 1px solid #242424; border-right: 1px solid #353535; border-left: 1px solid #353535}' # noqa - 'QToolTip {color: black; background-color: #ffffde; border: black solid 1px}' # noqa - ) - - -class FlamePushButtonMenu(QtWidgets.QPushButton): - """ - Custom Qt Flame Menu Push Button Widget - - To use: - - push_button_menu_options = ['Item 1', 'Item 2', 'Item 3', 'Item 4'] - menu_push_button = FlamePushButtonMenu('push_button_name', - push_button_menu_options, window) - - or - - push_button_menu_options = ['Item 1', 'Item 2', 'Item 3', 'Item 4'] - menu_push_button = FlamePushButtonMenu(push_button_menu_options[0], - push_button_menu_options, window) - """ - selection_changed = QtCore.Signal(str) - - def __init__(self, button_name, menu_options, parent_window, - *args, **kwargs): - super(FlamePushButtonMenu, self).__init__(*args, **kwargs) - - self.setParent(parent_window) - self.setMinimumHeight(28) - self.setMinimumWidth(110) - self.setFocusPolicy(QtCore.Qt.NoFocus) - self.setStyleSheet( - 'QPushButton {color: #9a9a9a; background-color: #24303d; font: 14px "Discreet"}' # noqa - 'QPushButton:disabled {color: #747474; background-color: #353535; border-top: 1px solid #444444; border-bottom: 1px solid #242424}' # noqa - ) - - pushbutton_menu = QtWidgets.QMenu(parent_window) - pushbutton_menu.setFocusPolicy(QtCore.Qt.NoFocus) - pushbutton_menu.setStyleSheet( - 'QMenu {color: #9a9a9a; background-color:#24303d; font: 14px "Discreet"}' # noqa - 'QMenu::item:selected {color: #d9d9d9; background-color: #3a4551}' - ) - - self._pushbutton_menu = pushbutton_menu - self.setMenu(pushbutton_menu) - self.set_menu_options(menu_options, button_name) - - def set_menu_options(self, menu_options, current_option=None): - self._pushbutton_menu.clear() - current_option = current_option or menu_options[0] - - for option in menu_options: - action = self._pushbutton_menu.addAction(option) - action.triggered.connect(self._on_action_trigger) - - if current_option is not None: - self.setText(current_option) - - def _on_action_trigger(self): - action = self.sender() - self.setText(action.text()) - self.selection_changed.emit(action.text()) diff --git a/server_addon/flame/client/ayon_flame/startup/openpype_babypublisher/openpype_babypublisher.py 
b/server_addon/flame/client/ayon_flame/startup/openpype_babypublisher/openpype_babypublisher.py deleted file mode 100644 index 76d74b5970..0000000000 --- a/server_addon/flame/client/ayon_flame/startup/openpype_babypublisher/openpype_babypublisher.py +++ /dev/null @@ -1,43 +0,0 @@ -from __future__ import print_function - -import os -import sys - -# only testing dependency for nested modules in package -import six # noqa - - -SCRIPT_DIR = os.path.dirname(__file__) -PACKAGE_DIR = os.path.join(SCRIPT_DIR, "modules") -sys.path.append(PACKAGE_DIR) - - -def flame_panel_executor(selection): - if "panel_app" in sys.modules.keys(): - print("panel_app module is already loaded") - del sys.modules["panel_app"] - import panel_app - reload(panel_app) # noqa - print("panel_app module removed from sys.modules") - - panel_app.FlameBabyPublisherPanel(selection) - - -def scope_sequence(selection): - import flame - return any(isinstance(item, flame.PySequence) for item in selection) - - -def get_media_panel_custom_ui_actions(): - return [ - { - "name": "AYON: Baby-publisher", - "actions": [ - { - "name": "Create Shots", - "isVisible": scope_sequence, - "execute": flame_panel_executor - } - ] - } - ] diff --git a/server_addon/flame/client/ayon_flame/startup/openpype_in_flame.py b/server_addon/flame/client/ayon_flame/startup/openpype_in_flame.py deleted file mode 100644 index 8f319a88eb..0000000000 --- a/server_addon/flame/client/ayon_flame/startup/openpype_in_flame.py +++ /dev/null @@ -1,219 +0,0 @@ -from __future__ import print_function -import sys -from qtpy import QtWidgets -from pprint import pformat -import atexit - -import ayon_flame.api as opfapi -from ayon_core.pipeline import ( - install_host, - registered_host, -) - - -def openpype_install(): - """Registering AYON in context - """ - install_host(opfapi) - print("Registered host: {}".format(registered_host())) - - -# Exception handler -def exeption_handler(exctype, value, _traceback): - """Exception handler for improving UX - - Args: - exctype (str): type of exception - value (str): exception value - tb (str): traceback to show - """ - import traceback - msg = "AYON: Python exception {} in {}".format(value, exctype) - mbox = QtWidgets.QMessageBox() - mbox.setText(msg) - mbox.setDetailedText( - pformat(traceback.format_exception(exctype, value, _traceback))) - mbox.setStyleSheet('QLabel{min-width: 800px;}') - mbox.exec_() - sys.__excepthook__(exctype, value, _traceback) - - -# add exception handler into sys module -sys.excepthook = exeption_handler - - -# register clean up logic to be called at Flame exit -def cleanup(): - """Cleaning up Flame framework context - """ - if opfapi.CTX.flame_apps: - print('`{}` cleaning up flame_apps:\n {}\n'.format( - __file__, pformat(opfapi.CTX.flame_apps))) - while len(opfapi.CTX.flame_apps): - app = opfapi.CTX.flame_apps.pop() - print('`{}` removing : {}'.format(__file__, app.name)) - del app - opfapi.CTX.flame_apps = [] - - if opfapi.CTX.app_framework: - print('openpype\t: {} cleaning up'.format( - opfapi.CTX.app_framework.bundle_name) - ) - opfapi.CTX.app_framework.save_prefs() - opfapi.CTX.app_framework = None - - -atexit.register(cleanup) - - -def load_apps(): - """Load available flame_apps into Flame framework - """ - opfapi.CTX.flame_apps.append( - opfapi.FlameMenuProjectConnect(opfapi.CTX.app_framework)) - opfapi.CTX.flame_apps.append( - opfapi.FlameMenuTimeline(opfapi.CTX.app_framework)) - opfapi.CTX.flame_apps.append( - opfapi.FlameMenuUniversal(opfapi.CTX.app_framework)) - 
opfapi.CTX.app_framework.log.info("Apps are loaded")
-
-
-def project_changed_dict(info):
-    """Hook for project change action
-
-    Args:
-        info (str): info text
-    """
-    cleanup()
-
-
-def app_initialized(parent=None):
-    """Initialization of Framework
-
-    Args:
-        parent (obj, optional): Parent object. Defaults to None.
-    """
-    opfapi.CTX.app_framework = opfapi.FlameAppFramework()
-
-    print("{} initializing".format(
-        opfapi.CTX.app_framework.bundle_name))
-
-    load_apps()
-
-
-"""
-Initialisation of the hook starts here
-
-First it needs to test if it can import the flame module.
-This will happen only in case a project has been loaded.
-Then `app_initialized` will load the main Framework which will load
-all menu objects as flame_apps.
-"""
-
-try:
-    import flame  # noqa
-    app_initialized(parent=None)
-except ImportError:
-    print("!!!! not able to import flame module !!!!")
-
-
-def rescan_hooks():
-    import flame  # noqa
-    flame.execute_shortcut('Rescan Python Hooks')
-
-
-def _build_app_menu(app_name):
-    """Flame menu object generator
-
-    Args:
-        app_name (str): name of menu object app
-
-    Returns:
-        list: menu object
-    """
-    menu = []
-
-    # first find the relevant app by name
-    app = None
-    for _app in opfapi.CTX.flame_apps:
-        if _app.__class__.__name__ == app_name:
-            app = _app
-
-    if app:
-        menu.append(app.build_menu())
-
-    if opfapi.CTX.app_framework:
-        menu_auto_refresh = opfapi.CTX.app_framework.prefs_global.get(
-            'menu_auto_refresh', {})
-        if menu_auto_refresh.get('timeline_menu', True):
-            try:
-                import flame  # noqa
-                flame.schedule_idle_event(rescan_hooks)
-            except ImportError:
-                print("!!!! not able to import flame module !!!!")
-
-    return menu
-
-
-""" Flame hooks are starting here
-"""
-
-
-def project_saved(project_name, save_time, is_auto_save):
-    """Hook to activate when project is saved
-
-    Args:
-        project_name (str): name of project
-        save_time (str): time when it was saved
-        is_auto_save (bool): autosave is on or off
-    """
-    if opfapi.CTX.app_framework:
-        opfapi.CTX.app_framework.save_prefs()
-
-
-def get_main_menu_custom_ui_actions():
-    """Hook to create submenu in start menu
-
-    Returns:
-        list: menu object
-    """
-    # install openpype and the host
-    openpype_install()
-
-    return _build_app_menu("FlameMenuProjectConnect")
-
-
-def get_timeline_custom_ui_actions():
-    """Hook to create submenu in timeline
-
-    Returns:
-        list: menu object
-    """
-    # install openpype and the host
-    openpype_install()
-
-    return _build_app_menu("FlameMenuTimeline")
-
-
-def get_batch_custom_ui_actions():
-    """Hook to create submenu in batch
-
-    Returns:
-        list: menu object
-    """
-    # install openpype and the host
-    openpype_install()
-
-    return _build_app_menu("FlameMenuUniversal")
-
-
-def get_media_panel_custom_ui_actions():
-    """Hook to create submenu in desktop
-
-    Returns:
-        list: menu object
-    """
-    # install openpype and the host
-    openpype_install()
-
-    return _build_app_menu("FlameMenuUniversal")
diff --git a/server_addon/flame/client/ayon_flame/version.py b/server_addon/flame/client/ayon_flame/version.py
deleted file mode 100644
index 68bdb6e6a0..0000000000
--- a/server_addon/flame/client/ayon_flame/version.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Package declaring AYON addon 'flame' version."""
-__version__ = "0.2.1"
diff --git a/server_addon/flame/package.py b/server_addon/flame/package.py
deleted file mode 100644
index b25a514a9f..0000000000
--- a/server_addon/flame/package.py
+++ /dev/null
@@ -1,10 +0,0 @@
-name = "flame"
-title = "Flame"
-version = "0.2.1"
-
-client_dir = "ayon_flame" - -ayon_required_addons = { - "core": ">0.3.2", -} -ayon_compatible_addons = {} diff --git a/server_addon/flame/server/__init__.py b/server_addon/flame/server/__init__.py deleted file mode 100644 index 4aa46617ee..0000000000 --- a/server_addon/flame/server/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from typing import Type - -from ayon_server.addons import BaseServerAddon - -from .settings import FlameSettings, DEFAULT_VALUES - - -class FlameAddon(BaseServerAddon): - settings_model: Type[FlameSettings] = FlameSettings - - async def get_default_settings(self): - settings_model_cls = self.get_settings_model() - return settings_model_cls(**DEFAULT_VALUES) diff --git a/server_addon/flame/server/settings/__init__.py b/server_addon/flame/server/settings/__init__.py deleted file mode 100644 index 39b8220d40..0000000000 --- a/server_addon/flame/server/settings/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from .main import ( - FlameSettings, - DEFAULT_VALUES, -) - - -__all__ = ( - "FlameSettings", - "DEFAULT_VALUES", -) diff --git a/server_addon/flame/server/settings/create_plugins.py b/server_addon/flame/server/settings/create_plugins.py deleted file mode 100644 index 2f17ec40c4..0000000000 --- a/server_addon/flame/server/settings/create_plugins.py +++ /dev/null @@ -1,119 +0,0 @@ -from ayon_server.settings import BaseSettingsModel, SettingsField - - -class CreateShotClipModel(BaseSettingsModel): - hierarchy: str = SettingsField( - "shot", - title="Shot parent hierarchy", - section="Shot Hierarchy And Rename Settings" - ) - useShotName: bool = SettingsField( - True, - title="Use Shot Name", - ) - clipRename: bool = SettingsField( - False, - title="Rename clips", - ) - clipName: str = SettingsField( - "{sequence}{shot}", - title="Clip name template" - ) - segmentIndex: bool = SettingsField( - True, - title="Accept segment order" - ) - countFrom: int = SettingsField( - 10, - title="Count sequence from" - ) - countSteps: int = SettingsField( - 10, - title="Stepping number" - ) - - folder: str = SettingsField( - "shots", - title="{folder}", - section="Shot Template Keywords" - ) - episode: str = SettingsField( - "ep01", - title="{episode}" - ) - sequence: str = SettingsField( - "a", - title="{sequence}" - ) - track: str = SettingsField( - "{_track_}", - title="{track}" - ) - shot: str = SettingsField( - "####", - title="{shot}" - ) - - vSyncOn: bool = SettingsField( - False, - title="Enable Vertical Sync", - section="Vertical Synchronization Of Attributes" - ) - - workfileFrameStart: int = SettingsField( - 1001, - title="Workfiles Start Frame", - section="Shot Attributes" - ) - handleStart: int = SettingsField( - 10, - title="Handle start (head)" - ) - handleEnd: int = SettingsField( - 10, - title="Handle end (tail)" - ) - includeHandles: bool = SettingsField( - False, - title="Enable handles including" - ) - retimedHandles: bool = SettingsField( - True, - title="Enable retimed handles" - ) - retimedFramerange: bool = SettingsField( - True, - title="Enable retimed shot frameranges" - ) - - -class CreatePluginsModel(BaseSettingsModel): - CreateShotClip: CreateShotClipModel = SettingsField( - default_factory=CreateShotClipModel, - title="Create Shot Clip" - ) - - -DEFAULT_CREATE_SETTINGS = { - "CreateShotClip": { - "hierarchy": "{folder}/{sequence}", - "useShotName": True, - "clipRename": False, - "clipName": "{sequence}{shot}", - "segmentIndex": True, - "countFrom": 10, - "countSteps": 10, - "folder": "shots", - "episode": "ep01", - "sequence": "a", - "track": "{_track_}", - 
"shot": "####", - "vSyncOn": False, - "workfileFrameStart": 1001, - "handleStart": 5, - "handleEnd": 5, - "includeHandles": False, - "retimedHandles": True, - "retimedFramerange": True - } -} diff --git a/server_addon/flame/server/settings/imageio.py b/server_addon/flame/server/settings/imageio.py deleted file mode 100644 index abd058ee13..0000000000 --- a/server_addon/flame/server/settings/imageio.py +++ /dev/null @@ -1,149 +0,0 @@ -from pydantic import validator -from ayon_server.settings import ( - BaseSettingsModel, - SettingsField, - ensure_unique_names, -) - - -class ImageIOFileRuleModel(BaseSettingsModel): - name: str = SettingsField("", title="Rule name") - pattern: str = SettingsField("", title="Regex pattern") - colorspace: str = SettingsField("", title="Colorspace name") - ext: str = SettingsField("", title="File extension") - - -class ImageIOFileRulesModel(BaseSettingsModel): - activate_host_rules: bool = SettingsField(False) - rules: list[ImageIOFileRuleModel] = SettingsField( - default_factory=list, - title="Rules" - ) - - @validator("rules") - def validate_unique_outputs(cls, value): - ensure_unique_names(value) - return value - - -class ImageIORemappingRulesModel(BaseSettingsModel): - host_native_name: str = SettingsField( - title="Application native colorspace name" - ) - ocio_name: str = SettingsField(title="OCIO colorspace name") - - -class ImageIORemappingModel(BaseSettingsModel): - rules: list[ImageIORemappingRulesModel] = SettingsField( - default_factory=list - ) - - -class ImageIOConfigModel(BaseSettingsModel): - """[DEPRECATED] Addon OCIO config settings. Please set the OCIO config - path in the Core addon profiles here - (ayon+settings://core/imageio/ocio_config_profiles). - """ - - override_global_config: bool = SettingsField( - False, - title="Override global OCIO config", - description=( - "DEPRECATED functionality. Please set the OCIO config path in the " - "Core addon profiles here (ayon+settings://core/imageio/" - "ocio_config_profiles)." - ), - ) - filepath: list[str] = SettingsField( - default_factory=list, - title="Config path", - description=( - "DEPRECATED functionality. Please set the OCIO config path in the " - "Core addon profiles here (ayon+settings://core/imageio/" - "ocio_config_profiles)." 
-        ),
-    )
-
-
-class ProfileNamesMappingInputsModel(BaseSettingsModel):
-    _layout = "expanded"
-
-    flameName: str = SettingsField("", title="Flame name")
-    ocioName: str = SettingsField("", title="OCIO name")
-
-
-class ProfileNamesMappingModel(BaseSettingsModel):
-    _layout = "expanded"
-
-    inputs: list[ProfileNamesMappingInputsModel] = SettingsField(
-        default_factory=list,
-        title="Profile names mapping"
-    )
-
-
-class ImageIOProjectModel(BaseSettingsModel):
-    colourPolicy: str = SettingsField(
-        "ACES 1.1",
-        title="Colour Policy (name or path)",
-        section="Project"
-    )
-    frameDepth: str = SettingsField(
-        "16-bit fp",
-        title="Image Depth"
-    )
-    fieldDominance: str = SettingsField(
-        "PROGRESSIVE",
-        title="Field Dominance"
-    )
-
-
-class FlameImageIOModel(BaseSettingsModel):
-    _isGroup = True
-    activate_host_color_management: bool = SettingsField(
-        True, title="Enable Color Management"
-    )
-    remapping: ImageIORemappingModel = SettingsField(
-        title="Remapping colorspace names",
-        default_factory=ImageIORemappingModel
-    )
-    ocio_config: ImageIOConfigModel = SettingsField(
-        default_factory=ImageIOConfigModel,
-        title="OCIO config"
-    )
-    file_rules: ImageIOFileRulesModel = SettingsField(
-        default_factory=ImageIOFileRulesModel,
-        title="File Rules"
-    )
-    # NOTE 'project' attribute was expanded to this model but that caused
-    # inconsistency with v3 settings and harder conversion handling
-    # - it can be moved back but keep in mind that it must be handled in v3
-    #   conversion script too
-    project: ImageIOProjectModel = SettingsField(
-        default_factory=ImageIOProjectModel,
-        title="Project"
-    )
-    profilesMapping: ProfileNamesMappingModel = SettingsField(
-        default_factory=ProfileNamesMappingModel,
-        title="Profile names mapping"
-    )
-
-
-DEFAULT_IMAGEIO_SETTINGS = {
-    "project": {
-        "colourPolicy": "ACES 1.1",
-        "frameDepth": "16-bit fp",
-        "fieldDominance": "PROGRESSIVE"
-    },
-    "profilesMapping": {
-        "inputs": [
-            {
-                "flameName": "ACEScg",
-                "ocioName": "ACES - ACEScg"
-            },
-            {
-                "flameName": "Rec.709 video",
-                "ocioName": "Output - Rec.709"
-            }
-        ]
-    }
-}
diff --git a/server_addon/flame/server/settings/loader_plugins.py b/server_addon/flame/server/settings/loader_plugins.py
deleted file mode 100644
index e616f442b5..0000000000
--- a/server_addon/flame/server/settings/loader_plugins.py
+++ /dev/null
@@ -1,103 +0,0 @@
-from ayon_server.settings import SettingsField, BaseSettingsModel
-
-
-class LoadClipModel(BaseSettingsModel):
-    enabled: bool = SettingsField(True)
-
-    product_types: list[str] = SettingsField(
-        default_factory=list,
-        title="Product types"
-    )
-    reel_group_name: str = SettingsField(
-        "OpenPype_Reels",
-        title="Reel group name"
-    )
-    reel_name: str = SettingsField(
-        "Loaded",
-        title="Reel name"
-    )
-
-    clip_name_template: str = SettingsField(
-        "{folder[name]}_{product[name]}<_{output}>",
-        title="Clip name template"
-    )
-    layer_rename_template: str = SettingsField(
-        "", title="Layer name template"
-    )
-    layer_rename_patterns: list[str] = SettingsField(
-        default_factory=list,
-        title="Layer rename patterns",
-    )
-
-
-class LoadClipBatchModel(BaseSettingsModel):
-    enabled: bool = SettingsField(True)
-    product_types: list[str] = SettingsField(
-        default_factory=list,
-        title="Product types"
-    )
-    reel_name: str = SettingsField(
-        "OP_LoadedReel",
-        title="Reel name"
-    )
-    clip_name_template: str = SettingsField(
-        "{batch}_{folder[name]}_{product[name]}<_{output}>",
-        title="Clip name template"
-    )
-    layer_rename_template: str = SettingsField(
-        "", title="Layer name template"
template" - ) - layer_rename_patterns: list[str] = SettingsField( - default_factory=list, - title="Layer rename patters", - ) - - -class LoaderPluginsModel(BaseSettingsModel): - LoadClip: LoadClipModel = SettingsField( - default_factory=LoadClipModel, - title="Load Clip" - ) - LoadClipBatch: LoadClipBatchModel = SettingsField( - default_factory=LoadClipBatchModel, - title="Load as clip to current batch" - ) - - -DEFAULT_LOADER_SETTINGS = { - "LoadClip": { - "enabled": True, - "product_types": [ - "render2d", - "source", - "plate", - "render", - "review" - ], - "reel_group_name": "OpenPype_Reels", - "reel_name": "Loaded", - "clip_name_template": "{folder[name]}_{product[name]}<_{output}>", - "layer_rename_template": "{folder[name]}_{product[name]}<_{output}>", - "layer_rename_patterns": [ - "rgb", - "rgba" - ] - }, - "LoadClipBatch": { - "enabled": True, - "product_types": [ - "render2d", - "source", - "plate", - "render", - "review" - ], - "reel_name": "OP_LoadedReel", - "clip_name_template": "{batch}_{folder[name]}_{product[name]}<_{output}>", - "layer_rename_template": "{folder[name]}_{product[name]}<_{output}>", - "layer_rename_patterns": [ - "rgb", - "rgba" - ] - } -} diff --git a/server_addon/flame/server/settings/main.py b/server_addon/flame/server/settings/main.py deleted file mode 100644 index c838ee9646..0000000000 --- a/server_addon/flame/server/settings/main.py +++ /dev/null @@ -1,33 +0,0 @@ -from ayon_server.settings import BaseSettingsModel, SettingsField - -from .imageio import FlameImageIOModel, DEFAULT_IMAGEIO_SETTINGS -from .create_plugins import CreatePluginsModel, DEFAULT_CREATE_SETTINGS -from .publish_plugins import PublishPluginsModel, DEFAULT_PUBLISH_SETTINGS -from .loader_plugins import LoaderPluginsModel, DEFAULT_LOADER_SETTINGS - - -class FlameSettings(BaseSettingsModel): - imageio: FlameImageIOModel = SettingsField( - default_factory=FlameImageIOModel, - title="Color Management (ImageIO)" - ) - create: CreatePluginsModel = SettingsField( - default_factory=CreatePluginsModel, - title="Create plugins" - ) - publish: PublishPluginsModel = SettingsField( - default_factory=PublishPluginsModel, - title="Publish plugins" - ) - load: LoaderPluginsModel = SettingsField( - default_factory=LoaderPluginsModel, - title="Loader plugins" - ) - - -DEFAULT_VALUES = { - "imageio": DEFAULT_IMAGEIO_SETTINGS, - "create": DEFAULT_CREATE_SETTINGS, - "publish": DEFAULT_PUBLISH_SETTINGS, - "load": DEFAULT_LOADER_SETTINGS -} diff --git a/server_addon/flame/server/settings/publish_plugins.py b/server_addon/flame/server/settings/publish_plugins.py deleted file mode 100644 index b34083b4e2..0000000000 --- a/server_addon/flame/server/settings/publish_plugins.py +++ /dev/null @@ -1,196 +0,0 @@ -from ayon_server.settings import ( - BaseSettingsModel, - SettingsField, - task_types_enum, -) - - -class XMLPresetAttrsFromCommentsModel(BaseSettingsModel): - _layout = "expanded" - name: str = SettingsField("", title="Attribute name") - type: str = SettingsField( - default_factory=str, - title="Attribute type", - enum_resolver=lambda: ["number", "float", "string"] - ) - - -class AddTasksModel(BaseSettingsModel): - _layout = "expanded" - name: str = SettingsField("", title="Task name") - type: str = SettingsField( - default_factory=str, - title="Task type", - enum_resolver=task_types_enum - ) - create_batch_group: bool = SettingsField( - True, - title="Create batch group" - ) - - -class CollectTimelineInstancesModel(BaseSettingsModel): - _isGroup = True - - xml_preset_attrs_from_comments: 
-        SettingsField(
-            default_factory=list,
-            title="XML preset attributes parsable from segment comments"
-        )
-    )
-    add_tasks: list[AddTasksModel] = SettingsField(
-        default_factory=list,
-        title="Add tasks"
-    )
-
-
-class ExportPresetsMappingModel(BaseSettingsModel):
-    _layout = "expanded"
-
-    name: str = SettingsField(
-        ...,
-        title="Name"
-    )
-    active: bool = SettingsField(True, title="Is active")
-    export_type: str = SettingsField(
-        "File Sequence",
-        title="Export clip type",
-        enum_resolver=lambda: ["Movie", "File Sequence", "Sequence Publish"]
-    )
-    ext: str = SettingsField("exr", title="Output extension")
-    xml_preset_file: str = SettingsField(
-        "OpenEXR (16-bit fp DWAA).xml",
-        title="XML preset file (with ext)"
-    )
-    colorspace_out: str = SettingsField(
-        "ACES - ACEScg",
-        title="Output color (imageio)"
-    )
-    # TODO remove when resolved or v3 is not a thing anymore
-    # NOTE next 4 attributes were grouped under 'other_parameters' but that
-    # created inconsistency with v3 settings and harder conversion handling
-    # - it can be moved back but keep in mind that it must be handled in v3
-    #   conversion script too
-    xml_preset_dir: str = SettingsField(
-        "",
-        title="XML preset directory"
-    )
-    parsed_comment_attrs: bool = SettingsField(
-        True,
-        title="Parsed comment attributes"
-    )
-    representation_add_range: bool = SettingsField(
-        True,
-        title="Add range to representation name"
-    )
-    representation_tags: list[str] = SettingsField(
-        default_factory=list,
-        title="Representation tags"
-    )
-    load_to_batch_group: bool = SettingsField(
-        True,
-        title="Load to batch group reel"
-    )
-    batch_group_loader_name: str = SettingsField(
-        "LoadClipBatch",
-        title="Use loader name"
-    )
-    filter_path_regex: str = SettingsField(
-        ".*",
-        title="Regex in clip path"
-    )
-
-
-class ExtractProductResourcesModel(BaseSettingsModel):
-    _isGroup = True
-
-    keep_original_representation: bool = SettingsField(
-        False,
-        title="Publish clip's original media"
-    )
-    export_presets_mapping: list[ExportPresetsMappingModel] = SettingsField(
-        default_factory=list,
-        title="Export presets mapping"
-    )
-
-
-class IntegrateBatchGroupModel(BaseSettingsModel):
-    enabled: bool = SettingsField(
-        False,
-        title="Enabled"
-    )
-
-
-class PublishPluginsModel(BaseSettingsModel):
-    CollectTimelineInstances: CollectTimelineInstancesModel = SettingsField(
-        default_factory=CollectTimelineInstancesModel,
-        title="Collect Timeline Instances"
-    )
-
-    ExtractProductResources: ExtractProductResourcesModel = SettingsField(
-        default_factory=ExtractProductResourcesModel,
-        title="Extract Product Resources"
-    )
-
-    IntegrateBatchGroup: IntegrateBatchGroupModel = SettingsField(
-        default_factory=IntegrateBatchGroupModel,
-        title="IntegrateBatchGroup"
-    )
-
-
-DEFAULT_PUBLISH_SETTINGS = {
-    "CollectTimelineInstances": {
-        "xml_preset_attrs_from_comments": [
-            {
-                "name": "width",
-                "type": "number"
-            },
-            {
-                "name": "height",
-                "type": "number"
-            },
-            {
-                "name": "pixelRatio",
-                "type": "float"
-            },
-            {
-                "name": "resizeType",
-                "type": "string"
-            },
-            {
-                "name": "resizeFilter",
-                "type": "string"
-            }
-        ],
-        "add_tasks": [
-            {
-                "name": "compositing",
-                "type": "Compositing",
-                "create_batch_group": True
-            }
-        ]
-    },
-    "ExtractProductResources": {
-        "keep_original_representation": False,
-        "export_presets_mapping": [
-            {
-                "name": "exr16fpdwaa",
-                "active": True,
-                "export_type": "File Sequence",
-                "ext": "exr",
-                "xml_preset_file": "OpenEXR (16-bit fp DWAA).xml",
-                "colorspace_out":
"ACES - ACEScg", - "xml_preset_dir": "", - "parsed_comment_attrs": True, - "representation_add_range": True, - "representation_tags": [], - "load_to_batch_group": True, - "batch_group_loader_name": "LoadClipBatch", - "filter_path_regex": ".*" - } - ] - }, - "IntegrateBatchGroup": { - "enabled": False - } -} diff --git a/server_addon/houdini/client/ayon_houdini/plugins/publish/extract_active_view_thumbnail.py b/server_addon/houdini/client/ayon_houdini/plugins/publish/extract_active_view_thumbnail.py index c4d51c0808..e85df4ee81 100644 --- a/server_addon/houdini/client/ayon_houdini/plugins/publish/extract_active_view_thumbnail.py +++ b/server_addon/houdini/client/ayon_houdini/plugins/publish/extract_active_view_thumbnail.py @@ -1,10 +1,13 @@ import tempfile import pyblish.api + +from ayon_core.pipeline import OptionalPyblishPluginMixin from ayon_houdini.api import lib, plugin from ayon_houdini.api.pipeline import IS_HEADLESS -class ExtractActiveViewThumbnail(plugin.HoudiniExtractorPlugin): +class ExtractActiveViewThumbnail(plugin.HoudiniExtractorPlugin, + OptionalPyblishPluginMixin): """Set instance thumbnail to a screengrab of current active viewport. This makes it so that if an instance does not have a thumbnail set yet that @@ -17,6 +20,9 @@ class ExtractActiveViewThumbnail(plugin.HoudiniExtractorPlugin): families = ["workfile"] def process(self, instance): + if not self.is_active(instance.data): + return + if IS_HEADLESS: self.log.debug( "Skip extraction of active view thumbnail, due to being in" diff --git a/server_addon/houdini/server/settings/publish.py b/server_addon/houdini/server/settings/publish.py index 336de8e046..2b88f96922 100644 --- a/server_addon/houdini/server/settings/publish.py +++ b/server_addon/houdini/server/settings/publish.py @@ -31,6 +31,7 @@ class AOVFilterSubmodel(BaseSettingsModel): title="AOV regex" ) + class CollectLocalRenderInstancesModel(BaseSettingsModel): use_deadline_aov_filter: bool = SettingsField( @@ -57,7 +58,7 @@ class ValidateWorkfilePathsModel(BaseSettingsModel): ) -class BasicValidateModel(BaseSettingsModel): +class BasicEnabledStatesModel(BaseSettingsModel): enabled: bool = SettingsField(title="Enabled") optional: bool = SettingsField(title="Optional") active: bool = SettingsField(title="Active") @@ -66,36 +67,41 @@ class BasicValidateModel(BaseSettingsModel): class PublishPluginsModel(BaseSettingsModel): CollectAssetHandles: CollectAssetHandlesModel = SettingsField( default_factory=CollectAssetHandlesModel, - title="Collect Asset Handles.", + title="Collect Asset Handles", section="Collectors" ) CollectChunkSize: CollectChunkSizeModel = SettingsField( default_factory=CollectChunkSizeModel, - title="Collect Chunk Size." + title="Collect Chunk Size" ) CollectLocalRenderInstances: CollectLocalRenderInstancesModel = SettingsField( default_factory=CollectLocalRenderInstancesModel, - title="Collect Local Render Instances." 
+ title="Collect Local Render Instances" ) - ValidateInstanceInContextHoudini: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Instance is in same Context.", + ValidateInstanceInContextHoudini: BasicEnabledStatesModel = SettingsField( + default_factory=BasicEnabledStatesModel, + title="Validate Instance is in same Context", section="Validators") - ValidateMeshIsStatic: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Mesh is Static.") - ValidateReviewColorspace: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Review Colorspace.") - ValidateSubsetName: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Subset Name.") - ValidateUnrealStaticMeshName: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Unreal Static Mesh Name.") + ValidateMeshIsStatic: BasicEnabledStatesModel = SettingsField( + default_factory=BasicEnabledStatesModel, + title="Validate Mesh is Static") + ValidateReviewColorspace: BasicEnabledStatesModel = SettingsField( + default_factory=BasicEnabledStatesModel, + title="Validate Review Colorspace") + ValidateSubsetName: BasicEnabledStatesModel = SettingsField( + default_factory=BasicEnabledStatesModel, + title="Validate Subset Name") + ValidateUnrealStaticMeshName: BasicEnabledStatesModel = SettingsField( + default_factory=BasicEnabledStatesModel, + title="Validate Unreal Static Mesh Name") ValidateWorkfilePaths: ValidateWorkfilePathsModel = SettingsField( default_factory=ValidateWorkfilePathsModel, - title="Validate workfile paths settings.") + title="Validate workfile paths settings") + ExtractActiveViewThumbnail: BasicEnabledStatesModel = SettingsField( + default_factory=BasicEnabledStatesModel, + title="Extract Active View Thumbnail", + section="Extractors" + ) DEFAULT_HOUDINI_PUBLISH_SETTINGS = { @@ -109,7 +115,7 @@ class PublishPluginsModel(BaseSettingsModel): }, "CollectLocalRenderInstances": { "use_deadline_aov_filter": False, - "aov_filter" : { + "aov_filter": { "host_name": "houdini", "value": [ ".*([Bb]eauty).*" @@ -152,5 +158,10 @@ class PublishPluginsModel(BaseSettingsModel): "$HIP", "$JOB" ] + }, + "ExtractActiveViewThumbnail": { + "enabled": True, + "optional": False, + "active": True } } diff --git a/server_addon/nuke/client/ayon_nuke/api/lib.py b/server_addon/nuke/client/ayon_nuke/api/lib.py index 905521255f..6caaed3801 100644 --- a/server_addon/nuke/client/ayon_nuke/api/lib.py +++ b/server_addon/nuke/client/ayon_nuke/api/lib.py @@ -561,7 +561,7 @@ def read_avalon_data(node): node (nuke.Node): Nuke node object Returns: - list: A list of nuke.Knob object + Dict[str, nuke.Knob]: A dictionary of knob name to nuke.Knob objects """ def compat_prefixed(knob_name): @@ -613,7 +613,7 @@ def get_node_path(path, padding=4): path (str): The path to render to. 
Returns: - tuple: head, padding, tail (extension) + Tuple[str, int, str]: head, padding, tail (extension) Examples: >>> get_frame_path("test.exr") @@ -655,8 +655,7 @@ def get_nuke_imageio_settings(): def get_imageio_node_setting(node_class, plugin_name, product_name): - ''' Get preset data for dataflow (fileType, compression, bitDepth) - ''' + """Get preset data for dataflow (fileType, compression, bitDepth)""" imageio_nodes = get_nuke_imageio_settings()["nodes"] required_nodes = imageio_nodes["required_nodes"] @@ -686,8 +685,8 @@ def get_imageio_node_setting(node_class, plugin_name, product_name): def get_imageio_node_override_setting( node_class, plugin_name, product_name, knobs_settings ): - ''' Get imageio node overrides from settings - ''' + """ Get imageio node overrides from settings + """ imageio_nodes = get_nuke_imageio_settings()["nodes"] override_nodes = imageio_nodes["override_nodes"] @@ -745,8 +744,7 @@ def get_imageio_node_override_setting( def get_imageio_input_colorspace(filename): - ''' Get input file colorspace based on regex in settings. - ''' + """Get input file colorspace based on regex in settings.""" imageio_regex_inputs = ( get_nuke_imageio_settings()["regex_inputs"]["inputs"]) @@ -791,8 +789,7 @@ def get_view_process_node(): def on_script_load(): - ''' Callback for ffmpeg support - ''' + """Callback for ffmpeg support""" if nuke.env["LINUX"]: nuke.tcl('load ffmpegReader') nuke.tcl('load ffmpegWriter') @@ -815,7 +812,7 @@ def check_inventory_versions(): # get all Loader nodes by avalon attribute metadata node_with_repre_id = [] repre_ids = set() - # Find all containers and collect it's node and representation ids + # Find all containers and collect its node and representation ids for node in nuke.allNodes(): container = parse_container(node) @@ -896,8 +893,7 @@ def check_inventory_versions(): def writes_version_sync(): - ''' Callback synchronizing version of publishable write nodes - ''' + """Callback synchronizing version of publishable write nodes""" try: rootVersion = get_version_from_path(nuke.root().name()) padding = len(rootVersion) @@ -934,8 +930,7 @@ def writes_version_sync(): def version_up_script(): - ''' Raising working script's version - ''' + """Raising working script's version""" import nukescripts nukescripts.script_and_write_nodes_version_up() @@ -957,14 +952,14 @@ def check_product_name_exists(nodes, product_name): def format_anatomy(data): - ''' Helping function for formatting of anatomy paths + """Helping function for formatting of anatomy paths Arguments: data (dict): dictionary with attributes used for formatting Return: - path (str) - ''' + str: Formatted path. + """ project_name = get_current_project_name() anatomy = Anatomy(project_name) @@ -996,9 +991,8 @@ def format_anatomy(data): return anatomy.format(data) -def script_name(): - ''' Returns nuke script path - ''' +def script_name() -> str: + """Returns nuke script path""" return nuke.root().knob("name").value() @@ -1100,7 +1094,7 @@ def create_write_node( linked_knobs=None, **kwargs ): - ''' Creating write node which is group node + """Creating write node which is group node Arguments: name (str): name of node @@ -1134,8 +1128,8 @@ def create_write_node( Return: - node (obj): group node with avalon data as Knobs - ''' + node (nuke.Node): group node with avalon data as Knobs + """ # Ensure name does not contain any invalid characters. 
special_chars = re.escape("!@#$%^&*()=[]{}|\\;',.<>/?~+-") special_chars_regex = re.compile(f"[{special_chars}]") @@ -1300,7 +1294,7 @@ def create_write_node( def set_node_knobs_from_settings(node, knob_settings, **kwargs): - """ Overriding knob values from settings + """Overriding knob values from settings Using `schema_nuke_knob_inputs` for knob type definitions. @@ -1393,8 +1387,7 @@ def color_gui_to_int(color_gui): def create_backdrop(label="", color=None, layer=0, nodes=None): - """ - Create Backdrop node + """Create Backdrop node Arguments: color (str): nuke compatible string with color code @@ -1402,6 +1395,9 @@ def create_backdrop(label="", color=None, layer=0, label (str): the message nodes (list): list of nodes to be wrapped into backdrop + Returns: + nuke.Node: The created backdrop node. + """ assert isinstance(nodes, list), "`nodes` should be a list of nodes" @@ -1491,12 +1487,12 @@ def get_nodes(self, nodes=None, nodes_filter=None): return [n for n in self._nodes if filter in n.Class()] def set_viewers_colorspace(self, imageio_nuke): - ''' Adds correct colorspace to viewer + """Adds correct colorspace to viewer Arguments: imageio_nuke (dict): nuke colorspace configurations - ''' + """ filter_knobs = [ "viewerProcess", "wipe_position", @@ -1560,12 +1556,12 @@ def _display_and_view_formatted(self, view_profile): return StringTemplate(display_view).format_strict(self.formatting_data) def set_root_colorspace(self, imageio_host): - ''' Adds correct colorspace to root + """Adds correct colorspace to root Arguments: imageio_host (dict): host colorspace configurations - ''' + """ config_data = get_current_context_imageio_config_preset() workfile_settings = imageio_host["workfile"] @@ -1819,9 +1815,8 @@ def _replace_ocio_path_with_env_var(self, config_data): return new_path def set_writes_colorspace(self): - ''' Adds correct colorspace to write node dict - - ''' + """ Adds correct colorspace to write node dict + """ for node in nuke.allNodes(filter="Group", group=self._root_node): log.info("Setting colorspace to `{}`".format(node.name())) @@ -1943,8 +1938,8 @@ def set_reads_colorspace(self, read_clrs_inputs): knobs["to"])) def set_colorspace(self): - ''' Setting colorspace following presets - ''' + """ Setting colorspace following presets + """ # get imageio nuke_colorspace = get_nuke_imageio_settings() @@ -2152,9 +2147,8 @@ def set_favorites(self): def get_write_node_template_attr(node): - ''' Gets all defined data from presets - - ''' + """ Gets all defined data from presets + """ # TODO: add identifiers to settings and rename settings key plugin_names_mapping = { diff --git a/server_addon/photoshop/LICENSE b/server_addon/photoshop/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/server_addon/photoshop/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/server_addon/photoshop/README.md b/server_addon/photoshop/README.md
deleted file mode 100644
index 2d1e1c745c..0000000000
--- a/server_addon/photoshop/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-Photoshop Addon
-===============
-
-Integration with Adobe Photoshop.
diff --git a/server_addon/photoshop/client/ayon_photoshop/__init__.py b/server_addon/photoshop/client/ayon_photoshop/__init__.py
deleted file mode 100644
index e72c79c812..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from .version import __version__
-from .addon import (
-    PHOTOSHOP_ADDON_ROOT,
-    PhotoshopAddon,
-    get_launch_script_path,
-)
-
-
-__all__ = (
-    "__version__",
-
-    "PHOTOSHOP_ADDON_ROOT",
-    "PhotoshopAddon",
-    "get_launch_script_path",
-)
diff --git a/server_addon/photoshop/client/ayon_photoshop/addon.py b/server_addon/photoshop/client/ayon_photoshop/addon.py
deleted file mode 100644
index d0fe638f15..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/addon.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import os
-from ayon_core.addon import AYONAddon, IHostAddon
-
-from .version import __version__
-
-PHOTOSHOP_ADDON_ROOT = os.path.dirname(os.path.abspath(__file__))
-
-
-class PhotoshopAddon(AYONAddon, IHostAddon):
-    name = "photoshop"
-    version = __version__
-    host_name = "photoshop"
-
-    def add_implementation_envs(self, env, _app):
-        """Modify environments to contain all required for implementation."""
-        defaults = {
-            "AYON_LOG_NO_COLORS": "1",
-            "WEBSOCKET_URL": "ws://localhost:8099/ws/"
-        }
-        for key, value in defaults.items():
-            if not env.get(key):
-                env[key] = value
-
-    def get_workfile_extensions(self):
-        return [".psd", ".psb"]
-
-    def get_launch_hook_paths(self, app):
-        if app.host_name != self.host_name:
-            return []
-        return [
-            os.path.join(PHOTOSHOP_ADDON_ROOT, "hooks")
-        ]
-
-
-def get_launch_script_path():
-    return os.path.join(
-        PHOTOSHOP_ADDON_ROOT, "api", "launch_script.py"
-    )
diff --git a/server_addon/photoshop/client/ayon_photoshop/api/README.md b/server_addon/photoshop/client/ayon_photoshop/api/README.md
deleted file mode 100644
index ef458dea16..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/api/README.md
+++ /dev/null
@@ -1,257 +0,0 @@
-# Photoshop Integration
-
-## Setup
-
-The Photoshop integration requires two components to work: `extension` and `server`.
-
-### Extension
-
-To install the extension, download the [Extension Manager Command Line tool (ExManCmd)](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#option-2---exmancmd).
-
-```
-ExManCmd /install {path to addon}/api/extension.zxp
-```
-
-### Server
-
-The easiest way to get the server running and Photoshop launched is with:
-
-```
-python -c ^"import ayon_photoshop;ayon_photoshop.launch(""C:\Program Files\Adobe\Adobe Photoshop 2020\Photoshop.exe"")^"
-```
-
-`ayon_photoshop.launch` launches the application and the server, and also closes the server when Photoshop exits.
-
-## Usage
-
-The Photoshop extension can be found under `Window > Extensions > Ayon`. Once launched you should be presented with a panel like this:
-
-![Ayon Panel](panel.png "AYON Panel")
-
-
-## Developing
-
-### Extension
-When developing the extension you can load it [unsigned](https://github.com/Adobe-CEP/CEP-Resources/blob/master/CEP_9.x/Documentation/CEP%209.0%20HTML%20Extension%20Cookbook.md#debugging-unsigned-extensions).
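-
-As a sketch of what the linked cookbook describes, loading unsigned extensions is typically enabled with the `PlayerDebugMode` preference for the matching CEP version (the `CSXS.9` key below assumes CEP 9 — adjust it to the CEP version of your Photoshop release):
-
-```
-REM Windows: add the PlayerDebugMode value to the CSXS registry key
-reg add HKCU\Software\Adobe\CSXS.9 /v PlayerDebugMode /t REG_SZ /d 1
-
-# macOS: write the same preference via defaults
-defaults write com.adobe.CSXS.9 PlayerDebugMode 1
-```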
- -When signing the extension, you can use this [guide](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#package-distribute-install-guide). - -``` -ZXPSignCmd -selfSignedCert NA NA Ayon Ayon-Photoshop Ayon extension.p12 -ZXPSignCmd -sign {path to avalon-core}\avalon\photoshop\extension {path to avalon-core}\avalon\photoshop\extension.zxp extension.p12 avalon -``` - -### Plugin Examples - -These plugins were made with the [polly config](https://github.com/mindbender-studio/config). To fully integrate and load them, you will have to use this config and add `image` to the [integration plugin](https://github.com/mindbender-studio/config/blob/master/polly/plugins/publish/integrate_asset.py). - -#### Creator Plugin -```python -from avalon import photoshop - - -class CreateImage(photoshop.Creator): - """Image folder for publish.""" - - name = "imageDefault" - label = "Image" - product_type = "image" - - def __init__(self, *args, **kwargs): - super(CreateImage, self).__init__(*args, **kwargs) -``` - -#### Collector Plugin -```python -import pythoncom - -import pyblish.api - - -class CollectInstances(pyblish.api.ContextPlugin): - """Gather instances by LayerSet and file metadata. - - This collector takes into account assets that are associated with - a LayerSet and marked with a unique identifier. - - Identifier: - id (str): "ayon.create.instance" - """ - - label = "Instances" - order = pyblish.api.CollectorOrder - hosts = ["photoshop"] - families_mapping = { - "image": [] - } - - def process(self, context): - # Necessary call when running in a different thread, - # which pyblish-qml can be. - pythoncom.CoInitialize() - - # PhotoshopClientStub is provided by the Photoshop integration's - # API; import it from the addon's api module before running this. - photoshop_client = PhotoshopClientStub() - layers = photoshop_client.get_layers() - layers_meta = photoshop_client.get_layers_metadata() - for layer in layers: - layer_data = photoshop_client.read(layer, layers_meta) - - # Skip layers without metadata. - if layer_data is None: - continue - - # Skip containers. - if "container" in layer_data["id"]: - continue - - # child_layers = [*layer.Layers] - # self.log.debug("child_layers {}".format(child_layers)) - # if not child_layers: - # self.log.info("%s skipped, it was empty." % layer.Name) - # continue - - instance = context.create_instance(layer.name) - instance.append(layer) - instance.data.update(layer_data) - instance.data["families"] = self.families_mapping[ - layer_data["productType"] - ] - instance.data["publish"] = layer.visible - - # Produce diagnostic message for any graphical - # user interface interested in visualising it. - self.log.info("Found: \"%s\" " % instance.data["name"]) -``` - -#### Extractor Plugin -```python -import os - -from ayon_core.pipeline import publish -from ayon_photoshop import api as photoshop - - -class ExtractImage(publish.Extractor): - """Produce a flattened image file from an instance. - - This plug-in takes into account only the layers in the group. - """ - - label = "Extract Image" - hosts = ["photoshop"] - families = ["image"] - formats = ["png", "jpg"] - - def process(self, instance): - - staging_dir = self.staging_dir(instance) - self.log.info("Outputting image to {}".format(staging_dir)) - - # Perform extraction - stub = photoshop.stub() - files = {} - with photoshop.maintained_selection(): - self.log.info("Extracting %s" % str(list(instance))) - with photoshop.maintained_visibility(): - # Hide all other layers. - extract_ids = set([ll.id for ll in stub.
- get_layers_in_layers([instance[0]])]) - - for layer in stub.get_layers(): - # limit unnecessary calls to client - if layer.visible and layer.id not in extract_ids: - stub.set_visible(layer.id, False) - - save_options = [] - if "png" in self.formats: - save_options.append('png') - if "jpg" in self.formats: - save_options.append('jpg') - - file_basename = os.path.splitext( - stub.get_active_document_name() - )[0] - for extension in save_options: - _filename = "{}.{}".format(file_basename, extension) - files[extension] = _filename - - full_filename = os.path.join(staging_dir, _filename) - stub.saveAs(full_filename, extension, True) - - representations = [] - for extension, filename in files.items(): - representations.append({ - "name": extension, - "ext": extension, - "files": filename, - "stagingDir": staging_dir - }) - instance.data["representations"] = representations - instance.data["stagingDir"] = staging_dir - - self.log.info(f"Extracted {instance} to {staging_dir}") -``` - -#### Loader Plugin -```python -from avalon import api, photoshop -from ayon_core.pipeline import load, get_representation_path - -stub = photoshop.stub() - - -class ImageLoader(load.LoaderPlugin): - """Load images - - Stores the imported asset in a container named after the asset. - """ - - families = ["image"] - representations = {"*"} - - def load(self, context, name=None, namespace=None, data=None): - path = self.filepath_from_context(context) - with photoshop.maintained_selection(): - layer = stub.import_smart_object(path) - - self[:] = [layer] - - return photoshop.containerise( - name, - namespace, - layer, - context, - self.__class__.__name__ - ) - - def update(self, container, context): - layer = container.pop("layer") - repre_entity = context["representation"] - with photoshop.maintained_selection(): - stub.replace_smart_object( - layer, get_representation_path(repre_entity) - ) - - stub.imprint( - layer, {"representation": repre_entity["id"]} - ) - - def remove(self, container): - container["layer"].Delete() - - def switch(self, container, context): - self.update(container, context) -``` -For easier debugging of Javascript: -https://community.adobe.com/t5/download-install/adobe-extension-debuger-problem/td-p/10911704?page=1 -Add --enable-blink-features=ShadowDOMV0,CustomElementsV0 when starting Chrome -then localhost:8078 (port set in `photoshop\extension\.debug`) - -Or use Visual Studio Code https://medium.com/adobetech/extendscript-debugger-for-visual-studio-code-public-release-a2ff6161fa01 - -Or install CEF client from https://github.com/Adobe-CEP/CEP-Resources/tree/master/CEP_9.x -## Resources - - https://github.com/lohriialo/photoshop-scripting-python - - https://www.adobe.com/devnet/photoshop/scripting.html - - https://github.com/Adobe-CEP/Getting-Started-guides - - https://github.com/Adobe-CEP/CEP-Resources diff --git a/server_addon/photoshop/client/ayon_photoshop/api/__init__.py b/server_addon/photoshop/client/ayon_photoshop/api/__init__.py deleted file mode 100644 index c5a12cba06..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/api/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -"""Public API - -Anything that isn't defined here is INTERNAL and unreliable for external use. 
- -""" - -from .launch_logic import stub - -from .pipeline import ( - PhotoshopHost, - ls, - containerise -) -from .plugin import ( - PhotoshopLoader, - get_unique_layer_name -) - - -from .lib import ( - maintained_selection, - maintained_visibility -) - -__all__ = [ - # launch_logic - "stub", - - # pipeline - "PhotoshopHost", - "ls", - "containerise", - - # Plugin - "PhotoshopLoader", - "get_unique_layer_name", - - # lib - "maintained_selection", - "maintained_visibility", -] diff --git a/server_addon/photoshop/client/ayon_photoshop/api/extension.zxp b/server_addon/photoshop/client/ayon_photoshop/api/extension.zxp deleted file mode 100644 index 26a73a37fd..0000000000 Binary files a/server_addon/photoshop/client/ayon_photoshop/api/extension.zxp and /dev/null differ diff --git a/server_addon/photoshop/client/ayon_photoshop/api/extension/.debug b/server_addon/photoshop/client/ayon_photoshop/api/extension/.debug deleted file mode 100644 index 4cea03cb41..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/api/extension/.debug +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - - - diff --git a/server_addon/photoshop/client/ayon_photoshop/api/extension/CSXS/manifest.xml b/server_addon/photoshop/client/ayon_photoshop/api/extension/CSXS/manifest.xml deleted file mode 100644 index 16d85be9b4..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/api/extension/CSXS/manifest.xml +++ /dev/null @@ -1,53 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - ./index.html - - - - true - - - applicationActivate - com.adobe.csxs.events.ApplicationInitialized - - - - Panel - AYON - - - 300 - 140 - - - 400 - 200 - - - - ./icons/ayon_logo.png - - - - - - diff --git a/server_addon/photoshop/client/ayon_photoshop/api/extension/client/CSInterface.js b/server_addon/photoshop/client/ayon_photoshop/api/extension/client/CSInterface.js deleted file mode 100644 index 4239391efd..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/api/extension/client/CSInterface.js +++ /dev/null @@ -1,1193 +0,0 @@ -/************************************************************************************************** -* -* ADOBE SYSTEMS INCORPORATED -* Copyright 2013 Adobe Systems Incorporated -* All Rights Reserved. -* -* NOTICE: Adobe permits you to use, modify, and distribute this file in accordance with the -* terms of the Adobe license agreement accompanying it. If you have received this file from a -* source other than Adobe, then your use, modification, or distribution of it requires the prior -* written permission of Adobe. -* -**************************************************************************************************/ - -/** CSInterface - v8.0.0 */ - -/** - * Stores constants for the window types supported by the CSXS infrastructure. - */ -function CSXSWindowType() -{ -} - -/** Constant for the CSXS window type Panel. */ -CSXSWindowType._PANEL = "Panel"; - -/** Constant for the CSXS window type Modeless. */ -CSXSWindowType._MODELESS = "Modeless"; - -/** Constant for the CSXS window type ModalDialog. */ -CSXSWindowType._MODAL_DIALOG = "ModalDialog"; - -/** EvalScript error message */ -EvalScript_ErrMessage = "EvalScript error."; - -/** - * @class Version - * Defines a version number with major, minor, micro, and special - * components. The major, minor and micro values are numeric; the special - * value can be any string. - * - * @param major The major version component, a positive integer up to nine digits long. - * @param minor The minor version component, a positive integer up to nine digits long. 
- * @param micro The micro version component, a positive integer up to nine digits long. - * @param special The special version component, an arbitrary string. - * - * @return A new \c Version object. - */ -function Version(major, minor, micro, special) -{ - this.major = major; - this.minor = minor; - this.micro = micro; - this.special = special; -} - -/** - * The maximum value allowed for a numeric version component. - * This reflects the maximum value allowed in PlugPlug and the manifest schema. - */ -Version.MAX_NUM = 999999999; - -/** - * @class VersionBound - * Defines a boundary for a version range, which associates a \c Version object - * with a flag for whether it is an inclusive or exclusive boundary. - * - * @param version The \c #Version object. - * @param inclusive True if this boundary is inclusive, false if it is exclusive. - * - * @return A new \c VersionBound object. - */ -function VersionBound(version, inclusive) -{ - this.version = version; - this.inclusive = inclusive; -} - -/** - * @class VersionRange - * Defines a range of versions using a lower boundary and optional upper boundary. - * - * @param lowerBound The \c #VersionBound object. - * @param upperBound The \c #VersionBound object, or null for a range with no upper boundary. - * - * @return A new \c VersionRange object. - */ -function VersionRange(lowerBound, upperBound) -{ - this.lowerBound = lowerBound; - this.upperBound = upperBound; -} - -/** - * @class Runtime - * Represents a runtime related to the CEP infrastructure. - * Extensions can declare dependencies on particular - * CEP runtime versions in the extension manifest. - * - * @param name The runtime name. - * @param version A \c #VersionRange object that defines a range of valid versions. - * - * @return A new \c Runtime object. - */ -function Runtime(name, versionRange) -{ - this.name = name; - this.versionRange = versionRange; -} - -/** -* @class Extension -* Encapsulates a CEP-based extension to an Adobe application. -* -* @param id The unique identifier of this extension. -* @param name The localizable display name of this extension. -* @param mainPath The path of the "index.html" file. -* @param basePath The base path of this extension. -* @param windowType The window type of the main window of this extension. - Valid values are defined by \c #CSXSWindowType. -* @param width The default width in pixels of the main window of this extension. -* @param height The default height in pixels of the main window of this extension. -* @param minWidth The minimum width in pixels of the main window of this extension. -* @param minHeight The minimum height in pixels of the main window of this extension. -* @param maxWidth The maximum width in pixels of the main window of this extension. -* @param maxHeight The maximum height in pixels of the main window of this extension. -* @param defaultExtensionDataXml The extension data contained in the default \c ExtensionDispatchInfo section of the extension manifest. -* @param specialExtensionDataXml The extension data contained in the application-specific \c ExtensionDispatchInfo section of the extension manifest. -* @param requiredRuntimeList An array of \c Runtime objects for runtimes required by this extension. -* @param isAutoVisible True if this extension is visible on loading. -* @param isPluginExtension True if this extension has been deployed in the Plugins folder of the host application. -* -* @return A new \c Extension object. 
-*/ -function Extension(id, name, mainPath, basePath, windowType, width, height, minWidth, minHeight, maxWidth, maxHeight, - defaultExtensionDataXml, specialExtensionDataXml, requiredRuntimeList, isAutoVisible, isPluginExtension) -{ - this.id = id; - this.name = name; - this.mainPath = mainPath; - this.basePath = basePath; - this.windowType = windowType; - this.width = width; - this.height = height; - this.minWidth = minWidth; - this.minHeight = minHeight; - this.maxWidth = maxWidth; - this.maxHeight = maxHeight; - this.defaultExtensionDataXml = defaultExtensionDataXml; - this.specialExtensionDataXml = specialExtensionDataXml; - this.requiredRuntimeList = requiredRuntimeList; - this.isAutoVisible = isAutoVisible; - this.isPluginExtension = isPluginExtension; -} - -/** - * @class CSEvent - * A standard JavaScript event, the base class for CEP events. - * - * @param type The name of the event type. - * @param scope The scope of event, can be "GLOBAL" or "APPLICATION". - * @param appId The unique identifier of the application that generated the event. - * @param extensionId The unique identifier of the extension that generated the event. - * - * @return A new \c CSEvent object - */ -function CSEvent(type, scope, appId, extensionId) -{ - this.type = type; - this.scope = scope; - this.appId = appId; - this.extensionId = extensionId; -} - -/** Event-specific data. */ -CSEvent.prototype.data = ""; - -/** - * @class SystemPath - * Stores operating-system-specific location constants for use in the - * \c #CSInterface.getSystemPath() method. - * @return A new \c SystemPath object. - */ -function SystemPath() -{ -} - -/** The path to user data. */ -SystemPath.USER_DATA = "userData"; - -/** The path to common files for Adobe applications. */ -SystemPath.COMMON_FILES = "commonFiles"; - -/** The path to the user's default document folder. */ -SystemPath.MY_DOCUMENTS = "myDocuments"; - -/** @deprecated. Use \c #SystemPath.Extension. */ -SystemPath.APPLICATION = "application"; - -/** The path to current extension. */ -SystemPath.EXTENSION = "extension"; - -/** The path to hosting application's executable. */ -SystemPath.HOST_APPLICATION = "hostApplication"; - -/** - * @class ColorType - * Stores color-type constants. - */ -function ColorType() -{ -} - -/** RGB color type. */ -ColorType.RGB = "rgb"; - -/** Gradient color type. */ -ColorType.GRADIENT = "gradient"; - -/** Null color type. */ -ColorType.NONE = "none"; - -/** - * @class RGBColor - * Stores an RGB color with red, green, blue, and alpha values. - * All values are in the range [0.0 to 255.0]. Invalid numeric values are - * converted to numbers within this range. - * - * @param red The red value, in the range [0.0 to 255.0]. - * @param green The green value, in the range [0.0 to 255.0]. - * @param blue The blue value, in the range [0.0 to 255.0]. - * @param alpha The alpha (transparency) value, in the range [0.0 to 255.0]. - * The default, 255.0, means that the color is fully opaque. - * - * @return A new RGBColor object. - */ -function RGBColor(red, green, blue, alpha) -{ - this.red = red; - this.green = green; - this.blue = blue; - this.alpha = alpha; -} - -/** - * @class Direction - * A point value in which the y component is 0 and the x component - * is positive or negative for a right or left direction, - * or the x component is 0 and the y component is positive or negative for - * an up or down direction. - * - * @param x The horizontal component of the point. - * @param y The vertical component of the point. 
- * - * @return A new \c Direction object. - */ -function Direction(x, y) -{ - this.x = x; - this.y = y; -} - -/** - * @class GradientStop - * Stores gradient stop information. - * - * @param offset The offset of the gradient stop, in the range [0.0 to 1.0]. - * @param rgbColor The color of the gradient at this point, an \c #RGBColor object. - * - * @return GradientStop object. - */ -function GradientStop(offset, rgbColor) -{ - this.offset = offset; - this.rgbColor = rgbColor; -} - -/** - * @class GradientColor - * Stores gradient color information. - * - * @param type The gradient type, must be "linear". - * @param direction A \c #Direction object for the direction of the gradient - (up, down, right, or left). - * @param numStops The number of stops in the gradient. - * @param gradientStopList An array of \c #GradientStop objects. - * - * @return A new \c GradientColor object. - */ -function GradientColor(type, direction, numStops, arrGradientStop) -{ - this.type = type; - this.direction = direction; - this.numStops = numStops; - this.arrGradientStop = arrGradientStop; -} - -/** - * @class UIColor - * Stores color information, including the type, anti-alias level, and specific color - * values in a color object of an appropriate type. - * - * @param type The color type, 1 for "rgb" and 2 for "gradient". - The supplied color object must correspond to this type. - * @param antialiasLevel The anti-alias level constant. - * @param color A \c #RGBColor or \c #GradientColor object containing specific color information. - * - * @return A new \c UIColor object. - */ -function UIColor(type, antialiasLevel, color) -{ - this.type = type; - this.antialiasLevel = antialiasLevel; - this.color = color; -} - -/** - * @class AppSkinInfo - * Stores window-skin properties, such as color and font. All color parameter values are \c #UIColor objects except that systemHighlightColor is \c #RGBColor object. - * - * @param baseFontFamily The base font family of the application. - * @param baseFontSize The base font size of the application. - * @param appBarBackgroundColor The application bar background color. - * @param panelBackgroundColor The background color of the extension panel. - * @param appBarBackgroundColorSRGB The application bar background color, as sRGB. - * @param panelBackgroundColorSRGB The background color of the extension panel, as sRGB. - * @param systemHighlightColor The highlight color of the extension panel, if provided by the host application. Otherwise, the operating-system highlight color. - * - * @return AppSkinInfo object. - */ -function AppSkinInfo(baseFontFamily, baseFontSize, appBarBackgroundColor, panelBackgroundColor, appBarBackgroundColorSRGB, panelBackgroundColorSRGB, systemHighlightColor) -{ - this.baseFontFamily = baseFontFamily; - this.baseFontSize = baseFontSize; - this.appBarBackgroundColor = appBarBackgroundColor; - this.panelBackgroundColor = panelBackgroundColor; - this.appBarBackgroundColorSRGB = appBarBackgroundColorSRGB; - this.panelBackgroundColorSRGB = panelBackgroundColorSRGB; - this.systemHighlightColor = systemHighlightColor; -} - -/** - * @class HostEnvironment - * Stores information about the environment in which the extension is loaded. - * - * @param appName The application's name. - * @param appVersion The application's version. - * @param appLocale The application's current license locale. - * @param appUILocale The application's current UI locale. - * @param appId The application's unique identifier. 
- * @param isAppOnline True if the application is currently online. - * @param appSkinInfo An \c #AppSkinInfo object containing the application's default color and font styles. - * - * @return A new \c HostEnvironment object. - */ -function HostEnvironment(appName, appVersion, appLocale, appUILocale, appId, isAppOnline, appSkinInfo) -{ - this.appName = appName; - this.appVersion = appVersion; - this.appLocale = appLocale; - this.appUILocale = appUILocale; - this.appId = appId; - this.isAppOnline = isAppOnline; - this.appSkinInfo = appSkinInfo; -} - -/** - * @class HostCapabilities - * Stores information about the host capabilities. - * - * @param EXTENDED_PANEL_MENU True if the application supports panel menu. - * @param EXTENDED_PANEL_ICONS True if the application supports panel icon. - * @param DELEGATE_APE_ENGINE True if the application supports delegated APE engine. - * @param SUPPORT_HTML_EXTENSIONS True if the application supports HTML extensions. - * @param DISABLE_FLASH_EXTENSIONS True if the application disables FLASH extensions. - * - * @return A new \c HostCapabilities object. - */ -function HostCapabilities(EXTENDED_PANEL_MENU, EXTENDED_PANEL_ICONS, DELEGATE_APE_ENGINE, SUPPORT_HTML_EXTENSIONS, DISABLE_FLASH_EXTENSIONS) -{ - this.EXTENDED_PANEL_MENU = EXTENDED_PANEL_MENU; - this.EXTENDED_PANEL_ICONS = EXTENDED_PANEL_ICONS; - this.DELEGATE_APE_ENGINE = DELEGATE_APE_ENGINE; - this.SUPPORT_HTML_EXTENSIONS = SUPPORT_HTML_EXTENSIONS; - this.DISABLE_FLASH_EXTENSIONS = DISABLE_FLASH_EXTENSIONS; // Since 5.0.0 -} - -/** - * @class ApiVersion - * Stores current api version. - * - * Since 4.2.0 - * - * @param major The major version - * @param minor The minor version. - * @param micro The micro version. - * - * @return ApiVersion object. - */ -function ApiVersion(major, minor, micro) -{ - this.major = major; - this.minor = minor; - this.micro = micro; -} - -/** - * @class MenuItemStatus - * Stores flyout menu item status - * - * Since 5.2.0 - * - * @param menuItemLabel The menu item label. - * @param enabled True if user wants to enable the menu item. - * @param checked True if user wants to check the menu item. - * - * @return MenuItemStatus object. - */ -function MenuItemStatus(menuItemLabel, enabled, checked) -{ - this.menuItemLabel = menuItemLabel; - this.enabled = enabled; - this.checked = checked; -} - -/** - * @class ContextMenuItemStatus - * Stores the status of the context menu item. - * - * Since 5.2.0 - * - * @param menuItemID The menu item id. - * @param enabled True if user wants to enable the menu item. - * @param checked True if user wants to check the menu item. - * - * @return MenuItemStatus object. - */ -function ContextMenuItemStatus(menuItemID, enabled, checked) -{ - this.menuItemID = menuItemID; - this.enabled = enabled; - this.checked = checked; -} -//------------------------------ CSInterface ---------------------------------- - -/** - * @class CSInterface - * This is the entry point to the CEP extensibility infrastructure. - * Instantiate this object and use it to: - *
- * <ul>
- * <li>Access information about the host application in which an extension is running</li>
- * <li>Launch an extension</li>
- * <li>Register interest in event notifications, and dispatch events</li>
- * </ul>
- * - * @return A new \c CSInterface object - */ -function CSInterface() -{ -} - -/** - * User can add this event listener to handle native application theme color changes. - * Callback function gives extensions ability to fine-tune their theme color after the - * global theme color has been changed. - * The callback function should be like below: - * - * @example - * // event is a CSEvent object, but user can ignore it. - * function OnAppThemeColorChanged(event) - * { - * // Should get a latest HostEnvironment object from application. - * var skinInfo = JSON.parse(window.__adobe_cep__.getHostEnvironment()).appSkinInfo; - * // Gets the style information such as color info from the skinInfo, - * // and redraw all UI controls of your extension according to the style info. - * } - */ -CSInterface.THEME_COLOR_CHANGED_EVENT = "com.adobe.csxs.events.ThemeColorChanged"; - -/** The host environment data object. */ -CSInterface.prototype.hostEnvironment = window.__adobe_cep__ ? JSON.parse(window.__adobe_cep__.getHostEnvironment()) : null; - -/** Retrieves information about the host environment in which the - * extension is currently running. - * - * @return A \c #HostEnvironment object. - */ -CSInterface.prototype.getHostEnvironment = function() -{ - this.hostEnvironment = JSON.parse(window.__adobe_cep__.getHostEnvironment()); - return this.hostEnvironment; -}; - -/** Closes this extension. */ -CSInterface.prototype.closeExtension = function() -{ - window.__adobe_cep__.closeExtension(); -}; - -/** - * Retrieves a path for which a constant is defined in the system. - * - * @param pathType The path-type constant defined in \c #SystemPath , - * - * @return The platform-specific system path string. - */ -CSInterface.prototype.getSystemPath = function(pathType) -{ - var path = decodeURI(window.__adobe_cep__.getSystemPath(pathType)); - var OSVersion = this.getOSInformation(); - if (OSVersion.indexOf("Windows") >= 0) - { - path = path.replace("file:///", ""); - } - else if (OSVersion.indexOf("Mac") >= 0) - { - path = path.replace("file://", ""); - } - return path; -}; - -/** - * Evaluates a JavaScript script, which can use the JavaScript DOM - * of the host application. - * - * @param script The JavaScript script. - * @param callback Optional. A callback function that receives the result of execution. - * If execution fails, the callback function receives the error message \c EvalScript_ErrMessage. - */ -CSInterface.prototype.evalScript = function(script, callback) -{ - if(callback === null || callback === undefined) - { - callback = function(result){}; - } - window.__adobe_cep__.evalScript(script, callback); -}; - -/** - * Retrieves the unique identifier of the application. - * in which the extension is currently running. - * - * @return The unique ID string. - */ -CSInterface.prototype.getApplicationID = function() -{ - var appId = this.hostEnvironment.appId; - return appId; -}; - -/** - * Retrieves host capability information for the application - * in which the extension is currently running. - * - * @return A \c #HostCapabilities object. - */ -CSInterface.prototype.getHostCapabilities = function() -{ - var hostCapabilities = JSON.parse(window.__adobe_cep__.getHostCapabilities() ); - return hostCapabilities; -}; - -/** - * Triggers a CEP event programmatically. Yoy can use it to dispatch - * an event of a predefined type, or of a type you have defined. - * - * @param event A \c CSEvent object. 
- */ -CSInterface.prototype.dispatchEvent = function(event) -{ - if (typeof event.data == "object") - { - event.data = JSON.stringify(event.data); - } - - window.__adobe_cep__.dispatchEvent(event); -}; - -/** - * Registers an interest in a CEP event of a particular type, and - * assigns an event handler. - * The event infrastructure notifies your extension when events of this type occur, - * passing the event object to the registered handler function. - * - * @param type The name of the event type of interest. - * @param listener The JavaScript handler function or method. - * @param obj Optional, the object containing the handler method, if any. - * Default is null. - */ -CSInterface.prototype.addEventListener = function(type, listener, obj) -{ - window.__adobe_cep__.addEventListener(type, listener, obj); -}; - -/** - * Removes a registered event listener. - * - * @param type The name of the event type of interest. - * @param listener The JavaScript handler function or method that was registered. - * @param obj Optional, the object containing the handler method, if any. - * Default is null. - */ -CSInterface.prototype.removeEventListener = function(type, listener, obj) -{ - window.__adobe_cep__.removeEventListener(type, listener, obj); -}; - -/** - * Loads and launches another extension, or activates the extension if it is already loaded. - * - * @param extensionId The extension's unique identifier. - * @param startupParams Not currently used, pass "". - * - * @example - * To launch the extension "help" with ID "HLP" from this extension, call: - * requestOpenExtension("HLP", ""); - * - */ -CSInterface.prototype.requestOpenExtension = function(extensionId, params) -{ - window.__adobe_cep__.requestOpenExtension(extensionId, params); -}; - -/** - * Retrieves the list of extensions currently loaded in the current host application. - * The extension list is initialized once, and remains the same during the lifetime - * of the CEP session. - * - * @param extensionIds Optional, an array of unique identifiers for extensions of interest. - * If omitted, retrieves data for all extensions. - * - * @return Zero or more \c #Extension objects. - */ -CSInterface.prototype.getExtensions = function(extensionIds) -{ - var extensionIdsStr = JSON.stringify(extensionIds); - var extensionsStr = window.__adobe_cep__.getExtensions(extensionIdsStr); - - var extensions = JSON.parse(extensionsStr); - return extensions; -}; - -/** - * Retrieves network-related preferences. - * - * @return A JavaScript object containing network preferences. - */ -CSInterface.prototype.getNetworkPreferences = function() -{ - var result = window.__adobe_cep__.getNetworkPreferences(); - var networkPre = JSON.parse(result); - - return networkPre; -}; - -/** - * Initializes the resource bundle for this extension with property values - * for the current application and locale. - * To support multiple locales, you must define a property file for each locale, - * containing keyed display-string values for that locale. - * See localization documentation for Extension Builder and related products. - * - * Keys can be in the - * form key.value="localized string", for use in HTML text elements. - * For example, in this input element, the localized \c key.value string is displayed - * instead of the empty \c value string: - * - * - * - * @return An object containing the resource bundle information. 
- */ -CSInterface.prototype.initResourceBundle = function() -{ - var resourceBundle = JSON.parse(window.__adobe_cep__.initResourceBundle()); - var resElms = document.querySelectorAll('[data-locale]'); - for (var n = 0; n < resElms.length; n++) - { - var resEl = resElms[n]; - // Get the resource key from the element. - var resKey = resEl.getAttribute('data-locale'); - if (resKey) - { - // Get all the resources that start with the key. - for (var key in resourceBundle) - { - if (key.indexOf(resKey) === 0) - { - var resValue = resourceBundle[key]; - if (key.length == resKey.length) - { - resEl.innerHTML = resValue; - } - else if ('.' == key.charAt(resKey.length)) - { - var attrKey = key.substring(resKey.length + 1); - resEl[attrKey] = resValue; - } - } - } - } - } - return resourceBundle; -}; - -/** - * Writes installation information to a file. - * - * @return The file path. - */ -CSInterface.prototype.dumpInstallationInfo = function() -{ - return window.__adobe_cep__.dumpInstallationInfo(); -}; - -/** - * Retrieves version information for the current Operating System, - * See http://www.useragentstring.com/pages/Chrome/ for Chrome \c navigator.userAgent values. - * - * @return A string containing the OS version, or "unknown Operation System". - * If user customizes the User Agent by setting CEF command parameter "--user-agent", only - * "Mac OS X" or "Windows" will be returned. - */ -CSInterface.prototype.getOSInformation = function() -{ - var userAgent = navigator.userAgent; - - if ((navigator.platform == "Win32") || (navigator.platform == "Windows")) - { - var winVersion = "Windows"; - var winBit = ""; - if (userAgent.indexOf("Windows") > -1) - { - if (userAgent.indexOf("Windows NT 5.0") > -1) - { - winVersion = "Windows 2000"; - } - else if (userAgent.indexOf("Windows NT 5.1") > -1) - { - winVersion = "Windows XP"; - } - else if (userAgent.indexOf("Windows NT 5.2") > -1) - { - winVersion = "Windows Server 2003"; - } - else if (userAgent.indexOf("Windows NT 6.0") > -1) - { - winVersion = "Windows Vista"; - } - else if (userAgent.indexOf("Windows NT 6.1") > -1) - { - winVersion = "Windows 7"; - } - else if (userAgent.indexOf("Windows NT 6.2") > -1) - { - winVersion = "Windows 8"; - } - else if (userAgent.indexOf("Windows NT 6.3") > -1) - { - winVersion = "Windows 8.1"; - } - else if (userAgent.indexOf("Windows NT 10") > -1) - { - winVersion = "Windows 10"; - } - - if (userAgent.indexOf("WOW64") > -1 || userAgent.indexOf("Win64") > -1) - { - winBit = " 64-bit"; - } - else - { - winBit = " 32-bit"; - } - } - - return winVersion + winBit; - } - else if ((navigator.platform == "MacIntel") || (navigator.platform == "Macintosh")) - { - var result = "Mac OS X"; - - if (userAgent.indexOf("Mac OS X") > -1) - { - result = userAgent.substring(userAgent.indexOf("Mac OS X"), userAgent.indexOf(")")); - result = result.replace(/_/g, "."); - } - - return result; - } - - return "Unknown Operation System"; -}; - -/** - * Opens a page in the default system browser. - * - * Since 4.2.0 - * - * @param url The URL of the page/file to open, or the email address. - * Must use HTTP/HTTPS/file/mailto protocol. For example: - * "http://www.adobe.com" - * "https://github.com" - * "file:///C:/log.txt" - * "mailto:test@adobe.com" - * - * @return One of these error codes:\n - *
- * <ul>\n
- * <li>NO_ERROR - 0</li>\n
- * <li>ERR_UNKNOWN - 1</li>\n
- * <li>ERR_INVALID_PARAMS - 2</li>\n
- * <li>ERR_INVALID_URL - 201</li>\n
- * </ul>
\n - */ -CSInterface.prototype.openURLInDefaultBrowser = function(url) -{ - return cep.util.openURLInDefaultBrowser(url); -}; - -/** - * Retrieves extension ID. - * - * Since 4.2.0 - * - * @return extension ID. - */ -CSInterface.prototype.getExtensionID = function() -{ - return window.__adobe_cep__.getExtensionId(); -}; - -/** - * Retrieves the scale factor of screen. - * On Windows platform, the value of scale factor might be different from operating system's scale factor, - * since host application may use its self-defined scale factor. - * - * Since 4.2.0 - * - * @return One of the following float number. - *
- * <ul>\n
- * <li>-1.0 when error occurs</li>\n
- * <li>1.0 means normal screen</li>\n
- * <li>>1.0 means HiDPI screen</li>\n
- * </ul>
\n - */ -CSInterface.prototype.getScaleFactor = function() -{ - return window.__adobe_cep__.getScaleFactor(); -}; - -/** - * Set a handler to detect any changes of scale factor. This only works on Mac. - * - * Since 4.2.0 - * - * @param handler The function to be called when scale factor is changed. - * - */ -CSInterface.prototype.setScaleFactorChangedHandler = function(handler) -{ - window.__adobe_cep__.setScaleFactorChangedHandler(handler); -}; - -/** - * Retrieves current API version. - * - * Since 4.2.0 - * - * @return ApiVersion object. - * - */ -CSInterface.prototype.getCurrentApiVersion = function() -{ - var apiVersion = JSON.parse(window.__adobe_cep__.getCurrentApiVersion()); - return apiVersion; -}; - -/** - * Set panel flyout menu by an XML. - * - * Since 5.2.0 - * - * Register a callback function for "com.adobe.csxs.events.flyoutMenuClicked" to get notified when a - * menu item is clicked. - * The "data" attribute of event is an object which contains "menuId" and "menuName" attributes. - * - * Register callback functions for "com.adobe.csxs.events.flyoutMenuOpened" and "com.adobe.csxs.events.flyoutMenuClosed" - * respectively to get notified when flyout menu is opened or closed. - * - * @param menu A XML string which describes menu structure. - * An example menu XML: - * - * - * - * - * - * - * - * - * - * - * - * - */ -CSInterface.prototype.setPanelFlyoutMenu = function(menu) -{ - if ("string" != typeof menu) - { - return; - } - - window.__adobe_cep__.invokeSync("setPanelFlyoutMenu", menu); -}; - -/** - * Updates a menu item in the extension window's flyout menu, by setting the enabled - * and selection status. - * - * Since 5.2.0 - * - * @param menuItemLabel The menu item label. - * @param enabled True to enable the item, false to disable it (gray it out). - * @param checked True to select the item, false to deselect it. - * - * @return false when the host application does not support this functionality (HostCapabilities.EXTENDED_PANEL_MENU is false). - * Fails silently if menu label is invalid. - * - * @see HostCapabilities.EXTENDED_PANEL_MENU - */ -CSInterface.prototype.updatePanelMenuItem = function(menuItemLabel, enabled, checked) -{ - var ret = false; - if (this.getHostCapabilities().EXTENDED_PANEL_MENU) - { - var itemStatus = new MenuItemStatus(menuItemLabel, enabled, checked); - ret = window.__adobe_cep__.invokeSync("updatePanelMenuItem", JSON.stringify(itemStatus)); - } - return ret; -}; - - -/** - * Set context menu by XML string. - * - * Since 5.2.0 - * - * There are a number of conventions used to communicate what type of menu item to create and how it should be handled. - * - an item without menu ID or menu name is disabled and is not shown. - * - if the item name is "---" (three hyphens) then it is treated as a separator. The menu ID in this case will always be NULL. - * - Checkable attribute takes precedence over Checked attribute. - * - a PNG icon. For optimal display results please supply a 16 x 16px icon as larger dimensions will increase the size of the menu item. - The Chrome extension contextMenus API was taken as a reference. - https://developer.chrome.com/extensions/contextMenus - * - the items with icons and checkable items cannot coexist on the same menu level. The former take precedences over the latter. - * - * @param menu A XML string which describes menu structure. - * @param callback The callback function which is called when a menu item is clicked. The only parameter is the returned ID of clicked menu item. 
- * - * @description An example menu XML: - * - * - * - * - * - * - * - * - * - * - * - */ -CSInterface.prototype.setContextMenu = function(menu, callback) -{ - if ("string" != typeof menu) - { - return; - } - - window.__adobe_cep__.invokeAsync("setContextMenu", menu, callback); -}; - -/** - * Set context menu by JSON string. - * - * Since 6.0.0 - * - * There are a number of conventions used to communicate what type of menu item to create and how it should be handled. - * - an item without menu ID or menu name is disabled and is not shown. - * - if the item label is "---" (three hyphens) then it is treated as a separator. The menu ID in this case will always be NULL. - * - Checkable attribute takes precedence over Checked attribute. - * - a PNG icon. For optimal display results please supply a 16 x 16px icon as larger dimensions will increase the size of the menu item. - The Chrome extension contextMenus API was taken as a reference. - * - the items with icons and checkable items cannot coexist on the same menu level. The former take precedences over the latter. - https://developer.chrome.com/extensions/contextMenus - * - * @param menu A JSON string which describes menu structure. - * @param callback The callback function which is called when a menu item is clicked. The only parameter is the returned ID of clicked menu item. - * - * @description An example menu JSON: - * - * { - * "menu": [ - * { - * "id": "menuItemId1", - * "label": "testExample1", - * "enabled": true, - * "checkable": true, - * "checked": false, - * "icon": "./image/small_16X16.png" - * }, - * { - * "id": "menuItemId2", - * "label": "testExample2", - * "menu": [ - * { - * "id": "menuItemId2-1", - * "label": "testExample2-1", - * "menu": [ - * { - * "id": "menuItemId2-1-1", - * "label": "testExample2-1-1", - * "enabled": false, - * "checkable": true, - * "checked": true - * } - * ] - * }, - * { - * "id": "menuItemId2-2", - * "label": "testExample2-2", - * "enabled": true, - * "checkable": true, - * "checked": true - * } - * ] - * }, - * { - * "label": "---" - * }, - * { - * "id": "menuItemId3", - * "label": "testExample3", - * "enabled": false, - * "checkable": true, - * "checked": false - * } - * ] - * } - * - */ -CSInterface.prototype.setContextMenuByJSON = function(menu, callback) -{ - if ("string" != typeof menu) - { - return; - } - - window.__adobe_cep__.invokeAsync("setContextMenuByJSON", menu, callback); -}; - -/** - * Updates a context menu item by setting the enabled and selection status. - * - * Since 5.2.0 - * - * @param menuItemID The menu item ID. - * @param enabled True to enable the item, false to disable it (gray it out). - * @param checked True to select the item, false to deselect it. - */ -CSInterface.prototype.updateContextMenuItem = function(menuItemID, enabled, checked) -{ - var itemStatus = new ContextMenuItemStatus(menuItemID, enabled, checked); - ret = window.__adobe_cep__.invokeSync("updateContextMenuItem", JSON.stringify(itemStatus)); -}; - -/** - * Get the visibility status of an extension window. - * - * Since 6.0.0 - * - * @return true if the extension window is visible; false if the extension window is hidden. - */ -CSInterface.prototype.isWindowVisible = function() -{ - return window.__adobe_cep__.invokeSync("isWindowVisible", ""); -}; - -/** - * Resize extension's content to the specified dimensions. - * 1. Works with modal and modeless extensions in all Adobe products. - * 2. Extension's manifest min/max size constraints apply and take precedence. - * 3. 
For panel extensions - * 3.1 This works in all Adobe products except: - * * Premiere Pro - * * Prelude - * * After Effects - * 3.2 When the panel is in certain states (especially when being docked), - * it will not change to the desired dimensions even when the - * specified size satisfies min/max constraints. - * - * Since 6.0.0 - * - * @param width The new width - * @param height The new height - */ -CSInterface.prototype.resizeContent = function(width, height) -{ - window.__adobe_cep__.resizeContent(width, height); -}; - -/** - * Register the invalid certificate callback for an extension. - * This callback will be triggered when the extension tries to access the web site that contains the invalid certificate on the main frame. - * But if the extension does not call this function and tries to access the web site containing the invalid certificate, a default error page will be shown. - * - * Since 6.1.0 - * - * @param callback the callback function - */ -CSInterface.prototype.registerInvalidCertificateCallback = function(callback) -{ - return window.__adobe_cep__.registerInvalidCertificateCallback(callback); -}; - -/** - * Register an interest in some key events to prevent them from being sent to the host application. - * - * This function works with modeless extensions and panel extensions. - * Generally all the key events will be sent to the host application for these two extensions if the current focused element - * is not text input or dropdown, - * If you want to intercept some key events and want them to be handled in the extension, please call this function - * in advance to prevent them being sent to the host application. - * - * Since 6.1.0 - * - * @param keyEventsInterest A JSON string describing those key events you are interested in. A null object or - an empty string will lead to removing the interest - * - * This JSON string should be an array, each object has following keys: - * - * keyCode: [Required] represents an OS system dependent virtual key code identifying - * the unmodified value of the pressed key. - * ctrlKey: [optional] a Boolean that indicates if the control key was pressed (true) or not (false) when the event occurred. - * altKey: [optional] a Boolean that indicates if the alt key was pressed (true) or not (false) when the event occurred. - * shiftKey: [optional] a Boolean that indicates if the shift key was pressed (true) or not (false) when the event occurred. - * metaKey: [optional] (Mac Only) a Boolean that indicates if the Meta key was pressed (true) or not (false) when the event occurred. - * On Macintosh keyboards, this is the command key. To detect Windows key on Windows, please use keyCode instead. - * An example JSON string: - * - * [ - * { - * "keyCode": 48 - * }, - * { - * "keyCode": 123, - * "ctrlKey": true - * }, - * { - * "keyCode": 123, - * "ctrlKey": true, - * "metaKey": true - * } - * ] - * - */ -CSInterface.prototype.registerKeyEventsInterest = function(keyEventsInterest) -{ - return window.__adobe_cep__.registerKeyEventsInterest(keyEventsInterest); -}; - -/** - * Set the title of the extension window. - * This function works with modal and modeless extensions in all Adobe products, and panel extensions in Photoshop, InDesign, InCopy, Illustrator, Flash Pro and Dreamweaver. - * - * Since 6.1.0 - * - * @param title The window title. - */ -CSInterface.prototype.setWindowTitle = function(title) -{ - window.__adobe_cep__.invokeSync("setWindowTitle", title); -}; - -/** - * Get the title of the extension window. 
- * This function works with modal and modeless extensions in all Adobe products, and panel extensions in Photoshop, InDesign, InCopy, Illustrator, Flash Pro and Dreamweaver. - * - * Since 6.1.0 - * - * @return The window title. - */ -CSInterface.prototype.getWindowTitle = function() -{ - return window.__adobe_cep__.invokeSync("getWindowTitle", ""); -}; diff --git a/server_addon/photoshop/client/ayon_photoshop/api/extension/client/client.js b/server_addon/photoshop/client/ayon_photoshop/api/extension/client/client.js deleted file mode 100644 index f4ba4cfe47..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/api/extension/client/client.js +++ /dev/null @@ -1,300 +0,0 @@ - // client facing part of extension, creates WSRPC client (jsx cannot - // do that) - // consumes RPC calls from server (OpenPype) calls ./host/index.jsx and - // returns values back (in json format) - - var logReturn = function(result){ log.warn('Result: ' + result);}; - - var csInterface = new CSInterface(); - - log.warn("script start"); - - WSRPC.DEBUG = false; - WSRPC.TRACE = false; - - function myCallBack(){ - log.warn("Triggered index.jsx"); - } - // importing through manifest.xml isn't working because relative paths - // possibly TODO - jsx.evalFile('./host/index.jsx', myCallBack); - - function runEvalScript(script) { - // because of asynchronous nature of functions in jsx - // this waits for response - return new Promise(function(resolve, reject){ - csInterface.evalScript(script, resolve); - }); - } - - /** main entry point **/ - startUp("WEBSOCKET_URL"); - - // get websocket server url from environment value - async function startUp(url){ - log.warn("url", url); - promis = runEvalScript("getEnv('" + url + "')"); - - var res = await promis; - // run rest only after resolved promise - main(res); - } - - function get_extension_version(){ - /** Returns version number from extension manifest.xml **/ - log.debug("get_extension_version") - var path = csInterface.getSystemPath(SystemPath.EXTENSION); - log.debug("extension path " + path); - - var result = window.cep.fs.readFile(path + "/CSXS/manifest.xml"); - var version = undefined; - if(result.err === 0){ - if (window.DOMParser) { - const parser = new DOMParser(); - const xmlDoc = parser.parseFromString(result.data.toString(), 'text/xml'); - const children = xmlDoc.children; - - for (let i = 0; i <= children.length; i++) { - if (children[i] && children[i].getAttribute('ExtensionBundleVersion')) { - version = children[i].getAttribute('ExtensionBundleVersion'); - } - } - } - } - return version - } - - function main(websocket_url){ - // creates connection to 'websocket_url', registers routes - log.warn("websocket_url", websocket_url); - var default_url = 'ws://localhost:8099/ws/'; - - if (websocket_url == ''){ - websocket_url = default_url; - } - log.warn("connecting to:", websocket_url); - RPC = new WSRPC(websocket_url, 5000); // spin connection - - RPC.connect(); - - log.warn("connected"); - - function EscapeStringForJSX(str){ - // Replaces: - // \ with \\ - // ' with \' - // " with \" - // See: https://stackoverflow.com/a/3967927/5285364 - return str.replace(/\\/g, '\\\\').replace(/'/g, "\\'").replace(/"/g, '\\"'); - } - - RPC.addRoute('Photoshop.open', function (data) { - log.warn('Server called client route "open":', data); - var escapedPath = EscapeStringForJSX(data.path); - return runEvalScript("fileOpen('" + escapedPath +"')") - .then(function(result){ - log.warn("open: " + result); - return result; - }); - }); - - RPC.addRoute('Photoshop.read', function 
(data) { - log.warn('Server called client route "read":', data); - return runEvalScript("getHeadline()") - .then(function(result){ - log.warn("getHeadline: " + result); - return result; - }); - }); - - RPC.addRoute('Photoshop.get_layers', function (data) { - log.warn('Server called client route "get_layers":', data); - return runEvalScript("getLayers()") - .then(function(result){ - log.warn("getLayers: " + result); - return result; - }); - }); - - RPC.addRoute('Photoshop.set_visible', function (data) { - log.warn('Server called client route "set_visible":', data); - return runEvalScript("setVisible(" + data.layer_id + ", " + - data.visibility + ")") - .then(function(result){ - log.warn("setVisible: " + result); - return result; - }); - }); - - RPC.addRoute('Photoshop.get_active_document_name', function (data) { - log.warn('Server called client route "get_active_document_name":', - data); - return runEvalScript("getActiveDocumentName()") - .then(function(result){ - log.warn("save: " + result); - return result; - }); - }); - - RPC.addRoute('Photoshop.get_active_document_full_name', function (data) { - log.warn('Server called client route ' + - '"get_active_document_full_name":', data); - return runEvalScript("getActiveDocumentFullName()") - .then(function(result){ - log.warn("save: " + result); - return result; - }); - }); - - RPC.addRoute('Photoshop.save', function (data) { - log.warn('Server called client route "save":', data); - - return runEvalScript("save()") - .then(function(result){ - log.warn("save: " + result); - return result; - }); - }); - - RPC.addRoute('Photoshop.get_selected_layers', function (data) { - log.warn('Server called client route "get_selected_layers":', data); - - return runEvalScript("getSelectedLayers()") - .then(function(result){ - log.warn("get_selected_layers: " + result); - return result; - }); - }); - - RPC.addRoute('Photoshop.create_group', function (data) { - log.warn('Server called client route "create_group":', data); - - return runEvalScript("createGroup('" + data.name + "')") - .then(function(result){ - log.warn("createGroup: " + result); - return result; - }); - }); - - RPC.addRoute('Photoshop.group_selected_layers', function (data) { - log.warn('Server called client route "group_selected_layers":', - data); - - return runEvalScript("groupSelectedLayers(null, "+ - "'" + data.name +"')") - .then(function(result){ - log.warn("group_selected_layers: " + result); - return result; - }); - }); - - RPC.addRoute('Photoshop.import_smart_object', function (data) { - log.warn('Server called client "import_smart_object":', data); - var escapedPath = EscapeStringForJSX(data.path); - return runEvalScript("importSmartObject('" + escapedPath +"', " + - "'"+ data.name +"',"+ - + data.as_reference +")") - .then(function(result){ - log.warn("import_smart_object: " + result); - return result; - }); - }); - - RPC.addRoute('Photoshop.replace_smart_object', function (data) { - log.warn('Server called route "replace_smart_object":', data); - var escapedPath = EscapeStringForJSX(data.path); - return runEvalScript("replaceSmartObjects("+data.layer_id+"," + - "'" + escapedPath +"',"+ - "'"+ data.name +"')") - .then(function(result){ - log.warn("replaceSmartObjects: " + result); - return result; - }); - }); - - RPC.addRoute('Photoshop.delete_layer', function (data) { - log.warn('Server called route "delete_layer":', data); - return runEvalScript("deleteLayer("+data.layer_id+")") - .then(function(result){ - log.warn("delete_layer: " + result); - return result; - }); - }); - - 
RPC.addRoute('Photoshop.rename_layer', function (data) { - log.warn('Server called route "rename_layer":', data); - return runEvalScript("renameLayer("+data.layer_id+", " + - "'"+ data.name +"')") - .then(function(result){ - log.warn("rename_layer: " + result); - return result; - }); -}); - - RPC.addRoute('Photoshop.select_layers', function (data) { - log.warn('Server called client route "select_layers":', data); - - return runEvalScript("selectLayers('" + data.layers +"')") - .then(function(result){ - log.warn("select_layers: " + result); - return result; - }); - }); - - RPC.addRoute('Photoshop.is_saved', function (data) { - log.warn('Server called client route "is_saved":', data); - - return runEvalScript("isSaved()") - .then(function(result){ - log.warn("is_saved: " + result); - return result; - }); - }); - - RPC.addRoute('Photoshop.saveAs', function (data) { - log.warn('Server called client route "saveAsJPEG":', data); - var escapedPath = EscapeStringForJSX(data.image_path); - return runEvalScript("saveAs('" + escapedPath + "', " + - "'" + data.ext + "', " + - data.as_copy + ")") - .then(function(result){ - log.warn("save: " + result); - return result; - }); - }); - - RPC.addRoute('Photoshop.imprint', function (data) { - log.warn('Server called client route "imprint":', data); - var escaped = data.payload.replace(/\n/g, "\\n"); - return runEvalScript("imprint('" + escaped + "')") - .then(function(result){ - log.warn("imprint: " + result); - return result; - }); - }); - - RPC.addRoute('Photoshop.get_extension_version', function (data) { - log.warn('Server called client route "get_extension_version":', data); - return get_extension_version(); - }); - - RPC.addRoute('Photoshop.close', function (data) { - log.warn('Server called client route "close":', data); - return runEvalScript("close()"); - }); - - RPC.call('Photoshop.ping').then(function (data) { - log.warn('Result for calling server route "ping": ', data); - return runEvalScript("ping()") - .then(function(result){ - log.warn("ping: " + result); - return result; - }); - - }, function (error) { - log.warn(error); - }); - - } - - log.warn("end script"); diff --git a/server_addon/photoshop/client/ayon_photoshop/api/extension/client/loglevel.min.js b/server_addon/photoshop/client/ayon_photoshop/api/extension/client/loglevel.min.js deleted file mode 100644 index 648d7e9ff6..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/api/extension/client/loglevel.min.js +++ /dev/null @@ -1,2 +0,0 @@ -/*! loglevel - v1.6.8 - https://github.com/pimterry/loglevel - (c) 2020 Tim Perry - licensed MIT */ -!function(a,b){"use strict";"function"==typeof define&&define.amd?define(b):"object"==typeof module&&module.exports?module.exports=b():a.log=b()}(this,function(){"use strict";function a(a,b){var c=a[b];if("function"==typeof c.bind)return c.bind(a);try{return Function.prototype.bind.call(c,a)}catch(b){return function(){return Function.prototype.apply.apply(c,[a,arguments])}}}function b(){console.log&&(console.log.apply?console.log.apply(console,arguments):Function.prototype.apply.apply(console.log,[console,arguments])),console.trace&&console.trace()}function c(c){return"debug"===c&&(c="log"),typeof console!==i&&("trace"===c&&j?b:void 0!==console[c]?a(console,c):void 0!==console.log?a(console,"log"):h)}function d(a,b){for(var c=0;c=0&&b<=j.levels.SILENT))throw"log.setLevel() called with invalid level: "+b;if(h=b,!1!==c&&e(b),d.call(j,b,a),typeof console===i&&b 1 && arguments[1] !== undefined ? 
arguments[1] : 1000; - - _classCallCheck(this, WSRPC); - - var self = this; - URL = getAbsoluteWsUrl(URL); - self.id = 1; - self.eventId = 0; - self.socketStarted = false; - self.eventStore = { - onconnect: {}, - onerror: {}, - onclose: {}, - onchange: {} - }; - self.connectionNumber = 0; - self.oneTimeEventStore = { - onconnect: [], - onerror: [], - onclose: [], - onchange: [] - }; - self.callQueue = []; - - function createSocket() { - var ws = new WebSocket(URL); - - var rejectQueue = function rejectQueue() { - self.connectionNumber++; // rejects incoming calls - - var deferred; //reject all pending calls - - while (0 < self.callQueue.length) { - var callObj = self.callQueue.shift(); - deferred = self.store[callObj.id]; - delete self.store[callObj.id]; - - if (deferred && deferred.promise.isPending()) { - deferred.reject('WebSocket error occurred'); - } - } // reject all from the store - - - for (var key in self.store) { - if (!self.store.hasOwnProperty(key)) continue; - deferred = self.store[key]; - - if (deferred && deferred.promise.isPending()) { - deferred.reject('WebSocket error occurred'); - } - } - }; - - function reconnect(callEvents) { - setTimeout(function () { - try { - self.socket = createSocket(); - self.id = 1; - } catch (exc) { - callEvents('onerror', exc); - delete self.socket; - console.error(exc); - } - }, reconnectTimeout); - } - - ws.onclose = function (err) { - log('ONCLOSE CALLED', 'STATE', self.public.state()); - trace(err); - - for (var serial in self.store) { - if (!self.store.hasOwnProperty(serial)) continue; - - if (self.store[serial].hasOwnProperty('reject')) { - self.store[serial].reject('Connection closed'); - } - } - - rejectQueue(); - callEvents('onclose', err); - callEvents('onchange', err); - reconnect(callEvents); - }; - - ws.onerror = function (err) { - log('ONERROR CALLED', 'STATE', self.public.state()); - trace(err); - rejectQueue(); - callEvents('onerror', err); - callEvents('onchange', err); - log('WebSocket has been closed by error: ', err); - }; - - function tryCallEvent(func, event) { - try { - return func(event); - } catch (e) { - if (e.hasOwnProperty('stack')) { - log(e.stack); - } else { - log('Event function', func, 'raised unknown error:', e); - } - - console.error(e); - } - } - - function callEvents(evName, event) { - while (0 < self.oneTimeEventStore[evName].length) { - var deferred = self.oneTimeEventStore[evName].shift(); - if (deferred.hasOwnProperty('resolve') && deferred.promise.isPending()) deferred.resolve(); - } - - for (var i in self.eventStore[evName]) { - if (!self.eventStore[evName].hasOwnProperty(i)) continue; - var cur = self.eventStore[evName][i]; - tryCallEvent(cur, event); - } - } - - ws.onopen = function (ev) { - log('ONOPEN CALLED', 'STATE', self.public.state()); - trace(ev); - - while (0 < self.callQueue.length) { - // noinspection JSUnresolvedFunction - self.socket.send(JSON.stringify(self.callQueue.shift(), 0, 1)); - } - - callEvents('onconnect', ev); - callEvents('onchange', ev); - }; - - function handleCall(self, data) { - if (!self.routes.hasOwnProperty(data.method)) throw new Error('Route not found'); - var connectionNumber = self.connectionNumber; - var deferred = new Deferred(); - deferred.promise.then(function (result) { - if (connectionNumber !== self.connectionNumber) return; - self.socket.send(JSON.stringify({ - id: data.id, - result: result - })); - }, function (error) { - if (connectionNumber !== self.connectionNumber) return; - self.socket.send(JSON.stringify({ - id: data.id, - error: error - })); - }); 
- var func = self.routes[data.method]; - if (self.asyncRoutes[data.method]) return func.apply(deferred, [data.params]); - - function badPromise() { - throw new Error("You should register route with async flag."); - } - - var promiseMock = { - resolve: badPromise, - reject: badPromise - }; - - try { - deferred.resolve(func.apply(promiseMock, [data.params])); - } catch (e) { - deferred.reject(e); - console.error(e); - } - } - - function handleError(self, data) { - if (!self.store.hasOwnProperty(data.id)) return log('Unknown callback'); - var deferred = self.store[data.id]; - if (typeof deferred === 'undefined') return log('Confirmation without handler'); - delete self.store[data.id]; - log('REJECTING', data.error); - deferred.reject(data.error); - } - - function handleResult(self, data) { - var deferred = self.store[data.id]; - if (typeof deferred === 'undefined') return log('Confirmation without handler'); - delete self.store[data.id]; - - if (data.hasOwnProperty('result')) { - return deferred.resolve(data.result); - } - - return deferred.reject(data.error); - } - - ws.onmessage = function (message) { - log('ONMESSAGE CALLED', 'STATE', self.public.state()); - trace(message); - if (message.type !== 'message') return; - var data; - - try { - data = JSON.parse(message.data); - log(data); - - if (data.hasOwnProperty('method')) { - return handleCall(self, data); - } else if (data.hasOwnProperty('error') && data.error === null) { - return handleError(self, data); - } else { - return handleResult(self, data); - } - } catch (exception) { - var err = { - error: exception.message, - result: null, - id: data ? data.id : null - }; - self.socket.send(JSON.stringify(err)); - console.error(exception); - } - }; - - return ws; - } - - function makeCall(func, args, params) { - self.id += 2; - var deferred = new Deferred(); - var callObj = Object.freeze({ - id: self.id, - method: func, - params: args - }); - var state = self.public.state(); - - if (state === 'OPEN') { - self.store[self.id] = deferred; - self.socket.send(JSON.stringify(callObj)); - } else if (state === 'CONNECTING') { - log('SOCKET IS', state); - self.store[self.id] = deferred; - self.callQueue.push(callObj); - } else { - log('SOCKET IS', state); - - if (params && params['noWait']) { - deferred.reject("Socket is: ".concat(state)); - } else { - self.store[self.id] = deferred; - self.callQueue.push(callObj); - } - } - - return deferred.promise; - } - - self.asyncRoutes = {}; - self.routes = {}; - self.store = {}; - self.public = Object.freeze({ - call: function call(func, args, params) { - return makeCall(func, args, params); - }, - addRoute: function addRoute(route, callback, isAsync) { - self.asyncRoutes[route] = isAsync || false; - self.routes[route] = callback; - }, - deleteRoute: function deleteRoute(route) { - delete self.asyncRoutes[route]; - return delete self.routes[route]; - }, - addEventListener: function addEventListener(event, func) { - var eventId = self.eventId++; - self.eventStore[event][eventId] = func; - return eventId; - }, - removeEventListener: function removeEventListener(event, index) { - if (self.eventStore[event].hasOwnProperty(index)) { - delete self.eventStore[event][index]; - return true; - } else { - return false; - } - }, - onEvent: function onEvent(event) { - var deferred = new Deferred(); - self.oneTimeEventStore[event].push(deferred); - return deferred.promise; - }, - destroy: function destroy() { - return self.socket.close(); - }, - state: function state() { - return readyState[this.stateCode()]; - }, - 
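The makeCall helper above backs the public `call` method: while the socket is CONNECTING, or closed without `noWait`, the call object is parked in callQueue and later flushed by ws.onopen. A short sketch of the caller's side, reusing the hypothetical `rpc` instance from the previous example:

```javascript
// Parked in callQueue until the socket opens, then flushed by ws.onopen.
rpc.call('ping', {value: 1}).then(function (result) {
    console.log('pong:', result);
}, function (error) {
    console.error('call failed:', error);
});

// With noWait the deferred is rejected immediately when the socket is
// down instead of being queued.
rpc.call('ping', {value: 2}, {noWait: true});
```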
stateCode: function stateCode() { - if (self.socketStarted && self.socket) return self.socket.readyState; - return 3; - }, - connect: function connect() { - self.socketStarted = true; - self.socket = createSocket(); - } - }); - self.public.addRoute('log', function (argsObj) { - //console.info("Websocket sent: ".concat(argsObj)); - }); - self.public.addRoute('ping', function (data) { - return data; - }); - return self.public; - }; - - WSRPC.DEBUG = false; - WSRPC.TRACE = false; - - return WSRPC; - -})); -//# sourceMappingURL=wsrpc.js.map diff --git a/server_addon/photoshop/client/ayon_photoshop/api/extension/client/wsrpc.min.js b/server_addon/photoshop/client/ayon_photoshop/api/extension/client/wsrpc.min.js deleted file mode 100644 index f1264b91c4..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/api/extension/client/wsrpc.min.js +++ /dev/null @@ -1 +0,0 @@ -!function(global,factory){"object"==typeof exports&&"undefined"!=typeof module?module.exports=factory():"function"==typeof define&&define.amd?define(factory):(global=global||self).WSRPC=factory()}(this,function(){"use strict";function _classCallCheck(instance,Constructor){if(!(instance instanceof Constructor))throw new TypeError("Cannot call a class as a function")}function Deferred(){_classCallCheck(this,Deferred);var self=this;function wrapper(func){return function(){if(!self.done)return self.done=!0,func.apply(this,arguments);console.error(new Error("Promise already done"))}}return self.resolve=null,self.reject=null,self.done=!1,self.promise=new Promise(function(resolve,reject){self.resolve=wrapper(resolve),self.reject=wrapper(reject)}),self.promise.isPending=function(){return!self.done},self}function logGroup(group,level,args){console.group(group),console[level].apply(this,args),console.groupEnd()}function log(){WSRPC.DEBUG&&logGroup("WSRPC.DEBUG","trace",arguments)}function trace(msg){if(WSRPC.TRACE){var payload=msg;"data"in msg&&(payload=JSON.parse(msg.data)),logGroup("WSRPC.TRACE","trace",[payload])}}var readyState=Object.freeze({0:"CONNECTING",1:"OPEN",2:"CLOSING",3:"CLOSED"}),WSRPC=function WSRPC(URL){var reconnectTimeout=1 // -// forceEval is now by default true // -// It wraps the scripts in a try catch and an eval providing useful error handling // -// One can set in the jsx engine $.includeStack = true to return the call stack in the event of an error // -/////////////////////////////////////////////////////////////////////////////////////////////////////////// - -/////////////////////////////////////////////////////////////////////////////////////////////////////////// -// JSX.js for calling jsx code from the js engine // -// 2 methods included // -// 1) jsx.evalScript AKA jsx.eval // -// 2) jsx.evalFile AKA jsx.file // -// Special features // -// 1) Allows all changes in your jsx code to be reloaded into your extension at the click of a button // -// 2) Can enable the $.fileName property to work and provides a $.__fileName() method as an alternative // -// 3) Can force a callBack result from InDesign // -// 4) No more csInterface.evalScript('alert("hello "' + title + " " + name + '");') // -// use jsx.evalScript('alert("hello __title__ __name__");', {title: title, name: name}); // -// 5) execute jsx files from your jsx folder like this jsx.evalFile('myFabJsxScript.jsx'); // -// or from a relative path jsx.evalFile('../myFabScripts/myFabJsxScript.jsx'); // -// or from an absolute url jsx.evalFile('/Path/to/my/FabJsxScript.jsx'); (mac) // -// or from an absolute url 
jsx.evalFile('C:Path/to/my/FabJsxScript.jsx'); (windows) // -// 6) Parameter can be entered in the from of a parameter list which can be in any order or as an object // -// 7) Not camelCase sensitive (very useful for the illiterate) // -// Dead easy to use BUT SPEND THE 3 TO 5 MINUTES IT SHOULD TAKE TO READ THE INSTRUCTIONS // -/////////////////////////////////////////////////////////////////////////////////////////////////////////// - -/* jshint undef:true, unused:true, esversion:6 */ - -////////////////////////////////////// -// jsx is the interface for the API // -////////////////////////////////////// - -var jsx; - -// Wrap everything in an anonymous function to prevent leeks -(function() { - ///////////////////////////////////////////////////////////////////// - // Substitute some CSInterface functions to avoid dependency on it // - ///////////////////////////////////////////////////////////////////// - - var __dirname = (function() { - var path, isMac; - path = decodeURI(window.__adobe_cep__.getSystemPath('extension')); - isMac = navigator.platform[0] === 'M'; // [M]ac - path = path.replace('file://' + (isMac ? '' : '/'), ''); - return path; - })(); - - var evalScript = function(script, callback) { - callback = callback || function() {}; - window.__adobe_cep__.evalScript(script, callback); - }; - - - //////////////////////////////////////////// - // In place of using the node path module // - //////////////////////////////////////////// - - // jshint undef: true, unused: true - - // A very minified version of the NodeJs Path module!! - // For use outside of NodeJs - // Majorly nicked by Trevor from Joyent - var path = (function() { - - var isString = function(arg) { - return typeof arg === 'string'; - }; - - // var isObject = function(arg) { - // return typeof arg === 'object' && arg !== null; - // }; - - var basename = function(path) { - if (!isString(path)) { - throw new TypeError('Argument to path.basename must be a string'); - } - var bits = path.split(/[\/\\]/g); - return bits[bits.length - 1]; - }; - - // jshint undef: true - // Regex to split a windows path into three parts: [*, device, slash, - // tail] windows-only - var splitDeviceRe = - /^([a-zA-Z]:|[\\\/]{2}[^\\\/]+[\\\/]+[^\\\/]+)?([\\\/])?([\s\S]*?)$/; - - // Regex to split the tail part of the above into [*, dir, basename, ext] - // var splitTailRe = - // /^([\s\S]*?)((?:\.{1,2}|[^\\\/]+?|)(\.[^.\/\\]*|))(?:[\\\/]*)$/; - - var win32 = {}; - // Function to split a filename into [root, dir, basename, ext] - // var win32SplitPath = function(filename) { - // // Separate device+slash from tail - // var result = splitDeviceRe.exec(filename), - // device = (result[1] || '') + (result[2] || ''), - // tail = result[3] || ''; - // // Split the tail into dir, basename and extension - // var result2 = splitTailRe.exec(tail), - // dir = result2[1], - // basename = result2[2], - // ext = result2[3]; - // return [device, dir, basename, ext]; - // }; - - var win32StatPath = function(path) { - var result = splitDeviceRe.exec(path), - device = result[1] || '', - isUnc = !!device && device[1] !== ':'; - return { - device: device, - isUnc: isUnc, - isAbsolute: isUnc || !!result[2], // UNC paths are always absolute - tail: result[3] - }; - }; - - var normalizeUNCRoot = function(device) { - return '\\\\' + device.replace(/^[\\\/]+/, '').replace(/[\\\/]+/g, '\\'); - }; - - var normalizeArray = function(parts, allowAboveRoot) { - var res = []; - for (var i = 0; i < parts.length; i++) { - var p = parts[i]; - - // ignore empty parts - if (!p || 
p === '.') - continue; - - if (p === '..') { - if (res.length && res[res.length - 1] !== '..') { - res.pop(); - } else if (allowAboveRoot) { - res.push('..'); - } - } else { - res.push(p); - } - } - - return res; - }; - - win32.normalize = function(path) { - var result = win32StatPath(path), - device = result.device, - isUnc = result.isUnc, - isAbsolute = result.isAbsolute, - tail = result.tail, - trailingSlash = /[\\\/]$/.test(tail); - - // Normalize the tail path - tail = normalizeArray(tail.split(/[\\\/]+/), !isAbsolute).join('\\'); - - if (!tail && !isAbsolute) { - tail = '.'; - } - if (tail && trailingSlash) { - tail += '\\'; - } - - // Convert slashes to backslashes when `device` points to an UNC root. - // Also squash multiple slashes into a single one where appropriate. - if (isUnc) { - device = normalizeUNCRoot(device); - } - - return device + (isAbsolute ? '\\' : '') + tail; - }; - win32.join = function() { - var paths = []; - for (var i = 0; i < arguments.length; i++) { - var arg = arguments[i]; - if (!isString(arg)) { - throw new TypeError('Arguments to path.join must be strings'); - } - if (arg) { - paths.push(arg); - } - } - - var joined = paths.join('\\'); - - // Make sure that the joined path doesn't start with two slashes, because - // normalize() will mistake it for an UNC path then. - // - // This step is skipped when it is very clear that the user actually - // intended to point at an UNC path. This is assumed when the first - // non-empty string arguments starts with exactly two slashes followed by - // at least one more non-slash character. - // - // Note that for normalize() to treat a path as an UNC path it needs to - // have at least 2 components, so we don't filter for that here. - // This means that the user can use join to construct UNC paths from - // a server name and a share name; for example: - // path.join('//server', 'share') -> '\\\\server\\share\') - if (!/^[\\\/]{2}[^\\\/]/.test(paths[0])) { - joined = joined.replace(/^[\\\/]{2,}/, '\\'); - } - return win32.normalize(joined); - }; - - var posix = {}; - - // posix version - posix.join = function() { - var path = ''; - for (var i = 0; i < arguments.length; i++) { - var segment = arguments[i]; - if (!isString(segment)) { - throw new TypeError('Arguments to path.join must be strings'); - } - if (segment) { - if (!path) { - path += segment; - } else { - path += '/' + segment; - } - } - } - return posix.normalize(path); - }; - - // path.normalize(path) - // posix version - posix.normalize = function(path) { - var isAbsolute = path.charAt(0) === '/', - trailingSlash = path && path[path.length - 1] === '/'; - - // Normalize the path - path = normalizeArray(path.split('/'), !isAbsolute).join('/'); - - if (!path && !isAbsolute) { - path = '.'; - } - if (path && trailingSlash) { - path += '/'; - } - - return (isAbsolute ? '/' : '') + path; - }; - - win32.basename = posix.basename = basename; - - this.win32 = win32; - this.posix = posix; - return (navigator.platform[0] === 'M') ? 
posix : win32;
-    })();
-
-    ////////////////////////////////////////////////////////////////////////////////////////////////////////
-    // This is the "main" function which is to be prototyped                                               //
-    // It runs a small snippet in the jsx engine that                                                      //
-    // 1) Assigns $.__dirname with the value of the extensions __dirname base path                         //
-    // 2) Sets up a method $.__fileName() for retrieving from within the jsx script its $.fileName value   //
-    //    more on that method later                                                                        //
-    // At the end of the script the global declaration jsx = new Jsx(); has been made.                     //
-    // If you like you can remove that and include it in your relevant functions                           //
-    // var jsx = new Jsx(); You would never call the Jsx function without the "new" declaration            //
-    ////////////////////////////////////////////////////////////////////////////////////////////////////////
-    var Jsx = function() {
-        var jsxScript;
-        // Setup jsx function to enable the jsx scripts to easily retrieve their file location
-        jsxScript = [
-            '$.level = 0;',
-            'if(!$.__fileNames){',
-            '    $.__fileNames = {};',
-            '    $.__dirname = "__dirname__";'.replace('__dirname__', __dirname),
-            '    $.__fileName = function(name){',
-            '        name = name || $.fileName;',
-            '        return ($.__fileNames && $.__fileNames[name]) || $.fileName;',
-            '    };',
-            '}'
-        ].join('');
-        evalScript(jsxScript);
-        return this;
-    };
-
-    /**
-     * [evalScript] For calling jsx scripts from the js engine
-     *
-     * The jsx.evalScript method is used for calling jsx scripts directly from the js engine
-     * Allows for easy replacements, i.e. variable insertions, and for forcing eval.
-     * For convenience jsx.eval or jsx.script or jsx.evalscript can be used instead of calling jsx.evalScript
-     *
-     * @param {String} jsxScript
-     *        The string that makes up the jsx script
-     *        it can contain a simple template-like syntax for replacements
-     *        'alert("__foo__");'
-     *        the __foo__ will be replaced as per the replacements parameter
-     *
-     * @param {Function} callback
-     *        The callback function you want the jsx script to trigger on completion
-     *        The result of the jsx script is passed as the argument to that function
-     *        The function can exist in some other file.
-     *        Note that InDesign does not automatically pass the result to the callBack as a string.
-     *        Either write your InDesign script in a way that it returns a string in the form of
-     *        return 'this is my result surrounded by quotes'
-     *        or use the force eval option
-     *        [Optional DEFAULT no callBack]
-     *
-     * @param {Object} replacements
-     *        The replacements to make on the jsx script
-     *        given the following script (template)
-     *        'alert("__message__: " + __val__);'
-     *        and we want to change the script to
-     *        'alert("I was born in the year: " + 1234);'
-     *        we would pass the following object
-     *        {"message": 'I was born in the year', "val": 1234}
-     *        or if not using reserved words like do we can leave out the key quotes
-     *        {message: 'I was born in the year', val: 1234}
-     *        [Optional DEFAULT no replacements]
-     *
-     * @param {Boolean} forceEval
-     *        If the script should be wrapped in an eval and try catch
-     *        This will 1) provide useful error feedback if heaven forbid it is needed
-     *        2) The result will be a string which is required for callback results in InDesign
-     *        [Optional DEFAULT true]
-     *
-     * Note 1) The order of the parameters is irrelevant
-     * Note 2) One can pass the arguments as an object if desired
-     *         jsx.evalScript(myCallBackFunction, 'alert("__myMessage__");', true);
-     *         is the same as
-     *         jsx.evalScript({
-     *             script: 'alert("__myMessage__");',
-     *             replacements: {myMessage: 'Hi there'},
-     *             callBack: myCallBackFunction,
-     *             eval: true
-     *         });
-     *         note that either lower or camelCase key names are valid
-     *         i.e. both callback or callBack will work
-     *
-     * The following keys are the same jsx || script || jsxScript || jsxscript || file
-     * The following keys are the same callBack || callback
-     * The following keys are the same replacements || replace
-     * The following keys are the same eval || forceEval || forceeval
-     * The following keys are the same forceEvalScript || forceevalscript || evalScript || evalscript;
-     *
-     * @return {Boolean} if the jsxScript was executed or not
-     */
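Before the implementation, a short end-to-end sketch of the replacement syntax documented above (the variable values are made up):

```javascript
var title = 'Mr';
var name = 'Smith';

// __title__ and __name__ in the template are swapped for the values in
// the replacements object before the script is sent to the jsx engine.
jsx.evalScript(
    'alert("hello __title__ __name__");',
    {title: title, name: name},
    function (result) {
        console.log('jsx returned:', result);
    }
);
```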
-
-    Jsx.prototype.evalScript = function() {
-        var arg, i, key, replaceThis, withThis, args, callback, forceEval, replacements, jsxScript, isBin;
-
-        //////////////////////////////////////////////////////////////////////////////////////////
-        // sort out the order of the arguments into jsxScript, callback, replacements, forceEval //
-        //////////////////////////////////////////////////////////////////////////////////////////
-
-        args = arguments;
-
-        // Detect if the parameters were passed as an object and if so allow for various keys
-        if (args.length === 1 && (arg = args[0]) instanceof Object) {
-            jsxScript = arg.jsxScript || arg.jsx || arg.script || arg.file || arg.jsxscript;
-            callback = arg.callBack || arg.callback;
-            replacements = arg.replacements || arg.replace;
-            forceEval = arg.eval || arg.forceEval || arg.forceeval;
-        } else {
-            for (i = 0; i < 4; i++) {
-                arg = args[i];
-                if (arg === undefined) {
-                    continue;
-                }
-                if (arg.constructor === String) {
-                    jsxScript = arg;
-                    continue;
-                }
-                if (arg.constructor === Object) {
-                    replacements = arg;
-                    continue;
-                }
-                if (arg.constructor === Function) {
-                    callback = arg;
-                    continue;
-                }
-                if (arg === false) {
-                    forceEval = false;
-                }
-            }
-        }
-
-        // If no script is provided then there's not too much to do!
-        if (!jsxScript) {
-            return false;
-        }
-
-        // Have changed the forceEval default to be true as I prefer the error handling
-        if (forceEval !== false) {
-            forceEval = true;
-        }
-
-        //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-        // On Illustrator and other apps the result of the jsx script is automatically passed as a string                                    //
-        // if you have a "script" containing the single number 1 and nothing else then the callBack will register as "1"                     //
-        // On InDesign that same script will provide a blank callBack                                                                        //
-        // Let's say we have a callBack function var callBack = function(result){alert(result);}                                             //
-        // On Ai you'll see the 1 in the alert                                                                                               //
-        // On ID you'll just see a blank alert                                                                                               //
-        // To see the 1 in the alert you need to convert the result to a string and then it will show                                        //
-        // So if we rewrite our 1 byte script to '1' i.e. surround the 1 in quotes then the call back alert will show 1                      //
-        // If the script's planned one can make sure that the result is always passed as a string (including errors)                         //
-        // otherwise one can wrap the script in an eval and then have the result passed as a string                                          //
-        // I have not gone through all the apps but can say                                                                                  //
-        // for Ai you never need to set the forceEval to true                                                                                //
-        // for ID, if you have not coded your script appropriately and you want to send a result to the callBack, then set forceEval to true //
-        // I changed this so that even on Illustrator it applies the try catch. Note the try catch will fail if $.level is set to 1          //
-        //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
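A compact illustration of the point made in the banner above, assuming an InDesign session with one open document and a hypothetical callback `cb`:

```javascript
var cb = function (result) { alert(result); };

// With forceEval disabled, InDesign hands the callback an empty string
// unless the script itself evaluates to a string:
jsx.evalScript('app.documents.length;', cb, false);       // '' on ID, '1' on Ai
jsx.evalScript('app.documents.length + "";', cb, false);  // '1' everywhere

// Left at its default (true), forceEval wraps the script in an eval whose
// result is coerced with + '', so the first form works on InDesign too.
```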
-
-        if (forceEval) {
-
-            isBin = (jsxScript.substring(0, 10) === '@JSXBIN@ES') ? '' : '\n';
-            jsxScript = (
-                // "\n''') + '';} catch(e){(function(e){var n, a=[]; for (n in e){a.push(n + ': ' + e[n])}; return a.join('\n')})(e)}");
-                // "\n''') + '';} catch(e){e + (e.line ? ('\\nLine ' + (+e.line - 1)) : '')}");
-                [
-                    "$.level = 0;",
-                    "try{eval('''" + isBin, // need to add an extra line otherwise #targetengine doesn't work ;-]
-                    jsxScript.replace(/\\/g, '\\\\').replace(/'/g, "\\'").replace(/"/g, '\\"') + "\n''') + '';",
-                    "} catch (e) {",
-                    "    (function(e) {",
-                    "        var line, sourceLine, name, description, ErrorMessage, fileName, start, end, bug;",
-                    "        line = +e.line" + (isBin === '' ? ';' : ' - 1;'), // To take into account the extra line added
-                    "        fileName = File(e.fileName).fsName;",
-                    "        sourceLine = line && e.source.split(/[\\r\\n]/)[line];",
-                    "        name = e.name;",
-                    "        description = e.description;",
-                    "        ErrorMessage = name + ' ' + e.number + ': ' + description;",
-                    "        if (fileName.length && !(/[\\/\\\\]\\d+$/.test(fileName))) {",
-                    "            ErrorMessage += '\\nFile: ' + fileName;",
-                    "            line++;",
-                    "        }",
-                    "        if (line){",
-                    "            ErrorMessage += '\\nLine: ' + line +",
-                    "                '-> ' + ((sourceLine.length < 300) ? sourceLine : sourceLine.substring(0,300) + '...');",
-                    "        }",
-                    "        if (e.start) {ErrorMessage += '\\nBug: ' + e.source.substring(e.start - 1, e.end)}",
-                    "        if ($.includeStack) {ErrorMessage += '\\nStack:' + $.stack;}",
-                    "        return ErrorMessage;",
-                    "    })(e);",
-                    "}"
-                ].join('')
-            );
-
-        }
-
-        /////////////////////////////////////////////////////////////
-        // deal with the replacements                               //
-        // Note it's probably better to use ${template} `literals`  //
-        /////////////////////////////////////////////////////////////
-
-        if (replacements) {
-            for (key in replacements) {
-                if (replacements.hasOwnProperty(key)) {
-                    replaceThis = new RegExp('__' + key + '__', 'g');
-                    withThis = replacements[key];
-                    jsxScript = jsxScript.replace(replaceThis, withThis + '');
-                }
-            }
-        }
-
-
-        try {
-            evalScript(jsxScript, callback);
-            return true;
-        } catch (err) {
-            ////////////////////////////////////////////////
-            // Do whatever error handling you want here ! //
-            ////////////////////////////////////////////////
-            var newErr;
-            newErr = new Error(err);
-            alert('Error Eek: ' + newErr.stack);
-            return false;
-        }
-
-    };
-
-
-    /**
-     * [evalFile] For calling jsx scripts from the js engine
-     *
-     * The jsx.evalFile method is used for executing saved jsx scripts
-     * where the file parameter is a string of the jsx script's file location.
-     * For convenience jsx.file or jsx.evalfile can be used instead of jsx.evalFile
-     *
-     * @param {String} file
-     *        The path to the jsx script
-     *        If only the base name is provided then the path is presumed to be the jsx folder located in the __dirname folder
-     *        To execute files stored in the jsx folder located in the __dirname folder use
-     *        jsx.evalFile('myFabJsxScript.jsx');
-     *        To execute files stored in a folder myFabScripts located in the __dirname folder use
-     *        jsx.evalFile('./myFabScripts/myFabJsxScript.jsx');
-     *        To execute files stored in a folder myFabScripts located at an absolute url use
-     *        jsx.evalFile('/Path/to/my/FabJsxScript.jsx'); (mac)
-     *        or jsx.evalFile('C:/Path/to/my/FabJsxScript.jsx'); (windows)
-     *
-     * @param {Function} callback
-     *        The callback function you want the jsx script to trigger on completion
-     *        The result of the jsx script is passed as the argument to that function
-     *        The function can exist in some other file.
-     *        Note that InDesign does not automatically pass the result to the callBack as a string.
-     *        Either write your InDesign script in a way that it returns a string in the form of
-     *        return 'this is my result surrounded by quotes'
-     *        or use the force eval option
-     *        [Optional DEFAULT no callBack]
-     *
-     * @param {Object} replacements
-     *        The replacements to make on the jsx script
-     *        given the following script (template)
-     *        'alert("__message__: " + __val__);'
-     *        and we want to change the script to
-     *        'alert("I was born in the year: " + 1234);'
-     *        we would pass the following object
-     *        {"message": 'I was born in the year', "val": 1234}
-     *        or if not using reserved words like do we can leave out the key quotes
-     *        {message: 'I was born in the year', val: 1234}
-     *        By default when possible the forceEvalScript will be set to true
-     *        The forceEvalScript option cannot be true when there are replacements
-     *        To force the forceEvalScript to be false you can send a blank set of replacements
-     *        jsx.evalFile('myFabScript.jsx', {}); will NOT be executed using the $.evalFile method
-     *        jsx.evalFile('myFabScript.jsx'); WILL be executed using the $.evalFile method
-     *        see the forceEvalScript parameter for details on this
-     *        [Optional DEFAULT no replacements]
-     *
-     * @param {Boolean} forceEval
-     *        If the script should be wrapped in an eval and try catch
-     *        This will 1) provide useful error feedback if heaven forbid it is needed
-     *        2) The result will be a string which is required for callback results in InDesign
-     *        [Optional DEFAULT true]
-     *
-     * If no replacements are needed then the jsx script is executed by using the $.evalFile method
-     * This exposes the true value of the $.fileName property
-     * In such a case it's best to avoid using $.__fileName() with no base name as it won't work
-     * BUT one can still use the $.__fileName('baseName') method which is more accurate than the standard $.fileName property
-     * Let's say you have a Drive called "Graphics" AND YOU HAVE a root folder on your "main" drive called "Graphics"
-     * You call a script jsx.evalFile('/Volumes/Graphics/myFabScript.jsx');
-     * $.fileName will give you '/Graphics/myFabScript.jsx' which is wrong
-     * $.__fileName('myFabScript.jsx') will give you '/Volumes/Graphics/myFabScript.jsx' which is correct
-     * $.__fileName() will not give you a reliable result
-     * Note that if you're calling multiple versions of myFabScript.jsx stored in multiple folders then you can get stuffed!
-     * i.e. if the fileName is important to you then don't do that.
-     * It will also force the result of the jsx file to a string which is particularly useful for InDesign callBacks
-     *
-     * Note 1) The order of the parameters is irrelevant
-     * Note 2) One can pass the arguments as an object if desired
-     *         jsx.evalFile(myCallBackFunction, 'myFabScript.jsx', false);
-     *         is the same as
-     *         jsx.evalFile({
-     *             file: 'myFabScript.jsx',
-     *             replacements: {myMessage: 'Hi there'},
-     *             callBack: myCallBackFunction,
-     *             eval: false
-     *         });
-     *         note that either lower or camelCase key names are valid
-     *         i.e. both callback or callBack will work
-     *
-     * The following keys are the same file || jsx || script || jsxScript || jsxscript
-     * The following keys are the same callBack || callback
-     * The following keys are the same replacements || replace
-     * The following keys are the same eval || forceEval || forceeval
-     *
-     * @return {Boolean} if the jsxScript was executed or not
-     */
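A short sketch of the accepted path forms documented above (file names hypothetical):

```javascript
// Bare name: resolved against the jsx folder in the extension root.
jsx.evalFile('myFabJsxScript.jsx');

// Relative to the extension root.
jsx.evalFile('./myFabScripts/myFabJsxScript.jsx');

// Absolute paths (mac and windows).
jsx.evalFile('/Path/to/my/FabJsxScript.jsx');
jsx.evalFile('C:/Path/to/my/FabJsxScript.jsx');

// Passing a replacements object (even an empty one) disables the
// $.evalFile fast path and makes jsx read and inject the file itself.
jsx.evalFile('myFabJsxScript.jsx', {message: 'Hi there'});
```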
-
-    Jsx.prototype.evalFile = function() {
-        var arg, args, callback, fileName, fileNameScript, forceEval, forceEvalScript,
-            i, jsxFolder, jsxScript, newLine, replacements, success;
-
-        success = true; // optimistic
-        args = arguments;
-
-        jsxFolder = path.join(__dirname, 'jsx');
-        //////////////////////////////////////////////////////////////////////////////////////////////////////////
-        // $.fileName does not return its correct path in the jsx engine for files called from the js engine    //
-        // In Illustrator it returns an integer in InDesign it returns an empty string                          //
-        // This script injection allows for the script to know its path by calling                              //
-        // $.__fileName();                                                                                      //
-        // on Illustrator this works pretty well                                                                //
-        // on InDesign it's best to use with a bit of care                                                      //
-        // If a second script has been called then InDesign will "forget" the path to the first script          //
-        // 2 work-arounds for this                                                                              //
-        // 1) at the beginning of your script add var thePathToMeIs = $.__fileName();                           //
-        //    thePathToMeIs will not be forgotten after running the second script                               //
-        // 2) $.__fileName('myBaseName.jsx');                                                                   //
-        //    for example you have a file with the following path                                               //
-        //    /path/to/me.jsx                                                                                   //
-        //    Call $.__fileName('me.jsx') and you will get /path/to/me.jsx even after executing a second script //
-        // Note When the forceEvalScript option is used then you just use the regular $.fileName property       //
-        //////////////////////////////////////////////////////////////////////////////////////////////////////////
-        fileNameScript = [
-            // The if statement should not normally be executed
-            'if(!$.__fileNames){',
-            '    $.__fileNames = {};',
-            '    $.__dirname = "__dirname__";'.replace('__dirname__', __dirname),
-            '    $.__fileName = function(name){',
-            '        name = name || $.fileName;',
-            '        return ($.__fileNames && $.__fileNames[name]) || $.fileName;',
-            '    };',
-            '}',
-            '$.__fileNames["__basename__"] = $.__fileNames["" + $.fileName] = "__fileName__";'
-        ].join('');
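On the jsx side, the injected helper above can then be queried like this (a hedged sketch; me.jsx stands for any file run through jsx.evalFile):

```javascript
// Inside me.jsx: $.fileName is unreliable here (an integer on
// Illustrator, an empty string on InDesign), so ask the injected
// helper instead, keyed by base name so the answer survives later
// evalFile calls.
var myPath = $.__fileName('me.jsx'); // e.g. /path/to/me.jsx
alert(myPath);
```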
-
-        //////////////////////////////////////////////////////////////////////////////////////////
-        // sort out the order of the arguments into jsxScript, callback, replacements, forceEval //
-        //////////////////////////////////////////////////////////////////////////////////////////
-
-
-        // Detect if the parameters were passed as an object and if so allow for various keys
-        if (args.length === 1 && (arg = args[0]) instanceof Object) {
-            jsxScript = arg.jsxScript || arg.jsx || arg.script || arg.file || arg.jsxscript;
-            callback = arg.callBack || arg.callback;
-            replacements = arg.replacements || arg.replace;
-            forceEval = arg.eval || arg.forceEval || arg.forceeval;
-        } else {
-            for (i = 0; i < 5; i++) {
-                arg = args[i];
-                if (arg === undefined) {
-                    continue;
-                }
-                if (arg.constructor.name === 'String') {
-                    jsxScript = arg;
-                    continue;
-                }
-                if (arg.constructor.name === 'Object') {
-                    //////////////////////////////////////////////////////////////////////////////////////////////////////////////
-                    // If no replacements are provided then the $.evalFile method will be used                                   //
-                    // This will allow directly for the $.fileName property to be used                                           //
-                    // If one does not want the $.evalFile method to be used then                                                //
-                    // either send a blank object as the replacements {}                                                         //
-                    // or explicitly set the forceEvalScript option to false                                                     //
-                    // This can only be done if the parameters are passed as an object                                           //
-                    // i.e. jsx.evalFile({file:'myFabScript.jsx', forceEvalScript: false});                                      //
-                    // if the file was called using                                                                              //
-                    // i.e. jsx.evalFile('myFabScript.jsx');                                                                     //
-                    // then the following jsx code is called $.evalFile(new File('Path/to/myFabScript.jsx', 10000000000)) + '';  //
-                    // forceEval is never needed if the forceEvalScript is triggered                                             //
-                    //////////////////////////////////////////////////////////////////////////////////////////////////////////////
-                    replacements = arg;
-                    continue;
-                }
-                if (arg.constructor === Function) {
-                    callback = arg;
-                    continue;
-                }
-                if (arg === false) {
-                    forceEval = false;
-                }
-            }
-        }
-
-        // If no script is provided then there's not too much to do!
-        if (!jsxScript) {
-            return false;
-        }
-
-        forceEvalScript = !replacements;
-
-
-        //////////////////////////////////////////////////////
-        // Get path of script                                //
-        // Check if it's literal, relative or in jsx folder  //
-        //////////////////////////////////////////////////////
-
-        if (/^\/|[a-zA-Z]+:/.test(jsxScript)) { // absolute path Mac | Windows
-            jsxScript = path.normalize(jsxScript);
-        } else if (/^\.+\//.test(jsxScript)) {
-            jsxScript = path.join(__dirname, jsxScript); // relative path
-        } else {
-            jsxScript = path.join(jsxFolder, jsxScript); // files in the jsxFolder
-        }
-
-        if (forceEvalScript) {
-            jsxScript = jsxScript.replace(/"/g, '\\"');
-            // Check that the path exists, should change this to asynchronous at some point
-            if (!window.cep.fs.stat(jsxScript).err) {
-                jsxScript = fileNameScript.replace(/__fileName__/, jsxScript).replace(/__basename__/, path.basename(jsxScript)) +
-                    '$.evalFile(new File("' + jsxScript.replace(/\\/g, '\\\\') + '")) + "";';
-                return this.evalScript(jsxScript, callback, forceEval);
-            } else {
-                throw new Error(`The file: ${jsxScript} could not be found / read`);
-            }
-        }
-
-        ////////////////////////////////////////////////////////////////////////////////////////////////
-        // Replacements made so we can't use $.evalFile and need to read the jsx script for ourselves  //
-        ////////////////////////////////////////////////////////////////////////////////////////////////
-
-        fileName = jsxScript.replace(/\\/g, '\\\\').replace(/"/g, '\\"');
-        try {
-            jsxScript = window.cep.fs.readFile(jsxScript).data;
-        } catch (er) {
-            throw new Error(`The file: ${fileName} could not be read`);
-        }
-        // It is desirable that the injected fileNameScript is on the same line as the 1st line of the script
-        // This is so that the $.line or error.line returns the same value as the actual file
-        // However if the 1st line contains a # directive then we need to insert a new line and stuff the above problem
-        // When possible i.e. when there's no replacements then $.evalFile will be used and then the whole issue is avoided
-        newLine = /^\s*#/.test(jsxScript) ? '\n' : '';
-        jsxScript = fileNameScript.replace(/__fileName__/, fileName).replace(/__basename__/, path.basename(fileName)) + newLine + jsxScript;
-
-        try {
-            // evalScript(jsxScript, callback);
-            return this.evalScript(jsxScript, callback, replacements, forceEval);
-        } catch (err) {
-            ////////////////////////////////////////////////
-            // Do whatever error handling you want here !
// - //////////////////////////////////////////////// - var newErr; - newErr = new Error(err); - alert('Error Eek: ' + newErr.stack); - return false; - } - - return success; // success should be an array but for now it's a Boolean - }; - - - //////////////////////////////////// - // Setup alternative method names // - //////////////////////////////////// - Jsx.prototype.eval = Jsx.prototype.script = Jsx.prototype.evalscript = Jsx.prototype.evalScript; - Jsx.prototype.file = Jsx.prototype.evalfile = Jsx.prototype.evalFile; - - /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - // Examples // - // jsx.evalScript('alert("foo");'); // - // jsx.evalFile('foo.jsx'); // where foo.jsx is stored in the jsx folder at the base of the extensions directory // - // jsx.evalFile('../myFolder/foo.jsx'); // where a relative or absolute file path is given // - // // - // using conventional methods one would use in the case were the values to swap were supplied by variables // - // csInterface.evalScript('var q = "' + name + '"; alert("' + myString + '" ' + myOp + ' q);q;', callback); // - // Using all the '' + foo + '' is very error prone // - // jsx.evalScript('var q = "__name__"; alert(__string__ __opp__ q);q;',{'name':'Fred', 'string':'Hello ', 'opp':'+'}, callBack); // - // is much simpler and less error prone // - // // - // more readable to use object // - // jsx.evalFile({ // - // file: 'yetAnotherFabScript.jsx', // - // replacements: {"this": foo, That: bar, and: "&&", the: foo2, other: bar2}, // - // eval: true // - // }) // - // Enjoy // - /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - - - jsx = new Jsx(); -})(); diff --git a/server_addon/photoshop/client/ayon_photoshop/api/extension/host/index.jsx b/server_addon/photoshop/client/ayon_photoshop/api/extension/host/index.jsx deleted file mode 100644 index b697ee65ab..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/api/extension/host/index.jsx +++ /dev/null @@ -1,484 +0,0 @@ -#include "json.js"; -#target photoshop - -var LogFactory=function(file,write,store,level,defaultStatus,continuing){if(file&&(file.constructor===String||file.constructor===File)){file={file:file};}else if(!file)file={file:{}};write=(file.write!==undefined)?file.write:write;if(write===undefined){write=true;}store=(file.store!==undefined)?file.store||false:store||false;level=(file.level!==undefined)?file.level:level;defaultStatus=(file.defaultStatus!==undefined)?file.defaultStatus:defaultStatus;if(defaultStatus===undefined){defaultStatus='LOG';}continuing=(file.continuing!==undefined)?file.continuing:continuing||false;file=file.file||{};var stack,times,logTime,logPoint,icons,statuses,LOG_LEVEL,LOG_STATUS;stack=[];times=[];logTime=new Date();logPoint='Log Factory 
Start';icons={"1":"\ud83d\udd50","130":"\ud83d\udd5c","2":"\ud83d\udd51","230":"\ud83d\udd5d","3":"\ud83d\udd52","330":"\ud83d\udd5e","4":"\ud83d\udd53","430":"\ud83d\udd5f","5":"\ud83d\udd54","530":"\ud83d\udd60","6":"\ud83d\udd55","630":"\ud83d\udd61","7":"\ud83d\udd56","730":"\ud83d\udd62","8":"\ud83d\udd57","830":"\ud83d\udd63","9":"\ud83d\udd58","930":"\ud83d\udd64","10":"\ud83d\udd59","1030":"\ud83d\udd65","11":"\ud83d\udd5a","1130":"\ud83d\udd66","12":"\ud83d\udd5b","1230":"\ud83d\udd67","AIRPLANE":"\ud83d\udee9","ALARM":"\u23f0","AMBULANCE":"\ud83d\ude91","ANCHOR":"\u2693","ANGRY":"\ud83d\ude20","ANGUISHED":"\ud83d\ude27","ANT":"\ud83d\udc1c","ANTENNA":"\ud83d\udce1","APPLE":"\ud83c\udf4f","APPLE2":"\ud83c\udf4e","ATM":"\ud83c\udfe7","ATOM":"\u269b","BABYBOTTLE":"\ud83c\udf7c","BAD:":"\ud83d\udc4e","BANANA":"\ud83c\udf4c","BANDAGE":"\ud83e\udd15","BANK":"\ud83c\udfe6","BATTERY":"\ud83d\udd0b","BED":"\ud83d\udecf","BEE":"\ud83d\udc1d","BEER":"\ud83c\udf7a","BELL":"\ud83d\udd14","BELLOFF":"\ud83d\udd15","BIRD":"\ud83d\udc26","BLACKFLAG":"\ud83c\udff4","BLUSH":"\ud83d\ude0a","BOMB":"\ud83d\udca3","BOOK":"\ud83d\udcd5","BOOKMARK":"\ud83d\udd16","BOOKS":"\ud83d\udcda","BOW":"\ud83c\udff9","BOWLING":"\ud83c\udfb3","BRIEFCASE":"\ud83d\udcbc","BROKEN":"\ud83d\udc94","BUG":"\ud83d\udc1b","BUILDING":"\ud83c\udfdb","BUILDINGS":"\ud83c\udfd8","BULB":"\ud83d\udca1","BUS":"\ud83d\ude8c","CACTUS":"\ud83c\udf35","CALENDAR":"\ud83d\udcc5","CAMEL":"\ud83d\udc2a","CAMERA":"\ud83d\udcf7","CANDLE":"\ud83d\udd6f","CAR":"\ud83d\ude98","CAROUSEL":"\ud83c\udfa0","CASTLE":"\ud83c\udff0","CATEYES":"\ud83d\ude3b","CATJOY":"\ud83d\ude39","CATMOUTH":"\ud83d\ude3a","CATSMILE":"\ud83d\ude3c","CD":"\ud83d\udcbf","CHECK":"\u2714","CHEQFLAG":"\ud83c\udfc1","CHICK":"\ud83d\udc25","CHICKEN":"\ud83d\udc14","CHICKHEAD":"\ud83d\udc24","CIRCLEBLACK":"\u26ab","CIRCLEBLUE":"\ud83d\udd35","CIRCLERED":"\ud83d\udd34","CIRCLEWHITE":"\u26aa","CIRCUS":"\ud83c\udfaa","CLAPPER":"\ud83c\udfac","CLAPPING":"\ud83d\udc4f","CLIP":"\ud83d\udcce","CLIPBOARD":"\ud83d\udccb","CLOUD":"\ud83c\udf28","CLOVER":"\ud83c\udf40","CLOWN":"\ud83e\udd21","COLDSWEAT":"\ud83d\ude13","COLDSWEAT2":"\ud83d\ude30","COMPRESS":"\ud83d\udddc","CONFOUNDED":"\ud83d\ude16","CONFUSED":"\ud83d\ude15","CONSTRUCTION":"\ud83d\udea7","CONTROL":"\ud83c\udf9b","COOKIE":"\ud83c\udf6a","COOKING":"\ud83c\udf73","COOL":"\ud83d\ude0e","COOLBOX":"\ud83c\udd92","COPYRIGHT":"\u00a9","CRANE":"\ud83c\udfd7","CRAYON":"\ud83d\udd8d","CREDITCARD":"\ud83d\udcb3","CROSS":"\u2716","CROSSBOX:":"\u274e","CRY":"\ud83d\ude22","CRYCAT":"\ud83d\ude3f","CRYSTALBALL":"\ud83d\udd2e","CUSTOMS":"\ud83d\udec3","DELICIOUS":"\ud83d\ude0b","DERELICT":"\ud83c\udfda","DESKTOP":"\ud83d\udda5","DIAMONDLB":"\ud83d\udd37","DIAMONDLO":"\ud83d\udd36","DIAMONDSB":"\ud83d\udd39","DIAMONDSO":"\ud83d\udd38","DICE":"\ud83c\udfb2","DISAPPOINTED":"\ud83d\ude1e","CRY2":"\ud83d\ude25","DIVISION":"\u2797","DIZZY":"\ud83d\ude35","DOLLAR":"\ud83d\udcb5","DOLLAR2":"\ud83d\udcb2","DOWNARROW":"\u2b07","DVD":"\ud83d\udcc0","EJECT":"\u23cf","ELEPHANT":"\ud83d\udc18","EMAIL":"\ud83d\udce7","ENVELOPE":"\ud83d\udce8","ENVELOPE2":"\u2709","ENVELOPE_DOWN":"\ud83d\udce9","EURO":"\ud83d\udcb6","EVIL":"\ud83d\ude08","EXPRESSIONLESS":"\ud83d\ude11","EYES":"\ud83d\udc40","FACTORY":"\ud83c\udfed","FAX":"\ud83d\udce0","FEARFUL":"\ud83d\ude28","FILEBOX":"\ud83d\uddc3","FILECABINET":"\ud83d\uddc4","FIRE":"\ud83d\udd25","FIREENGINE":"\ud83d\ude92","FIST":"\ud83d\udc4a","FLOWER":"\ud83c\udf37","FLOWER2":"\ud83c\udf38","FLUSHED":"\ud83d\ude33"
,"FOLDER":"\ud83d\udcc1","FOLDER2":"\ud83d\udcc2","FREE":"\ud83c\udd93","FROG":"\ud83d\udc38","FROWN":"\ud83d\ude41","GEAR":"\u2699","GLOBE":"\ud83c\udf0d","GLOWINGSTAR":"\ud83c\udf1f","GOOD:":"\ud83d\udc4d","GRIMACING":"\ud83d\ude2c","GRIN":"\ud83d\ude00","GRINNINGCAT":"\ud83d\ude38","HALO":"\ud83d\ude07","HAMMER":"\ud83d\udd28","HAMSTER":"\ud83d\udc39","HAND":"\u270b","HANDDOWN":"\ud83d\udc47","HANDLEFT":"\ud83d\udc48","HANDRIGHT":"\ud83d\udc49","HANDUP":"\ud83d\udc46","HATCHING":"\ud83d\udc23","HAZARD":"\u2623","HEADPHONE":"\ud83c\udfa7","HEARNOEVIL":"\ud83d\ude49","HEARTBLUE":"\ud83d\udc99","HEARTEYES":"\ud83d\ude0d","HEARTGREEN":"\ud83d\udc9a","HEARTYELLOW":"\ud83d\udc9b","HELICOPTER":"\ud83d\ude81","HERB":"\ud83c\udf3f","HIGH_BRIGHTNESS":"\ud83d\udd06","HIGHVOLTAGE":"\u26a1","HIT":"\ud83c\udfaf","HONEY":"\ud83c\udf6f","HOT":"\ud83c\udf36","HOURGLASS":"\u23f3","HOUSE":"\ud83c\udfe0","HUGGINGFACE":"\ud83e\udd17","HUNDRED":"\ud83d\udcaf","HUSHED":"\ud83d\ude2f","ID":"\ud83c\udd94","INBOX":"\ud83d\udce5","INDEX":"\ud83d\uddc2","JOY":"\ud83d\ude02","KEY":"\ud83d\udd11","KISS":"\ud83d\ude18","KISS2":"\ud83d\ude17","KISS3":"\ud83d\ude19","KISS4":"\ud83d\ude1a","KISSINGCAT":"\ud83d\ude3d","KNIFE":"\ud83d\udd2a","LABEL":"\ud83c\udff7","LADYBIRD":"\ud83d\udc1e","LANDING":"\ud83d\udeec","LAPTOP":"\ud83d\udcbb","LEFTARROW":"\u2b05","LEMON":"\ud83c\udf4b","LIGHTNINGCLOUD":"\ud83c\udf29","LINK":"\ud83d\udd17","LITTER":"\ud83d\udeae","LOCK":"\ud83d\udd12","LOLLIPOP":"\ud83c\udf6d","LOUDSPEAKER":"\ud83d\udce2","LOW_BRIGHTNESS":"\ud83d\udd05","MAD":"\ud83d\ude1c","MAGNIFYING_GLASS":"\ud83d\udd0d","MASK":"\ud83d\ude37","MEDAL":"\ud83c\udf96","MEMO":"\ud83d\udcdd","MIC":"\ud83c\udfa4","MICROSCOPE":"\ud83d\udd2c","MINUS":"\u2796","MOBILE":"\ud83d\udcf1","MONEY":"\ud83d\udcb0","MONEYMOUTH":"\ud83e\udd11","MONKEY":"\ud83d\udc35","MOUSE":"\ud83d\udc2d","MOUSE2":"\ud83d\udc01","MOUTHLESS":"\ud83d\ude36","MOVIE":"\ud83c\udfa5","MUGS":"\ud83c\udf7b","NERD":"\ud83e\udd13","NEUTRAL":"\ud83d\ude10","NEW":"\ud83c\udd95","NOENTRY":"\ud83d\udeab","NOTEBOOK":"\ud83d\udcd4","NOTEPAD":"\ud83d\uddd2","NUTANDBOLT":"\ud83d\udd29","O":"\u2b55","OFFICE":"\ud83c\udfe2","OK":"\ud83c\udd97","OKHAND":"\ud83d\udc4c","OLDKEY":"\ud83d\udddd","OPENLOCK":"\ud83d\udd13","OPENMOUTH":"\ud83d\ude2e","OUTBOX":"\ud83d\udce4","PACKAGE":"\ud83d\udce6","PAGE":"\ud83d\udcc4","PAINTBRUSH":"\ud83d\udd8c","PALETTE":"\ud83c\udfa8","PANDA":"\ud83d\udc3c","PASSPORT":"\ud83d\udec2","PAWS":"\ud83d\udc3e","PEN":"\ud83d\udd8a","PEN2":"\ud83d\udd8b","PENSIVE":"\ud83d\ude14","PERFORMING":"\ud83c\udfad","PHONE":"\ud83d\udcde","PILL":"\ud83d\udc8a","PING":"\u2757","PLATE":"\ud83c\udf7d","PLUG":"\ud83d\udd0c","PLUS":"\u2795","POLICE":"\ud83d\ude93","POLICELIGHT":"\ud83d\udea8","POSTOFFICE":"\ud83c\udfe4","POUND":"\ud83d\udcb7","POUTING":"\ud83d\ude21","POUTINGCAT":"\ud83d\ude3e","PRESENT":"\ud83c\udf81","PRINTER":"\ud83d\udda8","PROJECTOR":"\ud83d\udcfd","PUSHPIN":"\ud83d\udccc","QUESTION":"\u2753","RABBIT":"\ud83d\udc30","RADIOACTIVE":"\u2622","RADIOBUTTON":"\ud83d\udd18","RAINCLOUD":"\ud83c\udf27","RAT":"\ud83d\udc00","RECYCLE":"\u267b","REGISTERED":"\u00ae","RELIEVED":"\ud83d\ude0c","ROBOT":"\ud83e\udd16","ROCKET":"\ud83d\ude80","ROLLING":"\ud83d\ude44","ROOSTER":"\ud83d\udc13","RULER":"\ud83d\udccf","SATELLITE":"\ud83d\udef0","SAVE":"\ud83d\udcbe","SCHOOL":"\ud83c\udfeb","SCISSORS":"\u2702","SCREAMING":"\ud83d\ude31","SCROLL":"\ud83d\udcdc","SEAT":"\ud83d\udcba","SEEDLING":"\ud83c\udf31","SEENOEVIL":"\ud83d\ude48","SHIELD":"\ud83d\udee1","SHIP":"\ud83d\
udea2","SHOCKED":"\ud83d\ude32","SHOWER":"\ud83d\udebf","SLEEPING":"\ud83d\ude34","SLEEPY":"\ud83d\ude2a","SLIDER":"\ud83c\udf9a","SLOT":"\ud83c\udfb0","SMILE":"\ud83d\ude42","SMILING":"\ud83d\ude03","SMILINGCLOSEDEYES":"\ud83d\ude06","SMILINGEYES":"\ud83d\ude04","SMILINGSWEAT":"\ud83d\ude05","SMIRK":"\ud83d\ude0f","SNAIL":"\ud83d\udc0c","SNAKE":"\ud83d\udc0d","SOCCER":"\u26bd","SOS":"\ud83c\udd98","SPEAKER":"\ud83d\udd08","SPEAKEROFF":"\ud83d\udd07","SPEAKNOEVIL":"\ud83d\ude4a","SPIDER":"\ud83d\udd77","SPIDERWEB":"\ud83d\udd78","STAR":"\u2b50","STOP":"\u26d4","STOPWATCH":"\u23f1","SULK":"\ud83d\ude26","SUNFLOWER":"\ud83c\udf3b","SUNGLASSES":"\ud83d\udd76","SYRINGE":"\ud83d\udc89","TAKEOFF":"\ud83d\udeeb","TAXI":"\ud83d\ude95","TELESCOPE":"\ud83d\udd2d","TEMPORATURE":"\ud83e\udd12","TENNIS":"\ud83c\udfbe","THERMOMETER":"\ud83c\udf21","THINKING":"\ud83e\udd14","THUNDERCLOUD":"\u26c8","TICKBOX":"\u2705","TICKET":"\ud83c\udf9f","TIRED":"\ud83d\ude2b","TOILET":"\ud83d\udebd","TOMATO":"\ud83c\udf45","TONGUE":"\ud83d\ude1b","TOOLS":"\ud83d\udee0","TORCH":"\ud83d\udd26","TORNADO":"\ud83c\udf2a","TOUNG2":"\ud83d\ude1d","TRADEMARK":"\u2122","TRAFFICLIGHT":"\ud83d\udea6","TRASH":"\ud83d\uddd1","TREE":"\ud83c\udf32","TRIANGLE_LEFT":"\u25c0","TRIANGLE_RIGHT":"\u25b6","TRIANGLEDOWN":"\ud83d\udd3b","TRIANGLEUP":"\ud83d\udd3a","TRIANGULARFLAG":"\ud83d\udea9","TROPHY":"\ud83c\udfc6","TRUCK":"\ud83d\ude9a","TRUMPET":"\ud83c\udfba","TURKEY":"\ud83e\udd83","TURTLE":"\ud83d\udc22","UMBRELLA":"\u26f1","UNAMUSED":"\ud83d\ude12","UPARROW":"\u2b06","UPSIDEDOWN":"\ud83d\ude43","WARNING":"\u26a0","WATCH":"\u231a","WAVING":"\ud83d\udc4b","WEARY":"\ud83d\ude29","WEARYCAT":"\ud83d\ude40","WHITEFLAG":"\ud83c\udff3","WINEGLASS":"\ud83c\udf77","WINK":"\ud83d\ude09","WORRIED":"\ud83d\ude1f","WRENCH":"\ud83d\udd27","X":"\u274c","YEN":"\ud83d\udcb4","ZIPPERFACE":"\ud83e\udd10","UNDEFINED":"","":""};statuses={F:'FATAL',B:'BUG',C:'CRITICAL',E:'ERROR',W:'WARNING',I:'INFO',IM:'IMPORTANT',D:'DEBUG',L:'LOG',CO:'CONSTANT',FU:'FUNCTION',R:'RETURN',V:'VARIABLE',S:'STACK',RE:'RESULT',ST:'STOPPER',TI:'TIMER',T:'TRACE'};LOG_LEVEL={NONE:7,OFF:7,FATAL:6,ERROR:5,WARN:4,INFO:3,UNDEFINED:2,'':2,DEFAULT:2,DEBUG:2,TRACE:1,ON:0,ALL:0,};LOG_STATUS={OFF:LOG_LEVEL.OFF,NONE:LOG_LEVEL.OFF,NO:LOG_LEVEL.OFF,NOPE:LOG_LEVEL.OFF,FALSE:LOG_LEVEL.OFF,FATAL:LOG_LEVEL.FATAL,BUG:LOG_LEVEL.ERROR,CRITICAL:LOG_LEVEL.ERROR,ERROR:LOG_LEVEL.ERROR,WARNING:LOG_LEVEL.WARN,INFO:LOG_LEVEL.INFO,IMPORTANT:LOG_LEVEL.INFO,DEBUG:LOG_LEVEL.DEBUG,LOG:LOG_LEVEL.DEBUG,STACK:LOG_LEVEL.DEBUG,CONSTANT:LOG_LEVEL.DEBUG,FUNCTION:LOG_LEVEL.DEBUG,VARIABLE:LOG_LEVEL.DEBUG,RETURN:LOG_LEVEL.DEBUG,RESULT:LOG_LEVEL.TRACE,STOPPER:LOG_LEVEL.TRACE,TIMER:LOG_LEVEL.TRACE,TRACE:LOG_LEVEL.TRACE,ALL:LOG_LEVEL.ALL,YES:LOG_LEVEL.ALL,YEP:LOG_LEVEL.ALL,TRUE:LOG_LEVEL.ALL};var logFile,logFolder;var LOG=function(message,status,icon){if(LOG.level!==LOG_LEVEL.OFF&&(LOG.write||LOG.store)&&LOG.arguments.length)return LOG.addMessage(message,status,icon);};LOG.logDecodeLevel=function(level){if(level==~~level)return Math.abs(level);var lev;level+='';level=level.toUpperCase();if(level in statuses){level=statuses[level];}lev=LOG_LEVEL[level];if(lev!==undefined)return lev;lev=LOG_STATUS[level];if(lev!==undefined)return lev;return LOG_LEVEL.DEFAULT;};LOG.write=write;LOG.store=store;LOG.level=LOG.logDecodeLevel(level);LOG.status=defaultStatus;LOG.addMessage=function(message,status,icon){var date=new 
Date(),count,bool,logStatus;if(status&&status.constructor.name==='String'){status=status.toUpperCase();status=statuses[status]||status;}else status=LOG.status;logStatus=LOG_STATUS[status]||LOG_STATUS.ALL;if(logStatus999)?'['+LOG.count+'] ':(' ['+LOG.count+'] ').slice(-7);message=count+status+icon+(message instanceof Object?message.toSource():message)+date;if(LOG.store){stack.push(message);}if(LOG.write){bool=file&&file.writable&&logFile.writeln(message);if(!bool){file.writable=true;LOG.setFile(logFile);logFile.writeln(message);}}LOG.count++;return true;};var logNewFile=function(file,isCookie,overwrite){file.encoding='UTF-8';file.lineFeed=($.os[0]=='M')?'Macintosh':' Windows';if(isCookie)return file.open(overwrite?'w':'e')&&file;file.writable=LOG.write;logFile=file;logFolder=file.parent;if(continuing){LOG.count=LOG.setCount(file);}return(!LOG.write&&file||(file.open('a')&&file));};LOG.setFile=function(file,isCookie,overwrite){var bool,folder,fileName,suffix,newFileName,f,d,safeFileName;d=new Date();f=$.stack.split("\n")[0].replace(/^\[\(?/,'').replace(/\)?\]$/,'');if(f==~~f){f=$.fileName.replace(/[^\/]+\//g,'');}safeFileName=File.encode((isCookie?'/COOKIE_':'/LOG_')+f.replace(/^\//,'')+'_'+(1900+d.getYear())+(''+d).replace(/...(...)(..).+/,'_$1_$2')+(isCookie?'.txt':'.log'));if(file&&file.constructor.name=='String'){file=(file.match('/'))?new File(file):new File((logFolder||Folder.temp)+'/'+file);}if(file instanceof File){folder=file.parent;bool=folder.exists||folder.create();if(!bool)folder=Folder.temp;fileName=File.decode(file.name);suffix=fileName.match(/\.[^.]+$/);suffix=suffix?suffix[0]:'';fileName='/'+fileName;newFileName=fileName.replace(/\.[^.]+$/,'')+'_'+(+(new Date())+suffix);f=logNewFile(file,isCookie,overwrite);if(f)return f;f=logNewFile(new File(folder+newFileName),isCookie,overwrite);if(f)return f;f=logNewFile(new File(folder+safeFileName),isCookie,overwrite);if(f)return f;if(folder!=Folder.temp){f=logNewFile(new File(Folder.temp+fileName),isCookie,overwrite);if(f)return f;f=logNewFile(new File(Folder.temp+safeFileName),isCookie,overwrite);return f||new File(Folder.temp+safeFileName);}}return LOG.setFile(((logFile&&!isCookie)?new File(logFile):new File(Folder.temp+safeFileName)),isCookie,overwrite );};LOG.setCount=function(file){if(~~file===file){LOG.count=file;return LOG.count;}if(file===undefined){file=logFile;}if(file&&file.constructor===String){file=new File(file);}var logNumbers,contents;if(!file.length||!file.exists){LOG.count=1;return 1;}file.open('r');file.encoding='utf-8';file.seek(10000,2);contents='\n'+file.read();logNumbers=contents.match(/\n{0,3}\[\d+\] \[\w+\]+/g);if(logNumbers){logNumbers=+logNumbers[logNumbers.length-1].match(/\d+/)+1;file.close();LOG.count=logNumbers;return logNumbers;}if(file.length<10001){file.close();LOG.count=1;return 1;}file.seek(10000000,2);contents='\n'+file.read();logNumbers=contents.match(/\n{0,3}\[\d+\] \[\w+\]+/g);if(logNumbers){logNumbers=+logNumbers[logNumbers.length-1].match(/\d+/)+1;file.close();LOG.count=logNumbers;return logNumbers;}file.close();LOG.count=1;return 1;};LOG.setLevel=function(level){LOG.level=LOG.logDecodeLevel(level);return LOG.level;};LOG.setStatus=function(status){status=(''+status).toUpperCase();LOG.status=statuses[status]||status;return LOG.status;};LOG.cookie=function(file,level,overwrite,setLevel){var 
log,cookie;if(!file){file={file:file};}if(file&&(file.constructor===String||file.constructor===File)){file={file:file};}log=file;if(log.level===undefined){log.level=(level!==undefined)?level:'NONE';}if(log.overwrite===undefined){log.overwrite=(overwrite!==undefined)?overwrite:false;}if(log.setLevel===undefined){log.setLevel=(setLevel!==undefined)?setLevel:true;}setLevel=log.setLevel;overwrite=log.overwrite;level=log.level;file=log.file;file=LOG.setFile(file,true,overwrite);if(overwrite){file.write(level);}else{cookie=file.read();if(cookie.length){level=cookie;}else{file.write(level);}}file.close();if(setLevel){LOG.setLevel(level);}return{path:file,level:level};};LOG.args=function(args,funct,line){if(LOG.level>LOG_STATUS.FUNCTION)return;if(!(args&&(''+args.constructor).replace(/\s+/g,'')==='functionObject(){[nativecode]}'))return;if(!LOG.args.STRIP_COMMENTS){LOG.args.STRIP_COMMENTS=/((\/.*$)|(\/\*[\s\S]*?\*\/))/mg;}if(!LOG.args.ARGUMENT_NAMES){LOG.args.ARGUMENT_NAMES=/([^\s,]+)/g;}if(!LOG.args.OUTER_BRACKETS){LOG.args.OUTER_BRACKETS=/^\((.+)?\)$/;}if(!LOG.args.NEW_SOMETHING){LOG.args.NEW_SOMETHING=/^new \w+\((.+)?\)$/;}var functionString,argumentNames,stackInfo,report,functionName,arg,argsL,n,argName,argValue,argsTotal;if(funct===~~funct){line=funct;}if(!(funct instanceof Function)){funct=args.callee;}if(!(funct instanceof Function))return;functionName=funct.name;functionString=(''+funct).replace(LOG.args.STRIP_COMMENTS,'');argumentNames=functionString.slice(functionString.indexOf('(')+1,functionString.indexOf(')')).match(LOG.args.ARGUMENT_NAMES);argumentNames=argumentNames||[];report=[];report.push('--------------');report.push('Function Data:');report.push('--------------');report.push('Function Name:'+functionName);argsL=args.length;stackInfo=$.stack.split(/[\n\r]/);stackInfo.pop();stackInfo=stackInfo.join('\n ');report.push('Call stack:'+stackInfo);if(line){report.push('Function Line around:'+line);}report.push('Arguments Provided:'+argsL);report.push('Named Arguments:'+argumentNames.length);if(argumentNames.length){report.push('Arguments Names:'+argumentNames.join(','));}if(argsL){report.push('----------------');report.push('Argument Values:');report.push('----------------');}argsTotal=Math.max(argsL,argumentNames.length);for(n=0;n=argsL){argValue='NO VALUE PROVIDED';}else if(arg===undefined){argValue='undefined';}else if(arg===null){argValue='null';}else{argValue=arg.toSource().replace(LOG.args.OUTER_BRACKETS,'$1').replace(LOG.args.NEW_SOMETHING,'$1');}report.push((argName?argName:'arguments['+n+']')+':'+argValue);}report.push('');report=report.join('\n ');LOG(report,'f');return report;};LOG.stack=function(reverse){var st=$.stack.split('\n');st.pop();st.pop();if(reverse){st.reverse();}return LOG(st.join('\n '),'s');};LOG.values=function(values){var n,value,map=[];if(!(values instanceof Object||values instanceof Array)){return;}if(!LOG.values.OUTER_BRACKETS){LOG.values.OUTER_BRACKETS=/^\((.+)?\)$/;}if(!LOG.values.NEW_SOMETHING){LOG.values.NEW_SOMETHING=/^new \w+\((.+)?\)$/;}for(n in values){try{value=values[n];if(value===undefined){value='undefined';}else if(value===null){value='null';}else{value=value.toSource().replace(LOG.values.OUTER_BRACKETS,'$1').replace(LOG.values.NEW_SOMETHING,'$1');}}catch(e){value='\uD83D\uDEAB '+e;}map.push(n+':'+value);}if(map.length){map=map.join('\n ')+'\n ';return LOG(map,'v');}};LOG.reset=function(all){stack.length=0;LOG.count=1;if(all!==false){if(logFile instanceof 
File){logFile.close();}logFile=LOG.store=LOG.writeToFile=undefined;LOG.write=true;logFolder=Folder.temp;logTime=new Date();logPoint='After Log Reset';}};LOG.stopper=function(message){var newLogTime,t,m,newLogPoint;newLogTime=new Date();newLogPoint=(LOG.count!==undefined)?'LOG#'+LOG.count:'BEFORE LOG#1';LOG.time=t=newLogTime-logTime;if(message===false){return;}message=message||'Stopper start point';t=LOG.prettyTime(t);m=message+'\n '+'From '+logPoint+' to '+newLogPoint+' took '+t+' Starting '+logTime+' '+logTime.getMilliseconds()+'ms'+' Ending '+newLogTime+' '+newLogTime.getMilliseconds()+'ms';LOG(m,'st');logPoint=newLogPoint;logTime=newLogTime;return m;};LOG.start=function(message){var t=new Date();times.push([t,(message!==undefined)?message+'':'']);};LOG.stop=function(message){if(!times.length)return;message=(message)?message+' ':'';var nt,startLog,ot,om,td,m;nt=new Date();startLog=times.pop();ot=startLog[0];om=startLog[1];td=nt-ot;if(om.length){om+=' ';}m=om+'STARTED ['+ot+' '+ot.getMilliseconds()+'ms]\n '+message+'FINISHED ['+nt+' '+nt.getMilliseconds()+'ms]\n TOTAL TIME ['+LOG.prettyTime(td)+']';LOG(m,'ti');return m;};LOG.prettyTime=function(t){var h,m,s,ms;h=Math.floor(t / 3600000);m=Math.floor((t % 3600000)/ 60000);s=Math.floor((t % 60000)/ 1000);ms=t % 1000;t=(!t)?'<1ms':((h)?h+' hours ':'')+((m)?m+' minutes ':'')+((s)?s+' seconds ':'')+((ms&&(h||m||s))?'&':'')+((ms)?ms+'ms':'');return t;};LOG.get=function(){if(!stack.length)return 'THE LOG IS NOT SET TO STORE';var a=fetchLogLines(arguments);return a?'\n'+a.join('\n'):'NO LOGS AVAILABLE';};var fetchLogLines=function(){var args=arguments[0];if(!args.length)return stack;var c,n,l,a=[],ln,start,end,j,sl;l=args.length;sl=stack.length-1;n=0;for(c=0;cln)?sl+ln+1:ln-1;if(ln>=0&&ln<=sl)a[n++]=stack[ln];}else if(ln instanceof Array&&ln.length===2){start=ln[0];end=ln[1];if(!(~~start===start&&~~end===end))continue;start=(0>start)?sl+start+1:start-1;end=(0>end)?sl+end+1:end-1;start=Math.max(Math.min(sl,start),0);end=Math.min(Math.max(end,0),sl);if(start<=end)for(j=start;j<=end;j++)a[n++]=stack[j];else for(j=start;j>=end;j--)a[n++]=stack[j];}}return(n)?a:false;};LOG.file=function(){return logFile;};LOG.openFolder=function(){if(logFolder)return logFolder.execute();};LOG.show=LOG.execute=function(){if(logFile)return logFile.execute();};LOG.close=function(){if(logFile)return logFile.close();};LOG.setFile(file);if(!$.summary.difference){$.summary.difference=function(){return $.summary().replace(/ *([0-9]+)([^ ]+)(\n?)/g,$.summary.updateSnapshot );};}if(!$.summary.updateSnapshot){$.summary.updateSnapshot=function(full,count,name,lf){var snapshot=$.summary.snapshot;count=Number(count);var prev=snapshot[name]?snapshot[name]:0;snapshot[name]=count;var diff=count-prev;if(diff===0)return "";return " ".substring(String(diff).length)+diff+" "+name+lf;};}if(!$.summary.snapshot){$.summary.snapshot=[];$.summary.difference();}$.gc();$.gc();$.summary.difference();LOG.sumDiff=function(message){$.gc();$.gc();var diff=$.summary.difference();if(diff.length<8){diff=' - NONE -';}if(message===undefined){message='';}message+=diff;return LOG('$.summary.difference():'+message,'v');};return LOG;}; - -var log = new LogFactory('myLog.log'); // =>; creates the new log factory - put full path where - -function getEnv(variable){ - return $.getenv(variable); -} - -function fileOpen(path){ - return app.open(new File(path)); -} - -function getLayerTypeWithName(layerName) { - var type = 'NA'; - var nameParts = layerName.split('_'); - var namePrefix = nameParts[0]; - namePrefix = 
namePrefix.toLowerCase(); - switch (namePrefix) { - case 'guide': - case 'tl': - case 'tr': - case 'bl': - case 'br': - type = 'GUIDE'; - break; - case 'fg': - type = 'FG'; - break; - case 'bg': - type = 'BG'; - break; - case 'obj': - default: - type = 'OBJ'; - break; - } - - return type; -} - -function getLayers() { - /** - * Get json representation of list of layers. - * Much faster this way than in DOM traversal (2s vs 45s on same file) - * - * Format of single layer info: - * id : number - * name: string - * group: boolean - true if layer is a group - * parents:array - list of ids of parent groups, useful for selection - * all children layers from parent layerSet (eg. group) - * type: string - type of layer guessed from its name - * visible:boolean - true if visible - **/ - if (documents.length == 0){ - return '[]'; - } - var ref1 = new ActionReference(); - ref1.putEnumerated(charIDToTypeID('Dcmn'), charIDToTypeID('Ordn'), - charIDToTypeID('Trgt')); - var count = executeActionGet(ref1).getInteger(charIDToTypeID('NmbL')); - - // get all layer names - var layers = []; - var layer = {}; - - var parents = []; - for (var i = count; i >= 1; i--) { - var layer = {}; - var ref2 = new ActionReference(); - ref2.putIndex(charIDToTypeID('Lyr '), i); - - var desc = executeActionGet(ref2); // Access layer index #i - var layerSection = typeIDToStringID(desc.getEnumerationValue( - stringIDToTypeID('layerSection'))); - - layer.id = desc.getInteger(stringIDToTypeID("layerID")); - layer.name = desc.getString(stringIDToTypeID("name")); - layer.color_code = typeIDToStringID(desc.getEnumerationValue(stringIDToTypeID('color'))); - layer.group = false; - layer.parents = parents.slice(); - layer.type = getLayerTypeWithName(layer.name); - layer.visible = desc.getBoolean(stringIDToTypeID("visible")); - //log(" name: " + layer.name + " groupId " + layer.groupId + - //" group " + layer.group); - if (layerSection == 'layerSectionStart') { // Group start and end - parents.push(layer.id); - layer.group = true; - } - if (layerSection == 'layerSectionEnd') { - parents.pop(); - continue; - } - layers.push(JSON.stringify(layer)); - } - try{ - var bck = activeDocument.backgroundLayer; - layer.id = bck.id; - layer.name = bck.name; - layer.group = false; - layer.parents = []; - layer.type = 'background'; - layer.visible = bck.visible; - layers.push(JSON.stringify(layer)); - }catch(e){ - // do nothing, no background layer - }; - //log("layers " + layers); - return '[' + layers + ']'; -} - -function setVisible(layer_id, visibility){ - /** - * Sets particular 'layer_id' to 'visibility' if true > show - **/ - var desc = new ActionDescriptor(); - var ref = new ActionReference(); - ref.putIdentifier(stringIDToTypeID("layer"), layer_id); - desc.putReference(stringIDToTypeID("null"), ref); - - executeAction(visibility?stringIDToTypeID("show"):stringIDToTypeID("hide"), - desc, DialogModes.NO); - -} - -function getHeadline(){ - /** - * Returns headline of current document with metadata - * - **/ - if (documents.length == 0){ - return ''; - } - var headline = app.activeDocument.info.headline; - - return headline; -} - -function isSaved(){ - return app.activeDocument.saved; -} - -function save(){ - /** Saves active document **/ - return app.activeDocument.save(); -} - -function saveAs(output_path, ext, as_copy){ - /** Exports scene to various formats - * - * Currently implemented: 'jpg', 'png', 'psd' - * - * output_path - escaped file path on local system - * ext - extension for export - * as_copy - create copy, do not overwrite - * - 
* */ - var saveName = output_path; - var saveOptions; - if (ext == 'jpg'){ - saveOptions = new JPEGSaveOptions(); - saveOptions.quality = 12; - saveOptions.embedColorProfile = true; - saveOptions.formatOptions = FormatOptions.PROGRESSIVE; - if(saveOptions.formatOptions == FormatOptions.PROGRESSIVE){ - saveOptions.scans = 5}; - saveOptions.matte = MatteType.NONE; - } - if (ext == 'png'){ - saveOptions = new PNGSaveOptions(); - saveOptions.interlaced = true; - saveOptions.transparency = true; - } - if (ext == 'psd'){ - saveOptions = null; - return app.activeDocument.saveAs(new File(saveName)); - } - if (ext == 'psb'){ - return savePSB(output_path); - } - - return app.activeDocument.saveAs(new File(saveName), saveOptions, as_copy); - -} - -function getActiveDocumentName(){ - /** - * Returns file name of active document - * */ - if (documents.length == 0){ - return null; - } - return app.activeDocument.name; -} - -function getActiveDocumentFullName(){ - /** - * Returns file name of active document with file path. - * activeDocument.fullName returns path in URI (eg /c/.. instead of c:/) - * */ - if (documents.length == 0){ - return null; - } - var f = new File(app.activeDocument.fullName); - var path = f.fsName; - f.close(); - return path; -} - -function imprint(payload){ - /** - * Sets headline content of current document with metadata. Stores - * information about assets created through AYON. - * Content accessible in PS through File > File Info - * - **/ - app.activeDocument.info.headline = payload; -} - -function getSelectedLayers(doc) { - /** - * Returns json representation of currently selected layers. - * Works in three steps - 1) creates new group with selected layers - * 2) traverses this group - * 3) deletes newly created group, not needed - * Bit weird, but Adobe.. 
- **/ - if (doc == null){ - doc = app.activeDocument; - } - - var selLayers = []; - _grp = groupSelectedLayers(doc); - - var group = doc.activeLayer; - var layers = group.layers; - - // // group is fake at this point - // var itself_name = ''; - // if (layers){ - // itself_name = layers[0].name; - // } - - - for (var i = 0; i < layers.length; i++) { - var layer = {}; - layer.id = layers[i].id; - layer.name = layers[i].name; - long_names =_get_parents_names(group.parent, layers[i].name); - var t = layers[i].kind; - if ((typeof t !== 'undefined') && - (layers[i].kind.toString() == 'LayerKind.NORMAL')){ - layer.group = false; - }else{ - layer.group = true; - } - layer.long_name = long_names; - - selLayers.push(layer); - } - - _undo(); - - return JSON.stringify(selLayers); -}; - -function selectLayers(selectedLayers){ - /** - * Selects layers from list of ids - **/ - selectedLayers = JSON.parse(selectedLayers); - var layers = new Array(); - var id54 = charIDToTypeID( "slct" ); - var desc12 = new ActionDescriptor(); - var id55 = charIDToTypeID( "null" ); - var ref9 = new ActionReference(); - - var existing_layers = JSON.parse(getLayers()); - var existing_ids = []; - for (var y = 0; y < existing_layers.length; y++){ - existing_ids.push(existing_layers[y]["id"]); - } - for (var i = 0; i < selectedLayers.length; i++) { - // a check to see if the id still exists - var id = selectedLayers[i]; - if(existing_ids.toString().indexOf(id)>=0){ - layers[i] = charIDToTypeID( "Lyr " ); - ref9.putIdentifier(layers[i], id); - } - } - desc12.putReference( id55, ref9 ); - var id58 = charIDToTypeID( "MkVs" ); - desc12.putBoolean( id58, false ); - executeAction( id54, desc12, DialogModes.NO ); -} - -function groupSelectedLayers(doc, name) { - /** - * Groups selected layers into new group. 
- * Returns json representation of Layer for server to consume - * - * Args: - * doc(activeDocument) - * name (str): new name of created group - **/ - if (doc == null){ - doc = app.activeDocument; - } - - var desc = new ActionDescriptor(); - var ref = new ActionReference(); - ref.putClass( stringIDToTypeID('layerSection') ); - desc.putReference( charIDToTypeID('null'), ref ); - var lref = new ActionReference(); - lref.putEnumerated( charIDToTypeID('Lyr '), charIDToTypeID('Ordn'), - charIDToTypeID('Trgt') ); - desc.putReference( charIDToTypeID('From'), lref); - executeAction( charIDToTypeID('Mk '), desc, DialogModes.NO ); - - var group = doc.activeLayer; - if (name){ - // Add special character to highlight group that will be published - group.name = name; - } - var layer = {}; - layer.id = group.id; - layer.name = name; // keep name clean - layer.group = true; - - layer.long_name = _get_parents_names(group, name); - - return JSON.stringify(layer); -}; - -function importSmartObject(path, name, link){ - /** - * Creates new layer with an image from 'path' - * - * path: absolute path to loaded file - * name: sets name of newly created layer - * - **/ - var desc1 = new ActionDescriptor(); - desc1.putPath( app.charIDToTypeID("null"), new File(path) ); - link = link || false; - if (link) { - desc1.putBoolean( app.charIDToTypeID('Lnkd'), true ); - } - - desc1.putEnumerated(app.charIDToTypeID("FTcs"), app.charIDToTypeID("QCSt"), - app.charIDToTypeID("Qcsa")); - var desc2 = new ActionDescriptor(); - desc2.putUnitDouble(app.charIDToTypeID("Hrzn"), - app.charIDToTypeID("#Pxl"), 0.0); - desc2.putUnitDouble(app.charIDToTypeID("Vrtc"), - app.charIDToTypeID("#Pxl"), 0.0); - - desc1.putObject(charIDToTypeID("Ofst"), charIDToTypeID("Ofst"), desc2); - executeAction(charIDToTypeID("Plc " ), desc1, DialogModes.NO); - - var docRef = app.activeDocument; - var currentActivelayer = app.activeDocument.activeLayer; - if (name){ - currentActivelayer.name = name; - } - var layer = {} - layer.id = currentActivelayer.id; - layer.name = currentActivelayer.name; - return JSON.stringify(layer); -} - -function replaceSmartObjects(layer_id, path, name){ - /** - * Updates content of 'layer' with an image from 'path' - * - **/ - - var desc = new ActionDescriptor(); - var ref = new ActionReference(); - ref.putIdentifier(stringIDToTypeID("layer"), layer_id); - desc.putReference(stringIDToTypeID("null"), ref); - - desc.putPath(charIDToTypeID('null'), new File(path) ); - desc.putInteger(charIDToTypeID("PgNm"), 1); - - executeAction(stringIDToTypeID('placedLayerReplaceContents'), - desc, DialogModes.NO ); - var currentActivelayer = app.activeDocument.activeLayer; - if (name){ - currentActivelayer.name = name; - } -} - -function createGroup(name){ - /** - * Creates new group with a 'name' - * Because of asynchronous nature, only group.id is available - **/ - group = app.activeDocument.layerSets.add(); - // Add special character to highlight group that will be published - group.name = name; - - return group.id; // only id available at this time :| -} - -function deleteLayer(layer_id){ - /*** - * Deletes layer by its layer_id - * - * layer_id (int) - **/ - var d = new ActionDescriptor(); - var r = new ActionReference(); - - r.putIdentifier(stringIDToTypeID("layer"), layer_id); - d.putReference(stringIDToTypeID("null"), r); - executeAction(stringIDToTypeID("delete"), d, DialogModes.NO); -} - -function _undo() { - executeAction(charIDToTypeID("undo"), undefined, DialogModes.NO); -}; - -function savePSB(output_path){ - /*** - * Saves file 
as .psb to 'output_path' - * - * output_path (str) - **/ - var desc1 = new ActionDescriptor(); - var desc2 = new ActionDescriptor(); - desc2.putBoolean( stringIDToTypeID('maximizeCompatibility'), true ); - desc1.putObject( charIDToTypeID('As '), charIDToTypeID('Pht8'), desc2 ); - desc1.putPath( charIDToTypeID('In '), new File(output_path) ); - desc1.putBoolean( charIDToTypeID('LwCs'), true ); - executeAction( charIDToTypeID('save'), desc1, DialogModes.NO ); -} - -function close(){ - executeAction(stringIDToTypeID("quit"), undefined, DialogModes.NO ); -} - -function renameLayer(layer_id, new_name){ - /*** - * Renames 'layer_id' to 'new_name' - * - * Via Action (fast) - * - * Args: - * layer_id(int) - * new_name(str) - **/ - doc = app.activeDocument; - selectLayers('['+layer_id+']'); - - doc.activeLayer.name = new_name; -} - -function _get_parents_names(layer, itself_name){ - var long_names = [itself_name]; - while (layer.parent){ - if (layer.typename != "LayerSet"){ - break; - } - long_names.push(layer.name); - layer = layer.parent; - } - return long_names; -} - -// triggers when panel is opened, good for debugging -//log(getActiveDocumentName()); -// log.show(); -// var a = app.activeDocument.activeLayer; -// log(a); -//getSelectedLayers(); -// importSmartObject("c:/projects/test.jpg", "a aaNewLayer", true); -// log("dpc"); -// replaceSmartObjects(153, "▼Jungle_imageTest_001", "c:/projects/test_project_test_asset_TestTask_v001.png"); \ No newline at end of file diff --git a/server_addon/photoshop/client/ayon_photoshop/api/extension/host/json.js b/server_addon/photoshop/client/ayon_photoshop/api/extension/host/json.js deleted file mode 100644 index 397349bbfd..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/api/extension/host/json.js +++ /dev/null @@ -1,530 +0,0 @@ -// json2.js -// 2017-06-12 -// Public Domain. -// NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. - -// USE YOUR OWN COPY. IT IS EXTREMELY UNWISE TO LOAD CODE FROM SERVERS YOU DO -// NOT CONTROL. - -// This file creates a global JSON object containing two methods: stringify -// and parse. This file provides the ES5 JSON capability to ES3 systems. -// If a project might run on IE8 or earlier, then this file should be included. -// This file does nothing on ES5 systems. - -// JSON.stringify(value, replacer, space) -// value any JavaScript value, usually an object or array. -// replacer an optional parameter that determines how object -// values are stringified for objects. It can be a -// function or an array of strings. -// space an optional parameter that specifies the indentation -// of nested structures. If it is omitted, the text will -// be packed without extra whitespace. If it is a number, -// it will specify the number of spaces to indent at each -// level. If it is a string (such as "\t" or " "), -// it contains the characters used to indent at each level. -// This method produces a JSON text from a JavaScript value. -// When an object value is found, if the object contains a toJSON -// method, its toJSON method will be called and the result will be -// stringified. A toJSON method does not serialize: it returns the -// value represented by the name/value pair that should be serialized, -// or undefined if nothing should be serialized. The toJSON method -// will be passed the key associated with the value, and this will be -// bound to the value. - -// For example, this would serialize Dates as ISO strings.
- -// Date.prototype.toJSON = function (key) { -// function f(n) { -// // Format integers to have at least two digits. -// return (n < 10) -// ? "0" + n -// : n; -// } -// return this.getUTCFullYear() + "-" + -// f(this.getUTCMonth() + 1) + "-" + -// f(this.getUTCDate()) + "T" + -// f(this.getUTCHours()) + ":" + -// f(this.getUTCMinutes()) + ":" + -// f(this.getUTCSeconds()) + "Z"; -// }; - -// You can provide an optional replacer method. It will be passed the -// key and value of each member, with this bound to the containing -// object. The value that is returned from your method will be -// serialized. If your method returns undefined, then the member will -// be excluded from the serialization. - -// If the replacer parameter is an array of strings, then it will be -// used to select the members to be serialized. It filters the results -// such that only members with keys listed in the replacer array are -// stringified. - -// Values that do not have JSON representations, such as undefined or -// functions, will not be serialized. Such values in objects will be -// dropped; in arrays they will be replaced with null. You can use -// a replacer function to replace those with JSON values. - -// JSON.stringify(undefined) returns undefined. - -// The optional space parameter produces a stringification of the -// value that is filled with line breaks and indentation to make it -// easier to read. - -// If the space parameter is a non-empty string, then that string will -// be used for indentation. If the space parameter is a number, then -// the indentation will be that many spaces. - -// Example: - -// text = JSON.stringify(["e", {pluribus: "unum"}]); -// // text is '["e",{"pluribus":"unum"}]' - -// text = JSON.stringify(["e", {pluribus: "unum"}], null, "\t"); -// // text is '[\n\t"e",\n\t{\n\t\t"pluribus": "unum"\n\t}\n]' - -// text = JSON.stringify([new Date()], function (key, value) { -// return this[key] instanceof Date -// ? "Date(" + this[key] + ")" -// : value; -// }); -// // text is '["Date(---current time---)"]' - -// JSON.parse(text, reviver) -// This method parses a JSON text to produce an object or array. -// It can throw a SyntaxError exception. - -// The optional reviver parameter is a function that can filter and -// transform the results. It receives each of the keys and values, -// and its return value is used instead of the original value. -// If it returns what it received, then the structure is not modified. -// If it returns undefined then the member is deleted. - -// Example: - -// // Parse the text. Values that look like ISO date strings will -// // be converted to Date objects. - -// myData = JSON.parse(text, function (key, value) { -// var a; -// if (typeof value === "string") { -// a = -// /^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}(?:\.\d*)?)Z$/.exec(value); -// if (a) { -// return new Date(Date.UTC( -// +a[1], +a[2] - 1, +a[3], +a[4], +a[5], +a[6] -// )); -// } -// return value; -// } -// }); - -// myData = JSON.parse( -// "[\"Date(09/09/2001)\"]", -// function (key, value) { -// var d; -// if ( -// typeof value === "string" -// && value.slice(0, 5) === "Date(" -// && value.slice(-1) === ")" -// ) { -// d = new Date(value.slice(5, -1)); -// if (d) { -// return d; -// } -// } -// return value; -// } -// ); - -// This is a reference implementation. You are free to copy, modify, or -// redistribute. 
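// A small round-trip sketch (values are illustrative only): the ExtendScript
// engine is ES3-based and has no native JSON object, so panel helpers such as
// getLayers in photoshop.jsx above rely on this file for JSON.stringify and
// JSON.parse.

//     var layer = {id: 12, name: "imageBG", visible: true};
//     var text = JSON.stringify(layer);
//     // text is '{"id":12,"name":"imageBG","visible":true}'
//     var back = JSON.parse(text);
//     // back.name is "imageBG"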
- -/*jslint - eval, for, this -*/ - -/*property - JSON, apply, call, charCodeAt, getUTCDate, getUTCFullYear, getUTCHours, - getUTCMinutes, getUTCMonth, getUTCSeconds, hasOwnProperty, join, - lastIndex, length, parse, prototype, push, replace, slice, stringify, - test, toJSON, toString, valueOf -*/ - - -// Create a JSON object only if one does not already exist. We create the -// methods in a closure to avoid creating global variables. - -if (typeof JSON !== "object") { - JSON = {}; -} - -(function () { - "use strict"; - - var rx_one = /^[\],:{}\s]*$/; - var rx_two = /\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g; - var rx_three = /"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g; - var rx_four = /(?:^|:|,)(?:\s*\[)+/g; - var rx_escapable = /[\\"\u0000-\u001f\u007f-\u009f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g; - var rx_dangerous = /[\u0000\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g; - - function f(n) { - // Format integers to have at least two digits. - return (n < 10) - ? "0" + n - : n; - } - - function this_value() { - return this.valueOf(); - } - - if (typeof Date.prototype.toJSON !== "function") { - - Date.prototype.toJSON = function () { - - return isFinite(this.valueOf()) - ? ( - this.getUTCFullYear() - + "-" - + f(this.getUTCMonth() + 1) - + "-" - + f(this.getUTCDate()) - + "T" - + f(this.getUTCHours()) - + ":" - + f(this.getUTCMinutes()) - + ":" - + f(this.getUTCSeconds()) - + "Z" - ) - : null; - }; - - Boolean.prototype.toJSON = this_value; - Number.prototype.toJSON = this_value; - String.prototype.toJSON = this_value; - } - - var gap; - var indent; - var meta; - var rep; - - - function quote(string) { - -// If the string contains no control characters, no quote characters, and no -// backslash characters, then we can safely slap some quotes around it. -// Otherwise we must also replace the offending characters with safe escape -// sequences. - - rx_escapable.lastIndex = 0; - return rx_escapable.test(string) - ? "\"" + string.replace(rx_escapable, function (a) { - var c = meta[a]; - return typeof c === "string" - ? c - : "\\u" + ("0000" + a.charCodeAt(0).toString(16)).slice(-4); - }) + "\"" - : "\"" + string + "\""; - } - - - function str(key, holder) { - -// Produce a string from holder[key]. - - var i; // The loop counter. - var k; // The member key. - var v; // The member value. - var length; - var mind = gap; - var partial; - var value = holder[key]; - -// If the value has a toJSON method, call it to obtain a replacement value. - - if ( - value - && typeof value === "object" - && typeof value.toJSON === "function" - ) { - value = value.toJSON(key); - } - -// If we were called with a replacer function, then call the replacer to -// obtain a replacement value. - - if (typeof rep === "function") { - value = rep.call(holder, key, value); - } - -// What happens next depends on the value's type. - - switch (typeof value) { - case "string": - return quote(value); - - case "number": - -// JSON numbers must be finite. Encode non-finite numbers as null. - - return (isFinite(value)) - ? String(value) - : "null"; - - case "boolean": - case "null": - -// If the value is a boolean or null, convert it to a string. Note: -// typeof null does not produce "null". The case is included here in -// the remote chance that this gets fixed someday. - - return String(value); - -// If the type is "object", we might be dealing with an object or an array or -// null. 
- - case "object": - -// Due to a specification blunder in ECMAScript, typeof null is "object", -// so watch out for that case. - - if (!value) { - return "null"; - } - -// Make an array to hold the partial results of stringifying this object value. - - gap += indent; - partial = []; - -// Is the value an array? - - if (Object.prototype.toString.apply(value) === "[object Array]") { - -// The value is an array. Stringify every element. Use null as a placeholder -// for non-JSON values. - - length = value.length; - for (i = 0; i < length; i += 1) { - partial[i] = str(i, value) || "null"; - } - -// Join all of the elements together, separated with commas, and wrap them in -// brackets. - - v = partial.length === 0 - ? "[]" - : gap - ? ( - "[\n" - + gap - + partial.join(",\n" + gap) - + "\n" - + mind - + "]" - ) - : "[" + partial.join(",") + "]"; - gap = mind; - return v; - } - -// If the replacer is an array, use it to select the members to be stringified. - - if (rep && typeof rep === "object") { - length = rep.length; - for (i = 0; i < length; i += 1) { - if (typeof rep[i] === "string") { - k = rep[i]; - v = str(k, value); - if (v) { - partial.push(quote(k) + ( - (gap) - ? ": " - : ":" - ) + v); - } - } - } - } else { - -// Otherwise, iterate through all of the keys in the object. - - for (k in value) { - if (Object.prototype.hasOwnProperty.call(value, k)) { - v = str(k, value); - if (v) { - partial.push(quote(k) + ( - (gap) - ? ": " - : ":" - ) + v); - } - } - } - } - -// Join all of the member texts together, separated with commas, -// and wrap them in braces. - - v = partial.length === 0 - ? "{}" - : gap - ? "{\n" + gap + partial.join(",\n" + gap) + "\n" + mind + "}" - : "{" + partial.join(",") + "}"; - gap = mind; - return v; - } - } - -// If the JSON object does not yet have a stringify method, give it one. - - if (typeof JSON.stringify !== "function") { - meta = { // table of character substitutions - "\b": "\\b", - "\t": "\\t", - "\n": "\\n", - "\f": "\\f", - "\r": "\\r", - "\"": "\\\"", - "\\": "\\\\" - }; - JSON.stringify = function (value, replacer, space) { - -// The stringify method takes a value and an optional replacer, and an optional -// space parameter, and returns a JSON text. The replacer can be a function -// that can replace values, or an array of strings that will select the keys. -// A default replacer method can be provided. Use of the space parameter can -// produce text that is more easily readable. - - var i; - gap = ""; - indent = ""; - -// If the space parameter is a number, make an indent string containing that -// many spaces. - - if (typeof space === "number") { - for (i = 0; i < space; i += 1) { - indent += " "; - } - -// If the space parameter is a string, it will be used as the indent string. - - } else if (typeof space === "string") { - indent = space; - } - -// If there is a replacer, it must be a function or an array. -// Otherwise, throw an error. - - rep = replacer; - if (replacer && typeof replacer !== "function" && ( - typeof replacer !== "object" - || typeof replacer.length !== "number" - )) { - throw new Error("JSON.stringify"); - } - -// Make a fake root object containing our value under the key of "". -// Return the result of stringifying the value. - - return str("", {"": value}); - }; - } - - -// If the JSON object does not yet have a parse method, give it one. 
- - if (typeof JSON.parse !== "function") { - JSON.parse = function (text, reviver) { - -// The parse method takes a text and an optional reviver function, and returns -// a JavaScript value if the text is a valid JSON text. - - var j; - - function walk(holder, key) { - -// The walk method is used to recursively walk the resulting structure so -// that modifications can be made. - - var k; - var v; - var value = holder[key]; - if (value && typeof value === "object") { - for (k in value) { - if (Object.prototype.hasOwnProperty.call(value, k)) { - v = walk(value, k); - if (v !== undefined) { - value[k] = v; - } else { - delete value[k]; - } - } - } - } - return reviver.call(holder, key, value); - } - - -// Parsing happens in four stages. In the first stage, we replace certain -// Unicode characters with escape sequences. JavaScript handles many characters -// incorrectly, either silently deleting them, or treating them as line endings. - - text = String(text); - rx_dangerous.lastIndex = 0; - if (rx_dangerous.test(text)) { - text = text.replace(rx_dangerous, function (a) { - return ( - "\\u" - + ("0000" + a.charCodeAt(0).toString(16)).slice(-4) - ); - }); - } - -// In the second stage, we run the text against regular expressions that look -// for non-JSON patterns. We are especially concerned with "()" and "new" -// because they can cause invocation, and "=" because it can cause mutation. -// But just to be safe, we want to reject all unexpected forms. - -// We split the second stage into 4 regexp operations in order to work around -// crippling inefficiencies in IE's and Safari's regexp engines. First we -// replace the JSON backslash pairs with "@" (a non-JSON character). Second, we -// replace all simple value tokens with "]" characters. Third, we delete all -// open brackets that follow a colon or comma or that begin the text. Finally, -// we look to see that the remaining characters are only whitespace or "]" or -// "," or ":" or "{" or "}". If that is so, then the text is safe for eval. - - if ( - rx_one.test( - text - .replace(rx_two, "@") - .replace(rx_three, "]") - .replace(rx_four, "") - ) - ) { - -// In the third stage we use the eval function to compile the text into a -// JavaScript structure. The "{" operator is subject to a syntactic ambiguity -// in JavaScript: it can begin a block or an object literal. We wrap the text -// in parens to eliminate the ambiguity. - - j = eval("(" + text + ")"); - -// In the optional fourth stage, we recursively walk the new structure, passing -// each name/value pair to a reviver function for possible transformation. - - return (typeof reviver === "function") - ? walk({"": j}, "") - : j; - } - -// If the text is not JSON parseable, then a SyntaxError is thrown. 
- - throw new SyntaxError("JSON.parse"); - }; - } -}()); \ No newline at end of file diff --git a/server_addon/photoshop/client/ayon_photoshop/api/extension/icons/ayon_logo.png b/server_addon/photoshop/client/ayon_photoshop/api/extension/icons/ayon_logo.png deleted file mode 100644 index 3a96f8e2b4..0000000000 Binary files a/server_addon/photoshop/client/ayon_photoshop/api/extension/icons/ayon_logo.png and /dev/null differ diff --git a/server_addon/photoshop/client/ayon_photoshop/api/extension/index.html b/server_addon/photoshop/client/ayon_photoshop/api/extension/index.html deleted file mode 100644 index 9d7363e62d..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/api/extension/index.html +++ /dev/null @@ -1,95 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/server_addon/photoshop/client/ayon_photoshop/api/launch_logic.py b/server_addon/photoshop/client/ayon_photoshop/api/launch_logic.py deleted file mode 100644 index 04401a0972..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/api/launch_logic.py +++ /dev/null @@ -1,406 +0,0 @@ -import os -import subprocess -import collections -import asyncio - -from wsrpc_aiohttp import ( - WebSocketRoute, - WebSocketAsync -) - -import ayon_api -from qtpy import QtCore - -from ayon_core.lib import Logger -from ayon_core.pipeline import ( - registered_host, - Anatomy, -) -from ayon_core.pipeline.workfile import ( - get_workfile_template_key_from_context, - get_last_workfile, -) -from ayon_core.pipeline.template_data import get_template_data_with_names -from ayon_core.tools.utils import host_tools -from ayon_core.pipeline.context_tools import change_current_context - -from .webserver import WebServerTool -from .ws_stub import PhotoshopServerStub - -log = Logger.get_logger(__name__) - - -class ConnectionNotEstablishedYet(Exception): - pass - - -class MainThreadItem: - """Structure to store information about callback in main thread. - - Item should be used to execute callback in main thread which may be needed - for execution of Qt objects. - - Item stores callback (callable variable), arguments and keyword arguments - for the callback. Item holds information about its process. - """ - not_set = object() - - def __init__(self, callback, *args, **kwargs): - self._done = False - self._exception = self.not_set - self._result = self.not_set - self._callback = callback - self._args = args - self._kwargs = kwargs - - @property - def done(self): - return self._done - - @property - def exception(self): - return self._exception - - @property - def result(self): - return self._result - - def execute(self): - """Execute callback and store its result. - - Method must be called from main thread. Item is marked as `done` - when callback execution finished. Stores output of callback, or exception - information when callback raises one. - """ - log.debug("Executing process in main thread") - if self.done: - log.warning("- item is already processed") - return - - log.info("Running callback: {}".format(str(self._callback))) - try: - result = self._callback(*self._args, **self._kwargs) - self._result = result - - except Exception as exc: - self._exception = exc - - finally: - self._done = True - - -def stub(): - """ - Convenience function to get server RPC stub to call methods directed - for host (Photoshop). - It expects an already created connection, started from the client. 
Currently created when panel is opened (PS: Window>Extensions>Avalon) - :return: (PhotoshopServerStub) where functions could be called from - """ - ps_stub = PhotoshopServerStub() - if not ps_stub.client: - raise ConnectionNotEstablishedYet("Connection is not created yet") - - return ps_stub - - -def show_tool_by_name(tool_name): - kwargs = {} - if tool_name == "loader": - kwargs["use_context"] = True - - host_tools.show_tool_by_name(tool_name, **kwargs) - - -class ProcessLauncher(QtCore.QObject): - route_name = "Photoshop" - _main_thread_callbacks = collections.deque() - - def __init__(self, subprocess_args): - self._subprocess_args = subprocess_args - self._log = None - - super(ProcessLauncher, self).__init__() - - # Keep track if launcher was already started - self._started = False - - self._process = None - self._websocket_server = None - - start_process_timer = QtCore.QTimer() - start_process_timer.setInterval(100) - - loop_timer = QtCore.QTimer() - loop_timer.setInterval(200) - - start_process_timer.timeout.connect(self._on_start_process_timer) - loop_timer.timeout.connect(self._on_loop_timer) - - self._start_process_timer = start_process_timer - self._loop_timer = loop_timer - - @property - def log(self): - if self._log is None: - self._log = Logger.get_logger( - "{}-launcher".format(self.route_name) - ) - return self._log - - @property - def websocket_server_is_running(self): - if self._websocket_server is not None: - return self._websocket_server.is_running - return False - - @property - def is_process_running(self): - if self._process is not None: - return self._process.poll() is None - return False - - @property - def is_host_connected(self): - """Returns True if connected, False if app is not running at all, None if running but not yet connected.""" - if not self.is_process_running: - return False - - try: - _stub = stub() - if _stub: - return True - except Exception: - pass - - return None - - @classmethod - def execute_in_main_thread(cls, callback, *args, **kwargs): - item = MainThreadItem(callback, *args, **kwargs) - cls._main_thread_callbacks.append(item) - return item - - def start(self): - if self._started: - return - self.log.info("Started launch logic of Photoshop") - self._started = True - self._start_process_timer.start() - - def exit(self): - """ Exit whole application. """ - if self._start_process_timer.isActive(): - self._start_process_timer.stop() - if self._loop_timer.isActive(): - self._loop_timer.stop() - - if self._websocket_server is not None: - self._websocket_server.stop() - - if self._process: - self._process.kill() - self._process.wait() - - QtCore.QCoreApplication.exit() - - def _on_loop_timer(self): - # TODO find better way and catch errors - # Run only callbacks that are in queue at the moment - cls = self.__class__ - for _ in range(len(cls._main_thread_callbacks)): - if cls._main_thread_callbacks: - item = cls._main_thread_callbacks.popleft() - item.execute() - - if not self.is_process_running: - self.log.info("Host process is not running. Closing") - self.exit() - - elif not self.websocket_server_is_running: - self.log.info("Websocket server is not running. 
Closing") - self.exit() - - def _on_start_process_timer(self): - # TODO add try except validations for each part in this method - # Start server as first thing - if self._websocket_server is None: - self._init_server() - return - - # TODO add waiting time - # Wait for webserver - if not self.websocket_server_is_running: - return - - # Start application process - if self._process is None: - self._start_process() - self.log.info("Waiting for host to connect") - return - - # TODO add waiting time - # Wait until host is connected - if self.is_host_connected: - self._start_process_timer.stop() - self._loop_timer.start() - elif ( - not self.is_process_running - or not self.websocket_server_is_running - ): - self.exit() - - def _init_server(self): - if self._websocket_server is not None: - return - - self.log.debug( - "Initialization of websocket server for host communication" - ) - - self._websocket_server = websocket_server = WebServerTool() - if websocket_server.port_occupied( - websocket_server.host_name, - websocket_server.port - ): - self.log.info( - "Server already running, sending actual context and exit." - ) - asyncio.run(websocket_server.send_context_change(self.route_name)) - self.exit() - return - - # Add Websocket route - websocket_server.add_route("*", "/ws/", WebSocketAsync) - # Add after effects route to websocket handler - - print("Adding {} route".format(self.route_name)) - WebSocketAsync.add_route( - self.route_name, PhotoshopRoute - ) - self.log.info("Starting websocket server for host communication") - websocket_server.start_server() - - def _start_process(self): - if self._process is not None: - return - self.log.info("Starting host process") - try: - self._process = subprocess.Popen( - self._subprocess_args, - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL - ) - except Exception: - self.log.info("exce", exc_info=True) - self.exit() - - -class PhotoshopRoute(WebSocketRoute): - """ - One route, mimicking external application (like Harmony, etc). - All functions could be called from client. - 'do_notify' function calls function on the client - mimicking - notification after long running job on the server or similar - """ - instance = None - - def init(self, **kwargs): - # Python __init__ must be return "self". - # This method might return anything. - log.debug("someone called Photoshop route") - self.instance = self - return kwargs - - # server functions - async def ping(self): - log.debug("someone called Photoshop route ping") - - # This method calls function on the client side - # client functions - async def set_context(self, project, folder, task): - """ - Sets 'project' and 'folder' to envs, eg. setting context. - - Opens last workile from that context if exists. 
- - Args: - project (str) - folder (str) - task (str) - """ - log.info("Setting context change") - log.info(f"project {project} folder {folder} task {task}") - - folder_entity = ayon_api.get_folder_by_path(project, folder) - task_entity = ayon_api.get_task_by_name( - project, folder_entity["id"], task - ) - change_current_context(folder_entity, task_entity) - - last_workfile_path = self._get_last_workfile_path(project, - folder, - task) - if last_workfile_path and os.path.exists(last_workfile_path): - ProcessLauncher.execute_in_main_thread( - lambda: stub().open(last_workfile_path)) - - - async def read(self): - log.debug("photoshop.read client calls server, server calls " - "photoshop client") - return await self.socket.call('photoshop.read') - - # panel routes for tools - async def workfiles_route(self): - self._tool_route("workfiles") - - async def loader_route(self): - self._tool_route("loader") - - async def publish_route(self): - self._tool_route("publisher") - - async def sceneinventory_route(self): - self._tool_route("sceneinventory") - - async def experimental_tools_route(self): - self._tool_route("experimental_tools") - - def _tool_route(self, _tool_name): - """The address accessed when clicking on the buttons.""" - - ProcessLauncher.execute_in_main_thread(show_tool_by_name, _tool_name) - - # Required return statement. - return "nothing" - - def _get_last_workfile_path(self, project_name, folder_path, task_name): - """Returns last workfile path if it exists""" - host = registered_host() - host_name = "photoshop" - template_key = get_workfile_template_key_from_context( - project_name, - folder_path, - task_name, - host_name, - ) - anatomy = Anatomy(project_name) - - data = get_template_data_with_names( - project_name, folder_path, task_name, host_name - ) - data["root"] = anatomy.roots - - work_template = anatomy.get_template_item("work", template_key) - - # Define saving file extension - extensions = host.get_workfile_extensions() - - work_root = work_template["directory"].format_strict(data) - file_template = work_template["file"].template - last_workfile_path = get_last_workfile( - work_root, file_template, data, extensions, True - ) - - return last_workfile_path diff --git a/server_addon/photoshop/client/ayon_photoshop/api/launch_script.py b/server_addon/photoshop/client/ayon_photoshop/api/launch_script.py deleted file mode 100644 index de7fc8ba48..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/api/launch_script.py +++ /dev/null @@ -1,93 +0,0 @@ -"""Script wraps launch mechanism of Photoshop implementations. - -Arguments passed to the script are passed to launch function in host -implementation. In all cases requires host app executable and may contain -workfile or others. 
-""" - -import os -import sys - -from ayon_photoshop.api.lib import main as host_main - -# Get current file to locate start point of sys.argv -CURRENT_FILE = os.path.abspath(__file__) - - -def show_error_messagebox(title, message, detail_message=None): - """Function will show message and process ends after closing it.""" - from qtpy import QtWidgets, QtCore - from ayon_core import style - - app = QtWidgets.QApplication([]) - app.setStyleSheet(style.load_stylesheet()) - - msgbox = QtWidgets.QMessageBox() - msgbox.setWindowTitle(title) - msgbox.setText(message) - - if detail_message: - msgbox.setDetailedText(detail_message) - - msgbox.setWindowModality(QtCore.Qt.ApplicationModal) - msgbox.show() - - sys.exit(app.exec_()) - - -def on_invalid_args(script_not_found): - """Show to user message box saying that something went wrong. - - Tell user that arguments to launch implementation are invalid with - arguments details. - - Args: - script_not_found (bool): Use different message based on this value. - """ - - title = "Invalid arguments" - joined_args = ", ".join("\"{}\"".format(arg) for arg in sys.argv) - if script_not_found: - submsg = "Where couldn't find script path:\n\"{}\"" - else: - submsg = "Expected Host executable after script path:\n\"{}\"" - - message = "BUG: Got invalid arguments so can't launch Host application." - detail_message = "Process was launched with arguments:\n{}\n\n{}".format( - joined_args, - submsg.format(CURRENT_FILE) - ) - - show_error_messagebox(title, message, detail_message) - - -def main(argv): - # Modify current file path to find match in sys.argv which may be different - # on windows (different letter cases and slashes). - modified_current_file = CURRENT_FILE.replace("\\", "/").lower() - - # Create a copy of sys argv - sys_args = list(argv) - after_script_idx = None - # Find script path in sys.argv to know index of argv where host - # executable should be. 
- for idx, item in enumerate(sys_args): - if item.replace("\\", "/").lower() == modified_current_file: - after_script_idx = idx + 1 - break - - # Validate that there is at least one argument after script path - launch_args = None - if after_script_idx is not None: - launch_args = sys_args[after_script_idx:] - - if launch_args: - # Launch host implementation - host_main(*launch_args) - else: - # Show message box - on_invalid_args(after_script_idx is None) - - -if __name__ == "__main__": - main(sys.argv) diff --git a/server_addon/photoshop/client/ayon_photoshop/api/lib.py b/server_addon/photoshop/client/ayon_photoshop/api/lib.py deleted file mode 100644 index fd003919ce..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/api/lib.py +++ /dev/null @@ -1,84 +0,0 @@ -import os -import sys -import contextlib -import traceback - -from ayon_core.lib import env_value_to_bool, Logger, is_in_tests -from ayon_core.addon import AddonsManager -from ayon_core.pipeline import install_host -from ayon_core.tools.utils import host_tools -from ayon_core.tools.utils import get_ayon_qt_app - -from .launch_logic import ProcessLauncher, stub - -log = Logger.get_logger(__name__) - - -def safe_excepthook(*args): - traceback.print_exception(*args) - - -def main(*subprocess_args): - from ayon_photoshop.api import PhotoshopHost - - host = PhotoshopHost() - install_host(host) - - sys.excepthook = safe_excepthook - - # coloring in StdOutBroker - os.environ["AYON_LOG_NO_COLORS"] = "0" - app = get_ayon_qt_app() - app.setQuitOnLastWindowClosed(False) - - launcher = ProcessLauncher(subprocess_args) - launcher.start() - - if env_value_to_bool("HEADLESS_PUBLISH"): - manager = AddonsManager() - webpublisher_addon = manager["webpublisher"] - launcher.execute_in_main_thread( - webpublisher_addon.headless_publish, - log, - "ClosePS", - is_in_tests() - ) - elif env_value_to_bool("AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH", - default=True): - - launcher.execute_in_main_thread( - host_tools.show_workfiles, - save=env_value_to_bool("WORKFILES_SAVE_AS") - ) - - sys.exit(app.exec_()) - - -@contextlib.contextmanager -def maintained_selection(): - """Maintain selection during context.""" - selection = stub().get_selected_layers() - try: - yield selection - finally: - stub().select_layers(selection) - - -@contextlib.contextmanager -def maintained_visibility(layers=None): - """Maintain visibility during context. 
- - Args: - layers (list) of PSItem (used for caching) - """ - visibility = {} - if not layers: - layers = stub().get_layers() - for layer in layers: - visibility[layer.id] = layer.visible - try: - yield - finally: - for layer in layers: - stub().set_visible(layer.id, visibility[layer.id]) - pass diff --git a/server_addon/photoshop/client/ayon_photoshop/api/panel.png b/server_addon/photoshop/client/ayon_photoshop/api/panel.png deleted file mode 100644 index be5db3b8df..0000000000 Binary files a/server_addon/photoshop/client/ayon_photoshop/api/panel.png and /dev/null differ diff --git a/server_addon/photoshop/client/ayon_photoshop/api/panel_failure.png b/server_addon/photoshop/client/ayon_photoshop/api/panel_failure.png deleted file mode 100644 index 6e52a77d22..0000000000 Binary files a/server_addon/photoshop/client/ayon_photoshop/api/panel_failure.png and /dev/null differ diff --git a/server_addon/photoshop/client/ayon_photoshop/api/pipeline.py b/server_addon/photoshop/client/ayon_photoshop/api/pipeline.py deleted file mode 100644 index d399bb25e2..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/api/pipeline.py +++ /dev/null @@ -1,285 +0,0 @@ -import os - -from qtpy import QtWidgets - -import pyblish.api - -from ayon_core.lib import register_event_callback, Logger -from ayon_core.pipeline import ( - register_loader_plugin_path, - register_creator_plugin_path, - AVALON_CONTAINER_ID, - AYON_INSTANCE_ID, - AVALON_INSTANCE_ID, -) - -from ayon_core.host import ( - HostBase, - IWorkfileHost, - ILoadHost, - IPublishHost -) - -from ayon_core.pipeline.load import any_outdated_containers -from ayon_core.tools.utils import get_ayon_qt_app -from ayon_photoshop import PHOTOSHOP_ADDON_ROOT - -from . import lib - -log = Logger.get_logger(__name__) - -PLUGINS_DIR = os.path.join(PHOTOSHOP_ADDON_ROOT, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") - - -class PhotoshopHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): - name = "photoshop" - - def install(self): - """Install Photoshop-specific functionality needed for integration. - - This function is called automatically on calling - `api.install(photoshop)`. 
- """ - log.info("Installing OpenPype Photoshop...") - pyblish.api.register_host("photoshop") - - pyblish.api.register_plugin_path(PUBLISH_PATH) - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) - - register_event_callback("application.launched", on_application_launch) - - def current_file(self): - try: - full_name = lib.stub().get_active_document_full_name() - if full_name and full_name != "null": - return os.path.normpath(full_name).replace("\\", "/") - except Exception: - pass - - return None - - def work_root(self, session): - return os.path.normpath(session["AYON_WORKDIR"]).replace("\\", "/") - - def open_workfile(self, filepath): - lib.stub().open(filepath) - - return True - - def save_workfile(self, filepath=None): - _, ext = os.path.splitext(filepath) - lib.stub().saveAs(filepath, ext[1:], True) - - def get_current_workfile(self): - return self.current_file() - - def workfile_has_unsaved_changes(self): - if self.current_file(): - return not lib.stub().is_saved() - - return False - - def get_workfile_extensions(self): - return [".psd", ".psb"] - - def get_containers(self): - return ls() - - def get_context_data(self): - """Get stored values for context (validation enable/disable etc)""" - meta = _get_stub().get_layers_metadata() - for item in meta: - if item.get("id") == "publish_context": - item.pop("id") - return item - - return {} - - def update_context_data(self, data, changes): - """Store value needed for context""" - item = data - item["id"] = "publish_context" - _get_stub().imprint(item["id"], item) - - def list_instances(self): - """List all created instances to publish from current workfile. - - Pulls from File > File Info - - Returns: - (list) of dictionaries matching instances format - """ - stub = _get_stub() - - if not stub: - return [] - - instances = [] - layers_meta = stub.get_layers_metadata() - if layers_meta: - for instance in layers_meta: - if instance.get("id") in { - AYON_INSTANCE_ID, AVALON_INSTANCE_ID - }: - instances.append(instance) - - return instances - - def remove_instance(self, instance): - """Remove instance from current workfile metadata. - - Updates metadata of current file in File > File Info and removes - icon highlight on group layer. - - Args: - instance (dict): instance representation from subsetmanager model - """ - stub = _get_stub() - - if not stub: - return - - inst_id = instance.get("instance_id") or instance.get("uuid") # legacy - if not inst_id: - log.warning("No instance identifier for {}".format(instance)) - return - - stub.remove_instance(inst_id) - - if instance.get("members"): - item = stub.get_layer(instance["members"][0]) - if item: - stub.rename_layer(item.id, - item.name.replace(stub.PUBLISH_ICON, '')) - - -def check_inventory(): - if not any_outdated_containers(): - return - - # Warn about outdated containers. - _app = get_ayon_qt_app() - - message_box = QtWidgets.QMessageBox() - message_box.setIcon(QtWidgets.QMessageBox.Warning) - msg = "There are outdated containers in the scene." 
- message_box.setText(msg) - message_box.exec_() - - -def on_application_launch(): - check_inventory() - - -def ls(): - """Yields containers from active Photoshop document - - This is the host-equivalent of api.ls(), but instead of listing - assets on disk, it lists assets already loaded in Photoshop; once loaded - they are called 'containers' - - Yields: - dict: container - - """ - try: - stub = lib.stub() # only after Photoshop is up - except lib.ConnectionNotEstablishedYet: - print("Not connected yet, ignoring") - return - - if not stub.get_active_document_name(): - return - - layers_meta = stub.get_layers_metadata() # minimize calls to PS - for layer in stub.get_layers(): - data = stub.read(layer, layers_meta) - - # Skip non-tagged layers. - if not data: - continue - - # Filter to only containers. - if "container" not in data["id"]: - continue - - # Append transient data - data["objectName"] = layer.name.replace(stub.LOADED_ICON, '') - data["layer"] = layer - - yield data - - -def _get_stub(): - """Handle pulling stub from PS to run operations on host - - Returns: - (PhotoshopServerStub) or None - """ - try: - stub = lib.stub() # only after Photoshop is up - except lib.ConnectionNotEstablishedYet: - print("Not connected yet, ignoring") - return - - if not stub.get_active_document_name(): - return - - return stub - - -def containerise( - name, namespace, layer, context, loader=None, suffix="_CON" -): - """Imprint layer with metadata - - Containerisation enables tracking of version, author and origin - for loaded assets. - - Arguments: - name (str): Name of resulting assembly - namespace (str): Namespace under which to host container - layer (PSItem): Layer to containerise - context (dict): Asset information - loader (str, optional): Name of loader used to produce this container. - suffix (str, optional): Suffix of container, defaults to `_CON`. - - Returns: - container (str): Name of container assembly - """ - layer.name = name + suffix - - data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": name, - "namespace": namespace, - "loader": str(loader), - "representation": context["representation"]["id"], - "members": [str(layer.id)] - } - stub = lib.stub() - stub.imprint(layer.id, data) - - return layer - - -def cache_and_get_instances(creator): - """Cache instances in shared data. - - Stores all instances as a list, as legacy instances might still be present. - Args: - creator (Creator): Plugin which would like to get instances from host. - Returns: - List[]: list of all instances stored in metadata - """ - shared_key = "openpype.photoshop.instances" - if shared_key not in creator.collection_shared_data: - creator.collection_shared_data[shared_key] = \ - creator.host.list_instances() - return creator.collection_shared_data[shared_key] diff --git a/server_addon/photoshop/client/ayon_photoshop/api/plugin.py b/server_addon/photoshop/client/ayon_photoshop/api/plugin.py deleted file mode 100644 index c11a206834..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/api/plugin.py +++ /dev/null @@ -1,37 +0,0 @@ -import re - -from ayon_core.pipeline import LoaderPlugin -from .launch_logic import stub - - -def get_unique_layer_name(layers, container_name, product_name): - """Prepare unique layer name. - - Gets all layer names and if '_' is present, - it adds suffix '1', or increases the suffix by 1. - - Args: - layers (list) of dict with layers info (name, id etc.) 
- container_name (str): - product_name (str): - - Returns: - str: name_00X (without version) - """ - name = "{}_{}".format(container_name, product_name) - names = {} - for layer in layers: - layer_name = re.sub(r'_\d{3}$', '', layer.name) - if layer_name in names.keys(): - names[layer_name] = names[layer_name] + 1 - else: - names[layer_name] = 1 - occurrences = names.get(name, 0) - - return "{}_{:0>3d}".format(name, occurrences + 1) - - -class PhotoshopLoader(LoaderPlugin): - @staticmethod - def get_stub(): - return stub() diff --git a/server_addon/photoshop/client/ayon_photoshop/api/webserver.py b/server_addon/photoshop/client/ayon_photoshop/api/webserver.py deleted file mode 100644 index cd229c65ad..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/api/webserver.py +++ /dev/null @@ -1,241 +0,0 @@ -"""Webserver for communication with photoshop. - -Aiohttp (Asyncio) based websocket server used for communication with host -application. - -This webserver is started in a spawned Python process that opens DCC during -its launch, waits for connection from DCC and handles communication going -forward. Server is closed before Python process is killed. -""" -import os -import logging -import urllib -import threading -import asyncio -import socket - -from aiohttp import web - -from wsrpc_aiohttp import WSRPCClient - -from ayon_core.pipeline import get_global_context - -log = logging.getLogger(__name__) - - -class WebServerTool: - """ - Basic POC implementation of an asynchronous websocket RPC server. - Uses class in external_app_1.py to mimic implementation for single - external application. - 'test_client' folder contains two test implementations of client - """ - _instance = None - - def __init__(self): - WebServerTool._instance = self - - self.client = None - self.handlers = {} - self.on_stop_callbacks = [] - - port = None - host_name = "localhost" - websocket_url = os.getenv("WEBSOCKET_URL") - if websocket_url: - parsed = urllib.parse.urlparse(websocket_url) - port = parsed.port - host_name = parsed.netloc.split(":")[0] - if not port: - port = 8098 # fallback - - self.port = port - self.host_name = host_name - - self.app = web.Application() - - # add route with multiple methods for single "external app" - self.webserver_thread = WebServerThread(self, self.port) - - def add_route(self, *args, **kwargs): - self.app.router.add_route(*args, **kwargs) - - def add_static(self, *args, **kwargs): - self.app.router.add_static(*args, **kwargs) - - def start_server(self): - if self.webserver_thread and not self.webserver_thread.is_alive(): - self.webserver_thread.start() - - def stop_server(self): - self.stop() - - async def send_context_change(self, host): - """ - Calls running webserver to inform about context change - - Used when a new PS/AE should be triggered, but one is already - running; without this, publish would point to the old context. - """ - client = WSRPCClient(os.getenv("WEBSOCKET_URL"), - loop=asyncio.get_event_loop()) - await client.connect() - - context = get_global_context() - project_name = context["project_name"] - folder_path = context["folder_path"] - task_name = context["task_name"] - log.info("Sending context change to {}{}/{}".format( - project_name, folder_path, task_name - )) - - await client.call( - '{}.set_context'.format(host), - project=project_name, - folder=folder_path, - task=task_name - ) - await client.close() - - def port_occupied(self, host_name, port): - """ - Check if 'url' is already occupied. 
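        (e.g. port_occupied("localhost", 8098) returning True suggests another session already serves the default port - illustrative values.)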
- - This could mean that the app is already running and we are trying to open it - again. In that case, use existing running webserver. - Check here is easier than capturing exception from thread. - """ - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as con: - result = con.connect_ex((host_name, port)) == 0 - - if result: - print(f"Port {port} is already in use") - return result - - def call(self, func): - log.debug("websocket.call {}".format(func)) - future = asyncio.run_coroutine_threadsafe( - func, - self.webserver_thread.loop - ) - result = future.result() - return result - - @staticmethod - def get_instance(): - if WebServerTool._instance is None: - WebServerTool() - return WebServerTool._instance - - @property - def is_running(self): - if not self.webserver_thread: - return False - return self.webserver_thread.is_running - - def stop(self): - if not self.is_running: - return - try: - log.debug("Stopping websocket server") - self.webserver_thread.is_running = False - self.webserver_thread.stop() - except Exception: - log.warning( - "Error happened while stopping websocket server", - exc_info=True - ) - - def thread_stopped(self): - for callback in self.on_stop_callbacks: - callback() - - -class WebServerThread(threading.Thread): - """ Listener for websocket rpc requests. - - It would probably be better to "attach" this to main thread (as for - example Harmony needs to run something on main thread), but currently - it creates separate thread and separate asyncio event loop - """ - def __init__(self, module, port): - super(WebServerThread, self).__init__() - - self.is_running = False - self.port = port - self.module = module - self.loop = None - self.runner = None - self.site = None - self.tasks = [] - - def run(self): - self.is_running = True - - try: - log.info("Starting web server") - self.loop = asyncio.new_event_loop() # create new loop for thread - asyncio.set_event_loop(self.loop) - - self.loop.run_until_complete(self.start_server()) - - websocket_url = "ws://localhost:{}/ws".format(self.port) - - log.debug( - "Running Websocket server on URL: \"{}\"".format(websocket_url) - ) - - asyncio.ensure_future(self.check_shutdown(), loop=self.loop) - self.loop.run_forever() - except Exception: - self.is_running = False - log.warning( - "Websocket Server service has failed", exc_info=True - ) - raise - finally: - self.loop.close() # optional - - self.is_running = False - self.module.thread_stopped() - log.info("Websocket server stopped") - - async def start_server(self): - """ Starts runner and TCPSite """ - self.runner = web.AppRunner(self.module.app) - await self.runner.setup() - self.site = web.TCPSite(self.runner, 'localhost', self.port) - await self.site.start() - - def stop(self): - """Sets is_running flag to false, 'check_shutdown' shuts server down""" - self.is_running = False - - async def check_shutdown(self): - """ Future that is running and checks if server should be running - periodically. 
- """ - while self.is_running: - while self.tasks: - task = self.tasks.pop(0) - log.debug("waiting for task {}".format(task)) - await task - log.debug("returned value {}".format(task.result)) - - await asyncio.sleep(0.5) - - log.debug("Starting shutdown") - await self.site.stop() - log.debug("Site stopped") - await self.runner.cleanup() - log.debug("Runner stopped") - tasks = [task for task in asyncio.all_tasks() if - task is not asyncio.current_task()] - list(map(lambda task: task.cancel(), tasks)) # cancel all the tasks - results = await asyncio.gather(*tasks, return_exceptions=True) - log.debug(f'Finished awaiting cancelled tasks, results: {results}...') - await self.loop.shutdown_asyncgens() - # to really make sure everything else has time to stop - await asyncio.sleep(0.07) - self.loop.stop() diff --git a/server_addon/photoshop/client/ayon_photoshop/api/ws_stub.py b/server_addon/photoshop/client/ayon_photoshop/api/ws_stub.py deleted file mode 100644 index 3619fa4b7a..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/api/ws_stub.py +++ /dev/null @@ -1,571 +0,0 @@ -""" - Stub handling connection from server to client. - Used anywhere solution is calling client methods. -""" -import json -import attr -from wsrpc_aiohttp import WebSocketAsync - -from .webserver import WebServerTool - - -@attr.s -class PSItem(object): - """ - Object denoting layer or group item in PS. Each item is created in - PS by any Loader, but contains same fields, which are being used - in later processing. - """ - # metadata - id = attr.ib() # id created by AE, could be used for querying - name = attr.ib() # name of item - group = attr.ib(default=None) # item type (footage, folder, comp) - parents = attr.ib(factory=list) - visible = attr.ib(default=True) - type = attr.ib(default=None) - # all imported elements, single for - members = attr.ib(factory=list) - long_name = attr.ib(default=None) - color_code = attr.ib(default=None) # color code of layer - instance_id = attr.ib(default=None) - - @property - def clean_name(self): - """Returns layer name without publish icon highlight - - Returns: - (str) - """ - return (self.name.replace(PhotoshopServerStub.PUBLISH_ICON, '') - .replace(PhotoshopServerStub.LOADED_ICON, '')) - - -class PhotoshopServerStub: - """ - Stub for calling function on client (Photoshop js) side. - Expects that client is already connected (started when avalon menu - is opened). - 'self.websocketserver.call' is used as async wrapper - """ - PUBLISH_ICON = '\u2117 ' - LOADED_ICON = '\u25bc' - - def __init__(self): - self.websocketserver = WebServerTool.get_instance() - self.client = self.get_client() - - @staticmethod - def get_client(): - """ - Return first connected client to WebSocket - TODO implement selection by Route - :return: client - """ - clients = WebSocketAsync.get_clients() - client = None - if len(clients) > 0: - key = list(clients.keys())[0] - client = clients.get(key) - - return client - - def open(self, path): - """Open file located at 'path' (local). - - Args: - path(string): file path locally - Returns: None - """ - self.websocketserver.call( - self.client.call('Photoshop.open', path=path) - ) - - def read(self, layer, layers_meta=None): - """Parses layer metadata from Headline field of active document. 
- - Args: - layer: (PSItem) - layers_meta: full list from Headline (for performance in loops) - Returns: - (dict) of layer metadata stored in PS file - - Example: - { - 'id': 'pyblish.avalon.container', - 'loader': 'ImageLoader', - 'members': ['64'], - 'name': 'imageMainMiddle', - 'namespace': 'Hero_imageMainMiddle_001', - 'representation': '6203dc91e80934d9f6ee7d96', - 'schema': 'openpype:container-2.0' - } - """ - if layers_meta is None: - layers_meta = self.get_layers_metadata() - - for layer_meta in layers_meta: - layer_id = layer_meta.get("uuid") # legacy - if layer_meta.get("members"): - layer_id = layer_meta["members"][0] - if str(layer.id) == str(layer_id): - return layer_meta - print("Unable to find layer metadata for {}".format(layer.id)) - - def imprint(self, item_id, data, all_layers=None, items_meta=None): - """Save layer metadata to Headline field of active document - - Stores metadata in format: - [{ - "active":true, - "productName":"imageBG", - "productType":"image", - "id":"ayon.create.instance", - "folderPath":"Town", - "uuid": "8" - }] - for created instances - OR - [{ - "schema": "openpype:container-2.0", - "id": "ayon.create.instance", - "name": "imageMG", - "namespace": "Jungle_imageMG_001", - "loader": "ImageLoader", - "representation": "5fbfc0ee30a946093c6ff18a", - "members": [ - "40" - ] - }] - for loaded instances - - Args: - item_id (str): - data(string): json representation for single layer - all_layers (list of PSItem): for performance, could be - injected for usage in loop, if not, single call will be - triggered - items_meta(string): json representation from Headline - (for performance - provide only if imprint is in - loop - value should be same) - Returns: None - """ - if not items_meta: - items_meta = self.get_layers_metadata() - - # json.dumps writes integer values in a dictionary to string, so - # anticipating it here. - item_id = str(item_id) - is_new = True - result_meta = [] - for item_meta in items_meta: - if ((item_meta.get('members') and - item_id == str(item_meta.get('members')[0])) or - item_meta.get("instance_id") == item_id): - is_new = False - if data: - item_meta.update(data) - result_meta.append(item_meta) - else: - result_meta.append(item_meta) - - if is_new: - result_meta.append(data) - - # Ensure only valid ids are stored. - if not all_layers: - all_layers = self.get_layers() - layer_ids = [layer.id for layer in all_layers] - cleaned_data = [] - - for item in result_meta: - if item.get("members"): - if int(item["members"][0]) not in layer_ids: - continue - - cleaned_data.append(item) - - payload = json.dumps(cleaned_data, indent=4) - self.websocketserver.call( - self.client.call('Photoshop.imprint', payload=payload) - ) - - def get_layers(self): - """Returns JSON document with all(?) layers in active document. - - Returns: - Format of tuple: { 'id':'123', - 'name': 'My Layer 1', - 'type': 'GUIDE'|'FG'|'BG'|'OBJ' - 'visible': 'true'|'false' - """ - res = self.websocketserver.call( - self.client.call('Photoshop.get_layers') - ) - - return self._to_records(res) - - def get_layer(self, layer_id): - """ - Returns PSItem for specific 'layer_id' or None if not found - Args: - layer_id (string): unique layer id, stored in 'uuid' field - - Returns: - (PSItem) or None - """ - layers = self.get_layers() - for layer in layers: - if str(layer.id) == str(layer_id): - return layer - - def get_layers_in_layers(self, layers): - """Return all layers that belong to layers (might be groups). 
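# --- Editor's illustrative sketch (not part of the original diff): the
# merge rule `imprint` applies - update the matching metadata item, drop it
# when empty data is passed (that is how loaders purge metadata), append
# when nothing matched. Sample data is invented.
def imprint_meta(item_id, data, items_meta):
    item_id = str(item_id)
    result, is_new = [], True
    for item_meta in items_meta:
        members = item_meta.get("members") or []
        if ((members and item_id == str(members[0]))
                or item_meta.get("instance_id") == item_id):
            is_new = False
            if data:  # empty data removes the item entirely
                item_meta.update(data)
                result.append(item_meta)
        else:
            result.append(item_meta)
    if is_new:
        result.append(data)
    return result

meta = [{"members": ["40"], "productName": "imageBG"}]
print(imprint_meta(40, {"active": False}, meta))  # updates in place
print(imprint_meta(40, {}, meta))                 # removes the item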
- 
- Args:
- layers (list of PSItem): layers or groups to search in
-
- Returns:
- (list of PSItem)
- """
- parent_ids = set([lay.id for lay in layers])
-
- return self._get_layers_in_layers(parent_ids)
-
- def get_layers_in_layers_ids(self, layers_ids, layers=None):
- """Return all layers that belong to layers (might be groups).
-
- Args:
- layers_ids (list): layer ids
- layers (list of PSItem): cached layers, queried if not provided
-
- Returns:
- (list of PSItem)
- """
- parent_ids = set(layers_ids)
-
- return self._get_layers_in_layers(parent_ids, layers)
-
- def _get_layers_in_layers(self, parent_ids, layers=None):
- if not layers:
- layers = self.get_layers()
-
- all_layers = layers
- ret = []
-
- for layer in all_layers:
- parents = set(layer.parents)
- if len(parent_ids & parents) > 0:
- ret.append(layer)
- if layer.id in parent_ids:
- ret.append(layer)
-
- return ret
-
- def create_group(self, name):
- """Create a new group (e.g. LayerSet).
-
- Returns:
- (PSItem)
- """
- enhanced_name = self.PUBLISH_ICON + name
- ret = self.websocketserver.call(
- self.client.call('Photoshop.create_group', name=enhanced_name)
- )
- # create group on PS is asynchronous, returns only id
- return PSItem(id=ret, name=name, group=True)
-
- def group_selected_layers(self, name):
- """Group selected layers into a new LayerSet (e.g. group).
-
- Returns:
- (PSItem)
- """
- enhanced_name = self.PUBLISH_ICON + name
- res = self.websocketserver.call(
- self.client.call(
- 'Photoshop.group_selected_layers', name=enhanced_name
- )
- )
- res = self._to_records(res)
- if res:
- rec = res.pop()
- rec.name = rec.name.replace(self.PUBLISH_ICON, '')
- return rec
- raise ValueError("No group record returned")
-
- def get_selected_layers(self):
- """Get a list of currently selected layers.
-
- Returns:
- (list of PSItem)
- """
- res = self.websocketserver.call(
- self.client.call('Photoshop.get_selected_layers')
- )
- return self._to_records(res)
-
- def select_layers(self, layers):
- """Selects specified layers in Photoshop by their ids.
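# --- Editor's illustrative sketch (not part of the original diff): the
# selection rule in `_get_layers_in_layers` above - keep a layer when any
# of its parents is a requested group, or when it is one of the groups
# itself. Layers are modeled as (id, parents) tuples for the demo.
def layers_in_layers(parent_ids, layers):
    ret = []
    for layer_id, parents in layers:
        if (parent_ids & set(parents)) or layer_id in parent_ids:
            ret.append(layer_id)
    return ret

layers = [(1, []), (2, [1]), (3, [1, 2]), (4, [])]
print(layers_in_layers({1}, layers))  # -> [1, 2, 3]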
- - Args: - layers: - """ - layers_id = [str(lay.id) for lay in layers] - self.websocketserver.call( - self.client.call( - 'Photoshop.select_layers', - layers=json.dumps(layers_id) - ) - ) - - def get_active_document_full_name(self): - """Returns full name with path of active document via ws call - - Returns(string): - full path with name - """ - res = self.websocketserver.call( - self.client.call('Photoshop.get_active_document_full_name') - ) - - return res - - def get_active_document_name(self): - """Returns just a name of active document via ws call - - Returns(string): - file name - """ - return self.websocketserver.call( - self.client.call('Photoshop.get_active_document_name') - ) - - def is_saved(self): - """Returns true if no changes in active document - - Returns: - - """ - return self.websocketserver.call( - self.client.call('Photoshop.is_saved') - ) - - def save(self): - """Saves active document""" - self.websocketserver.call( - self.client.call('Photoshop.save') - ) - - def saveAs(self, image_path, ext, as_copy): - """Saves active document to psd (copy) or png or jpg - - Args: - image_path(string): full local path - ext: - as_copy: - Returns: None - """ - self.websocketserver.call( - self.client.call( - 'Photoshop.saveAs', - image_path=image_path, - ext=ext, - as_copy=as_copy - ) - ) - - def set_visible(self, layer_id, visibility): - """Set layer with 'layer_id' to 'visibility' - - Args: - layer_id: - visibility: - Returns: None - """ - self.websocketserver.call( - self.client.call( - 'Photoshop.set_visible', - layer_id=layer_id, - visibility=visibility - ) - ) - - def hide_all_others_layers(self, layers): - """hides all layers that are not part of the list or that are not - children of this list - - Args: - layers (list): list of PSItem - highest hierarchy - """ - extract_ids = set([ll.id for ll in self.get_layers_in_layers(layers)]) - - self.hide_all_others_layers_ids(extract_ids) - - def hide_all_others_layers_ids(self, extract_ids, layers=None): - """hides all layers that are not part of the list or that are not - children of this list - - Args: - extract_ids (list): list of integer that should be visible - layers (list) of PSItem (used for caching) - """ - if not layers: - layers = self.get_layers() - for layer in layers: - if layer.visible and layer.id not in extract_ids: - self.set_visible(layer.id, False) - - def get_layers_metadata(self): - """Reads layers metadata from Headline from active document in PS. - (Headline accessible by File > File Info) - - Returns: - (list) - example: - {"8":{"active":true,"productName":"imageBG", - "productType":"image","id":"ayon.create.instance", - "folderPath":"/Town"}} - 8 is layer(group) id - used for deletion, update etc. - """ - res = self.websocketserver.call(self.client.call('Photoshop.read')) - layers_data = [] - try: - if res: - layers_data = json.loads(res) - except json.decoder.JSONDecodeError: - raise ValueError("{} cannot be parsed, recreate meta".format(res)) - # format of metadata changed from {} to [] because of standardization - # keep current implementation logic as its working - if isinstance(layers_data, dict): - for layer_id, layer_meta in layers_data.items(): - if layer_meta.get("schema") != "openpype:container-2.0": - layer_meta["members"] = [str(layer_id)] - layers_data = list(layers_data.values()) - return layers_data - - def import_smart_object(self, path, layer_name, as_reference=False): - """Import the file at `path` as a smart object to active document. - - Args: - path (str): File path to import. 
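# --- Editor's illustrative sketch (not part of the original diff): the
# legacy migration `get_layers_metadata` performs - old metadata was a dict
# keyed by layer id, the new format is a list where the id moves into
# "members". Mirrors the source; sample data invented.
def migrate_layers_meta(layers_data):
    if isinstance(layers_data, dict):
        for layer_id, layer_meta in layers_data.items():
            if layer_meta.get("schema") != "openpype:container-2.0":
                layer_meta["members"] = [str(layer_id)]
        layers_data = list(layers_data.values())
    return layers_data

legacy = {"8": {"active": True, "productName": "imageBG"}}
print(migrate_layers_meta(legacy))
# -> [{'active': True, 'productName': 'imageBG', 'members': ['8']}]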
- layer_name (str): Unique layer name to differentiate how many times
- the same smart object was loaded
- as_reference (bool): pull in content or reference
- """
- enhanced_name = self.LOADED_ICON + layer_name
- res = self.websocketserver.call(
- self.client.call(
- 'Photoshop.import_smart_object',
- path=path,
- name=enhanced_name,
- as_reference=as_reference
- )
- )
- rec = self._to_records(res).pop()
- if rec:
- rec.name = rec.name.replace(self.LOADED_ICON, '')
- return rec
-
- def replace_smart_object(self, layer, path, layer_name):
- """Replace the smart object `layer` with the file at `path`.
-
- Args:
- layer (PSItem):
- path (str): File to import.
- layer_name (str): Unique layer name to differentiate how many times
- the same smart object was loaded
- """
- enhanced_name = self.LOADED_ICON + layer_name
- self.websocketserver.call(
- self.client.call(
- 'Photoshop.replace_smart_object',
- layer_id=layer.id,
- path=path,
- name=enhanced_name
- )
- )
-
- def delete_layer(self, layer_id):
- """Deletes a specific layer by its id.
-
- Args:
- layer_id (int): id of layer to delete
- """
- self.websocketserver.call(
- self.client.call('Photoshop.delete_layer', layer_id=layer_id)
- )
-
- def rename_layer(self, layer_id, name):
- """Renames a specific layer by its id.
-
- Args:
- layer_id (int): id of layer to rename
- name (str): new name
- """
- self.websocketserver.call(
- self.client.call(
- 'Photoshop.rename_layer',
- layer_id=layer_id,
- name=name
- )
- )
-
- def remove_instance(self, instance_id):
- cleaned_data = []
-
- for item in self.get_layers_metadata():
- inst_id = item.get("instance_id") or item.get("uuid")
- if inst_id != instance_id:
- cleaned_data.append(item)
-
- payload = json.dumps(cleaned_data, indent=4)
-
- self.websocketserver.call(
- self.client.call('Photoshop.imprint', payload=payload)
- )
-
- def get_extension_version(self):
- """Returns version number of installed extension."""
- return self.websocketserver.call(
- self.client.call('Photoshop.get_extension_version')
- )
-
- def close(self):
- """Shuts down PS and its process too.
-
- For webpublishing only.
- """
- # TODO change client.call to method with checks for client
- self.websocketserver.call(self.client.call('Photoshop.close'))
-
- def _to_records(self, res):
- """Converts string json representation into list of PSItem for
- dot notation access to work.
- 
- Args:
- res (string): valid json
-
- Returns:
- (list) of PSItem
- """
- try:
- layers_data = json.loads(res)
- except json.decoder.JSONDecodeError:
- raise ValueError("Received broken JSON {}".format(res))
- ret = []
-
- # convert to PSItem to use dot notation
- if isinstance(layers_data, dict):
- layers_data = [layers_data]
- for d in layers_data:
- # currently implemented and expected fields
- ret.append(PSItem(
- d.get('id'),
- d.get('name'),
- d.get('group'),
- d.get('parents'),
- d.get('visible'),
- d.get('type'),
- d.get('members'),
- d.get('long_name'),
- d.get("color_code"),
- d.get("instance_id")
- ))
- return ret
diff --git a/server_addon/photoshop/client/ayon_photoshop/hooks/pre_launch_args.py b/server_addon/photoshop/client/ayon_photoshop/hooks/pre_launch_args.py
deleted file mode 100644
index ff60c2f40d..0000000000
--- a/server_addon/photoshop/client/ayon_photoshop/hooks/pre_launch_args.py
+++ /dev/null
@@ -1,88 +0,0 @@
-import os
-import platform
-import subprocess
-
-from ayon_core.lib import (
- get_ayon_launcher_args,
- is_using_ayon_console,
-)
-from ayon_applications import PreLaunchHook, LaunchTypes
-from ayon_photoshop import get_launch_script_path
-
-
-def get_launch_kwargs(kwargs):
- """Explicit setting of kwargs for Popen for Photoshop.
-
- Expected behavior:
- - ayon_console opens a window with logs
- - ayon has stdout/stderr available for capturing
-
- Args:
- kwargs (Union[dict, None]): Current kwargs or None.
-
- """
- if kwargs is None:
- kwargs = {}
-
- if platform.system().lower() != "windows":
- return kwargs
-
- if not is_using_ayon_console():
- kwargs.update({
- "creationflags": subprocess.CREATE_NEW_CONSOLE
- })
- else:
- kwargs.update({
- "creationflags": subprocess.CREATE_NO_WINDOW,
- "stdout": subprocess.DEVNULL,
- "stderr": subprocess.DEVNULL
- })
- return kwargs
-
-
-class PhotoshopPrelaunchHook(PreLaunchHook):
- """Launch arguments preparation.
-
- The hook adds the python executable and the script path of the Photoshop
- implementation before the Photoshop executable, and adds the last
- workfile path to the launch arguments.
-
- Existence of the last workfile is checked. If the workfile does not
- exist, it tries to copy a templated workfile from a predefined path.
- """
- app_groups = {"photoshop"}
-
- order = 20
- launch_types = {LaunchTypes.local}
-
- def execute(self):
- # Pop executable
- executable_path = self.launch_context.launch_args.pop(0)
-
- # Pop rest of launch arguments - there should not be other arguments!
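# --- Editor's illustrative sketch (not part of the original diff): what the
# kwargs built by `get_launch_kwargs` above mean for subprocess.Popen on
# Windows. Guarded so it only runs there; the command is a placeholder.
import platform
import subprocess

if platform.system().lower() == "windows":
    kwargs = {
        "creationflags": subprocess.CREATE_NO_WINDOW,  # no console window
        "stdout": subprocess.DEVNULL,  # output discarded, not captured
        "stderr": subprocess.DEVNULL,
    }
    subprocess.Popen(["cmd", "/c", "echo hidden"], **kwargs)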
- remainders = [] - while self.launch_context.launch_args: - remainders.append(self.launch_context.launch_args.pop(0)) - - script_path = get_launch_script_path() - - new_launch_args = get_ayon_launcher_args( - "run", script_path, executable_path - ) - # Add workfile path if exists - workfile_path = self.data["last_workfile_path"] - if ( - self.data.get("start_last_workfile") - and workfile_path - and os.path.exists(workfile_path) - ): - new_launch_args.append(workfile_path) - - # Append as whole list as these arguments should not be separated - self.launch_context.launch_args.append(new_launch_args) - - if remainders: - self.launch_context.launch_args.extend(remainders) - - self.launch_context.kwargs = get_launch_kwargs( - self.launch_context.kwargs - ) diff --git a/server_addon/photoshop/client/ayon_photoshop/lib.py b/server_addon/photoshop/client/ayon_photoshop/lib.py deleted file mode 100644 index 9dc90953c5..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/lib.py +++ /dev/null @@ -1,127 +0,0 @@ -import re - -import ayon_api - -from ayon_core.lib import prepare_template_data -from ayon_core.pipeline import ( - AutoCreator, - CreatedInstance -) -from ayon_photoshop import api -from ayon_photoshop.api.pipeline import cache_and_get_instances - - -class PSAutoCreator(AutoCreator): - """Generic autocreator to extend.""" - def get_instance_attr_defs(self): - return [] - - def collect_instances(self): - for instance_data in cache_and_get_instances(self): - creator_id = instance_data.get("creator_identifier") - - if creator_id == self.identifier: - instance = CreatedInstance.from_existing( - instance_data, self - ) - self._add_instance_to_context(instance) - - def update_instances(self, update_list): - self.log.debug("update_list:: {}".format(update_list)) - for created_inst, _changes in update_list: - api.stub().imprint(created_inst.get("instance_id"), - created_inst.data_to_store()) - - def create(self, options=None): - existing_instance = None - for instance in self.create_context.instances: - if instance.product_type == self.product_type: - existing_instance = instance - break - - context = self.create_context - project_name = context.get_current_project_name() - folder_path = context.get_current_folder_path() - task_name = context.get_current_task_name() - host_name = context.host_name - - if existing_instance is None: - existing_instance_folder = None - else: - existing_instance_folder = existing_instance["folderPath"] - - if existing_instance is None: - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path - ) - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) - product_name = self.get_product_name( - project_name, - folder_entity, - task_entity, - self.default_variant, - host_name, - ) - data = { - "folderPath": folder_path, - "task": task_name, - "variant": self.default_variant - } - data.update(self.get_dynamic_data( - project_name, - folder_entity, - task_entity, - self.default_variant, - host_name, - None - )) - - if not self.active_on_create: - data["active"] = False - - new_instance = CreatedInstance( - self.product_type, product_name, data, self - ) - self._add_instance_to_context(new_instance) - api.stub().imprint(new_instance.get("instance_id"), - new_instance.data_to_store()) - - elif ( - existing_instance_folder != folder_path - or existing_instance["task"] != task_name - ): - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path - ) - task_entity = ayon_api.get_task_by_name( - 
project_name, folder_entity["id"], task_name - ) - product_name = self.get_product_name( - project_name, - folder_entity, - task_entity, - self.default_variant, - host_name, - ) - existing_instance["folderPath"] = folder_path - existing_instance["task"] = task_name - existing_instance["productName"] = product_name - - -def clean_product_name(product_name): - """Clean all variants leftover {layer} from product name.""" - dynamic_data = prepare_template_data({"layer": "{layer}"}) - for value in dynamic_data.values(): - if value in product_name: - product_name = ( - product_name - .replace(value, "") - .replace("__", "_") - .replace("..", ".") - ) - # clean trailing separator as Main_ - pattern = r'[\W_]+$' - replacement = '' - return re.sub(pattern, replacement, product_name) diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/create/create_flatten_image.py b/server_addon/photoshop/client/ayon_photoshop/plugins/create/create_flatten_image.py deleted file mode 100644 index a467a5ecaa..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/create/create_flatten_image.py +++ /dev/null @@ -1,156 +0,0 @@ -import ayon_api - -from ayon_photoshop import api -from ayon_photoshop.lib import PSAutoCreator, clean_product_name -from ayon_core.lib import BoolDef, prepare_template_data -from ayon_core.pipeline.create import get_product_name, CreatedInstance - - -class AutoImageCreator(PSAutoCreator): - """Creates flatten image from all visible layers. - - Used in simplified publishing as auto created instance. - Must be enabled in Setting and template for product name provided - """ - identifier = "auto_image" - product_type = "image" - - # Settings - default_variant = "" - # - Mark by default instance for review - mark_for_review = True - active_on_create = True - - def create(self, options=None): - existing_instance = None - for instance in self.create_context.instances: - if instance.creator_identifier == self.identifier: - existing_instance = instance - break - - context = self.create_context - project_name = context.get_current_project_name() - folder_path = context.get_current_folder_path() - task_name = context.get_current_task_name() - host_name = context.host_name - folder_entity = ayon_api.get_folder_by_path(project_name, folder_path) - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) - - existing_folder_path = None - if existing_instance is not None: - existing_folder_path = existing_instance["folderPath"] - - if existing_instance is None: - product_name = self.get_product_name( - project_name, - folder_entity, - task_entity, - self.default_variant, - host_name, - ) - - data = { - "folderPath": folder_path, - "task": task_name, - } - - if not self.active_on_create: - data["active"] = False - - creator_attributes = {"mark_for_review": self.mark_for_review} - data.update({"creator_attributes": creator_attributes}) - - new_instance = CreatedInstance( - self.product_type, product_name, data, self - ) - self._add_instance_to_context(new_instance) - api.stub().imprint(new_instance.get("instance_id"), - new_instance.data_to_store()) - - elif ( # existing instance from different context - existing_folder_path != folder_path - or existing_instance["task"] != task_name - ): - product_name = self.get_product_name( - project_name, - folder_entity, - task_entity, - self.default_variant, - host_name, - ) - existing_instance["folderPath"] = folder_path - existing_instance["task"] = task_name - existing_instance["productName"] = 
product_name - - api.stub().imprint(existing_instance.get("instance_id"), - existing_instance.data_to_store()) - - def get_pre_create_attr_defs(self): - return [ - BoolDef( - "mark_for_review", - label="Review", - default=self.mark_for_review - ) - ] - - def get_instance_attr_defs(self): - return [ - BoolDef( - "mark_for_review", - label="Review" - ) - ] - - def apply_settings(self, project_settings): - plugin_settings = ( - project_settings["photoshop"]["create"]["AutoImageCreator"] - ) - - self.active_on_create = plugin_settings["active_on_create"] - self.default_variant = plugin_settings["default_variant"] - self.mark_for_review = plugin_settings["mark_for_review"] - self.enabled = plugin_settings["enabled"] - - def get_detail_description(self): - return """Creator for flatten image. - - Studio might configure simple publishing workflow. In that case - `image` instance is automatically created which will publish flat - image from all visible layers. - - Artist might disable this instance from publishing or from creating - review for it though. - """ - - def get_product_name( - self, - project_name, - folder_entity, - task_entity, - variant, - host_name=None, - instance=None - ): - if host_name is None: - host_name = self.create_context.host_name - - task_name = task_type = None - if task_entity: - task_name = task_entity["name"] - task_type = task_entity["taskType"] - - dynamic_data = prepare_template_data({"layer": "{layer}"}) - - product_name = get_product_name( - project_name, - task_name, - task_type, - host_name, - self.product_type, - variant, - dynamic_data=dynamic_data - ) - return clean_product_name(product_name) diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/create/create_image.py b/server_addon/photoshop/client/ayon_photoshop/plugins/create/create_image.py deleted file mode 100644 index 0170306301..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/create/create_image.py +++ /dev/null @@ -1,265 +0,0 @@ -import re - -from ayon_core.lib import BoolDef -from ayon_core.pipeline import ( - Creator, - CreatedInstance, - CreatorError -) -from ayon_core.lib import prepare_template_data -from ayon_core.pipeline.create import PRODUCT_NAME_ALLOWED_SYMBOLS -from ayon_photoshop import api -from ayon_photoshop.api.pipeline import cache_and_get_instances -from ayon_photoshop.lib import clean_product_name - - -class ImageCreator(Creator): - """Creates image instance for publishing. - - Result of 'image' instance is image of all visible layers, or image(s) of - selected layers. 
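# --- Editor's illustrative sketch (not part of the original diff): what
# `clean_product_name` above guards against - unresolved "{layer}" style
# placeholders and trailing separators. The hardcoded case variants stand
# in for prepare_template_data, which is an assumption of this demo.
import re

def clean_product_name(product_name):
    for value in ("{layer}", "{Layer}", "{LAYER}"):
        if value in product_name:
            product_name = (product_name
                            .replace(value, "")
                            .replace("__", "_")
                            .replace("..", "."))
    # strip a trailing separator such as "Main_"
    return re.sub(r"[\W_]+$", "", product_name)

print(clean_product_name("imageMain_{Layer}"))  # -> 'imageMain'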
- """ - identifier = "image" - label = "Image" - product_type = "image" - description = "Image creator" - - # Settings - default_variants = "" - mark_for_review = False - active_on_create = True - - def create(self, product_name_from_ui, data, pre_create_data): - groups_to_create = [] - top_layers_to_wrap = [] - create_empty_group = False - - stub = api.stub() # only after PS is up - if pre_create_data.get("use_selection"): - try: - top_level_selected_items = stub.get_selected_layers() - except ValueError: - raise CreatorError("Cannot group locked Background layer!") - - only_single_item_selected = len(top_level_selected_items) == 1 - if ( - only_single_item_selected or - pre_create_data.get("create_multiple")): - for selected_item in top_level_selected_items: - if selected_item.group: - groups_to_create.append(selected_item) - else: - top_layers_to_wrap.append(selected_item) - else: - group = stub.group_selected_layers(product_name_from_ui) - groups_to_create.append(group) - else: - try: - stub.select_layers(stub.get_layers()) - group = stub.group_selected_layers(product_name_from_ui) - except ValueError: - raise CreatorError("Cannot group locked Background layer!") - - groups_to_create.append(group) - - # create empty group if nothing selected - if not groups_to_create and not top_layers_to_wrap: - group = stub.create_group(product_name_from_ui) - groups_to_create.append(group) - - # wrap each top level layer into separate new group - for layer in top_layers_to_wrap: - stub.select_layers([layer]) - group = stub.group_selected_layers(layer.name) - groups_to_create.append(group) - - layer_name = '' - # use artist chosen option OR force layer if more products are created - # to differentiate them - use_layer_name = (pre_create_data.get("use_layer_name") or - len(groups_to_create) > 1) - for group in groups_to_create: - product_name = product_name_from_ui # reset to name from creator UI - layer_names_in_hierarchy = [] - created_group_name = self._clean_highlights(stub, group.name) - - if use_layer_name: - layer_name = re.sub( - "[^{}]+".format(PRODUCT_NAME_ALLOWED_SYMBOLS), - "", - group.name - ) - if "{layer}" not in product_name.lower(): - product_name += "{Layer}" - - layer_fill = prepare_template_data({"layer": layer_name}) - product_name = product_name.format(**layer_fill) - product_name = clean_product_name(product_name) - - if group.long_name: - for directory in group.long_name[::-1]: - name = self._clean_highlights(stub, directory) - layer_names_in_hierarchy.append(name) - - data_update = { - "productName": product_name, - "members": [str(group.id)], - "layer_name": layer_name, - "long_name": "_".join(layer_names_in_hierarchy) - } - data.update(data_update) - - mark_for_review = (pre_create_data.get("mark_for_review") or - self.mark_for_review) - creator_attributes = {"mark_for_review": mark_for_review} - data.update({"creator_attributes": creator_attributes}) - - if not self.active_on_create: - data["active"] = False - - new_instance = CreatedInstance( - self.product_type, product_name, data, self - ) - - stub.imprint(new_instance.get("instance_id"), - new_instance.data_to_store()) - self._add_instance_to_context(new_instance) - # reusing existing group, need to rename afterwards - if not create_empty_group: - stub.rename_layer(group.id, - stub.PUBLISH_ICON + created_group_name) - - def collect_instances(self): - for instance_data in cache_and_get_instances(self): - # legacy instances have family=='image' - creator_id = (instance_data.get("creator_identifier") or - 
instance_data.get("family")) - - if creator_id == self.identifier: - instance_data = self._handle_legacy(instance_data) - instance = CreatedInstance.from_existing( - instance_data, self - ) - self._add_instance_to_context(instance) - - def update_instances(self, update_list): - self.log.debug("update_list:: {}".format(update_list)) - for created_inst, _changes in update_list: - if created_inst.get("layer"): - # not storing PSItem layer to metadata - created_inst.pop("layer") - api.stub().imprint(created_inst.get("instance_id"), - created_inst.data_to_store()) - - def remove_instances(self, instances): - for instance in instances: - self.host.remove_instance(instance) - self._remove_instance_from_context(instance) - - def get_pre_create_attr_defs(self): - output = [ - BoolDef("use_selection", default=True, - label="Create only for selected"), - BoolDef("create_multiple", - default=True, - label="Create separate instance for each selected"), - BoolDef("use_layer_name", - default=False, - label="Use layer name in product"), - BoolDef( - "mark_for_review", - label="Create separate review", - default=False - ) - ] - return output - - def get_instance_attr_defs(self): - return [ - BoolDef( - "mark_for_review", - label="Review" - ) - ] - - def apply_settings(self, project_settings): - plugin_settings = ( - project_settings["photoshop"]["create"]["ImageCreator"] - ) - - self.active_on_create = plugin_settings["active_on_create"] - self.default_variants = plugin_settings["default_variants"] - self.mark_for_review = plugin_settings["mark_for_review"] - self.enabled = plugin_settings["enabled"] - - def get_detail_description(self): - return """Creator for Image instances - - Main publishable item in Photoshop will be of `image` product type. - Result of this item (instance) is picture that could be loaded and - used in another DCCs (for example as single layer in composition in - AfterEffects, reference in Maya etc). - - There are couple of options what to publish: - - separate image per selected layer (or group of layers) - - one image for all selected layers - - all visible layers (groups) flattened into single image - - In most cases you would like to keep `Create only for selected` - toggled on and select what you would like to publish. - Toggling this option off will allow you to create instance for all - visible layers without a need to select them explicitly. - - Use 'Create separate instance for each selected' to create separate - images per selected layer (group of layers). - - 'Use layer name in product' will explicitly add layer name into - product name. Position of this name is configurable in - `project_settings/global/tools/creator/product_name_profiles`. - If layer placeholder ({layer}) is not used in `product_name_profiles` - but layer name should be used (set explicitly in UI or implicitly if - multiple images should be created), it is added in capitalized form - as a suffix to product name. - - Each image could have its separate review created if necessary via - `Create separate review` toggle. - But more use case is to use separate `review` instance to create review - from all published items. 
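# --- Editor's illustrative sketch (not part of the original diff): the
# fallback described above - when the product name template carries no
# "{layer}" placeholder, the layer name is appended in capitalized form.
# A simplified stand-in for the template handling in `create`.
def apply_layer_name(product_name, layer_name):
    if "{layer}" not in product_name.lower():
        product_name += "{Layer}"
    return product_name.format(layer=layer_name,
                               Layer=layer_name.capitalize())

print(apply_layer_name("imageMain", "bg"))     # -> 'imageMainBg'
print(apply_layer_name("image{layer}", "bg"))  # -> 'imagebg'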
- """ - - def _handle_legacy(self, instance_data): - """Converts old instances to new format.""" - if not instance_data.get("members"): - instance_data["members"] = [instance_data.get("uuid")] - - if instance_data.get("uuid"): - # uuid not needed, replaced with unique instance_id - api.stub().remove_instance(instance_data.get("uuid")) - instance_data.pop("uuid") - - if not instance_data.get("task"): - instance_data["task"] = self.create_context.get_current_task_name() - - if not instance_data.get("variant"): - instance_data["variant"] = '' - - return instance_data - - def _clean_highlights(self, stub, item): - return item.replace(stub.PUBLISH_ICON, '').replace(stub.LOADED_ICON, - '') - - def get_dynamic_data( - self, - project_name, - folder_entity, - task_entity, - variant, - host_name, - instance - ): - if instance is not None: - layer_name = instance.get("layer_name") - if layer_name: - return {"layer": layer_name} - return {"layer": "{layer}"} diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/create/create_review.py b/server_addon/photoshop/client/ayon_photoshop/plugins/create/create_review.py deleted file mode 100644 index 60c64b3831..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/create/create_review.py +++ /dev/null @@ -1,28 +0,0 @@ -from ayon_photoshop.lib import PSAutoCreator - - -class ReviewCreator(PSAutoCreator): - """Creates review instance which might be disabled from publishing.""" - identifier = "review" - product_type = "review" - - default_variant = "Main" - - def get_detail_description(self): - return """Auto creator for review. - - Photoshop review is created from all published images or from all - visible layers if no `image` instances got created. - - Review might be disabled by an artist (instance shouldn't be deleted as - it will get recreated in next publish either way). - """ - - def apply_settings(self, project_settings): - plugin_settings = ( - project_settings["photoshop"]["create"]["ReviewCreator"] - ) - - self.default_variant = plugin_settings["default_variant"] - self.active_on_create = plugin_settings["active_on_create"] - self.enabled = plugin_settings["enabled"] diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/create/create_workfile.py b/server_addon/photoshop/client/ayon_photoshop/plugins/create/create_workfile.py deleted file mode 100644 index ce44a1ad2d..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/create/create_workfile.py +++ /dev/null @@ -1,28 +0,0 @@ -from ayon_photoshop.lib import PSAutoCreator - - -class WorkfileCreator(PSAutoCreator): - identifier = "workfile" - product_type = "workfile" - - default_variant = "Main" - - def get_detail_description(self): - return """Auto creator for workfile. - - It is expected that each publish will also publish its source workfile - for safekeeping. This creator triggers automatically without need for - an artist to remember and trigger it explicitly. - - Workfile instance could be disabled if it is not required to publish - workfile. (Instance shouldn't be deleted though as it will be recreated - in next publish automatically). 
- """ - - def apply_settings(self, project_settings): - plugin_settings = ( - project_settings["photoshop"]["create"]["WorkfileCreator"] - ) - - self.active_on_create = plugin_settings["active_on_create"] - self.enabled = plugin_settings["enabled"] diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/load/load_image.py b/server_addon/photoshop/client/ayon_photoshop/plugins/load/load_image.py deleted file mode 100644 index e3d80f6957..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/load/load_image.py +++ /dev/null @@ -1,86 +0,0 @@ -import re - -from ayon_core.pipeline import get_representation_path -from ayon_photoshop import api as photoshop -from ayon_photoshop.api import get_unique_layer_name - - -class ImageLoader(photoshop.PhotoshopLoader): - """Load images - - Stores the imported asset in a container named after the asset. - """ - - product_types = {"image", "render"} - representations = {"*"} - - def load(self, context, name=None, namespace=None, data=None): - stub = self.get_stub() - layer_name = get_unique_layer_name( - stub.get_layers(), - context["folder"]["name"], - name - ) - with photoshop.maintained_selection(): - path = self.filepath_from_context(context) - layer = self.import_layer(path, layer_name, stub) - - self[:] = [layer] - namespace = namespace or layer_name - - return photoshop.containerise( - name, - namespace, - layer, - context, - self.__class__.__name__ - ) - - def update(self, container, context): - """ Switch asset or change version """ - stub = self.get_stub() - - layer = container.pop("layer") - - repre_entity = context["representation"] - folder_name = context["folder"]["name"] - product_name = context["product"]["name"] - - namespace_from_container = re.sub(r'_\d{3}$', '', - container["namespace"]) - layer_name = "{}_{}".format(folder_name, product_name) - # switching assets - if namespace_from_container != layer_name: - layer_name = get_unique_layer_name( - stub.get_layers(), folder_name, product_name - ) - else: # switching version - keep same name - layer_name = container["namespace"] - - path = get_representation_path(repre_entity) - with photoshop.maintained_selection(): - stub.replace_smart_object( - layer, path, layer_name - ) - - stub.imprint( - layer.id, {"representation": repre_entity["id"]} - ) - - def remove(self, container): - """ - Removes element from scene: deletes layer + removes from Headline - Args: - container (dict): container to be removed - used to get layer_id - """ - stub = self.get_stub() - - layer = container.pop("layer") - stub.imprint(layer.id, {}) - stub.delete_layer(layer.id) - - def switch(self, container, context): - self.update(container, context) - - def import_layer(self, file_name, layer_name, stub): - return stub.import_smart_object(file_name, layer_name) diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/load/load_image_from_sequence.py b/server_addon/photoshop/client/ayon_photoshop/plugins/load/load_image_from_sequence.py deleted file mode 100644 index f69dce26f6..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/load/load_image_from_sequence.py +++ /dev/null @@ -1,95 +0,0 @@ -import os - -import qargparse - -from ayon_photoshop import api as photoshop -from ayon_photoshop.api import get_unique_layer_name - - -class ImageFromSequenceLoader(photoshop.PhotoshopLoader): - """ Load specific image from sequence - - Used only as quick load of reference file from a sequence. - - Plain ImageLoader picks first frame from sequence. 
- - Loads only existing files - currently not possible to limit loaders - to single select - multiselect. If user selects multiple repres, list - for all of them is provided, but selection is only single file. - This loader will be triggered multiple times, but selected name will - match only to proper path. - - Loader doesn't do containerization as there is currently no data model - of 'frame of rendered files' (only rendered sequence), update would be - difficult. - """ - - product_types = {"render"} - representations = {"*"} - options = [] - - def load(self, context, name=None, namespace=None, data=None): - - path = self.filepath_from_context(context) - if data.get("frame"): - path = os.path.join( - os.path.dirname(path), data["frame"] - ) - if not os.path.exists(path): - return - - stub = self.get_stub() - layer_name = get_unique_layer_name( - stub.get_layers(), context["folder"]["name"], name - ) - - with photoshop.maintained_selection(): - layer = stub.import_smart_object(path, layer_name) - - self[:] = [layer] - namespace = namespace or layer_name - - return namespace - - @classmethod - def get_options(cls, repre_contexts): - """ - Returns list of files for selected 'repre_contexts'. - - It returns only files with same extension as in context as it is - expected that context points to sequence of frames. - - Returns: - (list) of qargparse.Choice - """ - files = [] - for context in repre_contexts: - fname = cls.filepath_from_context(context) - _, file_extension = os.path.splitext(fname) - - for file_name in os.listdir(os.path.dirname(fname)): - if not file_name.endswith(file_extension): - continue - files.append(file_name) - - # return selection only if there is something - if not files or len(files) <= 1: - return [] - - return [ - qargparse.Choice( - "frame", - label="Select specific file", - items=files, - default=0, - help="Which frame should be loaded?" - ) - ] - - def update(self, container, context): - """No update possible, not containerized.""" - pass - - def remove(self, container): - """No update possible, not containerized.""" - pass diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/load/load_reference.py b/server_addon/photoshop/client/ayon_photoshop/plugins/load/load_reference.py deleted file mode 100644 index 21076f6a4f..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/load/load_reference.py +++ /dev/null @@ -1,87 +0,0 @@ -import re - -from ayon_core.pipeline import get_representation_path -from ayon_photoshop import api as photoshop -from ayon_photoshop.api import get_unique_layer_name - - -class ReferenceLoader(photoshop.PhotoshopLoader): - """Load reference images - - Stores the imported asset in a container named after the asset. - - Inheriting from 'load_image' didn't work because of - "Cannot write to closing transport", possible refactor. 
- """ - - product_types = {"image", "render"} - representations = {"*"} - - def load(self, context, name=None, namespace=None, data=None): - stub = self.get_stub() - layer_name = get_unique_layer_name( - stub.get_layers(), context["folder"]["name"], name - ) - with photoshop.maintained_selection(): - path = self.filepath_from_context(context) - layer = self.import_layer(path, layer_name, stub) - - self[:] = [layer] - namespace = namespace or layer_name - - return photoshop.containerise( - name, - namespace, - layer, - context, - self.__class__.__name__ - ) - - def update(self, container, context): - """ Switch asset or change version.""" - stub = self.get_stub() - layer = container.pop("layer") - - folder_name = context["folder"]["name"] - product_name = context["product"]["name"] - repre_entity = context["representation"] - - namespace_from_container = re.sub(r'_\d{3}$', '', - container["namespace"]) - layer_name = "{}_{}".format(folder_name, product_name) - # switching assets - if namespace_from_container != layer_name: - layer_name = get_unique_layer_name( - stub.get_layers(), folder_name, product_name - ) - else: # switching version - keep same name - layer_name = container["namespace"] - - path = get_representation_path(repre_entity) - with photoshop.maintained_selection(): - stub.replace_smart_object( - layer, path, layer_name - ) - - stub.imprint( - layer.id, {"representation": repre_entity["id"]} - ) - - def remove(self, container): - """Removes element from scene: deletes layer + removes from Headline - - Args: - container (dict): container to be removed - used to get layer_id - """ - stub = self.get_stub() - layer = container.pop("layer") - stub.imprint(layer.id, {}) - stub.delete_layer(layer.id) - - def switch(self, container, context): - self.update(container, context) - - def import_layer(self, file_name, layer_name, stub): - return stub.import_smart_object( - file_name, layer_name, as_reference=True - ) diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/closePS.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/closePS.py deleted file mode 100644 index 2cdc9fa1e8..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/closePS.py +++ /dev/null @@ -1,27 +0,0 @@ -# -*- coding: utf-8 -*- -"""Close PS after publish. For Webpublishing only.""" -import pyblish.api - -from ayon_photoshop import api as photoshop - - -class ClosePS(pyblish.api.ContextPlugin): - """Close PS after publish. For Webpublishing only. - """ - - order = pyblish.api.IntegratorOrder + 14 - label = "Close PS" - optional = True - active = True - - hosts = ["photoshop"] - targets = ["automated"] - - def process(self, context): - self.log.info("ClosePS") - - stub = photoshop.stub() - self.log.info("Shutting down PS") - stub.save() - stub.close() - self.log.info("PS closed") diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_auto_image.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_auto_image.py deleted file mode 100644 index 23a71bdf46..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_auto_image.py +++ /dev/null @@ -1,106 +0,0 @@ -import pyblish.api - -from ayon_photoshop import api as photoshop -from ayon_core.pipeline.create import get_product_name - - -class CollectAutoImage(pyblish.api.ContextPlugin): - """Creates auto image in non artist based publishes (Webpublisher). 
- """ - - label = "Collect Auto Image" - hosts = ["photoshop"] - order = pyblish.api.CollectorOrder + 0.2 - - targets = ["automated"] - - def process(self, context): - for instance in context: - creator_identifier = instance.data.get("creator_identifier") - if creator_identifier and creator_identifier == "auto_image": - self.log.debug("Auto image instance found, won't create new") - return - - project_name = context.data["projectName"] - proj_settings = context.data["project_settings"] - host_name = context.data["hostName"] - folder_entity = context.data["folderEntity"] - task_entity = context.data["taskEntity"] - task_name = task_type = None - if task_entity: - task_name = task_entity["name"] - task_type = task_entity["taskType"] - - auto_creator = proj_settings.get( - "photoshop", {}).get( - "create", {}).get( - "AutoImageCreator", {}) - - if not auto_creator or not auto_creator["enabled"]: - self.log.debug("Auto image creator disabled, won't create new") - return - - stub = photoshop.stub() - stored_items = stub.get_layers_metadata() - for item in stored_items: - if item.get("creator_identifier") == "auto_image": - if not item.get("active"): - self.log.debug("Auto_image instance disabled") - return - - layer_items = stub.get_layers() - - publishable_ids = [layer.id for layer in layer_items - if layer.visible] - - # collect stored image instances - instance_names = [] - for layer_item in layer_items: - layer_meta_data = stub.read(layer_item, stored_items) - - # Skip layers without metadata. - if layer_meta_data is None: - continue - - # Skip containers. - if "container" in layer_meta_data["id"]: - continue - - # active might not be in legacy meta - if layer_meta_data.get("active", True) and layer_item.visible: - instance_names.append(layer_meta_data["productName"]) - - if len(instance_names) == 0: - variants = proj_settings.get( - "photoshop", {}).get( - "create", {}).get( - "CreateImage", {}).get( - "default_variants", ['']) - product_type = "image" - - variant = context.data.get("variant") or variants[0] - - product_name = get_product_name( - project_name, - task_name, - task_type, - host_name, - product_type, - variant, - ) - - instance = context.create_instance(product_name) - instance.data["folderPath"] = folder_entity["path"] - instance.data["productType"] = product_type - instance.data["productName"] = product_name - instance.data["ids"] = publishable_ids - instance.data["publish"] = True - instance.data["creator_identifier"] = "auto_image" - instance.data["family"] = product_type - instance.data["families"] = [product_type] - - if auto_creator["mark_for_review"]: - instance.data["creator_attributes"] = {"mark_for_review": True} - instance.data["families"].append("review") - - self.log.info("auto image instance: {} ".format(instance.data)) diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_auto_image_refresh.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_auto_image_refresh.py deleted file mode 100644 index 108b65232a..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_auto_image_refresh.py +++ /dev/null @@ -1,23 +0,0 @@ -import pyblish.api - -from ayon_photoshop import api as photoshop - - -class CollectAutoImageRefresh(pyblish.api.ContextPlugin): - """Refreshes auto_image instance with currently visible layers.. 
- """ - - label = "Collect Auto Image Refresh" - hosts = ["photoshop"] - order = pyblish.api.CollectorOrder + 0.2 - - def process(self, context): - for instance in context: - creator_identifier = instance.data.get("creator_identifier") - if creator_identifier and creator_identifier == "auto_image": - self.log.debug("Auto image instance found, won't create new") - # refresh existing auto image instance with current visible - publishable_ids = [layer.id for layer in photoshop.stub().get_layers() # noqa - if layer.visible] - instance.data["ids"] = publishable_ids - return diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_auto_review.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_auto_review.py deleted file mode 100644 index 8b84e69309..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_auto_review.py +++ /dev/null @@ -1,96 +0,0 @@ -""" -Requires: - None - -Provides: - instance -> productType ("review") -""" -import pyblish.api - -from ayon_photoshop import api as photoshop -from ayon_core.pipeline.create import get_product_name - - -class CollectAutoReview(pyblish.api.ContextPlugin): - """Create review instance in non artist based workflow. - - Called only if PS is triggered in Webpublisher or in tests. - """ - - label = "Collect Auto Review" - hosts = ["photoshop"] - order = pyblish.api.CollectorOrder + 0.2 - targets = ["automated"] - - publish = True - - def process(self, context): - product_type = "review" - has_review = False - for instance in context: - if instance.data["productType"] == product_type: - self.log.debug("Review instance found, won't create new") - has_review = True - - creator_attributes = instance.data.get("creator_attributes", {}) - if (creator_attributes.get("mark_for_review") and - "review" not in instance.data["families"]): - instance.data["families"].append("review") - - if has_review: - return - - stub = photoshop.stub() - stored_items = stub.get_layers_metadata() - for item in stored_items: - if item.get("creator_identifier") == product_type: - if not item.get("active"): - self.log.debug("Review instance disabled") - return - - auto_creator = context.data["project_settings"].get( - "photoshop", {}).get( - "create", {}).get( - "ReviewCreator", {}) - - if not auto_creator or not auto_creator["enabled"]: - self.log.debug("Review creator disabled, won't create new") - return - - variant = (context.data.get("variant") or - auto_creator["default_variant"]) - - project_name = context.data["projectName"] - proj_settings = context.data["project_settings"] - host_name = context.data["hostName"] - folder_entity = context.data["folderEntity"] - task_entity = context.data["taskEntity"] - task_name = task_type = None - if task_entity: - task_name = task_entity["name"] - task_type = task_entity["taskType"] - - product_name = get_product_name( - project_name, - task_name, - task_type, - host_name, - product_type, - variant, - project_settings=proj_settings - ) - - instance = context.create_instance(product_name) - instance.data.update({ - "label": product_name, - "name": product_name, - "productName": product_name, - "productType": product_type, - "family": product_type, - "families": [product_type], - "representations": [], - "folderPath": folder_entity["path"], - "publish": self.publish - }) - - self.log.debug("auto review created::{}".format(instance.data)) diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_auto_workfile.py 
b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_auto_workfile.py deleted file mode 100644 index 1bf7c1a600..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_auto_workfile.py +++ /dev/null @@ -1,104 +0,0 @@ -import os -import pyblish.api - -from ayon_photoshop import api as photoshop -from ayon_core.pipeline.create import get_product_name - - -class CollectAutoWorkfile(pyblish.api.ContextPlugin): - """Collect current script for publish.""" - - order = pyblish.api.CollectorOrder + 0.2 - label = "Collect Workfile" - hosts = ["photoshop"] - - targets = ["automated"] - - def process(self, context): - product_type = "workfile" - file_path = context.data["currentFile"] - _, ext = os.path.splitext(file_path) - staging_dir = os.path.dirname(file_path) - base_name = os.path.basename(file_path) - workfile_representation = { - "name": ext[1:], - "ext": ext[1:], - "files": base_name, - "stagingDir": staging_dir, - } - - for instance in context: - if instance.data["productType"] == product_type: - self.log.debug("Workfile instance found, won't create new") - instance.data.update({ - "label": base_name, - "name": base_name, - "representations": [], - }) - - # creating representation - _, ext = os.path.splitext(file_path) - instance.data["representations"].append( - workfile_representation) - - return - - stub = photoshop.stub() - stored_items = stub.get_layers_metadata() - for item in stored_items: - if item.get("creator_identifier") == product_type: - if not item.get("active"): - self.log.debug("Workfile instance disabled") - return - - project_name = context.data["projectName"] - proj_settings = context.data["project_settings"] - auto_creator = proj_settings.get( - "photoshop", {}).get( - "create", {}).get( - "WorkfileCreator", {}) - - if not auto_creator or not auto_creator["enabled"]: - self.log.debug("Workfile creator disabled, won't create new") - return - - # context.data["variant"] might come only from collect_batch_data - variant = (context.data.get("variant") or - auto_creator["default_variant"]) - - task_name = context.data["task"] - host_name = context.data["hostName"] - folder_entity = context.data["folderEntity"] - task_entity = context.data["taskEntity"] - task_name = task_type = None - if task_entity: - task_name = task_entity["name"] - task_type = task_entity["taskType"] - - product_name = get_product_name( - project_name, - task_name, - task_type, - host_name, - product_type, - variant, - project_settings=proj_settings - ) - - # Create instance - instance = context.create_instance(product_name) - instance.data.update({ - "label": base_name, - "name": base_name, - "productName": product_name, - "productType": product_type, - "family": product_type, - "families": [product_type], - "representations": [], - "folderPath": folder_entity["path"] - }) - - # creating representation - instance.data["representations"].append(workfile_representation) - - self.log.debug("auto workfile review created:{}".format(instance.data)) diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_batch_data.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_batch_data.py deleted file mode 100644 index 527a7d516a..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_batch_data.py +++ /dev/null @@ -1,78 +0,0 @@ -"""Parses batch context from json and continues in publish process. - -Provides: - context -> Loaded batch file. 
- - folderPath - - task (task name) - - taskType - - project_name - - variant - -Code is practically copy of `openype/hosts/webpublish/collect_batch_data` as -webpublisher should be eventually ejected as an addon, eg. mentioned plugin -shouldn't be pushed into general publish plugins. -""" - -import os - -import pyblish.api - -from ayon_webpublisher.lib import ( - get_batch_context_info, - parse_json -) -from ayon_core.lib import is_in_tests - - -class CollectBatchData(pyblish.api.ContextPlugin): - """Collect batch data from json stored in 'AYON_PUBLISH_DATA' env dir. - - The directory must contain 'manifest.json' file where batch data should be - stored. - """ - # must be really early, context values are only in json file - order = pyblish.api.CollectorOrder - 0.495 - label = "Collect batch data" - hosts = ["photoshop"] - targets = ["webpublish"] - - def process(self, context): - self.log.info("CollectBatchData") - batch_dir = ( - os.environ.get("AYON_PUBLISH_DATA") - or os.environ.get("OPENPYPE_PUBLISH_DATA") - ) - if is_in_tests(): - self.log.debug("Automatic testing, no batch data, skipping") - return - - assert batch_dir, ( - "Missing `AYON_PUBLISH_DATA`") - - assert os.path.exists(batch_dir), \ - "Folder {} doesn't exist".format(batch_dir) - - project_name = os.environ.get("AYON_PROJECT_NAME") - if project_name is None: - raise AssertionError( - "Environment `AYON_PROJECT_NAME` was not found." - "Could not set project `root` which may cause issues." - ) - - batch_data = parse_json(os.path.join(batch_dir, "manifest.json")) - - context.data["batchDir"] = batch_dir - context.data["batchData"] = batch_data - - folder_path, task_name, task_type = get_batch_context_info( - batch_data["context"] - ) - - os.environ["AYON_FOLDER_PATH"] = folder_path - os.environ["AYON_TASK_NAME"] = task_name - - context.data["folderPath"] = folder_path - context.data["task"] = task_name - context.data["taskType"] = task_type - context.data["project_name"] = project_name - context.data["variant"] = batch_data["variant"] diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_color_coded_instances.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_color_coded_instances.py deleted file mode 100644 index 072eb82179..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_color_coded_instances.py +++ /dev/null @@ -1,269 +0,0 @@ -import os -import re - -import pyblish.api - -from ayon_core.lib import prepare_template_data, is_in_tests -from ayon_core.settings import get_project_settings -from ayon_photoshop import api as photoshop - - -class CollectColorCodedInstances(pyblish.api.ContextPlugin): - """Creates instances for layers marked by configurable color. - - Used in remote publishing when artists marks publishable layers by color- - coding. Top level layers (group) must be marked by specific color to be - published as an instance of 'image' product type. - - Can add group for all publishable layers to allow creation of flattened - image. (Cannot contain special background layer as it cannot be grouped!) - - Based on value `create_flatten_image` from Settings: - - "yes": create flattened 'image' product of all publishable layers + create - 'image' product per publishable layer - - "only": create ONLY flattened 'image' product of all publishable layers - - "no": do not create flattened 'image' product at all, - only separate products per marked layer. 
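# --- Editor's illustrative sketch (not part of the original diff): reading
# the batch manifest the way CollectBatchData above does, with stdlib json
# in place of the addon's parse_json helper. The env var, file name and
# "variant" key all follow the source.
import json
import os

batch_dir = os.environ.get("AYON_PUBLISH_DATA")
if batch_dir and os.path.exists(batch_dir):
    with open(os.path.join(batch_dir, "manifest.json")) as stream:
        batch_data = json.load(stream)
    print(batch_data.get("variant"))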
- - Identifier: - id (str): "ayon.create.instance" - """ - - label = "Collect Color-coded Instances" - order = pyblish.api.CollectorOrder - hosts = ["photoshop"] - targets = ["automated"] - settings_category = "photoshop" - - # configurable by Settings - color_code_mapping = [] - create_flatten_image = "no" - flatten_product_name_template = "" - - def process(self, context): - self.log.info("CollectColorCodedInstances") - batch_dir = ( - os.environ.get("AYON_PUBLISH_DATA") - or os.environ.get("OPENPYPE_PUBLISH_DATA") - ) - if ( - is_in_tests() - and ( - not batch_dir or not os.path.exists(batch_dir) - ) - ): - self.log.debug("Automatic testing, no batch data, skipping") - return - - existing_product_names = self._get_existing_product_names(context) - - # from CollectBatchData - folder_path = context.data["folderPath"] - task_name = context.data["task"] - variant = context.data["variant"] - project_name = context.data["projectEntity"]["name"] - - naming_conventions = get_project_settings(project_name).get( - "photoshop", {}).get( - "publish", {}).get( - "ValidateNaming", {}) - - stub = photoshop.stub() - layers = stub.get_layers() - - publishable_layers = [] - created_instances = [] - product_type_from_settings = None - for layer in layers: - self.log.debug("Layer:: {}".format(layer)) - if layer.parents: - self.log.debug("!!! Not a top layer, skip") - continue - - if not layer.visible: - self.log.debug("Not visible, skip") - continue - - resolved_product_type, resolved_product_template = ( - self._resolve_mapping(layer) - ) - - if not resolved_product_template or not resolved_product_type: - self.log.debug("!!! Not found product type or template, skip") - continue - - if not product_type_from_settings: - product_type_from_settings = resolved_product_type - - fill_pairs = { - "variant": variant, - "family": resolved_product_type, - "product": {"type": resolved_product_type}, - "task": task_name, - "layer": layer.clean_name - } - - product_name = resolved_product_template.format( - **prepare_template_data(fill_pairs)) - - product_name = self._clean_product_name( - stub, naming_conventions, product_name, layer - ) - - if product_name in existing_product_names: - self.log.info(( - "Product {} already created, skipping." - ).format(product_name)) - continue - - if self.create_flatten_image != "flatten_only": - instance = self._create_instance( - context, - layer, - resolved_product_type, - folder_path, - product_name, - task_name - ) - created_instances.append(instance) - - existing_product_names.append(product_name) - publishable_layers.append(layer) - - if self.create_flatten_image != "no" and publishable_layers: - self.log.debug("create_flatten_image") - if not self.flatten_product_name_template: - self.log.warning("No template for flatten image") - return - - fill_pairs.pop("layer") - product_name = self.flatten_product_name_template.format( - **prepare_template_data(fill_pairs)) - - first_layer = publishable_layers[0] # dummy layer - first_layer.name = product_name - product_type = product_type_from_settings # inherit product type - instance = self._create_instance( - context, - first_layer, - product_type, - folder_path, - product_name, - task_name - ) - instance.data["ids"] = [layer.id for layer in publishable_layers] - created_instances.append(instance) - - for instance in created_instances: - # Produce diagnostic message for any graphical - # user interface interested in visualising it. 
- self.log.info("Found: \"%s\" " % instance.data["name"]) - self.log.info("instance: {} ".format(instance.data)) - - def _get_existing_product_names(self, context): - """Collect manually created instances from workfile. - - Shouldn't be any as Webpublisher bypass publishing via Openpype, but - might be some if workfile published through OP is reused. - """ - existing_product_names = [] - for instance in context: - if instance.data.get("publish") is not False: - existing_product_names.append(instance.data.get("productName")) - - return existing_product_names - - def _create_instance( - self, - context, - layer, - product_type, - folder_path, - product_name, - task_name - ): - instance = context.create_instance(layer.name) - instance.data["publish"] = True - instance.data["productType"] = product_type - instance.data["productName"] = product_name - instance.data["folderPath"] = folder_path - instance.data["task"] = task_name - instance.data["layer"] = layer - instance.data["family"] = product_type - instance.data["families"] = [product_type] - - return instance - - def _resolve_mapping(self, layer): - """Matches 'layer' color code and name to mapping. - - If both color code AND name regex is configured, BOTH must be valid - If layer matches to multiple mappings, only first is used! - """ - product_type_list = [] - product_name_list = [] - for mapping in self.color_code_mapping: - if ( - mapping["color_code"] - and layer.color_code not in mapping["color_code"] - ): - continue - - if ( - mapping["layer_name_regex"] - and not any( - re.search(pattern, layer.name) - for pattern in mapping["layer_name_regex"] - ) - ): - continue - - product_type_list.append(mapping["product_type"]) - product_name_list.append(mapping["product_name_template"]) - - if len(product_name_list) > 1: - self.log.warning( - "Multiple mappings found for '{}'".format(layer.name) - ) - self.log.warning("Only first product name template used!") - product_name_list[:] = product_name_list[0] - - if len(product_type_list) > 1: - self.log.warning( - "Multiple mappings found for '{}'".format(layer.name) - ) - self.log.warning("Only first product type used!") - product_type_list[:] = product_type_list[0] - - resolved_product_template = None - if product_name_list: - resolved_product_template = product_name_list.pop() - - product_type = None - if product_type_list: - product_type = product_type_list.pop() - - self.log.debug("resolved_product_type {}".format(product_type)) - self.log.debug("resolved_product_template {}".format( - resolved_product_template)) - return product_type, resolved_product_template - - def _clean_product_name( - self, stub, naming_conventions, product_name, layer - ): - """Cleans invalid characters from product name and layer name.""" - if re.search(naming_conventions["invalid_chars"], product_name): - product_name = re.sub( - naming_conventions["invalid_chars"], - naming_conventions["replace_char"], - product_name - ) - layer_name = re.sub( - naming_conventions["invalid_chars"], - naming_conventions["replace_char"], - layer.clean_name - ) - layer.name = layer_name - stub.rename_layer(layer.id, layer_name) - - return product_name diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_current_file.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_current_file.py deleted file mode 100644 index 02f2217f75..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_current_file.py +++ /dev/null @@ -1,18 +0,0 @@ -import os - -import 
pyblish.api - -from ayon_photoshop import api as photoshop - - -class CollectCurrentFile(pyblish.api.ContextPlugin): - """Inject the current working file into context""" - - order = pyblish.api.CollectorOrder - 0.49 - label = "Current File" - hosts = ["photoshop"] - - def process(self, context): - context.data["currentFile"] = os.path.normpath( - photoshop.stub().get_active_document_full_name() - ).replace("\\", "/") diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_extension_version.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_extension_version.py deleted file mode 100644 index 90415e9245..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_extension_version.py +++ /dev/null @@ -1,57 +0,0 @@ -import os -import re -import pyblish.api - -from ayon_photoshop import api as photoshop - - -class CollectExtensionVersion(pyblish.api.ContextPlugin): - """ Pulls and compares the version of the installed extension. - - It is recommended to use the same extension as provided with the OpenPype code. - - Please use Anastasiy’s Extension Manager or ZXPInstaller to update the - extension in case of an error. - - You can locate extension.zxp in your installed OpenPype code in - `repos/avalon-core/avalon/photoshop` - """ - # This technically should be a validator, but other collectors might be - # impacted by usage of an obsolete extension, so a collector that runs first - # was chosen - order = pyblish.api.CollectorOrder - 0.5 - label = "Collect extension version" - hosts = ["photoshop"] - - optional = True - active = True - - def process(self, context): - installed_version = photoshop.stub().get_extension_version() - - if not installed_version: - raise ValueError("Unknown version, probably an old extension") - - manifest_url = os.path.join(os.path.dirname(photoshop.__file__), - "extension", "CSXS", "manifest.xml") - - if not os.path.exists(manifest_url): - self.log.debug("Unable to locate extension manifest, not checking") - return - - expected_version = None - with open(manifest_url) as fp: - content = fp.read() - - found = re.findall(r'(ExtensionBundleVersion=")([0-9\.]+)(")', - content) - if found: - expected_version = found[0][1] - - if expected_version != installed_version: - msg = "Expected version '{}' found '{}'\n".format( - expected_version, installed_version) - msg += "Please update your installed extension, it might not work " - msg += "properly." - - raise ValueError(msg) diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_image.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_image.py deleted file mode 100644 index ed6af6f7d3..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_image.py +++ /dev/null @@ -1,20 +0,0 @@ -import pyblish.api - -from ayon_photoshop import api - - -class CollectImage(pyblish.api.InstancePlugin): - """Collect layer metadata into an instance.
- - Used later in validation - """ - order = pyblish.api.CollectorOrder + 0.200 - label = 'Collect Image' - - hosts = ["photoshop"] - families = ["image"] - - def process(self, instance): - if instance.data.get("members"): - layer = api.stub().get_layer(instance.data["members"][0]) - instance.data["layer"] = layer diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_published_version.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_published_version.py deleted file mode 100644 index 84c9fa3e62..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_published_version.py +++ /dev/null @@ -1,63 +0,0 @@ -"""Collects published version of workfile and increments it. - -For synchronization of published image and workfile version it is required -to store workfile version from workfile file name in context.data["version"]. -In remote publishing this name is unreliable (artist might not follow naming -convention etc.), last published workfile version for particular workfile -product is used instead. - -This plugin runs only in remote publishing (eg. Webpublisher). - -Requires: - context.data["folderEntity"] - -Provides: - context["version"] - incremented latest published workfile version -""" - -import pyblish.api -import ayon_api - -from ayon_core.pipeline.version_start import get_versioning_start - - -class CollectPublishedVersion(pyblish.api.ContextPlugin): - """Collects published version of workfile and increments it.""" - - order = pyblish.api.CollectorOrder + 0.190 - label = "Collect published version" - hosts = ["photoshop"] - targets = ["automated"] - - def process(self, context): - workfile_product_name = None - for instance in context: - if instance.data["productType"] == "workfile": - workfile_product_name = instance.data["productName"] - break - - if not workfile_product_name: - self.log.warning("No workfile instance found, " - "synchronization of version will not work.") - return - - project_name = context.data["projectName"] - folder_id = context.data["folderEntity"]["id"] - - version_entity = ayon_api.get_last_version_by_product_name( - project_name, workfile_product_name, folder_id - ) - - if version_entity: - version_int = int(version_entity["version"]) + 1 - else: - version_int = get_versioning_start( - project_name, - "photoshop", - task_name=context.data["task"], - task_type=context.data["taskType"], - project_settings=context.data["project_settings"] - ) - - self.log.debug(f"Setting {version_int} to context.") - context.data["version"] = version_int diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_review.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_review.py deleted file mode 100644 index d9a29f9b74..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_review.py +++ /dev/null @@ -1,26 +0,0 @@ -""" -Requires: - None - -Provides: - instance -> family ("review") -""" - -import pyblish.api - - -class CollectReview(pyblish.api.ContextPlugin): - """Adds review to families for instances marked to be reviewable. 
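The version-synchronization rule implemented by CollectPublishedVersion above can be condensed to the following sketch. `ayon_api.get_last_version_by_product_name` is the real call used by the plugin; the fallback to 1 stands in for the plugin's `get_versioning_start()` call, which takes task context:

import ayon_api

def next_workfile_version(project_name, product_name, folder_id):
    """Last published workfile version + 1, or a start version."""
    version_entity = ayon_api.get_last_version_by_product_name(
        project_name, product_name, folder_id
    )
    if version_entity:
        return int(version_entity["version"]) + 1
    # The plugin asks get_versioning_start() here; assume 1 for brevity.
    return 1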
- """ - - label = "Collect Review" - hosts = ["photoshop"] - order = pyblish.api.CollectorOrder + 0.1 - settings_category = "photoshop" - - def process(self, context): - for instance in context: - creator_attributes = instance.data["creator_attributes"] - if (creator_attributes.get("mark_for_review") and - "review" not in instance.data["families"]): - instance.data["families"].append("review") diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_version.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_version.py deleted file mode 100644 index bc9f05ab50..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_version.py +++ /dev/null @@ -1,30 +0,0 @@ -import pyblish.api - - -class CollectVersion(pyblish.api.InstancePlugin): - """Collect version for publishable instances. - - Used to synchronize version from workfile to all publishable instances: - - image (manually created or color coded) - - review - - workfile - - Dev comment: - Explicit collector created to control this from single place and not from - 3 different. - - Workfile set here explicitly as version might to be forced from latest + 1 - because of Webpublisher. - (This plugin must run after CollectPublishedVersion!) - """ - order = pyblish.api.CollectorOrder + 0.200 - label = 'Collect Version' - - hosts = ["photoshop"] - families = ["image", "review", "workfile"] - settings_category = "photoshop" - - def process(self, instance): - workfile_version = instance.context.data["version"] - self.log.debug(f"Applying version {workfile_version}") - instance.data["version"] = workfile_version diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_workfile.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_workfile.py deleted file mode 100644 index b9080a12ff..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/collect_workfile.py +++ /dev/null @@ -1,30 +0,0 @@ -import os -import pyblish.api - - -class CollectWorkfile(pyblish.api.ContextPlugin): - """Collect current script for publish.""" - - order = pyblish.api.CollectorOrder + 0.1 - label = "Collect Workfile" - hosts = ["photoshop"] - - default_variant = "Main" - - def process(self, context): - for instance in context: - if instance.data["productType"] == "workfile": - file_path = context.data["currentFile"] - _, ext = os.path.splitext(file_path) - staging_dir = os.path.dirname(file_path) - base_name = os.path.basename(file_path) - - # creating representation - _, ext = os.path.splitext(file_path) - instance.data["representations"].append({ - "name": ext[1:], - "ext": ext[1:], - "files": base_name, - "stagingDir": staging_dir, - }) - return diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/extract_image.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/extract_image.py deleted file mode 100644 index 33599d37bb..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/extract_image.py +++ /dev/null @@ -1,102 +0,0 @@ -import os - -import pyblish.api -from ayon_core.pipeline import publish -from ayon_photoshop import api as photoshop - - -class ExtractImage(pyblish.api.ContextPlugin): - """Extract all layers (groups) marked for publish. - - Usually publishable instance is created as a wrapper of layer(s). For each - publishable instance so many images as there is 'formats' is created. - - Logic tries to hide/unhide layers minimum times. 
- - Called once for all publishable instances. - """ - - order = publish.Extractor.order - 0.48 - label = "Extract Image" - hosts = ["photoshop"] - - families = ["image", "background"] - formats = ["png", "jpg"] - settings_category = "photoshop" - - def process(self, context): - stub = photoshop.stub() - hidden_layer_ids = set() - - all_layers = stub.get_layers() - for layer in all_layers: - if not layer.visible: - hidden_layer_ids.add(layer.id) - stub.hide_all_others_layers_ids([], layers=all_layers) - - with photoshop.maintained_selection(): - with photoshop.maintained_visibility(layers=all_layers): - for instance in context: - if instance.data["productType"] not in self.families: - continue - - staging_dir = self.staging_dir(instance) - self.log.info("Outputting image to {}".format(staging_dir)) - - # Perform extraction - files = {} - ids = set() - # real layers and groups - members = instance.data("members") - if members: - ids.update(set([int(member) for member in members])) - # virtual groups collected by color coding or auto_image - add_ids = instance.data.pop("ids", None) - if add_ids: - ids.update(set(add_ids)) - extract_ids = set([ll.id for ll in stub. - get_layers_in_layers_ids(ids, all_layers) - if ll.id not in hidden_layer_ids]) - - for extracted_id in extract_ids: - stub.set_visible(extracted_id, True) - - file_basename = os.path.splitext( - stub.get_active_document_name() - )[0] - for extension in self.formats: - _filename = "{}.{}".format(file_basename, - extension) - files[extension] = _filename - - full_filename = os.path.join(staging_dir, - _filename) - stub.saveAs(full_filename, extension, True) - self.log.info(f"Extracted: {extension}") - - representations = [] - for extension, filename in files.items(): - representations.append({ - "name": extension, - "ext": extension, - "files": filename, - "stagingDir": staging_dir - }) - instance.data["representations"] = representations - instance.data["stagingDir"] = staging_dir - - self.log.info(f"Extracted {instance} to {staging_dir}") - - for extracted_id in extract_ids: - stub.set_visible(extracted_id, False) - - def staging_dir(self, instance): - """Provide a temporary directory in which to store extracted files - - Upon calling this method the staging directory is stored inside - the instance.data['stagingDir'] - """ - - from ayon_core.pipeline.publish import get_instance_staging_dir - - return get_instance_staging_dir(instance) diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/extract_review.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/extract_review.py deleted file mode 100644 index 0f36d31648..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/extract_review.py +++ /dev/null @@ -1,329 +0,0 @@ -import os -import shutil -from PIL import Image - -from ayon_core.lib import ( - run_subprocess, - get_ffmpeg_tool_args, -) -from ayon_core.pipeline import publish -from ayon_photoshop import api as photoshop - - -class ExtractReview(publish.Extractor): - """ - Produce a flattened or sequence image files from all 'image' instances. - - If no 'image' instance is created, it produces flattened image from - all visible layers. - - It creates review, thumbnail and mov representations. - - 'review' family could be used in other steps as a reference, as it - contains flattened image by default. (Eg. artist could load this - review as a single item and see full image. In most cases 'image' - product type is separated by layers to better usage in animation - or comp.) 
- """ - - label = "Extract Review" - hosts = ["photoshop"] - families = ["review"] - settings_category = "photoshop" - - # Extract Options - jpg_options = None - mov_options = None - make_image_sequence = None - max_downscale_size = 8192 - - def process(self, instance): - staging_dir = self.staging_dir(instance) - self.log.info("Outputting image to {}".format(staging_dir)) - - fps = instance.data.get("fps", 25) - stub = photoshop.stub() - self.output_seq_filename = os.path.splitext( - stub.get_active_document_name())[0] + ".%04d.jpg" - - layers = self._get_layers_from_image_instances(instance) - self.log.info("Layers image instance found: {}".format(layers)) - - repre_name = "jpg" - repre_skeleton = { - "name": repre_name, - "ext": "jpg", - "stagingDir": staging_dir, - "tags": self.jpg_options['tags'], - } - - if instance.data["productType"] != "review": - self.log.debug( - "Existing extracted file from image product type used." - ) - # enable creation of review, without this jpg review would clash - # with jpg of the image product type - output_name = repre_name - repre_name = "{}_{}".format(repre_name, output_name) - repre_skeleton.update({"name": repre_name, - "outputName": output_name}) - - img_file = self.output_seq_filename % 0 - self._prepare_file_for_image_product_type( - img_file, instance, staging_dir - ) - repre_skeleton.update({ - "files": img_file, - }) - processed_img_names = [img_file] - elif self.make_image_sequence and len(layers) > 1: - self.log.debug("Extract layers to image sequence.") - img_list = self._save_sequence_images(staging_dir, layers) - - repre_skeleton.update({ - "frameStart": 0, - "frameEnd": len(img_list), - "fps": fps, - "files": img_list, - }) - processed_img_names = img_list - else: - self.log.debug("Extract layers to flatten image.") - img_file = self._save_flatten_image(staging_dir, layers) - - repre_skeleton.update({ - "files": img_file, - }) - processed_img_names = [img_file] - - instance.data["representations"].append(repre_skeleton) - - ffmpeg_args = get_ffmpeg_tool_args("ffmpeg") - - instance.data["stagingDir"] = staging_dir - - source_files_pattern = os.path.join(staging_dir, - self.output_seq_filename) - source_files_pattern = self._check_and_resize(processed_img_names, - source_files_pattern, - staging_dir) - self._generate_thumbnail( - list(ffmpeg_args), - instance, - source_files_pattern, - staging_dir) - - no_of_frames = len(processed_img_names) - if no_of_frames > 1: - self._generate_mov( - list(ffmpeg_args), - instance, - fps, - no_of_frames, - source_files_pattern, - staging_dir) - - self.log.info(f"Extracted {instance} to {staging_dir}") - - def _prepare_file_for_image_product_type( - self, img_file, instance, staging_dir - ): - """Converts existing file for image product type to .jpg - - Image instance could have its own separate review (instance per layer - for example). This uses extracted file instead of extracting again. 
- Args: - img_file (str): name of output file (with 0000 value for ffmpeg - later) - instance: - staging_dir (str): temporary folder where extracted file is located - """ - repre_file = instance.data["representations"][0] - source_file_path = os.path.join(repre_file["stagingDir"], - repre_file["files"]) - if not os.path.exists(source_file_path): - raise RuntimeError(f"{source_file_path} doesn't exist for " - "review to create from") - _, ext = os.path.splitext(repre_file["files"]) - if ext != ".jpg": - im = Image.open(source_file_path) - if (im.mode in ('RGBA', 'LA') or ( - im.mode == 'P' and 'transparency' in im.info)): - # without this it produces messy low quality jpg - rgb_im = Image.new("RGBA", (im.width, im.height), "#ffffff") - rgb_im.alpha_composite(im) - rgb_im.convert("RGB").save(os.path.join(staging_dir, img_file)) - else: - im.save(os.path.join(staging_dir, img_file)) - else: - # handles already .jpg - shutil.copy(source_file_path, - os.path.join(staging_dir, img_file)) - - def _generate_mov(self, ffmpeg_path, instance, fps, no_of_frames, - source_files_pattern, staging_dir): - """Generates .mov to upload to Ftrack. - - Args: - ffmpeg_path (str): path to ffmpeg - instance (Pyblish Instance) - fps (str) - no_of_frames (int): - source_files_pattern (str): name of source file - staging_dir (str): temporary location to store thumbnail - Updates: - instance - adds representation portion - """ - # Generate mov. - mov_path = os.path.join(staging_dir, "review.mov") - self.log.info(f"Generate mov review: {mov_path}") - args = ffmpeg_path + [ - "-y", - "-i", source_files_pattern, - "-vf", "pad=ceil(iw/2)*2:ceil(ih/2)*2", - "-vframes", str(no_of_frames), - mov_path - ] - self.log.debug("mov args:: {}".format(args)) - _output = run_subprocess(args) - instance.data["representations"].append({ - "name": "mov", - "ext": "mov", - "files": os.path.basename(mov_path), - "stagingDir": staging_dir, - "frameStart": 1, - "frameEnd": no_of_frames, - "fps": fps, - "tags": self.mov_options['tags'] - }) - - def _generate_thumbnail( - self, ffmpeg_args, instance, source_files_pattern, staging_dir - ): - """Generates scaled down thumbnail and adds it as representation. - - Args: - ffmpeg_path (str): path to ffmpeg - instance (Pyblish Instance) - source_files_pattern (str): name of source file - staging_dir (str): temporary location to store thumbnail - Updates: - instance - adds representation portion - """ - # Generate thumbnail - thumbnail_path = os.path.join(staging_dir, "thumbnail.jpg") - self.log.info(f"Generate thumbnail {thumbnail_path}") - args = ffmpeg_args + [ - "-y", - "-i", source_files_pattern, - "-vf", "scale=300:-1", - "-vframes", "1", - thumbnail_path - ] - self.log.debug("thumbnail args:: {}".format(args)) - _output = run_subprocess(args) - instance.data["representations"].append({ - "name": "thumbnail", - "ext": "jpg", - "outputName": "thumb", - "files": os.path.basename(thumbnail_path), - "stagingDir": staging_dir, - "tags": ["thumbnail", "delete"] - }) - instance.data["thumbnailPath"] = thumbnail_path - - def _check_and_resize(self, processed_img_names, source_files_pattern, - staging_dir): - """Check if saved image could be used in ffmpeg. - - Ffmpeg has max size 16384x16384. Saved image(s) must be resized to be - used as a source for thumbnail or review mov. 
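A side note on the resize guard described above: `Image.ANTIALIAS` (used by `_check_and_resize` below) was deprecated in Pillow 9.1 and removed in Pillow 10, where `Image.Resampling.LANCZOS` is the equivalent. A sketch of the same guard against current Pillow (the 8192 bound mirrors this plugin's `max_downscale_size` default):

from PIL import Image

MAX_SIZE = 8192  # ffmpeg tops out at 16384x16384; stay well below

def downscale_if_needed(src_path, dst_path):
    """Write a downscaled copy if the image exceeds MAX_SIZE, else keep it."""
    with Image.open(src_path) as im:
        if max(im.size) <= MAX_SIZE:
            return src_path
        # 'thumbnail' resizes in place and keeps the aspect ratio.
        im.thumbnail((MAX_SIZE, MAX_SIZE), Image.Resampling.LANCZOS)
        im.save(dst_path)
        return dst_path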
- """ - Image.MAX_IMAGE_PIXELS = None - first_url = os.path.join(staging_dir, processed_img_names[0]) - with Image.open(first_url) as im: - width, height = im.size - - if width > self.max_downscale_size or height > self.max_downscale_size: - resized_dir = os.path.join(staging_dir, "resized") - os.mkdir(resized_dir) - source_files_pattern = os.path.join(resized_dir, - self.output_seq_filename) - for file_name in processed_img_names: - source_url = os.path.join(staging_dir, file_name) - with Image.open(source_url) as res_img: - # 'thumbnail' automatically keeps aspect ratio - res_img.thumbnail((self.max_downscale_size, - self.max_downscale_size), - Image.ANTIALIAS) - res_img.save(os.path.join(resized_dir, file_name)) - - return source_files_pattern - - def _get_layers_from_image_instances(self, instance): - """Collect all layers from 'instance'. - - Returns: - (list) of PSItem - """ - layers = [] - # creating review for existing 'image' instance - if ( - instance.data["productType"] == "image" - and instance.data.get("layer") - ): - layers.append(instance.data["layer"]) - return layers - - for image_instance in instance.context: - if image_instance.data["productType"] != "image": - continue - if not image_instance.data.get("layer"): - # dummy instance for flatten image - continue - layers.append(image_instance.data.get("layer")) - - return sorted(layers) - - def _save_flatten_image(self, staging_dir, layers): - """Creates flat image from 'layers' into 'staging_dir'. - - Returns: - (str): path to new image - """ - img_filename = self.output_seq_filename % 0 - output_image_path = os.path.join(staging_dir, img_filename) - stub = photoshop.stub() - - with photoshop.maintained_visibility(): - self.log.info("Extracting {}".format(layers)) - if layers: - stub.hide_all_others_layers(layers) - - stub.saveAs(output_image_path, 'jpg', True) - - return img_filename - - def _save_sequence_images(self, staging_dir, layers): - """Creates separate flat images from 'layers' into 'staging_dir'. - - Used as source for multi frames .mov to review at once. 
- Returns: - (list): paths to new images - """ - stub = photoshop.stub() - - list_img_filename = [] - with photoshop.maintained_visibility(): - for i, layer in enumerate(layers): - self.log.info("Extracting {}".format(layer)) - - img_filename = self.output_seq_filename % i - output_image_path = os.path.join(staging_dir, img_filename) - list_img_filename.append(img_filename) - - with photoshop.maintained_visibility(): - stub.hide_all_others_layers([layer]) - stub.saveAs(output_image_path, 'jpg', True) - - return list_img_filename diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/extract_save_scene.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/extract_save_scene.py deleted file mode 100644 index 22ebbb739d..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/extract_save_scene.py +++ /dev/null @@ -1,14 +0,0 @@ -from ayon_core.pipeline import publish -from ayon_photoshop import api as photoshop - - -class ExtractSaveScene(publish.Extractor): - """Save scene before extraction.""" - - order = publish.Extractor.order - 0.49 - label = "Extract Save Scene" - hosts = ["photoshop"] - families = ["workfile"] - - def process(self, instance): - photoshop.stub().save() diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/help/validate_instance_asset.xml b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/help/validate_instance_asset.xml deleted file mode 100644 index c033f922c6..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/help/validate_instance_asset.xml +++ /dev/null @@ -1,20 +0,0 @@ - - - -Folder does not match - -## Collected folder path is not the same as in the context - - {msg} -### How to repair? - {repair_msg} - Refresh Publish afterwards (circle arrow at the bottom right). - - If that's not the correct value, close the workfile and reopen it via Workfiles to get - the proper context folder path, OR disable this validator and publish again - if you are publishing to a different context deliberately. - - (Context means the combination of project, folder path and task name.) - - - \ No newline at end of file diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/help/validate_naming.xml b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/help/validate_naming.xml deleted file mode 100644 index 28c2329c8a..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/help/validate_naming.xml +++ /dev/null @@ -1,21 +0,0 @@ - - - -Product name - -## Invalid product or layer name - -Product or layer names cannot contain certain characters (spaces etc.) which could cause issues when the product name is used in a published file name. - {msg} - -### How to repair? - -You can fix this with the "Repair" button on the right, then press the Refresh publishing button at the bottom right. - - -### __Detailed Info__ (optional) - -Not all characters are allowed in file names on every OS. The set of invalid characters can be configured in Settings.
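To make the help text above concrete, here is a small sketch applying the `invalid_chars`/`replace_char` pair this validator reads from Settings (the pattern shown is the addon's default from publish_plugins.py later in this diff):

import re

INVALID_CHARS = r"[ \\/+\*\?\(\)\[\]\{\}:,;]"  # default Settings value
REPLACE_CHAR = "_"

def clean_name(name):
    return re.sub(INVALID_CHARS, REPLACE_CHAR, name)

print(clean_name("image Main (v2)"))  # -> image_Main__v2_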
- - - \ No newline at end of file diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/increment_workfile.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/increment_workfile.py deleted file mode 100644 index b10645813a..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/increment_workfile.py +++ /dev/null @@ -1,32 +0,0 @@ -import os -import pyblish.api -from ayon_core.pipeline.publish import get_errored_plugins_from_context -from ayon_core.lib import version_up - -from ayon_photoshop import api as photoshop - - -class IncrementWorkfile(pyblish.api.InstancePlugin): - """Increment the current workfile. - - Saves the current scene with an increased version number. - """ - - label = "Increment Workfile" - order = pyblish.api.IntegratorOrder + 9.0 - hosts = ["photoshop"] - families = ["workfile"] - optional = True - - def process(self, instance): - errored_plugins = get_errored_plugins_from_context(instance.context) - if errored_plugins: - raise RuntimeError( - "Skipping incrementing current file because publishing failed." - ) - - scene_path = version_up(instance.context.data["currentFile"]) - _, ext = os.path.splitext(scene_path) - photoshop.stub().saveAs(scene_path, ext[1:], True) - - self.log.info("Incremented workfile to: {}".format(scene_path)) diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/validate_instance_asset.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/validate_instance_asset.py deleted file mode 100644 index 36ba621dc2..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/validate_instance_asset.py +++ /dev/null @@ -1,76 +0,0 @@ -import pyblish.api - -from ayon_core.pipeline import get_current_folder_path -from ayon_core.pipeline.publish import ( - ValidateContentsOrder, - PublishXmlValidationError, - OptionalPyblishPluginMixin -) -from ayon_photoshop import api as photoshop - - -class ValidateInstanceFolderRepair(pyblish.api.Action): - """Repair the instance folder.""" - - label = "Repair" - icon = "wrench" - on = "failed" - - def process(self, context, plugin): - - # Get the errored instances - failed = [] - for result in context.data["results"]: - if ( - result["error"] is not None - and result["instance"] is not None - and result["instance"] not in failed - ): - failed.append(result["instance"]) - - # Apply pyblish.logic to get the instances for the plug-in - instances = pyblish.api.instances_by_plugin(failed, plugin) - stub = photoshop.stub() - current_folder_path = get_current_folder_path() - for instance in instances: - data = stub.read(instance[0]) - data["folderPath"] = current_folder_path - stub.imprint(instance[0], data) - - -class ValidateInstanceAsset(OptionalPyblishPluginMixin, - pyblish.api.InstancePlugin): - """Validate the instance folder is the currently selected context folder. - - As it might happen that multiple workfiles are opened, switching - between them would mess with the selected context. - In that case outputs might be published under the wrong folder! - - The Repair action will use the Context folder value (from Workfiles or - Launcher). Closing and reopening via Workfiles will refresh the Context value.
- """ - - label = "Validate Instance Folder" - hosts = ["photoshop"] - optional = True - actions = [ValidateInstanceFolderRepair] - order = ValidateContentsOrder - - def process(self, instance): - instance_folder_path = instance.data["folderPath"] - current_folder_path = get_current_folder_path() - - if instance_folder_path != current_folder_path: - msg = ( - f"Instance folder {instance_folder_path} is not the same" - f" as current context {current_folder_path}." - - ) - repair_msg = ( - "Repair with 'Repair' button" - f" to use '{current_folder_path}'.\n" - ) - formatting_data = {"msg": msg, - "repair_msg": repair_msg} - raise PublishXmlValidationError(self, msg, - formatting_data=formatting_data) diff --git a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/validate_naming.py b/server_addon/photoshop/client/ayon_photoshop/plugins/publish/validate_naming.py deleted file mode 100644 index e5f826b07e..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/plugins/publish/validate_naming.py +++ /dev/null @@ -1,117 +0,0 @@ -import re - -import pyblish.api - -from ayon_photoshop import api as photoshop -from ayon_core.pipeline.create import PRODUCT_NAME_ALLOWED_SYMBOLS -from ayon_core.pipeline.publish import ( - ValidateContentsOrder, - PublishXmlValidationError, -) - - -class ValidateNamingRepair(pyblish.api.Action): - """Repair the instance folder.""" - - label = "Repair" - icon = "wrench" - on = "failed" - settings_category = "photoshop" - - def process(self, context, plugin): - - # Get the errored instances - failed = [] - for result in context.data["results"]: - if ( - result["error"] is not None - and result["instance"] is not None - and result["instance"] not in failed - ): - failed.append(result["instance"]) - - invalid_chars, replace_char = plugin.get_replace_chars() - self.log.debug("{} --- {}".format(invalid_chars, replace_char)) - - # Apply pyblish.logic to get the instances for the plug-in - instances = pyblish.api.instances_by_plugin(failed, plugin) - stub = photoshop.stub() - for instance in instances: - self.log.debug("validate_naming instance {}".format(instance)) - current_layer_state = stub.get_layer(instance.data["layer"].id) - self.log.debug("current_layer{}".format(current_layer_state)) - - layer_meta = stub.read(current_layer_state) - instance_id = (layer_meta.get("instance_id") or - layer_meta.get("uuid")) - if not instance_id: - self.log.warning("Unable to repair, cannot find layer") - continue - - layer_name = re.sub(invalid_chars, - replace_char, - current_layer_state.clean_name) - layer_name = stub.PUBLISH_ICON + layer_name - - stub.rename_layer(current_layer_state.id, layer_name) - - product_name = re.sub(invalid_chars, replace_char, - instance.data["productName"]) - - # format from Tool Creator - product_name = re.sub( - "[^{}]+".format(PRODUCT_NAME_ALLOWED_SYMBOLS), - "", - product_name - ) - - layer_meta["productName"] = product_name - stub.imprint(instance_id, layer_meta) - - return True - - -class ValidateNaming(pyblish.api.InstancePlugin): - """Validate the instance name. - - Spaces in names are not allowed. Will be replace with underscores. - """ - - label = "Validate Naming" - hosts = ["photoshop"] - order = ValidateContentsOrder - families = ["image"] - actions = [ValidateNamingRepair] - - # configured by Settings - invalid_chars = '' - replace_char = '' - - def process(self, instance): - help_msg = ' Use Repair button to fix it and then refresh publish.' 
- - layer = instance.data.get("layer") - if layer: - msg = "Name \"{}\" is not allowed.{}".format( - layer.clean_name, help_msg - ) - formatting_data = {"msg": msg} - if re.search(self.invalid_chars, layer.clean_name): - raise PublishXmlValidationError( - self, msg, formatting_data=formatting_data - ) - - product_name = instance.data["productName"] - msg = "Product \"{}\" is not allowed.{}".format( - product_name, help_msg - ) - formatting_data = {"msg": msg} - if re.search(self.invalid_chars, product_name): - raise PublishXmlValidationError( - self, msg, formatting_data=formatting_data - ) - - @classmethod - def get_replace_chars(cls): - """Pass values configured in Settings for Repair.""" - return cls.invalid_chars, cls.replace_char diff --git a/server_addon/photoshop/client/ayon_photoshop/resources/template.psd b/server_addon/photoshop/client/ayon_photoshop/resources/template.psd deleted file mode 100644 index 4c731771ba..0000000000 Binary files a/server_addon/photoshop/client/ayon_photoshop/resources/template.psd and /dev/null differ diff --git a/server_addon/photoshop/client/ayon_photoshop/version.py b/server_addon/photoshop/client/ayon_photoshop/version.py deleted file mode 100644 index 7f6de51228..0000000000 --- a/server_addon/photoshop/client/ayon_photoshop/version.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -"""Package declaring AYON addon 'photoshop' version.""" -__version__ = "0.2.2" diff --git a/server_addon/photoshop/client/pyproject.toml b/server_addon/photoshop/client/pyproject.toml deleted file mode 100644 index 3beb76ba74..0000000000 --- a/server_addon/photoshop/client/pyproject.toml +++ /dev/null @@ -1,6 +0,0 @@ -[project] -name="photoshop" -description="AYON Photoshop addon." - -[ayon.runtimeDependencies] -wsrpc_aiohttp = "^3.1.1" # websocket server diff --git a/server_addon/photoshop/package.py b/server_addon/photoshop/package.py deleted file mode 100644 index f4d2a98293..0000000000 --- a/server_addon/photoshop/package.py +++ /dev/null @@ -1,10 +0,0 @@ -name = "photoshop" -title = "Photoshop" -version = "0.2.2" - -client_dir = "ayon_photoshop" - -ayon_required_addons = { - "core": ">0.3.2", -} -ayon_compatible_addons = {} diff --git a/server_addon/photoshop/server/__init__.py b/server_addon/photoshop/server/__init__.py deleted file mode 100644 index 86d1025a2d..0000000000 --- a/server_addon/photoshop/server/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -from ayon_server.addons import BaseServerAddon - -from .settings import PhotoshopSettings, DEFAULT_PHOTOSHOP_SETTING - - -class Photoshop(BaseServerAddon): - settings_model = PhotoshopSettings - - async def get_default_settings(self): - settings_model_cls = self.get_settings_model() - return settings_model_cls(**DEFAULT_PHOTOSHOP_SETTING) diff --git a/server_addon/photoshop/server/settings/__init__.py b/server_addon/photoshop/server/settings/__init__.py deleted file mode 100644 index 9ae5764362..0000000000 --- a/server_addon/photoshop/server/settings/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from .main import ( - PhotoshopSettings, - DEFAULT_PHOTOSHOP_SETTING, -) - - -__all__ = ( - "PhotoshopSettings", - "DEFAULT_PHOTOSHOP_SETTING", -) diff --git a/server_addon/photoshop/server/settings/creator_plugins.py b/server_addon/photoshop/server/settings/creator_plugins.py deleted file mode 100644 index 8acc213866..0000000000 --- a/server_addon/photoshop/server/settings/creator_plugins.py +++ /dev/null @@ -1,77 +0,0 @@ -from ayon_server.settings import BaseSettingsModel, SettingsField - - -class
CreateImagePluginModel(BaseSettingsModel): - enabled: bool = SettingsField(True, title="Enabled") - active_on_create: bool = SettingsField(True, title="Active by default") - mark_for_review: bool = SettingsField(False, title="Review by default") - default_variants: list[str] = SettingsField( - default_factory=list, - title="Default Variants" - ) - - -class AutoImageCreatorPluginModel(BaseSettingsModel): - enabled: bool = SettingsField(False, title="Enabled") - active_on_create: bool = SettingsField(True, title="Active by default") - mark_for_review: bool = SettingsField(False, title="Review by default") - default_variant: str = SettingsField("", title="Default Variants") - - -class CreateReviewPlugin(BaseSettingsModel): - enabled: bool = SettingsField(True, title="Enabled") - active_on_create: bool = SettingsField(True, title="Active by default") - default_variant: str = SettingsField("", title="Default Variants") - - -class CreateWorkfilelugin(BaseSettingsModel): - enabled: bool = SettingsField(True, title="Enabled") - active_on_create: bool = SettingsField(True, title="Active by default") - default_variant: str = SettingsField("", title="Default Variants") - - -class PhotoshopCreatorPlugins(BaseSettingsModel): - ImageCreator: CreateImagePluginModel = SettingsField( - title="Create Image", - default_factory=CreateImagePluginModel, - ) - AutoImageCreator: AutoImageCreatorPluginModel = SettingsField( - title="Create Flatten Image", - default_factory=AutoImageCreatorPluginModel, - ) - ReviewCreator: CreateReviewPlugin = SettingsField( - title="Create Review", - default_factory=CreateReviewPlugin, - ) - WorkfileCreator: CreateWorkfilelugin = SettingsField( - title="Create Workfile", - default_factory=CreateWorkfilelugin, - ) - - -DEFAULT_CREATE_SETTINGS = { - "ImageCreator": { - "enabled": True, - "active_on_create": True, - "mark_for_review": False, - "default_variants": [ - "Main" - ] - }, - "AutoImageCreator": { - "enabled": False, - "active_on_create": True, - "mark_for_review": False, - "default_variant": "" - }, - "ReviewCreator": { - "enabled": True, - "active_on_create": True, - "default_variant": "" - }, - "WorkfileCreator": { - "enabled": True, - "active_on_create": True, - "default_variant": "Main" - } -} diff --git a/server_addon/photoshop/server/settings/imageio.py b/server_addon/photoshop/server/settings/imageio.py deleted file mode 100644 index c514f58173..0000000000 --- a/server_addon/photoshop/server/settings/imageio.py +++ /dev/null @@ -1,79 +0,0 @@ -from pydantic import validator -from ayon_server.settings import BaseSettingsModel, SettingsField -from ayon_server.settings.validators import ensure_unique_names - - -class ImageIOConfigModel(BaseSettingsModel): - """[DEPRECATED] Addon OCIO config settings. Please set the OCIO config - path in the Core addon profiles here - (ayon+settings://core/imageio/ocio_config_profiles). - """ - - override_global_config: bool = SettingsField( - False, - title="Override global OCIO config", - description=( - "DEPRECATED functionality. Please set the OCIO config path in the " - "Core addon profiles here (ayon+settings://core/imageio/" - "ocio_config_profiles)." - ), - ) - filepath: list[str] = SettingsField( - default_factory=list, - title="Config path", - description=( - "DEPRECATED functionality. Please set the OCIO config path in the " - "Core addon profiles here (ayon+settings://core/imageio/" - "ocio_config_profiles)." 
- ), - ) - - -class ImageIOFileRuleModel(BaseSettingsModel): - name: str = SettingsField("", title="Rule name") - pattern: str = SettingsField("", title="Regex pattern") - colorspace: str = SettingsField("", title="Colorspace name") - ext: str = SettingsField("", title="File extension") - - -class ImageIOFileRulesModel(BaseSettingsModel): - activate_host_rules: bool = SettingsField(False) - rules: list[ImageIOFileRuleModel] = SettingsField( - default_factory=list, - title="Rules" - ) - - @validator("rules") - def validate_unique_outputs(cls, value): - ensure_unique_names(value) - return value - - -class ImageIORemappingRulesModel(BaseSettingsModel): - host_native_name: str = SettingsField( - title="Application native colorspace name" - ) - ocio_name: str = SettingsField(title="OCIO colorspace name") - - -class ImageIORemappingModel(BaseSettingsModel): - rules: list[ImageIORemappingRulesModel] = SettingsField( - default_factory=list) - - -class PhotoshopImageIOModel(BaseSettingsModel): - activate_host_color_management: bool = SettingsField( - True, title="Enable Color Management" - ) - remapping: ImageIORemappingModel = SettingsField( - title="Remapping colorspace names", - default_factory=ImageIORemappingModel - ) - ocio_config: ImageIOConfigModel = SettingsField( - default_factory=ImageIOConfigModel, - title="OCIO config" - ) - file_rules: ImageIOFileRulesModel = SettingsField( - default_factory=ImageIOFileRulesModel, - title="File Rules" - ) diff --git a/server_addon/photoshop/server/settings/main.py b/server_addon/photoshop/server/settings/main.py deleted file mode 100644 index b6474d6d29..0000000000 --- a/server_addon/photoshop/server/settings/main.py +++ /dev/null @@ -1,40 +0,0 @@ -from ayon_server.settings import BaseSettingsModel, SettingsField - -from .imageio import PhotoshopImageIOModel -from .creator_plugins import PhotoshopCreatorPlugins, DEFAULT_CREATE_SETTINGS -from .publish_plugins import PhotoshopPublishPlugins, DEFAULT_PUBLISH_SETTINGS -from .workfile_builder import WorkfileBuilderPlugin - - -class PhotoshopSettings(BaseSettingsModel): - """Photoshop Project Settings.""" - - imageio: PhotoshopImageIOModel = SettingsField( - default_factory=PhotoshopImageIOModel, - title="OCIO config" - ) - - create: PhotoshopCreatorPlugins = SettingsField( - default_factory=PhotoshopCreatorPlugins, - title="Creator plugins" - ) - - publish: PhotoshopPublishPlugins = SettingsField( - default_factory=PhotoshopPublishPlugins, - title="Publish plugins" - ) - - workfile_builder: WorkfileBuilderPlugin = SettingsField( - default_factory=WorkfileBuilderPlugin, - title="Workfile Builder" - ) - - -DEFAULT_PHOTOSHOP_SETTING = { - "create": DEFAULT_CREATE_SETTINGS, - "publish": DEFAULT_PUBLISH_SETTINGS, - "workfile_builder": { - "create_first_version": False, - "custom_templates": [] - } -} diff --git a/server_addon/photoshop/server/settings/publish_plugins.py b/server_addon/photoshop/server/settings/publish_plugins.py deleted file mode 100644 index 149b08beb4..0000000000 --- a/server_addon/photoshop/server/settings/publish_plugins.py +++ /dev/null @@ -1,203 +0,0 @@ -from ayon_server.settings import BaseSettingsModel, SettingsField - - -create_flatten_image_enum = [ - {"value": "flatten_with_images", "label": "Flatten with images"}, - {"value": "flatten_only", "label": "Flatten only"}, - {"value": "no", "label": "No"}, -] - - -color_code_enum = [ - {"value": "red", "label": "Red"}, - {"value": "orange", "label": "Orange"}, - {"value": "yellowColor", "label": "Yellow"}, - {"value": "grain", "label": 
"Green"}, - {"value": "blue", "label": "Blue"}, - {"value": "violet", "label": "Violet"}, - {"value": "gray", "label": "Gray"}, -] - - -class ColorCodeMappings(BaseSettingsModel): - color_code: list[str] = SettingsField( - title="Color codes for layers", - default_factory=list, - enum_resolver=lambda: color_code_enum, - ) - - layer_name_regex: list[str] = SettingsField( - default_factory=list, - title="Layer name regex" - ) - - product_type: str = SettingsField( - "", - title="Resulting product type" - ) - - product_name_template: str = SettingsField( - "", - title="Product name template" - ) - - -class ExtractedOptions(BaseSettingsModel): - tags: list[str] = SettingsField( - title="Tags", - default_factory=list - ) - - -class CollectColorCodedInstancesPlugin(BaseSettingsModel): - """Set color for publishable layers, set its resulting product type - and template for product name. \n Can create flatten image from published - instances. - (Applicable only for remote publishing!)""" - - enabled: bool = SettingsField(True, title="Enabled") - create_flatten_image: str = SettingsField( - "", - title="Create flatten image", - enum_resolver=lambda: create_flatten_image_enum, - ) - - flatten_product_name_template: str = SettingsField( - "", - title="Product name template for flatten image" - ) - - color_code_mapping: list[ColorCodeMappings] = SettingsField( - title="Color code mappings", - default_factory=ColorCodeMappings, - ) - - -class CollectReviewPlugin(BaseSettingsModel): - """Should review product be created""" - enabled: bool = SettingsField(True, title="Enabled") - - -class CollectVersionPlugin(BaseSettingsModel): - """Synchronize version for image and review instances by workfile version""" # noqa - enabled: bool = SettingsField(True, title="Enabled") - - -class ValidateNamingPlugin(BaseSettingsModel): - """Validate naming of products and layers""" # noqa - invalid_chars: str = SettingsField( - '', - title="Regex pattern of invalid characters" - ) - - replace_char: str = SettingsField( - '', - title="Replacement character" - ) - - -class ExtractImagePlugin(BaseSettingsModel): - """Currently only jpg and png are supported""" - formats: list[str] = SettingsField( - title="Extract Formats", - default_factory=list, - ) - - -class ExtractReviewPlugin(BaseSettingsModel): - make_image_sequence: bool = SettingsField( - False, - title="Make an image sequence instead of flatten image" - ) - - max_downscale_size: int = SettingsField( - 8192, - title="Maximum size of sources for review", - description="FFMpeg can only handle limited resolution for creation of review and/or thumbnail", # noqa - gt=300, # greater than - le=16384, # less or equal - ) - - jpg_options: ExtractedOptions = SettingsField( - title="Extracted jpg Options", - default_factory=ExtractedOptions - ) - - mov_options: ExtractedOptions = SettingsField( - title="Extracted mov Options", - default_factory=ExtractedOptions - ) - - -class PhotoshopPublishPlugins(BaseSettingsModel): - CollectColorCodedInstances: CollectColorCodedInstancesPlugin = ( - SettingsField( - title="Collect Color Coded Instances", - default_factory=CollectColorCodedInstancesPlugin, - ) - ) - CollectReview: CollectReviewPlugin = SettingsField( - title="Collect Review", - default_factory=CollectReviewPlugin, - ) - - CollectVersion: CollectVersionPlugin = SettingsField( - title="Collect Version", - default_factory=CollectVersionPlugin, - ) - - ValidateNaming: ValidateNamingPlugin = SettingsField( - title="Validate naming of products and layers", - 
default_factory=ValidateNamingPlugin, - ) - - ExtractImage: ExtractImagePlugin = SettingsField( - title="Extract Image", - default_factory=ExtractImagePlugin, - ) - - ExtractReview: ExtractReviewPlugin = SettingsField( - title="Extract Review", - default_factory=ExtractReviewPlugin, - ) - - -DEFAULT_PUBLISH_SETTINGS = { - "CollectColorCodedInstances": { - "create_flatten_image": "no", - "flatten_product_name_template": "", - "color_code_mapping": [] - }, - "CollectReview": { - "enabled": True - }, - "CollectVersion": { - "enabled": False - }, - "ValidateNaming": { - "invalid_chars": "[ \\\\/+\\*\\?\\(\\)\\[\\]\\{\\}:,;]", - "replace_char": "_" - }, - "ExtractImage": { - "formats": [ - "png", - "jpg" - ] - }, - "ExtractReview": { - "make_image_sequence": False, - "max_downscale_size": 8192, - "jpg_options": { - "tags": [ - "review", - "ftrackreview" - ] - }, - "mov_options": { - "tags": [ - "review", - "ftrackreview" - ] - } - } -} diff --git a/server_addon/photoshop/server/settings/workfile_builder.py b/server_addon/photoshop/server/settings/workfile_builder.py deleted file mode 100644 index 4b00b99272..0000000000 --- a/server_addon/photoshop/server/settings/workfile_builder.py +++ /dev/null @@ -1,31 +0,0 @@ -from ayon_server.settings import ( - BaseSettingsModel, - SettingsField, - MultiplatformPathModel, -) - - -class CustomBuilderTemplate(BaseSettingsModel): - _layout = "expanded" - task_types: list[str] = SettingsField( - default_factory=list, - title="Task types", - ) - - path: MultiplatformPathModel = SettingsField( - default_factory=MultiplatformPathModel, - title="Template path" - ) - - -class WorkfileBuilderPlugin(BaseSettingsModel): - _title = "Workfile Builder" - create_first_version: bool = SettingsField( - False, - title="Create first workfile" - ) - - custom_templates: list[CustomBuilderTemplate] = SettingsField( - default_factory=CustomBuilderTemplate, - title="Template profiles" - ) diff --git a/server_addon/substancepainter/client/ayon_substancepainter/__init__.py b/server_addon/substancepainter/client/ayon_substancepainter/__init__.py deleted file mode 100644 index 5627134cbd..0000000000 --- a/server_addon/substancepainter/client/ayon_substancepainter/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from .version import __version__ -from .addon import ( - SubstanceAddon, - SUBSTANCE_HOST_DIR, -) - - -__all__ = ( - "__version__", - - "SubstanceAddon", - "SUBSTANCE_HOST_DIR" -) diff --git a/server_addon/substancepainter/client/ayon_substancepainter/addon.py b/server_addon/substancepainter/client/ayon_substancepainter/addon.py deleted file mode 100644 index 971b25a1bd..0000000000 --- a/server_addon/substancepainter/client/ayon_substancepainter/addon.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -from ayon_core.addon import AYONAddon, IHostAddon - -from .version import __version__ - -SUBSTANCE_HOST_DIR = os.path.dirname(os.path.abspath(__file__)) - - -class SubstanceAddon(AYONAddon, IHostAddon): - name = "substancepainter" - version = __version__ - host_name = "substancepainter" - - def add_implementation_envs(self, env, _app): - # Add requirements to SUBSTANCE_PAINTER_PLUGINS_PATH - plugin_path = os.path.join(SUBSTANCE_HOST_DIR, "deploy") - plugin_path = plugin_path.replace("\\", "/") - if env.get("SUBSTANCE_PAINTER_PLUGINS_PATH"): - plugin_path += os.pathsep + env["SUBSTANCE_PAINTER_PLUGINS_PATH"] - - env["SUBSTANCE_PAINTER_PLUGINS_PATH"] = plugin_path - - # Log in Substance Painter doesn't support custom terminal colors - env["AYON_LOG_NO_COLORS"] = "1" - - def 
get_launch_hook_paths(self, app): - if app.host_name != self.host_name: - return [] - return [ - os.path.join(SUBSTANCE_HOST_DIR, "hooks") - ] - - def get_workfile_extensions(self): - return [".spp", ".toc"] diff --git a/server_addon/substancepainter/client/ayon_substancepainter/api/__init__.py b/server_addon/substancepainter/client/ayon_substancepainter/api/__init__.py deleted file mode 100644 index 937d0c429e..0000000000 --- a/server_addon/substancepainter/client/ayon_substancepainter/api/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .pipeline import ( - SubstanceHost, - -) - -__all__ = [ - "SubstanceHost", -] diff --git a/server_addon/substancepainter/client/ayon_substancepainter/api/colorspace.py b/server_addon/substancepainter/client/ayon_substancepainter/api/colorspace.py deleted file mode 100644 index 375b61b39b..0000000000 --- a/server_addon/substancepainter/client/ayon_substancepainter/api/colorspace.py +++ /dev/null @@ -1,157 +0,0 @@ -"""Substance Painter OCIO management - -Adobe Substance 3D Painter supports OCIO color management using a per project -configuration. Output color spaces are defined at the project level - -More information see: - - https://substance3d.adobe.com/documentation/spdoc/color-management-223053233.html # noqa - - https://substance3d.adobe.com/documentation/spdoc/color-management-with-opencolorio-225969419.html # noqa - -""" -import substance_painter.export -import substance_painter.js -import json - -from .lib import ( - get_document_structure, - get_channel_format -) - - -def _iter_document_stack_channels(): - """Yield all stack paths and channels project""" - - for material in get_document_structure()["materials"]: - material_name = material["name"] - for stack in material["stacks"]: - stack_name = stack["name"] - if stack_name: - stack_path = [material_name, stack_name] - else: - stack_path = material_name - for channel in stack["channels"]: - yield stack_path, channel - - -def _get_first_color_and_data_stack_and_channel(): - """Return first found color channel and data channel.""" - color_channel = None - data_channel = None - for stack_path, channel in _iter_document_stack_channels(): - channel_format = get_channel_format(stack_path, channel) - if channel_format["color"]: - color_channel = (stack_path, channel) - else: - data_channel = (stack_path, channel) - - if color_channel and data_channel: - return color_channel, data_channel - - return color_channel, data_channel - - -def get_project_channel_data(): - """Return colorSpace settings for the current substance painter project. - - In Substance Painter only color channels have Color Management enabled - whereas data channels have no color management applied. This can't be - changed. The artist can only customize the export color space for color - channels per bit-depth for 8 bpc, 16 bpc and 32 bpc. - - As such this returns the color space for 'data' and for per bit-depth - for color channels. - - Example output: - { - "data": {'colorSpace': 'Utility - Raw'}, - "8": {"colorSpace": "ACES - AcesCG"}, - "16": {"colorSpace": "ACES - AcesCG"}, - "16f": {"colorSpace": "ACES - AcesCG"}, - "32f": {"colorSpace": "ACES - AcesCG"} - } - - """ - - keys = ["colorSpace"] - query = {key: f"${key}" for key in keys} - - config = { - "exportPath": "/", - "exportShaderParams": False, - "defaultExportPreset": "query_preset", - - "exportPresets": [{ - "name": "query_preset", - - # List of maps making up this export preset. 
- "maps": [{ - "fileName": json.dumps(query), - # List of source/destination defining which channels will - # make up the texture file. - "channels": [], - "parameters": { - "fileFormat": "exr", - "bitDepth": "32f", - "dithering": False, - "sizeLog2": 4, - "paddingAlgorithm": "passthrough", - "dilationDistance": 16 - } - }] - }], - } - - def _get_query_output(config): - # Return the basename of the single output path we defined - result = substance_painter.export.list_project_textures(config) - path = next(iter(result.values()))[0] - # strip extension and slash since we know relevant json data starts - # and ends with { and } characters - path = path.strip("/\\.exr") - return json.loads(path) - - # Query for each type of channel (color and data) - color_channel, data_channel = _get_first_color_and_data_stack_and_channel() - colorspaces = {} - for key, channel_data in { - "data": data_channel, - "color": color_channel - }.items(): - if channel_data is None: - # No channel of that datatype anywhere in the Stack. We're - # unable to identify the output color space of the project - colorspaces[key] = None - continue - - stack, channel = channel_data - - # Stack must be a string - if not isinstance(stack, str): - # Assume iterable - stack = "/".join(stack) - - # Define the temp output config - config["exportList"] = [{"rootPath": stack}] - config_map = config["exportPresets"][0]["maps"][0] - config_map["channels"] = [ - { - "destChannel": x, - "srcChannel": x, - "srcMapType": "documentMap", - "srcMapName": channel - } for x in "RGB" - ] - - if key == "color": - # Query for each bit depth - # Color space definition can have a different OCIO config set - # for 8-bit, 16-bit and 32-bit outputs so we need to check each - # bit depth - for depth in ["8", "16", "16f", "32f"]: - config_map["parameters"]["bitDepth"] = depth # noqa - colorspaces[key + depth] = _get_query_output(config) - else: - # Data channel (not color managed) - colorspaces[key] = _get_query_output(config) - - return colorspaces diff --git a/server_addon/substancepainter/client/ayon_substancepainter/api/lib.py b/server_addon/substancepainter/client/ayon_substancepainter/api/lib.py deleted file mode 100644 index 0ae3932f58..0000000000 --- a/server_addon/substancepainter/client/ayon_substancepainter/api/lib.py +++ /dev/null @@ -1,729 +0,0 @@ -import os -import re -import json -from collections import defaultdict - -import contextlib -import substance_painter -import substance_painter.project -import substance_painter.resource -import substance_painter.js -import substance_painter.export - -from qtpy import QtGui, QtWidgets, QtCore - - -def get_export_presets(): - """Return Export Preset resource URLs for all available Export Presets. 
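    Example (editorial note, not part of the original diff; the key/label
    pair shown is one of the built-ins listed in the function body below):

        >>> presets = get_export_presets()  # doctest: +SKIP
        >>> presets["export-preset-generator://viewport2d"]  # doctest: +SKIP
        '2D View'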
- - Returns: - dict: {Resource url: GUI Label} - - """ - # TODO: Find more optimal way to find all export templates - - preset_resources = {} - for shelf in substance_painter.resource.Shelves.all(): - shelf_path = os.path.normpath(shelf.path()) - - presets_path = os.path.join(shelf_path, "export-presets") - if not os.path.exists(presets_path): - continue - - for filename in os.listdir(presets_path): - if filename.endswith(".spexp"): - template_name = os.path.splitext(filename)[0] - - resource = substance_painter.resource.ResourceID( - context=shelf.name(), - name=template_name - ) - resource_url = resource.url() - - preset_resources[resource_url] = template_name - - # Sort by template name - export_templates = dict(sorted(preset_resources.items(), - key=lambda x: x[1])) - - # Add default built-ins at the start - # TODO: find the built-ins automatically; scraped with https://gist.github.com/BigRoy/97150c7c6f0a0c916418207b9a2bc8f1 # noqa - result = { - "export-preset-generator://viewport2d": "2D View", # noqa - "export-preset-generator://doc-channel-normal-no-alpha": "Document channels + Normal + AO (No Alpha)", # noqa - "export-preset-generator://doc-channel-normal-with-alpha": "Document channels + Normal + AO (With Alpha)", # noqa - "export-preset-generator://sketchfab": "Sketchfab", # noqa - "export-preset-generator://adobe-standard-material": "Substance 3D Stager", # noqa - "export-preset-generator://usd": "USD PBR Metal Roughness", # noqa - "export-preset-generator://gltf": "glTF PBR Metal Roughness", # noqa - "export-preset-generator://gltf-displacement": "glTF PBR Metal Roughness + Displacement texture (experimental)" # noqa - } - result.update(export_templates) - return result - - -def _convert_stack_path_to_cmd_str(stack_path): - """Convert stack path `str` or `[str, str]` for javascript query - - Example usage: - >>> stack_path = _convert_stack_path_to_cmd_str(stack_path) - >>> cmd = f"alg.mapexport.channelIdentifiers({stack_path})" - >>> substance_painter.js.evaluate(cmd) - - Args: - stack_path (list or str): Path to the stack, could be - "Texture set name" or ["Texture set name", "Stack name"] - - Returns: - str: Stack path usable as argument in javascript query. - - """ - return json.dumps(stack_path) - - -def get_channel_identifiers(stack_path=None): - """Return the list of channel identifiers. - - If a context is passed (texture set/stack), - return only used channels with resolved user channels. - - Channel identifiers are: - basecolor, height, specular, opacity, emissive, displacement, - glossiness, roughness, anisotropylevel, anisotropyangle, transmissive, - scattering, reflection, ior, metallic, normal, ambientOcclusion, - diffuse, specularlevel, blendingmask, [custom user names]. - - Args: - stack_path (list or str, Optional): Path to the stack, could be - "Texture set name" or ["Texture set name", "Stack name"] - - Returns: - list: List of channel identifiers. - - """ - if stack_path is None: - stack_path = "" - else: - stack_path = _convert_stack_path_to_cmd_str(stack_path) - cmd = f"alg.mapexport.channelIdentifiers({stack_path})" - return substance_painter.js.evaluate(cmd) - - -def get_channel_format(stack_path, channel): - """Retrieve the channel format of a specific stack channel. - - See `alg.mapexport.channelFormat` (javascript API) for more details. 
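    Example (editorial note, not part of the original diff): the stack_path
    argument is serialized with json.dumps, so both a plain string and a
    ["texture set", "stack"] list become valid JavaScript literals:

        >>> import json
        >>> json.dumps("Default")
        '"Default"'
        >>> json.dumps(["Default", "Stack"])
        '["Default", "Stack"]'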
- - The channel format data is: - "label" (str): The channel format label: could be one of - [sRGB8, L8, RGB8, L16, RGB16, L16F, RGB16F, L32F, RGB32F] - "color" (bool): True if the format is in color, False if grayscale - "floating" (bool): True if the format uses floating point - representation, False otherwise - "bitDepth" (int): Bits per color channel (could be 8, 16 or 32 bpc) - - Arguments: - stack_path (list or str): Path to the stack, could be - "Texture set name" or ["Texture set name", "Stack name"] - channel (str): Identifier of the channel to export - (see `get_channel_identifiers`) - - Returns: - dict: The channel format data. - - """ - stack_path = _convert_stack_path_to_cmd_str(stack_path) - cmd = f"alg.mapexport.channelFormat({stack_path}, '{channel}')" - return substance_painter.js.evaluate(cmd) - - -def get_document_structure(): - """Dump the document structure. - - See `alg.mapexport.documentStructure` (javascript API) for more details. - - Returns: - dict: Document structure or None when no project is open - - """ - return substance_painter.js.evaluate("alg.mapexport.documentStructure()") - - -def get_export_templates(config, format="png", strip_folder=True): - """Return export config outputs. - - This uses the Javascript API `alg.mapexport.getPathsExportDocumentMaps` - which returns a different output than using the Python equivalent - `substance_painter.export.list_project_textures(config)`. - - The nice thing about the Javascript API version is that it returns the - output textures grouped by filename template. - - A downside is that it doesn't return all the UDIM tiles but per template - always returns a single file. - - Note: - The file format needs to be explicitly passed to the Javascript API, - but upon exporting through the Python API the file format can be based - on the output preset. So it's likely the file extension will mismatch. - - Warning: - Even though the function appears to solely get the expected outputs, - the Javascript API will actually create the config's texture output - folder if it does not exist yet. As such, a valid path must be set. - - Example output: - { - "DefaultMaterial": { - "$textureSet_BaseColor(_$colorSpace)(.$udim)": "DefaultMaterial_BaseColor_ACES - ACEScg.1002.png", # noqa - "$textureSet_Emissive(_$colorSpace)(.$udim)": "DefaultMaterial_Emissive_ACES - ACEScg.1002.png", # noqa - "$textureSet_Height(_$colorSpace)(.$udim)": "DefaultMaterial_Height_Utility - Raw.1002.png", # noqa - "$textureSet_Metallic(_$colorSpace)(.$udim)": "DefaultMaterial_Metallic_Utility - Raw.1002.png", # noqa - "$textureSet_Normal(_$colorSpace)(.$udim)": "DefaultMaterial_Normal_Utility - Raw.1002.png", # noqa - "$textureSet_Roughness(_$colorSpace)(.$udim)": "DefaultMaterial_Roughness_Utility - Raw.1002.png" # noqa - } - } - - Arguments: - config (dict): Export config - format (str, Optional): Output format to write to, defaults to 'png' - strip_folder (bool, Optional): Whether to strip the output folder - from the output filenames. - - Returns: - dict: The expected output maps. 
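    Example (editorial note, not part of the original diff; illustrative
    values): with strip_folder enabled the export path prefix is removed
    from each returned filename:

        >>> folder = "C:/exports"
        >>> filepath = "C:/exports/DefaultMaterial_BaseColor.1001.png"
        >>> filepath[len(folder):].lstrip("/")
        'DefaultMaterial_BaseColor.1001.png'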
- - """ - folder = config["exportPath"].replace("\\", "/") - preset = config["defaultExportPreset"] - cmd = f'alg.mapexport.getPathsExportDocumentMaps("{preset}", "{folder}", "{format}")' # noqa - result = substance_painter.js.evaluate(cmd) - - if strip_folder: - for _stack, maps in result.items(): - for map_template, map_filepath in maps.items(): - map_filepath = map_filepath.replace("\\", "/") - assert map_filepath.startswith(folder) - map_filename = map_filepath[len(folder):].lstrip("/") - maps[map_template] = map_filename - - return result - - -def _templates_to_regex(templates, - texture_set, - colorspaces, - project, - mesh): - """Return regexes based on Substance Painter export filename templates. - - This converts Substance Painter export filename templates like - `$mesh_$textureSet_BaseColor(_$colorSpace)(.$udim)` into a regex - which can be used to query an output filename to help retrieve: - - - Which template filename the file belongs to. - - Which color space the file is written with. - - Which udim tile it is exactly. - - This is used by `get_parsed_export_maps` which tries to match the - filename pattern against the known possible outputs as explicitly - as possible. - That's why Texture Set name, Color spaces, Project path and mesh path must - be provided. By doing so we get the best shot at correctly matching the - right template because otherwise $textureSet could basically be any string - and thus match even that of a color space or mesh. - - Arguments: - templates (list): List of templates to convert to regex. - texture_set (str): The texture set to match against. - colorspaces (list): The colorspaces defined in the current project. - project (str): Filepath of current substance project. - mesh (str): Path to mesh file used in current project. - - Returns: - dict: Template: Template regex pattern - - """ - def _filename_no_ext(path): - return os.path.splitext(os.path.basename(path))[0] - - if colorspaces and any(colorspaces): - colorspace_match = "|".join(re.escape(c) for c in set(colorspaces)) - colorspace_match = f"({colorspace_match})" - else: - # No colorspace support enabled - colorspace_match = "" - - # Key to regex valid search values - key_matches = { - "$project": re.escape(_filename_no_ext(project)), - "$mesh": re.escape(_filename_no_ext(mesh)), - "$textureSet": re.escape(texture_set), - "$colorSpace": colorspace_match, - "$udim": "([0-9]{4})" - } - - # Turn the templates into regexes - regexes = {} - for template in templates: - - # We need to tweak the escaped template so it can be used as a regex - search_regex = re.escape(template) - - # Let's assume that any ( and ) character in the file template was - # intended as an optional template key and do a simple `str.replace` - # Note: we are matching against re.escape(template) so will need to - # search for the escaped brackets. - search_regex = search_regex.replace(re.escape("("), "(") - search_regex = search_regex.replace(re.escape(")"), ")?") - - # Substitute each key into a named group - for key, key_expected_regex in key_matches.items(): - - # We want to use the template as a regex basis in the end, so we - # escape the whole thing first. Note that we'll thus need to - # search for the escaped versions of the keys too. 
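# --- Editorial sketch (not part of the original diff): the escape-then-
# substitute idea above, reduced to a single key with illustrative values.
# The whole template is escaped first, the optional "(...)" groups are
# restored as optional regex groups, then each escaped $key is swapped for
# a named group.
import re

template = "$textureSet_BaseColor(.$udim)"
pattern = re.escape(template)
pattern = pattern.replace(re.escape("("), "(").replace(re.escape(")"), ")?")
pattern = pattern.replace(re.escape("$textureSet"), "(?P<textureSet>DefaultMaterial)")
pattern = pattern.replace(re.escape("$udim"), "(?P<udim>[0-9]{4})")
match = re.match(rf"^{pattern}(?P<ext>\.[A-Za-z][A-Za-z0-9-]*)$",
                 "DefaultMaterial_BaseColor.1001.png")
assert match and match.group("udim") == "1001" and match.group("ext") == ".png"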
escaped_key = re.escape(key) - key_label = key[1:] # key without $ prefix - - key_expected_grp_regex = f"(?P<{key_label}>{key_expected_regex})" - search_regex = search_regex.replace(escaped_key, - key_expected_grp_regex) - - # The filename templates don't include the extension so we add it - # to be able to match the output filename beginning to end - ext_regex = r"(?P<ext>\.[A-Za-z][A-Za-z0-9-]*)" - search_regex = rf"^{search_regex}{ext_regex}$" - - regexes[template] = search_regex - - return regexes - - -def strip_template(template, strip="._ "): - """Return static characters in a substance painter filename template. - - >>> strip_template("$textureSet_HELLO(.$udim)") - # HELLO - >>> strip_template("$mesh_$textureSet_HELLO_WORLD_$colorSpace(.$udim)") - # HELLO_WORLD - >>> strip_template("$textureSet_HELLO(.$udim)", strip=None) - # _HELLO - >>> strip_template("$mesh_$textureSet_$colorSpace(.$udim)", strip=None) - # __ - - Arguments: - template (str): Filename template to strip. - strip (str, optional): Characters to strip from beginning and end - of the static string in template. Defaults to: `._ `. - - Returns: - str: The static string in filename template. - - """ - # Return only characters that were part of the template that were static. - # Remove all keys - keys = ["$project", "$mesh", "$textureSet", "$udim", "$colorSpace"] - stripped_template = template - for key in keys: - stripped_template = stripped_template.replace(key, "") - - # Everything inside an optional bracket space is excluded since it's not - # static. We keep a counter to track whether we are currently iterating - # over parts of the template that are inside an 'optional' group or not. - counter = 0 - result = "" - for char in stripped_template: - if char == "(": - counter += 1 - elif char == ")": - counter -= 1 - if counter < 0: - counter = 0 - else: - if counter == 0: - result += char - - if strip: - # Strip off any leading/trailing characters. Technically these are - # static but usually start and end separators like space or underscore - # aren't wanted. - result = result.strip(strip) - - return result - - -def get_parsed_export_maps(config): - """Return Export Config's expected output textures with parsed data. - - This tries to parse the texture outputs using a Python API export config. - - Parses template keys: $project, $mesh, $textureSet, $colorSpace, $udim - - Example: - {("DefaultMaterial", ""): { - "$mesh_$textureSet_BaseColor(_$colorSpace)(.$udim)": [ - { - // OUTPUT DATA FOR FILE #1 OF THE TEMPLATE - }, - { - // OUTPUT DATA FOR FILE #2 OF THE TEMPLATE - }, - ] - }} - - File output data (all outputs are `str`). - 1) Parsed tokens: These are parsed tokens from the template; they will - only exist if found in the filename template and output filename. - - project: Workfile filename without extension - mesh: Filename of the loaded mesh without extension - textureSet: The texture set, e.g. "DefaultMaterial", - colorSpace: The color space, e.g. "ACES - ACEScg", - udim: The udim tile, e.g. "1001" - - 2) Template output and filepath - - filepath: Full path to the resulting texture map, e.g. - "/path/to/mesh_DefaultMaterial_BaseColor_ACES - ACEScg.1002.png", - output: "mesh_DefaultMaterial_BaseColor_ACES - ACEScg.1002.png" - Note: if template had slashes (folders) then `output` will too. - So `output` might include a folder. 
- - Returns: - dict: [texture_set, stack]: {template: [file1_data, file2_data]} - - """ - # Import is here to avoid recursive lib <-> colorspace imports - from .colorspace import get_project_channel_data - - outputs = substance_painter.export.list_project_textures(config) - templates = get_export_templates(config, strip_folder=False) - - # Get all color spaces set for the current project - project_colorspaces = set( - data["colorSpace"] for data in get_project_channel_data().values() - ) - - # Get current project mesh path and project path to explicitly match - # the $mesh and $project tokens - project_mesh_path = substance_painter.project.last_imported_mesh_path() - project_path = substance_painter.project.file_path() - - # Get the current export path to strip this of the beginning of filepath - # results, since filename templates don't have these we'll match without - # that part of the filename. - export_path = config["exportPath"] - export_path = export_path.replace("\\", "/") - if not export_path.endswith("/"): - export_path += "/" - - # Parse the outputs - result = {} - for key, filepaths in outputs.items(): - texture_set, stack = key - - if stack: - stack_path = f"{texture_set}/{stack}" - else: - stack_path = texture_set - - stack_templates = list(templates[stack_path].keys()) - - template_regex = _templates_to_regex(stack_templates, - texture_set=texture_set, - colorspaces=project_colorspaces, - mesh=project_mesh_path, - project=project_path) - - # Let's precompile the regexes - for template, regex in template_regex.items(): - template_regex[template] = re.compile(regex) - - stack_results = defaultdict(list) - for filepath in sorted(filepaths): - # We strip explicitly using the full parent export path instead of - # using `os.path.basename` because export template is allowed to - # have subfolders in its template which we want to match against - filepath = filepath.replace("\\", "/") - assert filepath.startswith(export_path), ( - f"Filepath {filepath} must start with folder {export_path}" - ) - filename = filepath[len(export_path):] - - for template, regex in template_regex.items(): - match = regex.match(filename) - if match: - parsed = match.groupdict(default={}) - - # Include some special outputs for convenience - parsed["filepath"] = filepath - parsed["output"] = filename - - stack_results[template].append(parsed) - break - else: - raise ValueError(f"Unable to match {filename} against any " - f"template in: {list(template_regex.keys())}") - - result[key] = dict(stack_results) - - return result - - -def load_shelf(path, name=None): - """Add shelf to substance painter (for current application session) - - This will dynamically add a Shelf for the current session. It's good - to note however that these will *not* persist on restart of the host. - - Note: - Consider the loaded shelf a static library of resources. - - The shelf will *not* be visible in application preferences in - Edit > Settings > Libraries. - - The shelf will *not* show in the Assets browser if it has no existing - assets - - The shelf will *not* be a selectable option for selecting it as a - destination to import resources too. - - """ - - # Ensure expanded path with forward slashes - path = os.path.expandvars(path) - path = os.path.abspath(path) - path = path.replace("\\", "/") - - # Path must exist - if not os.path.isdir(path): - raise ValueError(f"Path is not an existing folder: {path}") - - # This name must be unique and must only contain lowercase letters, - # numbers, underscores or hyphens. 
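# --- Editorial sketch (not part of the original diff): the shelf-name
# sanitization performed below, demonstrated standalone with an
# illustrative name.
import re

name = "My Shelf (v2)".lower()           # -> "my shelf (v2)"
name = re.sub(r"[^a-z0-9_\-]", "_", name)
assert name == "my_shelf__v2_"           # space and parentheses replaced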
if name is None: - name = os.path.basename(path) - - name = name.lower() - name = re.sub(r"[^a-z0-9_\-]", "_", name) # sanitize to underscores - - if substance_painter.resource.Shelves.exists(name): - shelf = next( - shelf for shelf in substance_painter.resource.Shelves.all() - if shelf.name() == name - ) - if os.path.normpath(shelf.path()) != os.path.normpath(path): - raise ValueError(f"Shelf with name '{name}' already exists " - f"for a different path: '{shelf.path()}'") - - return - - print(f"Adding Shelf '{name}' to path: {path}") - substance_painter.resource.Shelves.add(name, path) - - return name - - -def _get_new_project_action(): - """Return QAction which triggers Substance Painter's new project dialog""" - - main_window = substance_painter.ui.get_main_window() - - # Find the file menu's New file action - menubar = main_window.menuBar() - new_action = None - for action in menubar.actions(): - menu = action.menu() - if not menu: - continue - - if menu.objectName() != "file": - continue - - # Find the action with the CTRL+N key sequence - new_action = next((action for action in menu.actions() - if action.shortcut() == QtGui.QKeySequence.New), - None) - break - - return new_action - - -def prompt_new_file_with_mesh(mesh_filepath): - """Prompts the user for a new file using Substance Painter's own dialog. - - This sets the mesh path to the given mesh and disables the mesh - selection in the dialog so the user cannot change the path. This way we - can allow user configuration of a project but set the mesh path - ourselves. - - Warning: - This is very hacky and experimental. - - Note: - If a project using the same mesh filepath is already open, it can't be - accurately detected whether the user actually accepted the new project - dialog or whether the project afterwards is still the original project, - for example when the user cancelled the operation. - - """ - - app = QtWidgets.QApplication.instance() - assert os.path.isfile(mesh_filepath), \ - f"Mesh filepath does not exist: {mesh_filepath}" - - def _setup_file_dialog(): - """Set filepath in QFileDialog and trigger accept result""" - file_dialog = app.activeModalWidget() - assert isinstance(file_dialog, QtWidgets.QFileDialog) - - # Quickly hide the dialog - file_dialog.hide() - app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 1000) - - file_dialog.setDirectory(os.path.dirname(mesh_filepath)) - url = QtCore.QUrl.fromLocalFile(os.path.basename(mesh_filepath)) - file_dialog.selectUrl(url) - # TODO: find a way to improve the event processing to - # load more complicated meshes - app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 3000) - file_dialog.done(file_dialog.Accepted) - app.processEvents(QtCore.QEventLoop.AllEvents) - - def _setup_prompt(): - app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents) - dialog = app.activeModalWidget() - assert dialog.objectName() == "NewProjectDialog" - - # Set the window title - mesh = os.path.basename(mesh_filepath) - dialog.setWindowTitle(f"New Project with mesh: {mesh}") - - # Get the select mesh file button - mesh_select = dialog.findChild(QtWidgets.QPushButton, "meshSelect") - - # Hide the select mesh button from the user to block changing the mesh - mesh_select.setVisible(False) - - # Ensure UI is visually up-to-date - app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 8000) - - # Trigger the 'select file' dialog to set the path and have the - # new file dialog use the path. 
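# --- Editorial sketch (not part of the original diff): the automation
# pattern used here. A modal dialog blocks, so a callback is queued with
# QTimer.singleShot *before* the dialog is triggered; the callback then runs
# while the dialog is open and can drive or close it. Assumes a running
# QApplication; names are illustrative.
from qtpy import QtWidgets, QtCore

def _drive_dialog():
    dialog = QtWidgets.QApplication.instance().activeModalWidget()
    if dialog is not None:
        dialog.accept()  # programmatically accept the blocking dialog

def open_and_accept(dialog):
    QtCore.QTimer.singleShot(10, _drive_dialog)  # queued before exec_()
    dialog.exec_()  # blocks until _drive_dialog() accepts the dialog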
QtCore.QTimer.singleShot(10, _setup_file_dialog) - mesh_select.click() - - app.processEvents(QtCore.QEventLoop.AllEvents, 5000) - - mesh_filename = dialog.findChild(QtWidgets.QFrame, "meshFileName") - mesh_filename_label = mesh_filename.findChild(QtWidgets.QLabel) - if not mesh_filename_label.text(): - dialog.close() - substance_painter.logging.warning( - "Failed to set mesh path with the prompt dialog: " - f"{mesh_filepath}\n\n" - "Creating new project directly with the mesh path instead.") - - new_action = _get_new_project_action() - if not new_action: - raise RuntimeError("Unable to detect new file action.") - - QtCore.QTimer.singleShot(0, _setup_prompt) - new_action.trigger() - app.processEvents(QtCore.QEventLoop.AllEvents, 5000) - - if not substance_painter.project.is_open(): - return - - # Confirm mesh was set as expected - project_mesh = substance_painter.project.last_imported_mesh_path() - if os.path.normpath(project_mesh) != os.path.normpath(mesh_filepath): - return - - return project_mesh - - -def get_filtered_export_preset(export_preset_name, channel_type_names): - """Return an export preset filtered to the specific channels - requested by users. - - Args: - export_preset_name (str): Name of export preset - channel_type_names (list): A list of channel types requested by users - - Returns: - dict: export preset data - """ - - target_maps = [] - - export_presets = get_export_presets() - export_preset_nice_name = export_presets[export_preset_name] - resource_presets = substance_painter.export.list_resource_export_presets() - preset = next( - ( - preset for preset in resource_presets - if preset.resource_id.name == export_preset_nice_name - ), None - ) - if preset is None: - return {} - - maps = preset.list_output_maps() - for channel_map in maps: - for channel_name in channel_type_names: - if not channel_map.get("fileName"): - continue - - if channel_name in channel_map["fileName"]: - target_maps.append(channel_map) - # Create a new preset - return { - "exportPresets": [ - { - "name": export_preset_name, - "maps": target_maps - } - ], - } - - -@contextlib.contextmanager -def set_layer_stack_opacity(node_ids, channel_types): - """Temporarily set the opacity of layer stack nodes during the context. - - Args: - node_ids (list[int]): Substance painter root layer node ids - channel_types (list[str]): Channel type names as defined as - attributes in `substance_painter.textureset.ChannelType` - """ - # Do nothing - if not node_ids or not channel_types: - yield - return - - stack = substance_painter.textureset.get_active_stack() - stack_root_layers = ( - substance_painter.layerstack.get_root_layer_nodes(stack) - ) - node_ids = set(node_ids) # lookup - excluded_nodes = [ - node for node in stack_root_layers - if node.uid() not in node_ids - ] - - original_opacity_values = [] - for node in excluded_nodes: - for channel in channel_types: - chan = getattr(substance_painter.textureset.ChannelType, channel) - # Store the node together with the channel so each node can - # restore its own original value afterwards - original_opacity_values.append((node, chan, node.get_opacity(chan))) - try: - for node, chan, _opacity in original_opacity_values: - node.set_opacity(0.0, chan) - yield - finally: - for node, chan, opacity in original_opacity_values: - node.set_opacity(opacity, chan) diff --git a/server_addon/substancepainter/client/ayon_substancepainter/api/pipeline.py b/server_addon/substancepainter/client/ayon_substancepainter/api/pipeline.py deleted file mode 100644 index 47090c4b0a..0000000000 --- 
a/server_addon/substancepainter/client/ayon_substancepainter/api/pipeline.py +++ /dev/null @@ -1,425 +0,0 @@ -# -*- coding: utf-8 -*- -"""Pipeline tools for OpenPype Substance Painter integration.""" -import os -import logging -from functools import partial - -# Substance 3D Painter modules -import substance_painter.ui -import substance_painter.event -import substance_painter.project - -import pyblish.api - -from ayon_core.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost -from ayon_core.settings import get_current_project_settings - -from ayon_core.pipeline.template_data import get_template_data_with_names -from ayon_core.pipeline import ( - register_creator_plugin_path, - register_loader_plugin_path, - AVALON_CONTAINER_ID, - Anatomy, -) -from ayon_core.lib import ( - StringTemplate, - register_event_callback, - emit_event, -) -from ayon_core.pipeline.load import any_outdated_containers -from ayon_substancepainter import SUBSTANCE_HOST_DIR - -from . import lib - -log = logging.getLogger("ayon_substancepainter") - -PLUGINS_DIR = os.path.join(SUBSTANCE_HOST_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") - -OPENPYPE_METADATA_KEY = "OpenPype" -OPENPYPE_METADATA_CONTAINERS_KEY = "containers" # child key -OPENPYPE_METADATA_CONTEXT_KEY = "context" # child key -OPENPYPE_METADATA_INSTANCES_KEY = "instances" # child key - - -class SubstanceHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): - name = "substancepainter" - - def __init__(self): - super(SubstanceHost, self).__init__() - self._has_been_setup = False - self.menu = None - self.callbacks = [] - self.shelves = [] - - def install(self): - pyblish.api.register_host("substancepainter") - - pyblish.api.register_plugin_path(PUBLISH_PATH) - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) - - log.info("Installing callbacks ... ") - # register_event_callback("init", on_init) - self._register_callbacks() - # register_event_callback("before.save", before_save) - # register_event_callback("save", on_save) - register_event_callback("open", on_open) - # register_event_callback("new", on_new) - - log.info("Installing menu ... 
") - self._install_menu() - - project_settings = get_current_project_settings() - self._install_shelves(project_settings) - - self._has_been_setup = True - - def uninstall(self): - self._uninstall_shelves() - self._uninstall_menu() - self._deregister_callbacks() - - def workfile_has_unsaved_changes(self): - - if not substance_painter.project.is_open(): - return False - - return substance_painter.project.needs_saving() - - def get_workfile_extensions(self): - return [".spp", ".toc"] - - def save_workfile(self, dst_path=None): - - if not substance_painter.project.is_open(): - return False - - if not dst_path: - dst_path = self.get_current_workfile() - - full_save_mode = substance_painter.project.ProjectSaveMode.Full - substance_painter.project.save_as(dst_path, full_save_mode) - - return dst_path - - def open_workfile(self, filepath): - - if not os.path.exists(filepath): - raise RuntimeError("File does not exist: {}".format(filepath)) - - # We must first explicitly close current project before opening another - if substance_painter.project.is_open(): - substance_painter.project.close() - - substance_painter.project.open(filepath) - return filepath - - def get_current_workfile(self): - if not substance_painter.project.is_open(): - return None - - filepath = substance_painter.project.file_path() - if filepath and filepath.endswith(".spt"): - # When currently in a Substance Painter template assume our - # scene isn't saved. This can be the case directly after doing - # "New project", the path will then be the template used. This - # avoids Workfiles tool trying to save as .spt extension if the - # file hasn't been saved before. - return - - return filepath - - def get_containers(self): - - if not substance_painter.project.is_open(): - return - - metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY) - containers = metadata.get(OPENPYPE_METADATA_CONTAINERS_KEY) - if containers: - for key, container in containers.items(): - container["objectName"] = key - yield container - - def update_context_data(self, data, changes): - - if not substance_painter.project.is_open(): - return - - metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY) - metadata.set(OPENPYPE_METADATA_CONTEXT_KEY, data) - - def get_context_data(self): - - if not substance_painter.project.is_open(): - return - - metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY) - return metadata.get(OPENPYPE_METADATA_CONTEXT_KEY) or {} - - def _install_menu(self): - from PySide2 import QtWidgets - from ayon_core.tools.utils import host_tools - - parent = substance_painter.ui.get_main_window() - - tab_menu_label = os.environ.get("AYON_MENU_LABEL") or "AYON" - menu = QtWidgets.QMenu(tab_menu_label) - - action = menu.addAction("Create...") - action.triggered.connect( - lambda: host_tools.show_publisher(parent=parent, - tab="create") - ) - - action = menu.addAction("Load...") - action.triggered.connect( - lambda: host_tools.show_loader(parent=parent, use_context=True) - ) - - action = menu.addAction("Publish...") - action.triggered.connect( - lambda: host_tools.show_publisher(parent=parent, - tab="publish") - ) - - action = menu.addAction("Manage...") - action.triggered.connect( - lambda: host_tools.show_scene_inventory(parent=parent) - ) - - action = menu.addAction("Library...") - action.triggered.connect( - lambda: host_tools.show_library_loader(parent=parent) - ) - - menu.addSeparator() - action = menu.addAction("Work Files...") - action.triggered.connect( - lambda: 
host_tools.show_workfiles(parent=parent) - ) - - substance_painter.ui.add_menu(menu) - - def on_menu_destroyed(): - self.menu = None - - menu.destroyed.connect(on_menu_destroyed) - - self.menu = menu - - def _uninstall_menu(self): - if self.menu: - self.menu.destroy() - self.menu = None - - def _register_callbacks(self): - # Prepare emit event callbacks - open_callback = partial(emit_event, "open") - - # Connect to the Substance Painter events - dispatcher = substance_painter.event.DISPATCHER - for event, callback in [ - (substance_painter.event.ProjectOpened, open_callback) - ]: - dispatcher.connect(event, callback) - # Keep a reference so we can deregister if needed - self.callbacks.append((event, callback)) - - def _deregister_callbacks(self): - for event, callback in self.callbacks: - substance_painter.event.DISPATCHER.disconnect(event, callback) - self.callbacks.clear() - - def _install_shelves(self, project_settings): - - shelves = project_settings["substancepainter"].get("shelves", []) - if not shelves: - return - - # Prepare formatting data if we detect any path which might have - # template tokens like {folder[name]} in there. - formatting_data = {} - has_formatting_entries = any("{" in item["value"] for item in shelves) - if has_formatting_entries: - project_name = self.get_current_project_name() - folder_path = self.get_current_folder_path() - task_name = self.get_current_task_name() - formatting_data = get_template_data_with_names( - project_name, folder_path, task_name, project_settings - ) - anatomy = Anatomy(project_name) - formatting_data["root"] = anatomy.roots - - for shelve_item in shelves: - - # Allow formatting with anatomy for the paths - path = shelve_item["value"] - if "{" in path: - path = StringTemplate.format_template(path, formatting_data) - - name = shelve_item["name"] - shelf_name = None - try: - shelf_name = lib.load_shelf(path, name=name) - except ValueError as exc: - print(f"Failed to load shelf -> {exc}") - - if shelf_name: - self.shelves.append(shelf_name) - - def _uninstall_shelves(self): - for shelf_name in self.shelves: - substance_painter.resource.Shelves.remove(shelf_name) - self.shelves.clear() - - -def on_open(): - log.info("Running callback on open..") - - if any_outdated_containers(): - from ayon_core.tools.utils import SimplePopup - - log.warning("Scene has outdated content.") - - # Get main window - parent = substance_painter.ui.get_main_window() - if parent is None: - log.info("Skipping outdated content pop-up " - "because Substance window can't be found.") - else: - - # Show outdated pop-up - def _on_show_inventory(): - from ayon_core.tools.utils import host_tools - host_tools.show_scene_inventory(parent=parent) - - dialog = SimplePopup(parent=parent) - dialog.setWindowTitle("Substance scene has outdated content") - dialog.set_message("There are outdated containers in " - "your Substance scene.") - dialog.on_clicked.connect(_on_show_inventory) - dialog.show() - - -def imprint_container(container, - name, - namespace, - context, - loader): - """Imprint a loaded container with metadata. - - Containerisation enables a tracking of version, author and origin - for loaded assets. - - Arguments: - container (dict): The (substance metadata) dictionary to imprint into. - name (str): Name of resulting assembly - namespace (str): Namespace under which to host container - context (dict): Asset information - loader (load.LoaderPlugin): loader instance used to produce container. 
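    Example (editorial note, not part of the original diff; values and the
    loader object are illustrative):

        >>> container = {}
        >>> imprint_container(
        ...     container, name="modelMain", namespace="modelMain_01",
        ...     context={"representation": {"id": "1234"}},
        ...     loader=some_loader)  # doctest: +SKIP
        >>> container["representation"]  # doctest: +SKIP
        '1234'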
- - Returns: - None - - """ - - data = [ - ("schema", "openpype:container-2.0"), - ("id", AVALON_CONTAINER_ID), - ("name", str(name)), - ("namespace", str(namespace) if namespace else None), - ("loader", str(loader.__class__.__name__)), - ("representation", context["representation"]["id"]), - ] - for key, value in data: - container[key] = value - - -def set_container_metadata(object_name, container_data, update=False): - """Helper method to directly set the data for a specific container - - Args: - object_name (str): The unique object name identifier for the container - container_data (dict): The data for the container. - Note: 'objectName' is derived from `object_name`, so an - 'objectName' key in `container_data` will be ignored. - update (bool): Whether to only update the dict data. - - """ - # The objectName is derived from the key in the metadata, so it won't be - # stored again in the container's data. - container_data.pop("objectName", None) - - metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY) - containers = metadata.get(OPENPYPE_METADATA_CONTAINERS_KEY) or {} - if update: - existing_data = containers.setdefault(object_name, {}) - existing_data.update(container_data) # mutable dict, in-place update - else: - containers[object_name] = container_data - metadata.set("containers", containers) - - -def remove_container_metadata(object_name): - """Helper method to remove the data for a specific container""" - metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY) - containers = metadata.get(OPENPYPE_METADATA_CONTAINERS_KEY) - if containers: - containers.pop(object_name, None) - metadata.set("containers", containers) - - -def set_instance(instance_id, instance_data, update=False): - """Helper method to directly set the data for a specific instance - - Args: - instance_id (str): Unique identifier for the instance - instance_data (dict): The instance data to store in the metadata. - update (bool): Whether to only update the dict data. - """ - set_instances({instance_id: instance_data}, update=update) - - -def set_instances(instance_data_by_id, update=False): - """Store data for multiple instances at the same time. - - This is more optimal than querying and setting them in the metadata one - by one. 
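    Example (editorial note, not part of the original diff; ids and values
    are illustrative):

        >>> set_instances({
        ...     "instance-id-1": {"productType": "textureSet"},
        ...     "instance-id-2": {"productType": "workfile"},
        ... }, update=True)  # doctest: +SKIP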
- """ - metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY) - instances = metadata.get(OPENPYPE_METADATA_INSTANCES_KEY) or {} - - for instance_id, instance_data in instance_data_by_id.items(): - if update: - existing_data = instances.setdefault(instance_id, {}) - existing_data.update(instance_data) - else: - instances[instance_id] = instance_data - - metadata.set("instances", instances) - - -def remove_instance(instance_id): - """Helper method to remove the data for a specific instance""" - metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY) - instances = metadata.get(OPENPYPE_METADATA_INSTANCES_KEY) or {} - instances.pop(instance_id, None) - metadata.set("instances", instances) - - -def get_instances_by_id(): - """Return all instances stored in the project instances metadata""" - if not substance_painter.project.is_open(): - return {} - - metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY) - return metadata.get(OPENPYPE_METADATA_INSTANCES_KEY) or {} - - -def get_instances(): - """Return all instances stored in the project instances as a list""" - return list(get_instances_by_id().values()) diff --git a/server_addon/substancepainter/client/ayon_substancepainter/deploy/plugins/ayon_plugin.py b/server_addon/substancepainter/client/ayon_substancepainter/deploy/plugins/ayon_plugin.py deleted file mode 100644 index 85bb56d73c..0000000000 --- a/server_addon/substancepainter/client/ayon_substancepainter/deploy/plugins/ayon_plugin.py +++ /dev/null @@ -1,36 +0,0 @@ - - -def cleanup_ayon_qt_widgets(): - """ - Workaround for Substance failing to shut down correctly - when a Qt window was still open at the time of shutting down. - - This seems to work sometimes, but not all the time. - - """ - # TODO: Create a more reliable method to close down all AYON Qt widgets - from PySide2 import QtWidgets - import substance_painter.ui - - # Kill AYON Qt widgets - print("Killing AYON Qt widgets..") - for widget in QtWidgets.QApplication.topLevelWidgets(): - if widget.__module__.startswith("ayon_"): - print(f"Deleting widget: {widget.__class__.__name__}") - substance_painter.ui.delete_ui_element(widget) - - -def start_plugin(): - from ayon_core.pipeline import install_host - from ayon_substancepainter.api import SubstanceHost - install_host(SubstanceHost()) - - -def close_plugin(): - from ayon_core.pipeline import uninstall_host - cleanup_ayon_qt_widgets() - uninstall_host() - - -if __name__ == "__main__": - start_plugin() diff --git a/server_addon/substancepainter/client/ayon_substancepainter/deploy/startup/ayon_load_on_first_run.py b/server_addon/substancepainter/client/ayon_substancepainter/deploy/startup/ayon_load_on_first_run.py deleted file mode 100644 index 9ef119e357..0000000000 --- a/server_addon/substancepainter/client/ayon_substancepainter/deploy/startup/ayon_load_on_first_run.py +++ /dev/null @@ -1,43 +0,0 @@ -"""Ease the AYON on-boarding process by loading the plug-in on first run""" - -AYON_PLUGIN_NAME = "ayon_plugin" - - -def start_plugin(): - try: - # This isn't exposed in the official API so we keep it in a try-except - from painter_plugins_ui import ( - get_settings, - LAUNCH_AT_START_KEY, - ON_STATE, - PLUGINS_MENU, - plugin_manager - ) - - # The `painter_plugins_ui` plug-in itself is also a startup plug-in; - # we need to take into account that it could run either earlier or - # later than this startup script, so we check whether its menu has - # initialized - is_before_plugins_menu = PLUGINS_MENU is None - - settings = get_settings(AYON_PLUGIN_NAME) - if 
settings.value(LAUNCH_AT_START_KEY, None) is None: - print("Initializing AYON plug-in on first run...") - if is_before_plugins_menu: - print("- running before 'painter_plugins_ui'") - # Delay the launch to the painter_plugins_ui initialization - settings.setValue(LAUNCH_AT_START_KEY, ON_STATE) - else: - # Launch now - print("- running after 'painter_plugins_ui'") - plugin_manager(AYON_PLUGIN_NAME)(True) - - # Set the checked state in the menu to avoid confusion - action = next((action for action in PLUGINS_MENU._menu.actions() - if action.text() == AYON_PLUGIN_NAME), None) - if action is not None: - action.blockSignals(True) - action.setChecked(True) - action.blockSignals(False) - - except Exception as exc: - print(exc) diff --git a/server_addon/substancepainter/client/ayon_substancepainter/plugins/create/create_textures.py b/server_addon/substancepainter/client/ayon_substancepainter/plugins/create/create_textures.py deleted file mode 100644 index 97d34a6557..0000000000 --- a/server_addon/substancepainter/client/ayon_substancepainter/plugins/create/create_textures.py +++ /dev/null @@ -1,247 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating textures.""" -from ayon_core.pipeline import CreatedInstance, Creator, CreatorError -from ayon_core.lib import ( - EnumDef, - UILabelDef, - NumberDef, - BoolDef -) - -from ayon_substancepainter.api.pipeline import ( - get_instances, - set_instance, - set_instances, - remove_instance -) -from ayon_substancepainter.api.lib import get_export_presets - -import substance_painter -import substance_painter.project - - -class CreateTextures(Creator): - """Create a texture set.""" - identifier = "io.openpype.creators.substancepainter.textureset" - label = "Textures" - product_type = "textureSet" - icon = "picture-o" - - default_variant = "Main" - settings_category = "substancepainter" - channel_mapping = [] - - def apply_settings(self, project_settings): - settings = project_settings["substancepainter"].get("create", []) # noqa - if settings: - self.channel_mapping = settings["CreateTextures"].get( - "channel_mapping", []) - - def create(self, product_name, instance_data, pre_create_data): - if not substance_painter.project.is_open(): - raise CreatorError("Can't create a Texture Set instance without " - "an open project.") - # Transfer settings from pre create to instance - creator_attributes = instance_data.setdefault( - "creator_attributes", dict()) - for key in [ - "review", - "exportPresetUrl", - "exportFileFormat", - "exportSize", - "exportPadding", - "exportDilationDistance", - "useCustomExportPreset", - "exportChannel" - ]: - if key in pre_create_data: - creator_attributes[key] = pre_create_data[key] - - if pre_create_data.get("use_selection"): - stack = substance_painter.textureset.get_active_stack() - - instance_data["selected_node_id"] = [ - node_number.uid() for node_number in - substance_painter.layerstack.get_selected_nodes(stack)] - - instance = self.create_instance_in_context(product_name, - instance_data) - set_instance( - instance_id=instance["instance_id"], - instance_data=instance.data_to_store() - ) - - def collect_instances(self): - for instance in get_instances(): - if (instance.get("creator_identifier") == self.identifier or - instance.get("productType") == self.product_type): - self.create_instance_in_context_from_existing(instance) - - def update_instances(self, update_list): - instance_data_by_id = {} - for instance, _changes in update_list: - # Persist the data - instance_id = instance.get("instance_id") - instance_data = 
instance.data_to_store() - instance_data_by_id[instance_id] = instance_data - set_instances(instance_data_by_id, update=True) - - def remove_instances(self, instances): - for instance in instances: - remove_instance(instance["instance_id"]) - self._remove_instance_from_context(instance) - - # Helper methods (this might get moved into Creator class) - def create_instance_in_context(self, product_name, data): - instance = CreatedInstance( - self.product_type, product_name, data, self - ) - self.create_context.creator_adds_instance(instance) - return instance - - def create_instance_in_context_from_existing(self, data): - instance = CreatedInstance.from_existing(data, self) - self.create_context.creator_adds_instance(instance) - return instance - - def get_instance_attr_defs(self): - if self.channel_mapping: - export_channel_enum = { - item["value"]: item["name"] - for item in self.channel_mapping - } - else: - export_channel_enum = { - "BaseColor": "Base Color", - "Metallic": "Metallic", - "Roughness": "Roughness", - "SpecularEdgeColor": "Specular Edge Color", - "Emissive": "Emissive", - "Opacity": "Opacity", - "Displacement": "Displacement", - "Glossiness": "Glossiness", - "Anisotropylevel": "Anisotropy Level", - "AO": "Ambient Occlusion", - "Anisotropyangle": "Anisotropy Angle", - "Transmissive": "Transmissive", - "Reflection": "Reflection", - "Diffuse": "Diffuse", - "Ior": "Index of Refraction", - "Specularlevel": "Specular Level", - "BlendingMask": "Blending Mask", - "Translucency": "Translucency", - "Scattering": "Scattering", - "ScatterColor": "Scatter Color", - "SheenOpacity": "Sheen Opacity", - "SheenRoughness": "Sheen Roughness", - "SheenColor": "Sheen Color", - "CoatOpacity": "Coat Opacity", - "CoatColor": "Coat Color", - "CoatRoughness": "Coat Roughness", - "CoatSpecularLevel": "Coat Specular Level", - "CoatNormal": "Coat Normal", - } - - return [ - BoolDef("review", - label="Review", - tooltip="Mark as reviewable", - default=True), - EnumDef("exportChannel", - items=export_channel_enum, - multiselection=True, - default=None, - label="Export Channel(s)", - tooltip="Choose the channel(s) which you " - "want to export. The value " - "is 'None' by default, which exports " - "all channels"), - EnumDef("exportPresetUrl", - items=get_export_presets(), - label="Output Template"), - BoolDef("allowSkippedMaps", - label="Allow Skipped Output Maps", - tooltip="When enabled this allows the publish to ignore " - "output maps in the used output template if one " - "or more maps are skipped due to the required " - "channels not being present in the current file.", - default=True), - EnumDef("exportFileFormat", - items={ - None: "Based on output template", - # TODO: Get available extensions from substance API - "bmp": "bmp", - "ico": "ico", - "jpeg": "jpeg", - "jng": "jng", - "pbm": "pbm", - "pgm": "pgm", - "png": "png", - "ppm": "ppm", - "tga": "targa", - "tif": "tiff", - "wap": "wap", - "wbmp": "wbmp", - "xpm": "xpm", - "gif": "gif", - "hdr": "hdr", - "exr": "exr", - "j2k": "j2k", - "jp2": "jp2", - "pfm": "pfm", - "webp": "webp", - # TODO: Unsure why jxr format fails to export - # "jxr": "jpeg-xr", - # TODO: File formats that combine the exported textures - # like psd are not correctly supported due to - # publishing only a single file - # "psd": "psd", - # "sbsar": "sbsar", - }, - default=None, - label="File type"), - EnumDef("exportSize", - items={ - None: "Based on each Texture Set's size", - # The key is size of the texture file in log2. - # (i.e. 
10 means 2^10 = 1024) - 7: "128", - 8: "256", - 9: "512", - 10: "1024", - 11: "2048", - 12: "4096", - 13: "8192" - }, - default=None, - label="Size"), - EnumDef("exportPadding", - items={ - "passthrough": "No padding (passthrough)", - "infinite": "Dilation infinite", - "transparent": "Dilation + transparent", - "color": "Dilation + default background color", - "diffusion": "Dilation + diffusion" - }, - default="infinite", - label="Padding"), - NumberDef("exportDilationDistance", - minimum=0, - maximum=256, - decimals=0, - default=16, - label="Dilation Distance"), - UILabelDef("*only used with " - "'Dilation + ' padding"), - ] - - def get_pre_create_attr_defs(self): - # Use same attributes as for instance attributes - attr_defs = [] - if substance_painter.application.version_info()[0] >= 10: - attr_defs.append( - BoolDef("use_selection", label="Use selection", - tooltip="Select Layer Stack(s) for exporting") - ) - return attr_defs + self.get_instance_attr_defs() diff --git a/server_addon/substancepainter/client/ayon_substancepainter/plugins/create/create_workfile.py b/server_addon/substancepainter/client/ayon_substancepainter/plugins/create/create_workfile.py deleted file mode 100644 index b100e4189d..0000000000 --- a/server_addon/substancepainter/client/ayon_substancepainter/plugins/create/create_workfile.py +++ /dev/null @@ -1,125 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating workfiles.""" - -import ayon_api - -from ayon_core.pipeline import CreatedInstance, AutoCreator - -from ayon_substancepainter.api.pipeline import ( - set_instances, - set_instance, - get_instances -) - -import substance_painter.project - - -class CreateWorkfile(AutoCreator): - """Workfile auto-creator.""" - identifier = "io.openpype.creators.substancepainter.workfile" - label = "Workfile" - product_type = "workfile" - icon = "document" - - default_variant = "Main" - settings_category = "substancepainter" - - def create(self): - - if not substance_painter.project.is_open(): - return - - variant = self.default_variant - project_name = self.project_name - folder_path = self.create_context.get_current_folder_path() - task_name = self.create_context.get_current_task_name() - host_name = self.create_context.host_name - - # Workfile instance should always exist and must only exist once. - # As such we'll first check if it already exists and is collected. 
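# --- Editorial sketch (not part of the original diff): the lookup below
# uses next() with a default, which yields None instead of raising
# StopIteration when nothing matches. Minimal standalone form with
# illustrative identifiers:
instances = [{"creator_identifier": "other.creator"}]
current = next(
    (inst for inst in instances
     if inst["creator_identifier"] == "workfile.creator"),
    None,
)
assert current is None  # no match -> None rather than an exception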
- current_instance = next( - ( - instance for instance in self.create_context.instances - if instance.creator_identifier == self.identifier - ), None) - - current_folder_path = None - if current_instance is not None: - current_folder_path = current_instance["folderPath"] - - if current_instance is None: - self.log.info("Auto-creating workfile instance...") - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path - ) - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) - product_name = self.get_product_name( - project_name, - folder_entity, - task_entity, - variant, - host_name, - ) - data = { - "folderPath": folder_path, - "task": task_name, - "variant": variant - } - current_instance = self.create_instance_in_context(product_name, - data) - elif ( - current_folder_path != folder_path - or current_instance["task"] != task_name - ): - # Update instance context if is not the same - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path - ) - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) - product_name = self.get_product_name( - project_name, - folder_entity, - task_entity, - variant, - host_name, - ) - current_instance["folderPath"] = folder_path - current_instance["task"] = task_name - current_instance["productName"] = product_name - - set_instance( - instance_id=current_instance.get("instance_id"), - instance_data=current_instance.data_to_store() - ) - - def collect_instances(self): - for instance in get_instances(): - if (instance.get("creator_identifier") == self.identifier or - instance.get("productType") == self.product_type): - self.create_instance_in_context_from_existing(instance) - - def update_instances(self, update_list): - instance_data_by_id = {} - for instance, _changes in update_list: - # Persist the data - instance_id = instance.get("instance_id") - instance_data = instance.data_to_store() - instance_data_by_id[instance_id] = instance_data - set_instances(instance_data_by_id, update=True) - - # Helper methods (this might get moved into Creator class) - def create_instance_in_context(self, product_name, data): - instance = CreatedInstance( - self.product_type, product_name, data, self - ) - self.create_context.creator_adds_instance(instance) - return instance - - def create_instance_in_context_from_existing(self, data): - instance = CreatedInstance.from_existing(data, self) - self.create_context.creator_adds_instance(instance) - return instance diff --git a/server_addon/substancepainter/client/ayon_substancepainter/plugins/load/load_mesh.py b/server_addon/substancepainter/client/ayon_substancepainter/plugins/load/load_mesh.py deleted file mode 100644 index e2a48dd5a4..0000000000 --- a/server_addon/substancepainter/client/ayon_substancepainter/plugins/load/load_mesh.py +++ /dev/null @@ -1,246 +0,0 @@ -import copy -from qtpy import QtWidgets, QtCore -from ayon_core.pipeline import ( - load, - get_representation_path, -) -from ayon_core.pipeline.load import LoadError -from ayon_substancepainter.api.pipeline import ( - imprint_container, - set_container_metadata, - remove_container_metadata -) - -import substance_painter.project - - -def _convert(substance_attr): - """Return Substance Painter Python API Project attribute from string. 
- - This converts a string like "ProjectWorkflow.Default" into its - Substance Painter Python API equivalent object, for example: - `substance_painter.project.ProjectWorkflow.Default` - - Args: - substance_attr (str): The `substance_painter.project` attribute, - for example "ProjectWorkflow.Default" - - Returns: - Any: Substance Python API object of the project attribute. - - Raises: - ValueError: If attribute does not exist on the - `substance_painter.project` python api. - """ - root = substance_painter.project - for attr in substance_attr.split("."): - root = getattr(root, attr, None) - if root is None: - raise ValueError( - "Substance Painter project attribute" - f" does not exist: {substance_attr}") - - return root - - -def get_template_by_name(name: str, templates: list[dict]) -> dict: - return next( - template for template in templates - if template["name"] == name - ) - - -class SubstanceProjectConfigurationWindow(QtWidgets.QDialog): - """Pop-up dialog that lets the user pick a project template and - configuration options when creating or reloading a Substance Painter - project. - """ - def __init__(self, project_templates): - super(SubstanceProjectConfigurationWindow, self).__init__() - self.setWindowFlags(self.windowFlags() | QtCore.Qt.FramelessWindowHint) - - self.configuration = None - self.template_names = [template["name"] for template - in project_templates] - self.project_templates = project_templates - - self.widgets = { - "label": QtWidgets.QLabel( - "Select your template for project configuration"), - "template_options": QtWidgets.QComboBox(), - "import_cameras": QtWidgets.QCheckBox("Import Cameras"), - "preserve_strokes": QtWidgets.QCheckBox("Preserve Strokes"), - "clickbox": QtWidgets.QWidget(), - "combobox": QtWidgets.QWidget(), - "buttons": QtWidgets.QDialogButtonBox( - QtWidgets.QDialogButtonBox.Ok - | QtWidgets.QDialogButtonBox.Cancel) - } - - self.widgets["template_options"].addItems(self.template_names) - - template_name = self.widgets["template_options"].currentText() - self._update_to_match_template(template_name) - # Build clickboxes - layout = QtWidgets.QHBoxLayout(self.widgets["clickbox"]) - layout.addWidget(self.widgets["import_cameras"]) - layout.addWidget(self.widgets["preserve_strokes"]) - # Build combobox - layout = QtWidgets.QHBoxLayout(self.widgets["combobox"]) - layout.addWidget(self.widgets["template_options"]) - # Build buttons - layout = QtWidgets.QHBoxLayout(self.widgets["buttons"]) - # Build layout. 
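# --- Editorial sketch (not part of the original diff): the template-driven
# defaults pattern of this dialog, reduced to its core. Choosing a template
# name re-applies that template's stored defaults to the checkboxes.
# Assumes a running QApplication; names are illustrative.
from qtpy import QtWidgets

def build_template_combo(parent, templates):
    combo = QtWidgets.QComboBox(parent)
    combo.addItems([t["name"] for t in templates])
    cameras = QtWidgets.QCheckBox("Import Cameras", parent)

    def _apply(name):
        template = next(t for t in templates if t["name"] == name)
        cameras.setChecked(template["import_cameras"])

    combo.currentTextChanged.connect(_apply)
    _apply(combo.currentText())  # initialize from the first template
    return combo, cameras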
layout = QtWidgets.QVBoxLayout(self) - layout.addWidget(self.widgets["label"]) - layout.addWidget(self.widgets["combobox"]) - layout.addWidget(self.widgets["clickbox"]) - layout.addWidget(self.widgets["buttons"]) - - self.widgets["template_options"].currentTextChanged.connect( - self._update_to_match_template) - self.widgets["buttons"].accepted.connect(self.on_accept) - self.widgets["buttons"].rejected.connect(self.on_reject) - - def on_accept(self): - self.configuration = self.get_project_configuration() - self.close() - - def on_reject(self): - self.close() - - def _update_to_match_template(self, template_name): - template = get_template_by_name(template_name, self.project_templates) - self.widgets["import_cameras"].setChecked(template["import_cameras"]) - self.widgets["preserve_strokes"].setChecked( - template["preserve_strokes"]) - - def get_project_configuration(self): - templates = self.project_templates - template_name = self.widgets["template_options"].currentText() - template = get_template_by_name(template_name, templates) - template = copy.deepcopy(template) # do not edit the original - template["import_cameras"] = self.widgets["import_cameras"].isChecked() - template["preserve_strokes"] = ( - self.widgets["preserve_strokes"].isChecked() - ) - for key in ["normal_map_format", - "project_workflow", - "tangent_space_mode"]: - template[key] = _convert(template[key]) - return template - - @classmethod - def prompt(cls, templates): - dialog = cls(templates) - dialog.exec_() - configuration = dialog.configuration - dialog.deleteLater() - return configuration - - -class SubstanceLoadProjectMesh(load.LoaderPlugin): - """Load mesh for project""" - - product_types = {"*"} - representations = {"abc", "fbx", "obj", "gltf", "usd", "usda", "usdc"} - - label = "Load mesh" - order = -10 - icon = "code-fork" - color = "orange" - - # Defined via settings - project_templates = [] - - def load(self, context, name, namespace, options=None): - - # Get user inputs - result = SubstanceProjectConfigurationWindow.prompt( - self.project_templates) - if not result: - # cancelling loader action - return - if not substance_painter.project.is_open(): - # Allow to 'initialize' a new project - path = self.filepath_from_context(context) - sp_settings = substance_painter.project.Settings( - import_cameras=result["import_cameras"], - normal_map_format=result["normal_map_format"], - project_workflow=result["project_workflow"], - tangent_space_mode=result["tangent_space_mode"], - default_texture_resolution=result["default_texture_resolution"] - ) - settings = substance_painter.project.create( - mesh_file_path=path, settings=sp_settings - ) - else: - # Reload the mesh - settings = substance_painter.project.MeshReloadingSettings( - import_cameras=result["import_cameras"], - preserve_strokes=result["preserve_strokes"]) - - def on_mesh_reload(status: substance_painter.project.ReloadMeshStatus): # noqa - if status == substance_painter.project.ReloadMeshStatus.SUCCESS: # noqa - self.log.info("Reload succeeded") - else: - raise LoadError("Reload of mesh failed") - - path = self.filepath_from_context(context) - substance_painter.project.reload_mesh(path, - settings, - on_mesh_reload) - - # Store container - container = {} - project_mesh_object_name = "_ProjectMesh_" - imprint_container(container, - name=project_mesh_object_name, - namespace=project_mesh_object_name, - context=context, - loader=self) - - # We want to store some options for updating to keep consistent behavior - # from the user's original choice. 
We don't store 'preserve_strokes' - # as we always preserve strokes on updates. - container["options"] = { - "import_cameras": result["import_cameras"], - } - - set_container_metadata(project_mesh_object_name, container) - - def switch(self, container, context): - self.update(container, context) - - def update(self, container, context): - repre_entity = context["representation"] - - path = get_representation_path(repre_entity) - - # Reload the mesh - container_options = container.get("options", {}) - settings = substance_painter.project.MeshReloadingSettings( - import_cameras=container_options.get("import_cameras", True), - preserve_strokes=True - ) - - def on_mesh_reload(status: substance_painter.project.ReloadMeshStatus): - if status == substance_painter.project.ReloadMeshStatus.SUCCESS: - self.log.info("Reload succeeded") - else: - raise LoadError("Reload of mesh failed") - - substance_painter.project.reload_mesh(path, settings, on_mesh_reload) - - # Update container representation - object_name = container["objectName"] - update_data = {"representation": repre_entity["id"]} - set_container_metadata(object_name, update_data, update=True) - - def remove(self, container): - - # Remove OpenPype related settings about what model was loaded - # or close the project? - # TODO: This is likely best 'hidden' away to the user because - # this will leave the project's mesh unmanaged. - remove_container_metadata(container["objectName"]) diff --git a/server_addon/substancepainter/client/ayon_substancepainter/plugins/publish/collect_current_file.py b/server_addon/substancepainter/client/ayon_substancepainter/plugins/publish/collect_current_file.py deleted file mode 100644 index db0edafac0..0000000000 --- a/server_addon/substancepainter/client/ayon_substancepainter/plugins/publish/collect_current_file.py +++ /dev/null @@ -1,17 +0,0 @@ -import pyblish.api - -from ayon_core.pipeline import registered_host - - -class CollectCurrentFile(pyblish.api.ContextPlugin): - """Inject the current working file into context""" - - order = pyblish.api.CollectorOrder - 0.49 - label = "Current Workfile" - hosts = ["substancepainter"] - - def process(self, context): - host = registered_host() - path = host.get_current_workfile() - context.data["currentFile"] = path - self.log.debug(f"Current workfile: {path}") diff --git a/server_addon/substancepainter/client/ayon_substancepainter/plugins/publish/collect_textureset_images.py b/server_addon/substancepainter/client/ayon_substancepainter/plugins/publish/collect_textureset_images.py deleted file mode 100644 index 5bea3e971f..0000000000 --- a/server_addon/substancepainter/client/ayon_substancepainter/plugins/publish/collect_textureset_images.py +++ /dev/null @@ -1,217 +0,0 @@ -import os -import copy - -import pyblish.api -import ayon_api - -import substance_painter.textureset -from ayon_core.pipeline import publish -from ayon_substancepainter.api.lib import ( - get_parsed_export_maps, - get_filtered_export_preset, - strip_template -) -from ayon_core.pipeline.create import get_product_name - - -class CollectTextureSet(pyblish.api.InstancePlugin): - """Extract Textures using an output template config""" - # TODO: Production-test usage of color spaces - # TODO: Detect what source data channels end up in each file - - label = "Collect Texture Set images" - hosts = ["substancepainter"] - families = ["textureSet"] - order = pyblish.api.CollectorOrder - - def process(self, instance): - - config = self.get_export_config(instance) - project_name = instance.context.data["projectName"] 
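-        # Resolve folder and task entities up front so per-texture product
-        # names can be built with the task context in
-        # `create_image_instance` below.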
- folder_entity = ayon_api.get_folder_by_path( - project_name, - instance.data["folderPath"] - ) - task_name = instance.data.get("task") - task_entity = None - if folder_entity and task_name: - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) - - instance.data["exportConfig"] = config - maps = get_parsed_export_maps(config) - - # Let's break the instance into multiple instances to integrate - # a product per generated texture or texture UDIM sequence - for (texture_set_name, stack_name), template_maps in maps.items(): - self.log.info(f"Processing {texture_set_name}/{stack_name}") - for template, outputs in template_maps.items(): - self.log.info(f"Processing {template}") - self.create_image_instance(instance, template, outputs, - task_entity=task_entity, - texture_set_name=texture_set_name, - stack_name=stack_name) - - def create_image_instance(self, instance, template, outputs, - task_entity, texture_set_name, stack_name): - """Create a new instance per image or UDIM sequence. - - The new instances will be of product type `image`. - - """ - - context = instance.context - first_filepath = outputs[0]["filepath"] - fnames = [os.path.basename(output["filepath"]) for output in outputs] - ext = os.path.splitext(first_filepath)[1] - assert ext.lstrip("."), f"No extension: {ext}" - - always_include_texture_set_name = False # todo: make this configurable - all_texture_sets = substance_painter.textureset.all_texture_sets() - texture_set = substance_painter.textureset.TextureSet.from_name( - texture_set_name - ) - - # Define the suffix we want to give this particular texture - # set and set up a remapped product naming for it. - suffix = "" - if always_include_texture_set_name or len(all_texture_sets) > 1: - # More than one texture set, include texture set name - suffix += f".{texture_set_name}" - if texture_set.is_layered_material() and stack_name: - # More than one stack, include stack name - suffix += f".{stack_name}" - - # Always include the map identifier - map_identifier = strip_template(template) - suffix += f".{map_identifier}" - - task_name = task_type = None - if task_entity: - task_name = task_entity["name"] - task_type = task_entity["taskType"] - - image_product_name = get_product_name( - # TODO: The product type actually isn't 'texture' currently but - # for now this is only done so the product name starts with - # 'texture' - context.data["projectName"], - task_name, - task_type, - context.data["hostName"], - product_type="texture", - variant=instance.data["variant"] + suffix, - project_settings=context.data["project_settings"] - ) - - # Prepare representation - representation = { - "name": ext.lstrip("."), - "ext": ext.lstrip("."), - "files": fnames if len(fnames) > 1 else fnames[0], - } - - # Mark as UDIM explicitly if it has UDIM tiles. - if bool(outputs[0].get("udim")): - # The representation for a UDIM sequence should have a `udim` key - # that is a list of all udim tiles (str) like: ["1001", "1002"] - # strings. See CollectTextures plug-in and Integrators. 
- representation["udim"] = [output["udim"] for output in outputs] - - # Set up the representation for thumbnail generation - # TODO: Simplify this once thumbnail extraction is refactored - staging_dir = os.path.dirname(first_filepath) - representation["tags"] = ["review"] - representation["stagingDir"] = staging_dir - # Clone the instance - product_type = "image" - image_instance = context.create_instance(image_product_name) - image_instance[:] = instance[:] - image_instance.data.update(copy.deepcopy(dict(instance.data))) - image_instance.data["name"] = image_product_name - image_instance.data["label"] = image_product_name - image_instance.data["productName"] = image_product_name - image_instance.data["productType"] = product_type - image_instance.data["family"] = product_type - image_instance.data["families"] = [product_type, "textures"] - if instance.data["creator_attributes"].get("review"): - image_instance.data["families"].append("review") - - image_instance.data["representations"] = [representation] - - # Group the textures together in the loader - image_instance.data["productGroup"] = image_product_name - - # Store the texture set name and stack name on the instance - image_instance.data["textureSetName"] = texture_set_name - image_instance.data["textureStackName"] = stack_name - - # Store color space with the instance - # Note: The extractor will assign it to the representation - colorspace = outputs[0].get("colorSpace") - if colorspace: - self.log.debug(f"{image_product_name} colorspace: {colorspace}") - image_instance.data["colorspace"] = colorspace - - # Store the instance in the original instance as a member - instance.append(image_instance) - - def get_export_config(self, instance): - """Return an export configuration dict for texture exports. - - This config can be supplied to: - - `substance_painter.export.export_project_textures` - - `substance_painter.export.list_project_textures` - - See documentation on substance_painter.export module about the - formatting of the configuration dictionary. - - Args: - instance (pyblish.api.Instance): Texture Set instance to be - published. - - Returns: - dict: Export config - - """ - - creator_attrs = instance.data["creator_attributes"] - preset_url = creator_attrs["exportPresetUrl"] - self.log.debug(f"Exporting using preset: {preset_url}") - - # See: https://substance3d.adobe.com/documentation/ptpy/api/substance_painter/export # noqa - config = { # noqa - "exportShaderParams": True, - "exportPath": publish.get_instance_staging_dir(instance), - "defaultExportPreset": preset_url, - - # Custom overrides to the exporter - "exportParameters": [ - { - "parameters": { - "fileFormat": creator_attrs["exportFileFormat"], - "sizeLog2": creator_attrs["exportSize"], - "paddingAlgorithm": creator_attrs["exportPadding"], - "dilationDistance": creator_attrs["exportDilationDistance"] # noqa - } - } - ] - } - - # Create the list of Texture Sets to export. 
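-        # Every texture set in the project is exported; each entry only
-        # needs the texture set name as its root path.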
- config["exportList"] = [] - for texture_set in substance_painter.textureset.all_texture_sets(): - config["exportList"].append({"rootPath": texture_set.name()}) - - # Consider None values from the creator attributes optionals - for override in config["exportParameters"]: - parameters = override.get("parameters") - for key, value in dict(parameters).items(): - if value is None: - parameters.pop(key) - channel_layer = creator_attrs.get("exportChannel", []) - if channel_layer: - maps = get_filtered_export_preset(preset_url, channel_layer) - config.update(maps) - return config diff --git a/server_addon/substancepainter/client/ayon_substancepainter/plugins/publish/collect_workfile_representation.py b/server_addon/substancepainter/client/ayon_substancepainter/plugins/publish/collect_workfile_representation.py deleted file mode 100644 index 8d98d0b014..0000000000 --- a/server_addon/substancepainter/client/ayon_substancepainter/plugins/publish/collect_workfile_representation.py +++ /dev/null @@ -1,26 +0,0 @@ -import os -import pyblish.api - - -class CollectWorkfileRepresentation(pyblish.api.InstancePlugin): - """Create a publish representation for the current workfile instance.""" - - order = pyblish.api.CollectorOrder - label = "Workfile representation" - hosts = ["substancepainter"] - families = ["workfile"] - - def process(self, instance): - - context = instance.context - current_file = context.data["currentFile"] - - folder, file = os.path.split(current_file) - filename, ext = os.path.splitext(file) - - instance.data["representations"] = [{ - "name": ext.lstrip("."), - "ext": ext.lstrip("."), - "files": file, - "stagingDir": folder, - }] diff --git a/server_addon/substancepainter/client/ayon_substancepainter/plugins/publish/extract_textures.py b/server_addon/substancepainter/client/ayon_substancepainter/plugins/publish/extract_textures.py deleted file mode 100644 index 52212922ae..0000000000 --- a/server_addon/substancepainter/client/ayon_substancepainter/plugins/publish/extract_textures.py +++ /dev/null @@ -1,67 +0,0 @@ -import substance_painter.export -from ayon_core.pipeline import KnownPublishError, publish -from ayon_substancepainter.api.lib import set_layer_stack_opacity - - -class ExtractTextures(publish.Extractor, - publish.ColormanagedPyblishPluginMixin): - """Extract Textures using an output template config. - - Note: - This Extractor assumes that `collect_textureset_images` has prepared - the relevant export config and has also collected the individual image - instances for publishing including its representation. That is why this - particular Extractor doesn't specify representations to integrate. 
- - """ - - label = "Extract Texture Set" - hosts = ["substancepainter"] - families = ["textureSet"] - - # Run before thumbnail extractors - order = publish.Extractor.order - 0.1 - - def process(self, instance): - - config = instance.data["exportConfig"] - creator_attrs = instance.data["creator_attributes"] - export_channel = creator_attrs.get("exportChannel", []) - node_ids = instance.data.get("selected_node_id", []) - - with set_layer_stack_opacity(node_ids, export_channel): - result = substance_painter.export.export_project_textures(config) - - if result.status != substance_painter.export.ExportStatus.Success: - raise KnownPublishError( - "Failed to export texture set: {}".format(result.message) - ) - - # Log what files we generated - for (texture_set_name, stack_name), maps in result.textures.items(): - # Log our texture outputs - self.log.info(f"Exported stack: {texture_set_name} {stack_name}") - for texture_map in maps: - self.log.info(f"Exported texture: {texture_map}") - - # We'll insert the color space data for each image instance that we - # added into this texture set. The collector couldn't do so because - # some anatomy and other instance data needs to be collected prior - context = instance.context - for image_instance in instance: - representation = next(iter(image_instance.data["representations"])) - - colorspace = image_instance.data.get("colorspace") - if not colorspace: - self.log.debug("No color space data present for instance: " - f"{image_instance}") - continue - - self.set_representation_colorspace(representation, - context=context, - colorspace=colorspace) - - # The TextureSet instance should not be integrated. It generates no - # output data. Instead the separated texture instances are generated - # from it which themselves integrate into the database. 
- instance.data["integrate"] = False diff --git a/server_addon/substancepainter/client/ayon_substancepainter/plugins/publish/increment_workfile.py b/server_addon/substancepainter/client/ayon_substancepainter/plugins/publish/increment_workfile.py deleted file mode 100644 index 521a28130b..0000000000 --- a/server_addon/substancepainter/client/ayon_substancepainter/plugins/publish/increment_workfile.py +++ /dev/null @@ -1,23 +0,0 @@ -import pyblish.api - -from ayon_core.lib import version_up -from ayon_core.pipeline import registered_host - - -class IncrementWorkfileVersion(pyblish.api.ContextPlugin): - """Increment current workfile version.""" - - order = pyblish.api.IntegratorOrder + 1 - label = "Increment Workfile Version" - optional = True - hosts = ["substancepainter"] - - def process(self, context): - - assert all(result["success"] for result in context.data["results"]), ( - "Publishing not successful so version is not increased.") - - host = registered_host() - path = context.data["currentFile"] - self.log.info(f"Incrementing current workfile to: {path}") - host.save_workfile(version_up(path)) diff --git a/server_addon/substancepainter/client/ayon_substancepainter/plugins/publish/save_workfile.py b/server_addon/substancepainter/client/ayon_substancepainter/plugins/publish/save_workfile.py deleted file mode 100644 index 627fb991aa..0000000000 --- a/server_addon/substancepainter/client/ayon_substancepainter/plugins/publish/save_workfile.py +++ /dev/null @@ -1,28 +0,0 @@ -import pyblish.api - -from ayon_core.pipeline import ( - registered_host, - KnownPublishError -) - - -class SaveCurrentWorkfile(pyblish.api.ContextPlugin): - """Save current workfile""" - - label = "Save current workfile" - order = pyblish.api.ExtractorOrder - 0.49 - hosts = ["substancepainter"] - - def process(self, context): - - host = registered_host() - current = host.get_current_workfile() - if context.data["currentFile"] != current: - raise KnownPublishError("Workfile has changed during publishing!") - - if host.workfile_has_unsaved_changes(): - self.log.info("Saving current file: {}".format(current)) - host.save_workfile() - else: - self.log.debug("Skipping workfile save because there are no " - "unsaved changes.") diff --git a/server_addon/substancepainter/client/ayon_substancepainter/plugins/publish/validate_ouput_maps.py b/server_addon/substancepainter/client/ayon_substancepainter/plugins/publish/validate_ouput_maps.py deleted file mode 100644 index 3293e7f204..0000000000 --- a/server_addon/substancepainter/client/ayon_substancepainter/plugins/publish/validate_ouput_maps.py +++ /dev/null @@ -1,153 +0,0 @@ -import copy -import os - -import pyblish.api - -import substance_painter.export - -from ayon_core.pipeline import PublishValidationError - - -class ValidateOutputMaps(pyblish.api.InstancePlugin): - """Validate all output maps for Output Template are generated. - - Output maps will be skipped by Substance Painter if it is an output - map in the Substance Output Template which uses channels that the current - substance painter project has not painted or generated. - - """ - - order = pyblish.api.ValidatorOrder - label = "Validate output maps" - hosts = ["substancepainter"] - families = ["textureSet"] - - def process(self, instance): - - config = instance.data["exportConfig"] - - # Substance Painter API does not allow to query the actual output maps - # it will generate without actually exporting the files. 
So we try to - # generate the smallest size / fastest export as possible - config = copy.deepcopy(config) - invalid_channels = self.get_invalid_channels(instance, config) - if invalid_channels: - raise PublishValidationError( - "Invalid Channel(s): {} found in texture set {}".format( - invalid_channels, instance.name - )) - parameters = config["exportParameters"][0]["parameters"] - parameters["sizeLog2"] = [1, 1] # output 2x2 images (smallest) - parameters["paddingAlgorithm"] = "passthrough" # no dilation (faster) - parameters["dithering"] = False # no dithering (faster) - result = substance_painter.export.export_project_textures(config) - if result.status != substance_painter.export.ExportStatus.Success: - raise PublishValidationError( - "Failed to export texture set: {}".format(result.message) - ) - - generated_files = set() - for texture_maps in result.textures.values(): - for texture_map in texture_maps: - generated_files.add(os.path.normpath(texture_map)) - # Directly clean up our temporary export - os.remove(texture_map) - - creator_attributes = instance.data.get("creator_attributes", {}) - allow_skipped_maps = creator_attributes.get("allowSkippedMaps", True) - error_report_missing = [] - for image_instance in instance: - - # Confirm whether the instance has its expected files generated. - # We assume there's just one representation and that it is - # the actual texture representation from the collector. - representation = next(iter(image_instance.data["representations"])) - staging_dir = representation["stagingDir"] - filenames = representation["files"] - if not isinstance(filenames, (list, tuple)): - # Convert single file to list - filenames = [filenames] - - missing = [] - for filename in filenames: - filepath = os.path.join(staging_dir, filename) - filepath = os.path.normpath(filepath) - if filepath not in generated_files: - self.log.warning(f"Missing texture: {filepath}") - missing.append(filepath) - - if not missing: - continue - - if allow_skipped_maps: - # TODO: This is changing state on the instance's which - # should not be done during validation. - self.log.warning(f"Disabling texture instance: " - f"{image_instance}") - image_instance.data["active"] = False - image_instance.data["publish"] = False - image_instance.data["integrate"] = False - representation.setdefault("tags", []).append("delete") - continue - else: - error_report_missing.append((image_instance, missing)) - - if error_report_missing: - - message = ( - "The Texture Set skipped exporting some output maps which are " - "defined in the Output Template. 
This happens if the Output " - "Templates exports maps from channels which you do not " - "have in your current Substance Painter project.\n\n" - "To allow this enable the *Allow Skipped Output Maps* setting " - "on the instance.\n\n" - f"Instance {instance} skipped exporting output maps:\n" - "" - ) - - for image_instance, missing in error_report_missing: - missing_str = ", ".join(missing) - message += f"- **{image_instance}** skipped: {missing_str}\n" - - raise PublishValidationError( - message=message, - title="Missing output maps" - ) - - def get_invalid_channels(self, instance, config): - """Function to get invalid channel(s) from export channel - filtering - - Args: - instance (pyblish.api.Instance): Instance - config (dict): export config - - Raises: - PublishValidationError: raise Publish Validation - Error if any invalid channel(s) found - - Returns: - list: invalid channel(s) - """ - creator_attrs = instance.data["creator_attributes"] - export_channel = creator_attrs.get("exportChannel", []) - tmp_export_channel = copy.deepcopy(export_channel) - invalid_channel = [] - if export_channel: - for export_preset in config.get("exportPresets", {}): - if not export_preset.get("maps", {}): - raise PublishValidationError( - "No Texture Map Exported with texture set: {}.".format( - instance.name) - ) - map_names = [channel_map["fileName"] for channel_map - in export_preset["maps"]] - for channel in tmp_export_channel: - # Check if channel is found in at least one map - for map_name in map_names: - if channel in map_name: - break - else: - invalid_channel.append(channel) - - return invalid_channel diff --git a/server_addon/substancepainter/client/ayon_substancepainter/version.py b/server_addon/substancepainter/client/ayon_substancepainter/version.py deleted file mode 100644 index d8e2b13996..0000000000 --- a/server_addon/substancepainter/client/ayon_substancepainter/version.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -"""Package declaring AYON addon 'substancepainter' version.""" -__version__ = "0.2.1" diff --git a/server_addon/substancepainter/package.py b/server_addon/substancepainter/package.py deleted file mode 100644 index 8ffad08b27..0000000000 --- a/server_addon/substancepainter/package.py +++ /dev/null @@ -1,10 +0,0 @@ -name = "substancepainter" -title = "Substance Painter" -version = "0.2.1" - -client_dir = "ayon_substancepainter" - -ayon_required_addons = { - "core": ">0.3.2", -} -ayon_compatible_addons = {} diff --git a/server_addon/substancepainter/server/__init__.py b/server_addon/substancepainter/server/__init__.py deleted file mode 100644 index f6cd51e610..0000000000 --- a/server_addon/substancepainter/server/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from typing import Type - -from ayon_server.addons import BaseServerAddon - -from .settings import SubstancePainterSettings, DEFAULT_SPAINTER_SETTINGS - - -class SubstancePainterAddon(BaseServerAddon): - settings_model: Type[SubstancePainterSettings] = SubstancePainterSettings - - async def get_default_settings(self): - settings_model_cls = self.get_settings_model() - return settings_model_cls(**DEFAULT_SPAINTER_SETTINGS) diff --git a/server_addon/substancepainter/server/settings/__init__.py b/server_addon/substancepainter/server/settings/__init__.py deleted file mode 100644 index f47f064536..0000000000 --- a/server_addon/substancepainter/server/settings/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from .main import ( - SubstancePainterSettings, - DEFAULT_SPAINTER_SETTINGS, -) - - -__all__ = ( - 
"SubstancePainterSettings", - "DEFAULT_SPAINTER_SETTINGS", -) diff --git a/server_addon/substancepainter/server/settings/creator_plugins.py b/server_addon/substancepainter/server/settings/creator_plugins.py deleted file mode 100644 index 9ba7684d30..0000000000 --- a/server_addon/substancepainter/server/settings/creator_plugins.py +++ /dev/null @@ -1,59 +0,0 @@ -from ayon_server.settings import BaseSettingsModel, SettingsField - - -class ChannelMappingItemModel(BaseSettingsModel): - _layout = "compact" - name: str = SettingsField(title="Channel Type") - value: str = SettingsField(title="Channel Map") - - -class CreateTextureModel(BaseSettingsModel): - channel_mapping: list[ChannelMappingItemModel] = SettingsField( - default_factory=list, title="Channel Mapping") - - -class CreatorsModel(BaseSettingsModel): - CreateTextures: CreateTextureModel = SettingsField( - default_factory=CreateTextureModel, - title="Create Textures" - ) - - -DEFAULT_CREATOR_SETTINGS = { - "CreateTextures": { - "channel_mapping": [ - {"name": "Base Color", "value": "BaseColor"}, - {"name": "Metallic", "value": "Metallic"}, - {"name": "Roughness", "value": "Roughness"}, - {"name": "Normal", "value": "Normal"}, - {"name": "Height", "value": "Height"}, - {"name": "Specular Edge Color", - "value": "SpecularEdgeColor"}, - {"name": "Opacity", "value": "Opacity"}, - {"name": "Displacement", "value": "Displacement"}, - {"name": "Glossiness", "value": "Glossiness"}, - {"name": "Anisotropy Level", - "value": "Anisotropylevel"}, - {"name": "Ambient Occulsion", "value": "AO"}, - {"name": "Anisotropy Angle", - "value": "Anisotropyangle"}, - {"name": "Transmissive", "value": "Transmissive"}, - {"name": "Reflection", "value": "Reflection"}, - {"name": "Diffuse", "value": "Diffuse"}, - {"name": "Index of Refraction", "value": "Ior"}, - {"name": "Specular Level", "value": "Specularlevel"}, - {"name": "Blending Mask", "value": "BlendingMask"}, - {"name": "Translucency", "value": "Translucency"}, - {"name": "Scattering", "value": "Scattering"}, - {"name": "Scatter Color", "value": "ScatterColor"}, - {"name": "Sheen Opacity", "value": "SheenOpacity"}, - {"name": "Sheen Color", "value": "SheenColor"}, - {"name": "Coat Opacity", "value": "CoatOpacity"}, - {"name": "Coat Color", "value": "CoatColor"}, - {"name": "Coat Roughness", "value": "CoatRoughness"}, - {"name": "CoatSpecularLevel", - "value": "Coat Specular Level"}, - {"name": "CoatNormal", "value": "Coat Normal"} - ], - } -} \ No newline at end of file diff --git a/server_addon/substancepainter/server/settings/imageio.py b/server_addon/substancepainter/server/settings/imageio.py deleted file mode 100644 index 05aafd5215..0000000000 --- a/server_addon/substancepainter/server/settings/imageio.py +++ /dev/null @@ -1,76 +0,0 @@ -from pydantic import validator -from ayon_server.settings import BaseSettingsModel, SettingsField -from ayon_server.settings.validators import ensure_unique_names - - -class ImageIOConfigModel(BaseSettingsModel): - """[DEPRECATED] Addon OCIO config settings. Please set the OCIO config - path in the Core addon profiles here - (ayon+settings://core/imageio/ocio_config_profiles). - """ - - override_global_config: bool = SettingsField( - False, - title="Override global OCIO config", - description=( - "DEPRECATED functionality. Please set the OCIO config path in the " - "Core addon profiles here (ayon+settings://core/imageio/" - "ocio_config_profiles)." 
- ), - ) - filepath: list[str] = SettingsField( - default_factory=list, - title="Config path", - description=( - "DEPRECATED functionality. Please set the OCIO config path in the " - "Core addon profiles here (ayon+settings://core/imageio/" - "ocio_config_profiles)." - ), - ) - - -class ImageIOFileRuleModel(BaseSettingsModel): - name: str = SettingsField("", title="Rule name") - pattern: str = SettingsField("", title="Regex pattern") - colorspace: str = SettingsField("", title="Colorspace name") - ext: str = SettingsField("", title="File extension") - - -class ImageIOFileRulesModel(BaseSettingsModel): - activate_host_rules: bool = SettingsField(False) - rules: list[ImageIOFileRuleModel] = SettingsField( - default_factory=list, - title="Rules" - ) - - @validator("rules") - def validate_unique_outputs(cls, value): - ensure_unique_names(value) - return value - - -class ImageIOSettings(BaseSettingsModel): - activate_host_color_management: bool = SettingsField( - True, title="Enable Color Management" - ) - ocio_config: ImageIOConfigModel = SettingsField( - default_factory=ImageIOConfigModel, - title="OCIO config" - ) - file_rules: ImageIOFileRulesModel = SettingsField( - default_factory=ImageIOFileRulesModel, - title="File Rules" - ) - - -DEFAULT_IMAGEIO_SETTINGS = { - "activate_host_color_management": True, - "ocio_config": { - "override_global_config": False, - "filepath": [] - }, - "file_rules": { - "activate_host_rules": False, - "rules": [] - } -} diff --git a/server_addon/substancepainter/server/settings/load_plugins.py b/server_addon/substancepainter/server/settings/load_plugins.py deleted file mode 100644 index e6b2fd86c3..0000000000 --- a/server_addon/substancepainter/server/settings/load_plugins.py +++ /dev/null @@ -1,122 +0,0 @@ -from ayon_server.settings import BaseSettingsModel, SettingsField - - -def normal_map_format_enum(): - return [ - {"label": "DirectX", "value": "NormalMapFormat.DirectX"}, - {"label": "OpenGL", "value": "NormalMapFormat.OpenGL"}, - ] - - -def tangent_space_enum(): - return [ - {"label": "Per Fragment", "value": "TangentSpace.PerFragment"}, - {"label": "Per Vertex", "value": "TangentSpace.PerVertex"}, - ] - - -def uv_workflow_enum(): - return [ - {"label": "Default", "value": "ProjectWorkflow.Default"}, - {"label": "UV Tile", "value": "ProjectWorkflow.UVTile"}, - {"label": "Texture Set Per UV Tile", - "value": "ProjectWorkflow.TextureSetPerUVTile"} - ] - - -def document_resolution_enum(): - return [ - {"label": "128", "value": 128}, - {"label": "256", "value": 256}, - {"label": "512", "value": 512}, - {"label": "1024", "value": 1024}, - {"label": "2048", "value": 2048}, - {"label": "4096", "value": 4096} - ] - - -class ProjectTemplatesModel(BaseSettingsModel): - _layout = "expanded" - name: str = SettingsField("default", title="Template Name") - default_texture_resolution: int = SettingsField( - 1024, enum_resolver=document_resolution_enum, - title="Document Resolution", - description=("Set texture resolution when " - "creating new project.") - ) - import_cameras: bool = SettingsField( - True, title="Import Cameras", - description="Import cameras from the mesh file.") - normal_map_format: str = SettingsField( - "DirectX", enum_resolver=normal_map_format_enum, - title="Normal Map Format", - description=("Set normal map format when " - "creating new project.") - ) - project_workflow: str = SettingsField( - "Default", enum_resolver=uv_workflow_enum, - title="UV Tile Settings", - description=("Set UV workflow when " - "creating new project.") - ) - 
tangent_space_mode: str = SettingsField( - "PerFragment", enum_resolver=tangent_space_enum, - title="Tangent Space", - description=("An option to compute tangent space " - "when creating new project.") - ) - preserve_strokes: bool = SettingsField( - True, title="Preserve Strokes", - description=("Preserve strokes positions on mesh.\n" - "(only relevant when loading into " - "existing project)") - ) - - -class ProjectTemplateSettingModel(BaseSettingsModel): - project_templates: list[ProjectTemplatesModel] = SettingsField( - default_factory=ProjectTemplatesModel, - title="Project Templates" - ) - - -class LoadersModel(BaseSettingsModel): - SubstanceLoadProjectMesh: ProjectTemplateSettingModel = SettingsField( - default_factory=ProjectTemplateSettingModel, - title="Load Mesh" - ) - - -DEFAULT_LOADER_SETTINGS = { - "SubstanceLoadProjectMesh": { - "project_templates": [ - { - "name": "2K(Default)", - "default_texture_resolution": 2048, - "import_cameras": True, - "normal_map_format": "NormalMapFormat.DirectX", - "project_workflow": "ProjectWorkflow.Default", - "tangent_space_mode": "TangentSpace.PerFragment", - "preserve_strokes": True - }, - { - "name": "2K(UV tile)", - "default_texture_resolution": 2048, - "import_cameras": True, - "normal_map_format": "NormalMapFormat.DirectX", - "project_workflow": "ProjectWorkflow.UVTile", - "tangent_space_mode": "TangentSpace.PerFragment", - "preserve_strokes": True - }, - { - "name": "4K(Custom)", - "default_texture_resolution": 4096, - "import_cameras": True, - "normal_map_format": "NormalMapFormat.OpenGL", - "project_workflow": "ProjectWorkflow.UVTile", - "tangent_space_mode": "TangentSpace.PerFragment", - "preserve_strokes": True - } - ] - } -} diff --git a/server_addon/substancepainter/server/settings/main.py b/server_addon/substancepainter/server/settings/main.py deleted file mode 100644 index 9a13d2c32f..0000000000 --- a/server_addon/substancepainter/server/settings/main.py +++ /dev/null @@ -1,34 +0,0 @@ -from ayon_server.settings import BaseSettingsModel, SettingsField -from .imageio import ImageIOSettings, DEFAULT_IMAGEIO_SETTINGS -from .creator_plugins import CreatorsModel, DEFAULT_CREATOR_SETTINGS -from .load_plugins import LoadersModel, DEFAULT_LOADER_SETTINGS - - -class ShelvesSettingsModel(BaseSettingsModel): - _layout = "compact" - name: str = SettingsField(title="Name") - value: str = SettingsField(title="Path") - - -class SubstancePainterSettings(BaseSettingsModel): - imageio: ImageIOSettings = SettingsField( - default_factory=ImageIOSettings, - title="Color Management (ImageIO)" - ) - shelves: list[ShelvesSettingsModel] = SettingsField( - default_factory=list, - title="Shelves" - ) - create: CreatorsModel = SettingsField( - default_factory=DEFAULT_CREATOR_SETTINGS, title="Creators") - load: LoadersModel = SettingsField( - default_factory=DEFAULT_LOADER_SETTINGS, title="Loaders") - - -DEFAULT_SPAINTER_SETTINGS = { - "imageio": DEFAULT_IMAGEIO_SETTINGS, - "shelves": [], - "create": DEFAULT_CREATOR_SETTINGS, - "load": DEFAULT_LOADER_SETTINGS, - -} diff --git a/server_addon/tvpaint/client/ayon_tvpaint/__init__.py b/server_addon/tvpaint/client/ayon_tvpaint/__init__.py deleted file mode 100644 index 2c4a052234..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -from .version import __version__ -from .addon import ( - get_launch_script_path, - TVPaintAddon, - TVPAINT_ROOT_DIR, -) - - -__all__ = ( - "__version__", - - "get_launch_script_path", - "TVPaintAddon", - "TVPAINT_ROOT_DIR", -) diff 
--git a/server_addon/tvpaint/client/ayon_tvpaint/addon.py b/server_addon/tvpaint/client/ayon_tvpaint/addon.py deleted file mode 100644 index c98c929a96..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/addon.py +++ /dev/null @@ -1,40 +0,0 @@ -import os -from ayon_core.addon import AYONAddon, IHostAddon - -from .version import __version__ - -TVPAINT_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) - - -def get_launch_script_path(): - return os.path.join( - TVPAINT_ROOT_DIR, - "api", - "launch_script.py" - ) - - -class TVPaintAddon(AYONAddon, IHostAddon): - name = "tvpaint" - version = __version__ - host_name = "tvpaint" - - def add_implementation_envs(self, env, _app): - """Modify environments to contain all required for implementation.""" - - defaults = { - "AYON_LOG_NO_COLORS": "1" - } - for key, value in defaults.items(): - if not env.get(key): - env[key] = value - - def get_launch_hook_paths(self, app): - if app.host_name != self.host_name: - return [] - return [ - os.path.join(TVPAINT_ROOT_DIR, "hooks") - ] - - def get_workfile_extensions(self): - return [".tvpp"] diff --git a/server_addon/tvpaint/client/ayon_tvpaint/api/__init__.py b/server_addon/tvpaint/client/ayon_tvpaint/api/__init__.py deleted file mode 100644 index 7b53aad9a4..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/api/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -from .communication_server import CommunicationWrapper -from .pipeline import ( - TVPaintHost, -) - - -__all__ = ( - "CommunicationWrapper", - - "TVPaintHost", -) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/api/communication_server.py b/server_addon/tvpaint/client/ayon_tvpaint/api/communication_server.py deleted file mode 100644 index 7ccb49f07e..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/api/communication_server.py +++ /dev/null @@ -1,925 +0,0 @@ -import os -import json -import time -import subprocess -import collections -import asyncio -import logging -import socket -import platform -import filecmp -import tempfile -import threading -import shutil - -from contextlib import closing - -from aiohttp import web -from aiohttp_json_rpc import JsonRpc -from aiohttp_json_rpc.protocol import ( - encode_request, encode_error, decode_msg, JsonRpcMsgTyp -) -from aiohttp_json_rpc.exceptions import RpcError - -from ayon_core.lib import emit_event -from ayon_tvpaint.tvpaint_plugin import get_plugin_files_path - -log = logging.getLogger(__name__) -log.setLevel(logging.DEBUG) - - -class CommunicationWrapper: - # TODO add logs and exceptions - communicator = None - - log = logging.getLogger("CommunicationWrapper") - - @classmethod - def create_qt_communicator(cls, *args, **kwargs): - """Create communicator for Artist usage.""" - communicator = QtCommunicator(*args, **kwargs) - cls.set_communicator(communicator) - return communicator - - @classmethod - def set_communicator(cls, communicator): - if not cls.communicator: - cls.communicator = communicator - else: - cls.log.warning("Communicator was set multiple times.") - - @classmethod - def client(cls): - if not cls.communicator: - return None - return cls.communicator.client() - - @classmethod - def execute_george(cls, george_script): - """Execute passed goerge script in TVPaint.""" - if not cls.communicator: - return - return cls.communicator.execute_george(george_script) - - -class WebSocketServer: - def __init__(self): - self.client = None - - self.loop = asyncio.new_event_loop() - self.app = web.Application(loop=self.loop) - self.port = self.find_free_port() - 
self.websocket_thread = WebsocketServerThread( - self, self.port, loop=self.loop - ) - - @property - def server_is_running(self): - return self.websocket_thread.server_is_running - - def add_route(self, *args, **kwargs): - self.app.router.add_route(*args, **kwargs) - - @staticmethod - def find_free_port(): - with closing( - socket.socket(socket.AF_INET, socket.SOCK_STREAM) - ) as sock: - sock.bind(("", 0)) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - port = sock.getsockname()[1] - return port - - def start(self): - self.websocket_thread.start() - - def stop(self): - try: - if self.websocket_thread.is_running: - log.debug("Stopping websocket server") - self.websocket_thread.is_running = False - self.websocket_thread.stop() - except Exception: - log.warning( - "Error has happened during Killing websocket server", - exc_info=True - ) - - -class WebsocketServerThread(threading.Thread): - """ Listener for websocket rpc requests. - - It would be probably better to "attach" this to main thread (as for - example Harmony needs to run something on main thread), but currently - it creates separate thread and separate asyncio event loop - """ - def __init__(self, module, port, loop): - super(WebsocketServerThread, self).__init__() - self.is_running = False - self.server_is_running = False - self.port = port - self.module = module - self.loop = loop - self.runner = None - self.site = None - self.tasks = [] - - def run(self): - self.is_running = True - - try: - log.debug("Starting websocket server") - - self.loop.run_until_complete(self.start_server()) - - log.info( - "Running Websocket server on URL:" - " \"ws://localhost:{}\"".format(self.port) - ) - - asyncio.ensure_future(self.check_shutdown(), loop=self.loop) - - self.server_is_running = True - self.loop.run_forever() - - except Exception: - log.warning( - "Websocket Server service has failed", exc_info=True - ) - finally: - self.server_is_running = False - # optional - self.loop.close() - - self.is_running = False - log.info("Websocket server stopped") - - async def start_server(self): - """ Starts runner and TCPsite """ - self.runner = web.AppRunner(self.module.app) - await self.runner.setup() - self.site = web.TCPSite(self.runner, "localhost", self.port) - await self.site.start() - - def stop(self): - """Sets is_running flag to false, 'check_shutdown' shuts server down""" - self.is_running = False - - async def check_shutdown(self): - """ Future that is running and checks if server should be running - periodically. 
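-
-        Also awaits any tasks queued in `self.tasks` before checking the
-        flag again.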
-
-        """
-        while self.is_running:
-            while self.tasks:
-                task = self.tasks.pop(0)
-                log.debug("waiting for task {}".format(task))
-                await task
-                log.debug("returned value {}".format(task.result))
-
-            await asyncio.sleep(0.5)
-
-        log.debug("## Server shutdown started")
-
-        await self.site.stop()
-        log.debug("# Site stopped")
-        await self.runner.cleanup()
-        log.debug("# Server runner stopped")
-        tasks = [
-            task for task in asyncio.all_tasks()
-            if task is not asyncio.current_task()
-        ]
-        list(map(lambda task: task.cancel(), tasks))  # cancel all the tasks
-        results = await asyncio.gather(*tasks, return_exceptions=True)
-        log.debug(f"Finished awaiting cancelled tasks, results: {results}...")
-        await self.loop.shutdown_asyncgens()
-        # to really make sure everything else has time to stop
-        await asyncio.sleep(0.07)
-        self.loop.stop()
-
-
-class BaseTVPaintRpc(JsonRpc):
-    def __init__(self, communication_obj, route_name="", **kwargs):
-        super().__init__(**kwargs)
-        self.requests_ids = collections.defaultdict(lambda: 0)
-        self.waiting_requests = collections.defaultdict(list)
-        self.responses = collections.defaultdict(list)
-
-        self.route_name = route_name
-        self.communication_obj = communication_obj
-
-    async def _handle_rpc_msg(self, http_request, raw_msg):
-        # This duplicates code from super because there is no other way
-        # to handle server->client requests
-        host = http_request.host
-        if host in self.waiting_requests:
-            try:
-                _raw_message = raw_msg.data
-                msg = decode_msg(_raw_message)
-
-            except RpcError as error:
-                await self._ws_send_str(http_request, encode_error(error))
-                return
-
-            if msg.type in (JsonRpcMsgTyp.RESULT, JsonRpcMsgTyp.ERROR):
-                msg_data = json.loads(_raw_message)
-                if msg_data.get("id") in self.waiting_requests[host]:
-                    self.responses[host].append(msg_data)
-                    return
-
-        return await super()._handle_rpc_msg(http_request, raw_msg)
-
-    def client_connected(self):
-        # TODO This is a poor check.
Add check it is client from TVPaint - if self.clients: - return True - return False - - def send_notification(self, client, method, params=None): - if params is None: - params = [] - asyncio.run_coroutine_threadsafe( - client.ws.send_str(encode_request(method, params=params)), - loop=self.loop - ) - - def send_request(self, client, method, params=None, timeout=0): - if params is None: - params = [] - - client_host = client.host - - request_id = self.requests_ids[client_host] - self.requests_ids[client_host] += 1 - - self.waiting_requests[client_host].append(request_id) - - log.debug("Sending request to client {} ({}, {}) id: {}".format( - client_host, method, params, request_id - )) - future = asyncio.run_coroutine_threadsafe( - client.ws.send_str(encode_request(method, request_id, params)), - loop=self.loop - ) - result = future.result() - - not_found = object() - response = not_found - start = time.time() - while True: - if client.ws.closed: - return None - - for _response in self.responses[client_host]: - _id = _response.get("id") - if _id == request_id: - response = _response - break - - if response is not not_found: - break - - if timeout > 0 and (time.time() - start) > timeout: - raise Exception("Timeout passed") - return - - time.sleep(0.1) - - if response is not_found: - raise Exception("Connection closed") - - self.responses[client_host].remove(response) - - error = response.get("error") - result = response.get("result") - if error: - raise Exception("Error happened: {}".format(error)) - return result - - -class QtTVPaintRpc(BaseTVPaintRpc): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - from ayon_core.tools.utils import host_tools - self.tools_helper = host_tools.HostToolsHelper() - - route_name = self.route_name - - # Register methods - self.add_methods( - (route_name, self.workfiles_tool), - (route_name, self.loader_tool), - (route_name, self.publish_tool), - (route_name, self.scene_inventory_tool), - (route_name, self.library_loader_tool), - (route_name, self.experimental_tools) - ) - - # Panel routes for tools - async def workfiles_tool(self): - log.info("Triggering Workfile tool") - item = MainThreadItem(self.tools_helper.show_workfiles) - self._execute_in_main_thread(item, wait=False) - return - - async def loader_tool(self): - log.info("Triggering Loader tool") - item = MainThreadItem(self.tools_helper.show_loader) - self._execute_in_main_thread(item, wait=False) - return - - async def publish_tool(self): - log.info("Triggering Publish tool") - item = MainThreadItem(self.tools_helper.show_publisher_tool) - self._execute_in_main_thread(item, wait=False) - return - - async def scene_inventory_tool(self): - """Open Scene Inventory tool. - - Function can't confirm if tool was opened becauise one part of - SceneInventory initialization is calling websocket request to host but - host can't response because is waiting for response from this call. 
- """ - log.info("Triggering Scene inventory tool") - item = MainThreadItem(self.tools_helper.show_scene_inventory) - # Do not wait for result of callback - self._execute_in_main_thread(item, wait=False) - return - - async def library_loader_tool(self): - log.info("Triggering Library loader tool") - item = MainThreadItem(self.tools_helper.show_library_loader) - self._execute_in_main_thread(item, wait=False) - return - - async def experimental_tools(self): - log.info("Triggering Library loader tool") - item = MainThreadItem(self.tools_helper.show_experimental_tools_dialog) - self._execute_in_main_thread(item, wait=False) - return - - async def _async_execute_in_main_thread(self, item, **kwargs): - await self.communication_obj.async_execute_in_main_thread( - item, **kwargs - ) - - def _execute_in_main_thread(self, item, **kwargs): - return self.communication_obj.execute_in_main_thread(item, **kwargs) - - -class MainThreadItem: - """Structure to store information about callback in main thread. - - Item should be used to execute callback in main thread which may be needed - for execution of Qt objects. - - Item store callback (callable variable), arguments and keyword arguments - for the callback. Item hold information about it's process. - """ - not_set = object() - sleep_time = 0.1 - - def __init__(self, callback, *args, **kwargs): - self.done = False - self.exception = self.not_set - self.result = self.not_set - self.callback = callback - self.args = args - self.kwargs = kwargs - - def execute(self): - """Execute callback and store its result. - - Method must be called from main thread. Item is marked as `done` - when callback execution finished. Store output of callback of exception - information when callback raises one. - """ - log.debug("Executing process in main thread") - if self.done: - log.warning("- item is already processed") - return - - callback = self.callback - args = self.args - kwargs = self.kwargs - log.info("Running callback: {}".format(str(callback))) - try: - result = callback(*args, **kwargs) - self.result = result - - except Exception as exc: - self.exception = exc - - finally: - self.done = True - - def wait(self): - """Wait for result from main thread. - - This method stops current thread until callback is executed. - - Returns: - object: Output of callback. May be any type or object. - - Raises: - Exception: Reraise any exception that happened during callback - execution. - """ - while not self.done: - time.sleep(self.sleep_time) - - if self.exception is self.not_set: - return self.result - raise self.exception - - async def async_wait(self): - """Wait for result from main thread. - - Returns: - object: Output of callback. May be any type or object. - - Raises: - Exception: Reraise any exception that happened during callback - execution. - """ - while not self.done: - await asyncio.sleep(self.sleep_time) - - if self.exception is self.not_set: - return self.result - raise self.exception - - -class BaseCommunicator: - def __init__(self): - self.process = None - self.websocket_server = None - self.websocket_rpc = None - self.exit_code = None - self._connected_client = None - - @property - def server_is_running(self): - if self.websocket_server is None: - return False - return self.websocket_server.server_is_running - - def _windows_file_process(self, src_dst_mapping, to_remove): - """Windows specific file processing asking for admin permissions. - - It is required to have administration permissions to modify plugin - files in TVPaint installation folder. 
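-
-        The copy and delete operations are queued on a Windows
-        ``IFileOperation`` COM object so the shell can request elevated
-        permissions when needed.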
- - Method requires `pywin32` python module. - - Args: - src_dst_mapping (list, tuple, set): Mapping of source file to - destination. Both must be full path. Each item must be iterable - of size 2 `(C:/src/file.dll, C:/dst/file.dll)`. - to_remove (list): Fullpath to files that should be removed. - """ - - import pythoncom - from win32comext.shell import shell - - # Create temp folder where plugin files are temporary copied - # - reason is that copy to TVPaint requires administartion permissions - # but admin may not have access to source folder - tmp_dir = os.path.normpath( - tempfile.mkdtemp(prefix="tvpaint_copy_") - ) - - # Copy source to temp folder and create new mapping - dst_folders = collections.defaultdict(list) - new_src_dst_mapping = [] - for old_src, dst in src_dst_mapping: - new_src = os.path.join(tmp_dir, os.path.split(old_src)[1]) - shutil.copy(old_src, new_src) - new_src_dst_mapping.append((new_src, dst)) - - for src, dst in new_src_dst_mapping: - src = os.path.normpath(src) - dst = os.path.normpath(dst) - dst_filename = os.path.basename(dst) - dst_folder_path = os.path.dirname(dst) - dst_folders[dst_folder_path].append((dst_filename, src)) - - # create an instance of IFileOperation - fo = pythoncom.CoCreateInstance( - shell.CLSID_FileOperation, - None, - pythoncom.CLSCTX_ALL, - shell.IID_IFileOperation - ) - # Add delete command to file operation object - for filepath in to_remove: - item = shell.SHCreateItemFromParsingName( - filepath, None, shell.IID_IShellItem - ) - fo.DeleteItem(item) - - # here you can use SetOperationFlags, progress Sinks, etc. - for folder_path, items in dst_folders.items(): - # create an instance of IShellItem for the target folder - folder_item = shell.SHCreateItemFromParsingName( - folder_path, None, shell.IID_IShellItem - ) - for _dst_filename, source_file_path in items: - # create an instance of IShellItem for the source item - copy_item = shell.SHCreateItemFromParsingName( - source_file_path, None, shell.IID_IShellItem - ) - # queue the copy operation - fo.CopyItem(copy_item, folder_item, _dst_filename, None) - - # commit - fo.PerformOperations() - - # Remove temp folder - shutil.rmtree(tmp_dir) - - def _prepare_windows_plugin(self, launch_args): - """Copy plugin to TVPaint plugins and set PATH to dependencies. - - Check if plugin in TVPaint's plugins exist and match to plugin - version to current implementation version. Based on 64-bit or 32-bit - version of the plugin. Path to libraries required for plugin is added - to PATH variable. - """ - - host_executable = launch_args[0] - executable_file = os.path.basename(host_executable) - if "64bit" in executable_file: - subfolder = "windows_x64" - elif "32bit" in executable_file: - subfolder = "windows_x86" - else: - raise ValueError( - "Can't determine if executable " - "leads to 32-bit or 64-bit TVPaint!" 
- ) - - plugin_files_path = get_plugin_files_path() - # Folder for right windows plugin files - source_plugins_dir = os.path.join(plugin_files_path, subfolder) - - # Path to libraries (.dll) required for plugin library - # - additional libraries can be copied to TVPaint installation folder - # (next to executable) or added to PATH environment variable - additional_libs_folder = os.path.join( - source_plugins_dir, - "additional_libraries" - ) - additional_libs_folder = additional_libs_folder.replace("\\", "/") - if ( - os.path.exists(additional_libs_folder) - and additional_libs_folder not in os.environ["PATH"] - ): - os.environ["PATH"] += (os.pathsep + additional_libs_folder) - - # Path to TVPaint's plugins folder (where we want to add our plugin) - host_plugins_path = os.path.join( - os.path.dirname(host_executable), - "plugins" - ) - - # Files that must be copied to TVPaint's plugin folder - plugin_dir = os.path.join(source_plugins_dir, "plugin") - - to_copy = [] - to_remove = [] - # Remove old plugin name - deprecated_filepath = os.path.join( - host_plugins_path, "AvalonPlugin.dll" - ) - if os.path.exists(deprecated_filepath): - to_remove.append(deprecated_filepath) - - for filename in os.listdir(plugin_dir): - src_full_path = os.path.join(plugin_dir, filename) - dst_full_path = os.path.join(host_plugins_path, filename) - if dst_full_path in to_remove: - to_remove.remove(dst_full_path) - - if ( - not os.path.exists(dst_full_path) - or not filecmp.cmp(src_full_path, dst_full_path) - ): - to_copy.append((src_full_path, dst_full_path)) - - # Skip copy if everything is done - if not to_copy and not to_remove: - return - - # Try to copy - try: - self._windows_file_process(to_copy, to_remove) - except Exception: - log.error("Plugin copy failed", exc_info=True) - - # Validate copy was done - invalid_copy = [] - for src, dst in to_copy: - if not os.path.exists(dst) or not filecmp.cmp(src, dst): - invalid_copy.append((src, dst)) - - # Validate delete was dones - invalid_remove = [] - for filepath in to_remove: - if os.path.exists(filepath): - invalid_remove.append(filepath) - - if not invalid_remove and not invalid_copy: - return - - msg_parts = [] - if invalid_remove: - msg_parts.append( - "Failed to remove files: {}".format(", ".join(invalid_remove)) - ) - - if invalid_copy: - _invalid = [ - "\"{}\" -> \"{}\"".format(src, dst) - for src, dst in invalid_copy - ] - msg_parts.append( - "Failed to copy files: {}".format(", ".join(_invalid)) - ) - raise RuntimeError(" & ".join(msg_parts)) - - def _launch_tv_paint(self, launch_args): - flags = ( - subprocess.DETACHED_PROCESS - | subprocess.CREATE_NEW_PROCESS_GROUP - ) - env = os.environ.copy() - # Remove QuickTime from PATH on windows - # - quicktime overrides TVPaint's ffmpeg encode/decode which may - # cause issues on loading - if platform.system().lower() == "windows": - new_path = [] - for path in env["PATH"].split(os.pathsep): - if path and "quicktime" not in path.lower(): - new_path.append(path) - env["PATH"] = os.pathsep.join(new_path) - - kwargs = { - "env": env, - "creationflags": flags - } - self.process = subprocess.Popen(launch_args, **kwargs) - - def _create_routes(self): - self.websocket_rpc = BaseTVPaintRpc( - self, loop=self.websocket_server.loop - ) - self.websocket_server.add_route( - "*", "/", self.websocket_rpc.handle_request - ) - - def _start_webserver(self): - self.websocket_server.start() - # Make sure RPC is using same loop as websocket server - while not self.websocket_server.server_is_running: - time.sleep(0.1) - - def 
_stop_webserver(self):
-        self.websocket_server.stop()
-
-    def _exit(self, exit_code=None):
-        self._stop_webserver()
-        if exit_code is not None:
-            self.exit_code = exit_code
-
-        if self.exit_code is None:
-            self.exit_code = 0
-
-    def stop(self):
-        """Stop communication and currently running python process."""
-        log.info("Stopping communication")
-        self._exit()
-
-    def launch(self, launch_args):
-        """Prepare all required data and launch host.
-
-        First the websocket server is prepared as communication point for
-        the host; when the server is ready to use, the host is launched as
-        a subprocess.
-        """
-        if platform.system().lower() == "windows":
-            self._prepare_windows_plugin(launch_args)
-
-        # Launch TVPaint and the websocket server.
-        log.info("Launching TVPaint")
-        self.websocket_server = WebSocketServer()
-
-        self._create_routes()
-
-        os.environ["WEBSOCKET_URL"] = "ws://localhost:{}".format(
-            self.websocket_server.port
-        )
-
-        log.info("Added request handler for url: {}".format(
-            os.environ["WEBSOCKET_URL"]
-        ))
-
-        self._start_webserver()
-
-        # Start TVPaint when server is running
-        self._launch_tv_paint(launch_args)
-
-        log.info("Waiting for client connection")
-        while True:
-            if self.process.poll() is not None:
-                log.debug("Host process is not alive. Exiting")
-                self._exit(1)
-                return
-
-            if self.websocket_rpc.client_connected():
-                log.info("Client has connected")
-                break
-            time.sleep(0.5)
-
-        self._on_client_connect()
-
-        emit_event("application.launched")
-
-    def _on_client_connect(self):
-        self._initial_textfile_write()
-
-    def _initial_textfile_write(self):
-        """Show popup about Write to file at start of TVPaint."""
-        tmp_file = tempfile.NamedTemporaryFile(
-            mode="w", prefix="a_tvp_", suffix=".txt", delete=False
-        )
-        tmp_file.close()
-        tmp_filepath = tmp_file.name.replace("\\", "/")
-        george_script = (
-            "tv_writetextfile \"strict\" \"append\" \"{}\" \"empty\""
-        ).format(tmp_filepath)
-
-        result = CommunicationWrapper.execute_george(george_script)
-
-        # Remove the file
-        os.remove(tmp_filepath)
-
-        if result is None:
-            log.warning(
-                "Host was probably closed before plugin was initialized."
-            )
-        elif result.lower() == "forbidden":
-            log.warning("User didn't confirm saving files.")
-
-    def _client(self):
-        if not self.websocket_rpc:
-            log.warning("Communicator's server did not start yet.")
-            return None
-
-        for client in self.websocket_rpc.clients:
-            if not client.ws.closed:
-                return client
-        log.warning("Client is not yet connected to Communicator.")
-        return None
-
-    def client(self):
-        if not self._connected_client or self._connected_client.ws.closed:
-            self._connected_client = self._client()
-        return self._connected_client
-
-    def send_request(self, method, params=None):
-        client = self.client()
-        if not client:
-            return
-
-        return self.websocket_rpc.send_request(
-            client, method, params
-        )
-
-    def send_notification(self, method, params=None):
-        client = self.client()
-        if not client:
-            return
-
-        self.websocket_rpc.send_notification(
-            client, method, params
-        )
-
-    def execute_george(self, george_script):
-        """Execute passed george script in TVPaint."""
-        return self.send_request(
-            "execute_george", [george_script]
-        )
-
-    def execute_george_through_file(self, george_script):
-        """Execute george script with temp file.
-
-        Allows executing a multiline george script without stopping the
-        websocket client.
-
-        On Windows make sure the script does not contain paths with
-        backward slashes; TVPaint won't execute it properly in that case.
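-
-        The script is written to a temporary `.grg` file which is executed
-        through `tv_runscript` and removed afterwards.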
- - Args: - george_script (str): George script to execute. May be multilined. - """ - temporary_file = tempfile.NamedTemporaryFile( - mode="w", prefix="a_tvp_", suffix=".grg", delete=False - ) - temporary_file.write(george_script) - temporary_file.close() - temp_file_path = temporary_file.name.replace("\\", "/") - self.execute_george("tv_runscript {}".format(temp_file_path)) - os.remove(temp_file_path) - - -class QtCommunicator(BaseCommunicator): - label = os.getenv("AYON_MENU_LABEL") or "AYON" - title = "{} Tools".format(label) - menu_definitions = { - "title": title, - "menu_items": [ - { - "callback": "workfiles_tool", - "label": "Workfiles", - "help": "Open workfiles tool" - }, { - "callback": "loader_tool", - "label": "Load", - "help": "Open loader tool" - }, { - "callback": "scene_inventory_tool", - "label": "Scene inventory", - "help": "Open scene inventory tool" - }, { - "callback": "publish_tool", - "label": "Publish", - "help": "Open publisher" - }, { - "callback": "library_loader_tool", - "label": "Library", - "help": "Open library loader tool" - }, { - "callback": "experimental_tools", - "label": "Experimental tools", - "help": "Open experimental tools dialog" - } - ] - } - - def __init__(self, qt_app): - super().__init__() - self.callback_queue = collections.deque() - self.qt_app = qt_app - - def _create_routes(self): - self.websocket_rpc = QtTVPaintRpc( - self, loop=self.websocket_server.loop - ) - self.websocket_server.add_route( - "*", "/", self.websocket_rpc.handle_request - ) - - def execute_in_main_thread(self, main_thread_item, wait=True): - """Add `MainThreadItem` to callback queue and wait for result.""" - self.callback_queue.append(main_thread_item) - if wait: - return main_thread_item.wait() - return - - async def async_execute_in_main_thread(self, main_thread_item, wait=True): - """Add `MainThreadItem` to callback queue and wait for result.""" - self.callback_queue.append(main_thread_item) - if wait: - return await main_thread_item.async_wait() - - def main_thread_listen(self): - """Get last `MainThreadItem` from queue. - - Must be called from main thread. - - Method checks if host process is still running as it may cause - issues if not. 
- """ - # check if host still running - if self.process.poll() is not None: - self._exit() - return None - - if self.callback_queue: - return self.callback_queue.popleft() - return None - - def _on_client_connect(self): - super()._on_client_connect() - self._build_menu() - - def _build_menu(self): - self.send_request( - "define_menu", [self.menu_definitions] - ) - - def _exit(self, *args, **kwargs): - super()._exit(*args, **kwargs) - emit_event("application.exit") - self.qt_app.exit(self.exit_code) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/api/launch_script.py b/server_addon/tvpaint/client/ayon_tvpaint/api/launch_script.py deleted file mode 100644 index 1e23e95572..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/api/launch_script.py +++ /dev/null @@ -1,85 +0,0 @@ -import os -import sys -import signal -import traceback -import ctypes -import platform -import logging - -from qtpy import QtWidgets, QtCore, QtGui - -from ayon_core import style -from ayon_core.pipeline import install_host -from ayon_tvpaint.api import ( - TVPaintHost, - CommunicationWrapper, -) - -log = logging.getLogger(__name__) - - -def safe_excepthook(*args): - traceback.print_exception(*args) - - -def main(launch_args): - # Be sure server won't crash at any moment but just print traceback - sys.excepthook = safe_excepthook - - # Create QtApplication for tools - # - QApplicaiton is also main thread/event loop of the server - qt_app = QtWidgets.QApplication([]) - - tvpaint_host = TVPaintHost() - # Execute pipeline installation - install_host(tvpaint_host) - - # Create Communicator object and trigger launch - # - this must be done before anything is processed - communicator = CommunicationWrapper.create_qt_communicator(qt_app) - communicator.launch(launch_args) - - def process_in_main_thread(): - """Execution of `MainThreadItem`.""" - item = communicator.main_thread_listen() - if item: - item.execute() - - timer = QtCore.QTimer() - timer.setInterval(100) - timer.timeout.connect(process_in_main_thread) - timer.start() - - # Register terminal signal handler - def signal_handler(*_args): - print("You pressed Ctrl+C. 
Process ended.") - communicator.stop() - - signal.signal(signal.SIGINT, signal_handler) - signal.signal(signal.SIGTERM, signal_handler) - - qt_app.setQuitOnLastWindowClosed(False) - qt_app.setStyleSheet(style.load_stylesheet()) - - # Load avalon icon - icon_path = style.app_icon_path() - if icon_path: - icon = QtGui.QIcon(icon_path) - qt_app.setWindowIcon(icon) - - # Set application name to be able show application icon in task bar - if platform.system().lower() == "windows": - ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID( - u"WebsocketServer" - ) - - # Run Qt application event processing - sys.exit(qt_app.exec_()) - - -if __name__ == "__main__": - args = list(sys.argv) - if os.path.abspath(__file__) == os.path.normpath(args[0]): - # Pop path to script - args.pop(0) - main(args) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/api/lib.py b/server_addon/tvpaint/client/ayon_tvpaint/api/lib.py deleted file mode 100644 index f8b8c29cdb..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/api/lib.py +++ /dev/null @@ -1,542 +0,0 @@ -import os -import logging -import tempfile - -from .communication_server import CommunicationWrapper - -log = logging.getLogger(__name__) - - -def execute_george(george_script, communicator=None): - if not communicator: - communicator = CommunicationWrapper.communicator - return communicator.execute_george(george_script) - - -def execute_george_through_file(george_script, communicator=None): - """Execute george script with temp file. - - Allows to execute multiline george script without stopping websocket - client. - - On windows make sure script does not contain paths with backwards - slashes in paths, TVPaint won't execute properly in that case. - - Args: - george_script (str): George script to execute. May be multilined. 
- """ - if not communicator: - communicator = CommunicationWrapper.communicator - - return communicator.execute_george_through_file(george_script) - - -def parse_layers_data(data): - """Parse layers data loaded in 'get_layers_data'.""" - layers = [] - layers_raw = data.split("\n") - for layer_raw in layers_raw: - layer_raw = layer_raw.strip() - if not layer_raw: - continue - ( - layer_id, group_id, visible, position, opacity, name, - layer_type, - frame_start, frame_end, prelighttable, postlighttable, - selected, editable, sencil_state, is_current - ) = layer_raw.split("|") - layer = { - "layer_id": int(layer_id), - "group_id": int(group_id), - "visible": visible == "ON", - "position": int(position), - # Opacity from 'tv_layerinfo' is always set to '0' so it's unusable - # "opacity": int(opacity), - "name": name, - "type": layer_type, - "frame_start": int(frame_start), - "frame_end": int(frame_end), - "prelighttable": prelighttable == "1", - "postlighttable": postlighttable == "1", - "selected": selected == "1", - "editable": editable == "1", - "sencil_state": sencil_state, - "is_current": is_current == "1" - } - layers.append(layer) - return layers - - -def get_layers_data_george_script(output_filepath, layer_ids=None): - """Prepare george script which will collect all layers from workfile.""" - output_filepath = output_filepath.replace("\\", "/") - george_script_lines = [ - # Variable containing full path to output file - "output_path = \"{}\"".format(output_filepath), - # Get Current Layer ID - "tv_LayerCurrentID", - "current_layer_id = result" - ] - # Script part for getting and storing layer information to temp - layer_data_getter = ( - # Get information about layer's group - "tv_layercolor \"get\" layer_id", - "group_id = result", - "tv_LayerInfo layer_id", - ( - "PARSE result visible position opacity name" - " type startFrame endFrame prelighttable postlighttable" - " selected editable sencilState" - ), - # Check if layer ID match `tv_LayerCurrentID` - "is_current=0", - "IF CMP(current_layer_id, layer_id)==1", - # - mark layer as selected if layer id match to current layer id - "is_current=1", - "selected=1", - "END", - # Prepare line with data separated by "|" - ( - "line = layer_id'|'group_id'|'visible'|'position'|'opacity'|'" - "name'|'type'|'startFrame'|'endFrame'|'prelighttable'|'" - "postlighttable'|'selected'|'editable'|'sencilState'|'is_current" - ), - # Write data to output file - "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' line", - ) - - # Collect data for all layers if layers are not specified - if layer_ids is None: - george_script_lines.extend(( - # Layer loop variables - "loop = 1", - "idx = 0", - # Layers loop - "WHILE loop", - "tv_LayerGetID idx", - "layer_id = result", - "idx = idx + 1", - # Stop loop if layer_id is "NONE" - "IF CMP(layer_id, \"NONE\")==1", - "loop = 0", - "ELSE", - *layer_data_getter, - "END", - "END" - )) - else: - for layer_id in layer_ids: - george_script_lines.append("layer_id = {}".format(layer_id)) - george_script_lines.extend(layer_data_getter) - - return "\n".join(george_script_lines) - - -def layers_data(layer_ids=None, communicator=None): - """Backwards compatible function of 'get_layers_data'.""" - return get_layers_data(layer_ids, communicator) - - -def get_layers_data(layer_ids=None, communicator=None): - """Collect all layers information from currently opened workfile.""" - output_file = tempfile.NamedTemporaryFile( - mode="w", prefix="a_tvp_", suffix=".txt", delete=False - ) - output_file.close() - if layer_ids is not 
None and isinstance(layer_ids, int): - layer_ids = [layer_ids] - - output_filepath = output_file.name - - george_script = get_layers_data_george_script(output_filepath, layer_ids) - - execute_george_through_file(george_script, communicator) - - with open(output_filepath, "r") as stream: - data = stream.read() - - output = parse_layers_data(data) - os.remove(output_filepath) - return output - - -def parse_group_data(data): - """Parse group data collected in 'get_groups_data'.""" - output = [] - groups_raw = data.split("\n") - for group_raw in groups_raw: - group_raw = group_raw.strip() - if not group_raw: - continue - - parts = group_raw.split("|") - # Check for length and concatenate 2 last items until length match - # - this happens if name contain spaces - while len(parts) > 6: - last_item = parts.pop(-1) - parts[-1] = "|".join([parts[-1], last_item]) - clip_id, group_id, red, green, blue, name = parts - - group = { - "group_id": int(group_id), - "name": name, - "clip_id": int(clip_id), - "red": int(red), - "green": int(green), - "blue": int(blue), - } - output.append(group) - return output - - -def groups_data(communicator=None): - """Backwards compatible function of 'get_groups_data'.""" - return get_groups_data(communicator) - - -def get_groups_data(communicator=None): - """Information about groups from current workfile.""" - output_file = tempfile.NamedTemporaryFile( - mode="w", prefix="a_tvp_", suffix=".txt", delete=False - ) - output_file.close() - - output_filepath = output_file.name.replace("\\", "/") - george_script_lines = ( - # Variable containing full path to output file - "output_path = \"{}\"".format(output_filepath), - "empty = 0", - # Loop over 26 groups which is ATM maximum possible (in 11.7) - # - ref: https://www.tvpaint.com/forum/viewtopic.php?t=13880 - "FOR idx = 1 TO 26", - # Receive information about groups - "tv_layercolor \"getcolor\" 0 idx", - "PARSE result clip_id group_index c_red c_green c_blue group_name", - # Create and add line to output file - "line = clip_id'|'group_index'|'c_red'|'c_green'|'c_blue'|'group_name", - "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' line", - "END", - ) - george_script = "\n".join(george_script_lines) - execute_george_through_file(george_script, communicator) - - with open(output_filepath, "r") as stream: - data = stream.read() - - output = parse_group_data(data) - os.remove(output_filepath) - return output - - -def get_layers_pre_post_behavior(layer_ids, communicator=None): - """Collect data about pre and post behavior of layer ids. - - Pre and Post behaviors is enumerator of possible values: - - "none" - - "repeat" - - "pingpong" - - "hold" - - Example output: - ```json - { - 0: { - "pre": "none", - "post": "repeat" - } - } - ``` - - Returns: - dict: Key is layer id value is dictionary with "pre" and "post" keys. 
- """ - # Skip if is empty - if not layer_ids: - return {} - - # Auto convert to list - if not isinstance(layer_ids, (list, set, tuple)): - layer_ids = [layer_ids] - - # Prepare temp file - output_file = tempfile.NamedTemporaryFile( - mode="w", prefix="a_tvp_", suffix=".txt", delete=False - ) - output_file.close() - - output_filepath = output_file.name.replace("\\", "/") - george_script_lines = [ - # Variable containing full path to output file - "output_path = \"{}\"".format(output_filepath), - ] - for layer_id in layer_ids: - george_script_lines.extend([ - "layer_id = {}".format(layer_id), - "tv_layerprebehavior layer_id", - "pre_beh = result", - "tv_layerpostbehavior layer_id", - "post_beh = result", - "line = layer_id'|'pre_beh'|'post_beh", - "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' line" - ]) - - george_script = "\n".join(george_script_lines) - execute_george_through_file(george_script, communicator) - - # Read data - with open(output_filepath, "r") as stream: - data = stream.read() - - # Remove temp file - os.remove(output_filepath) - - # Parse data - output = {} - raw_lines = data.split("\n") - for raw_line in raw_lines: - line = raw_line.strip() - if not line: - continue - parts = line.split("|") - if len(parts) != 3: - continue - layer_id, pre_beh, post_beh = parts - output[int(layer_id)] = { - "pre": pre_beh.lower(), - "post": post_beh.lower() - } - return output - - -def get_layers_exposure_frames(layer_ids, layers_data=None, communicator=None): - """Get exposure frames. - - Easily said returns frames where keyframes are. Recognized with george - function `tv_exposureinfo` returning "Head". - - Args: - layer_ids (list): Ids of a layers for which exposure frames should - look for. - layers_data (list): Precollected layers data. If are not passed then - 'get_layers_data' is used. - communicator (BaseCommunicator): Communicator used for communication - with TVPaint. - - Returns: - dict: Frames where exposure is set to "Head" by layer id. 
- """ - - if layers_data is None: - layers_data = get_layers_data(layer_ids) - _layers_by_id = { - layer["layer_id"]: layer - for layer in layers_data - } - layers_by_id = { - layer_id: _layers_by_id.get(layer_id) - for layer_id in layer_ids - } - tmp_file = tempfile.NamedTemporaryFile( - mode="w", prefix="a_tvp_", suffix=".txt", delete=False - ) - tmp_file.close() - tmp_output_path = tmp_file.name.replace("\\", "/") - george_script_lines = [ - "output_path = \"{}\"".format(tmp_output_path) - ] - - output = {} - layer_id_mapping = {} - for layer_id, layer_data in layers_by_id.items(): - layer_id_mapping[str(layer_id)] = layer_id - output[layer_id] = [] - if not layer_data: - continue - first_frame = layer_data["frame_start"] - last_frame = layer_data["frame_end"] - george_script_lines.extend([ - "line = \"\"", - "layer_id = {}".format(layer_id), - "line = line''layer_id", - "tv_layerset layer_id", - "frame = {}".format(first_frame), - "WHILE (frame <= {})".format(last_frame), - "tv_exposureinfo frame", - "exposure = result", - "IF (CMP(exposure, \"Head\") == 1)", - "line = line'|'frame", - "END", - "frame = frame + 1", - "END", - "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' line" - ]) - - execute_george_through_file("\n".join(george_script_lines), communicator) - - with open(tmp_output_path, "r") as stream: - data = stream.read() - - os.remove(tmp_output_path) - - lines = [] - for line in data.split("\n"): - line = line.strip() - if line: - lines.append(line) - - for line in lines: - line_items = list(line.split("|")) - layer_id = line_items.pop(0) - _layer_id = layer_id_mapping[layer_id] - output[_layer_id] = [int(frame) for frame in line_items] - - return output - - -def get_exposure_frames( - layer_id, first_frame=None, last_frame=None, communicator=None -): - """Get exposure frames. - - Easily said returns frames where keyframes are. Recognized with george - function `tv_exposureinfo` returning "Head". - - Args: - layer_id (int): Id of a layer for which exposure frames should - look for. - first_frame (int): From which frame will look for exposure frames. - Used layers first frame if not entered. - last_frame (int): Last frame where will look for exposure frames. - Used layers last frame if not entered. - - Returns: - list: Frames where exposure is set to "Head". 
- """ - if first_frame is None or last_frame is None: - layer = layers_data(layer_id)[0] - if first_frame is None: - first_frame = layer["frame_start"] - if last_frame is None: - last_frame = layer["frame_end"] - - tmp_file = tempfile.NamedTemporaryFile( - mode="w", prefix="a_tvp_", suffix=".txt", delete=False - ) - tmp_file.close() - tmp_output_path = tmp_file.name.replace("\\", "/") - george_script_lines = [ - "tv_layerset {}".format(layer_id), - "output_path = \"{}\"".format(tmp_output_path), - "output = \"\"", - "frame = {}".format(first_frame), - "WHILE (frame <= {})".format(last_frame), - "tv_exposureinfo frame", - "exposure = result", - "IF (CMP(exposure, \"Head\") == 1)", - "IF (CMP(output, \"\") == 1)", - "output = output''frame", - "ELSE", - "output = output'|'frame", - "END", - "END", - "frame = frame + 1", - "END", - "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' output" - ] - - execute_george_through_file("\n".join(george_script_lines), communicator) - - with open(tmp_output_path, "r") as stream: - data = stream.read() - - os.remove(tmp_output_path) - - lines = [] - for line in data.split("\n"): - line = line.strip() - if line: - lines.append(line) - - exposure_frames = [] - for line in lines: - for frame in line.split("|"): - exposure_frames.append(int(frame)) - return exposure_frames - - -def get_scene_data(communicator=None): - """Scene data of currently opened scene. - - Result contains resolution, pixel aspect, fps mark in/out with states, - frame start and background color. - - Returns: - dict: Scene data collected in many ways. - """ - workfile_info = execute_george("tv_projectinfo", communicator) - workfile_info_parts = workfile_info.split(" ") - - # Project frame start - not used - workfile_info_parts.pop(-1) - field_order = workfile_info_parts.pop(-1) - frame_rate = float(workfile_info_parts.pop(-1)) - pixel_apsect = float(workfile_info_parts.pop(-1)) - height = int(workfile_info_parts.pop(-1)) - width = int(workfile_info_parts.pop(-1)) - - # Marks return as "{frame - 1} {state} ", example "0 set". - result = execute_george("tv_markin", communicator) - mark_in_frame, mark_in_state, _ = result.split(" ") - - result = execute_george("tv_markout", communicator) - mark_out_frame, mark_out_state, _ = result.split(" ") - - start_frame = execute_george("tv_startframe", communicator) - return { - "width": width, - "height": height, - "pixel_aspect": pixel_apsect, - "fps": frame_rate, - "field_order": field_order, - "mark_in": int(mark_in_frame), - "mark_in_state": mark_in_state, - "mark_in_set": mark_in_state == "set", - "mark_out": int(mark_out_frame), - "mark_out_state": mark_out_state, - "mark_out_set": mark_out_state == "set", - "start_frame": int(start_frame), - "bg_color": get_scene_bg_color(communicator) - } - - -def get_scene_bg_color(communicator=None): - """Background color set on scene. - - Is important for review exporting where scene bg color is used as - background. 
- """ - output_file = tempfile.NamedTemporaryFile( - mode="w", prefix="a_tvp_", suffix=".txt", delete=False - ) - output_file.close() - output_filepath = output_file.name.replace("\\", "/") - george_script_lines = [ - # Variable containing full path to output file - "output_path = \"{}\"".format(output_filepath), - "tv_background", - "bg_color = result", - # Write data to output file - "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' bg_color" - ] - - george_script = "\n".join(george_script_lines) - execute_george_through_file(george_script, communicator) - - with open(output_filepath, "r") as stream: - data = stream.read() - - os.remove(output_filepath) - data = data.strip() - if not data: - return None - return data.split(" ") diff --git a/server_addon/tvpaint/client/ayon_tvpaint/api/pipeline.py b/server_addon/tvpaint/client/ayon_tvpaint/api/pipeline.py deleted file mode 100644 index 5ec6355138..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/api/pipeline.py +++ /dev/null @@ -1,518 +0,0 @@ -import os -import json -import tempfile -import logging - -import requests -import ayon_api -import pyblish.api - -from ayon_tvpaint import TVPAINT_ROOT_DIR - -from ayon_core.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost -from ayon_core.settings import get_current_project_settings -from ayon_core.lib import register_event_callback -from ayon_core.pipeline import ( - register_loader_plugin_path, - register_creator_plugin_path, - AVALON_CONTAINER_ID, -) -from ayon_core.pipeline.context_tools import get_global_context - -from .lib import ( - execute_george, - execute_george_through_file -) - -log = logging.getLogger(__name__) - - -METADATA_SECTION = "avalon" -SECTION_NAME_CONTEXT = "context" -SECTION_NAME_CREATE_CONTEXT = "create_context" -SECTION_NAME_INSTANCES = "instances" -SECTION_NAME_CONTAINERS = "containers" -# Maximum length of metadata chunk string -# TODO find out the max (500 is safe enough) -TVPAINT_CHUNK_LENGTH = 500 - -"""TVPaint's Metadata - -Metadata are stored to TVPaint's workfile. - -Workfile works similar to .ini file but has few limitation. Most important -limitation is that value under key has limited length. Due to this limitation -each metadata section/key stores number of "subkeys" that are related to -the section. - -Example: -Metadata key `"instances"` may have stored value "2". In that case it is -expected that there are also keys `["instances0", "instances1"]`. - -Workfile data looks like: -``` -[avalon] -instances0=[{{__dq__}id{__dq__}: {__dq__}ayon.create.instance{__dq__... -instances1=...more data... 
-instances=2 -``` -""" - - -class TVPaintHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): - name = "tvpaint" - - def install(self): - """Install TVPaint-specific functionality.""" - - log.info("AYON - Installing TVPaint integration") - - # Create workdir folder if does not exist yet - workdir = os.getenv("AYON_WORKDIR") - if not os.path.exists(workdir): - os.makedirs(workdir) - - plugins_dir = os.path.join(TVPAINT_ROOT_DIR, "plugins") - publish_dir = os.path.join(plugins_dir, "publish") - load_dir = os.path.join(plugins_dir, "load") - create_dir = os.path.join(plugins_dir, "create") - - pyblish.api.register_host("tvpaint") - pyblish.api.register_plugin_path(publish_dir) - register_loader_plugin_path(load_dir) - register_creator_plugin_path(create_dir) - - register_event_callback("application.launched", self.initial_launch) - register_event_callback("application.exit", self.application_exit) - - def get_current_project_name(self): - """ - Returns: - Union[str, None]: Current project name. - """ - - return self.get_current_context().get("project_name") - - def get_current_folder_path(self): - """ - Returns: - Union[str, None]: Current folder path. - """ - - return self.get_current_context().get("folder_path") - - def get_current_task_name(self): - """ - Returns: - Union[str, None]: Current task name. - """ - - return self.get_current_context().get("task_name") - - def get_current_context(self): - context = get_current_workfile_context() - if not context: - return get_global_context() - - if "project_name" in context: - if "asset_name" in context: - context["folder_path"] = context["asset_name"] - return context - # This is legacy way how context was stored - return { - "project_name": context.get("project"), - "folder_path": context.get("asset"), - "task_name": context.get("task") - } - - # --- Create --- - def get_context_data(self): - return get_workfile_metadata(SECTION_NAME_CREATE_CONTEXT, {}) - - def update_context_data(self, data, changes): - return write_workfile_metadata(SECTION_NAME_CREATE_CONTEXT, data) - - def list_instances(self): - """List all created instances from current workfile.""" - return list_instances() - - def write_instances(self, data): - return write_instances(data) - - # --- Workfile --- - def open_workfile(self, filepath): - george_script = "tv_LoadProject '\"'\"{}\"'\"'".format( - filepath.replace("\\", "/") - ) - return execute_george_through_file(george_script) - - def save_workfile(self, filepath=None): - if not filepath: - filepath = self.get_current_workfile() - context = get_global_context() - save_current_workfile_context(context) - - # Execute george script to save workfile. - george_script = "tv_SaveProject {}".format(filepath.replace("\\", "/")) - return execute_george(george_script) - - def work_root(self, session): - return session["AYON_WORKDIR"] - - def get_current_workfile(self): - return execute_george("tv_GetProjectName") - - def workfile_has_unsaved_changes(self): - return None - - def get_workfile_extensions(self): - return [".tvpp"] - - # --- Load --- - def get_containers(self): - return get_containers() - - def initial_launch(self): - # Setup project settings if its the template that's launched. 
- # TODO also check for template creation when it's possible to define - # templates - last_workfile = os.environ.get("AYON_LAST_WORKFILE") - if not last_workfile or os.path.exists(last_workfile): - return - - log.info("Setting up project...") - global_context = get_global_context() - project_name = global_context.get("project_name") - folder_path = global_context.get("folder_path") - if not project_name or not folder_path: - return - - folder_entity = ayon_api.get_folder_by_path(project_name, folder_path) - - set_context_settings(project_name, folder_entity) - - def application_exit(self): - """Logic related to TimerManager. - - Todo: - This should be handled out of TVPaint integration logic. - """ - - data = get_current_project_settings() - stop_timer = data["tvpaint"]["stop_timer_on_application_exit"] - - if not stop_timer: - return - - # Stop application timer. - webserver_url = os.environ.get("AYON_WEBSERVER_URL") - rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url) - requests.post(rest_api_url) - - -def containerise( - name, namespace, members, context, loader, current_containers=None -): - """Add new container to metadata. - - Args: - name (str): Container name. - namespace (str): Container namespace. - members (list): List of members that were loaded and belongs - to the container (layer names). - current_containers (list): Preloaded containers. Should be used only - on update/switch when containers were modified during the process. - - Returns: - dict: Container data stored to workfile metadata. - """ - - container_data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "members": members, - "name": name, - "namespace": namespace, - "loader": str(loader), - "representation": context["representation"]["id"] - } - if current_containers is None: - current_containers = get_containers() - - # Add container to containers list - current_containers.append(container_data) - - # Store data to metadata - write_workfile_metadata(SECTION_NAME_CONTAINERS, current_containers) - - return container_data - - -def split_metadata_string(text, chunk_length=None): - """Split string by length. - - Split text to chunks by entered length. - Example: - ```python - text = "ABCDEFGHIJKLM" - result = split_metadata_string(text, 3) - print(result) - >>> ['ABC', 'DEF', 'GHI', 'JKL'] - ``` - - Args: - text (str): Text that will be split into chunks. - chunk_length (int): Single chunk size. Default chunk_length is - set to global variable `TVPAINT_CHUNK_LENGTH`. - - Returns: - list: List of strings with at least one item. - """ - if chunk_length is None: - chunk_length = TVPAINT_CHUNK_LENGTH - chunks = [] - for idx in range(chunk_length, len(text) + chunk_length, chunk_length): - start_idx = idx - chunk_length - chunks.append(text[start_idx:idx]) - return chunks - - -def get_workfile_metadata_string_for_keys(metadata_keys): - """Read metadata for specific keys from current project workfile. - - All values from entered keys are stored to single string without separator. - - Function is designed to help get all values for one metadata key at once. - So order of passed keys matteres. - - Args: - metadata_keys (list, str): Metadata keys for which data should be - retrieved. Order of keys matters! It is possible to enter only - single key as string. 
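To make the chunked-key scheme concrete, here is a small sketch of how a chunked value is reassembled on read; it mirrors the logic of `get_workfile_metadata_string` below, with `read_project_string` as a hypothetical stand-in for the `tv_readprojectstring` round-trip:

```python
def read_chunked_value(read_project_string, metadata_key):
    # 'read_project_string' is a hypothetical callable returning the
    # value stored under a key in the metadata section.
    stored = read_project_string(metadata_key).strip()
    if not stored.isdecimal():
        # Backwards compatibility: value was stored directly under the key
        raw = stored
    else:
        # The key holds the chunk count; data lives in indexed subkeys
        raw = "".join(
            read_project_string("{}{}".format(metadata_key, idx))
            for idx in range(int(stored))
        )
    # Restore quotes that were replaced by placeholders on write
    return raw.replace("{__sq__}", "'").replace("{__dq__}", "\"")
```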
- """ - # Add ability to pass only single key - if isinstance(metadata_keys, str): - metadata_keys = [metadata_keys] - - output_file = tempfile.NamedTemporaryFile( - mode="w", prefix="a_tvp_", suffix=".txt", delete=False - ) - output_file.close() - output_filepath = output_file.name.replace("\\", "/") - - george_script_parts = [] - george_script_parts.append( - "output_path = \"{}\"".format(output_filepath) - ) - # Store data for each index of metadata key - for metadata_key in metadata_keys: - george_script_parts.append( - "tv_readprojectstring \"{}\" \"{}\" \"\"".format( - METADATA_SECTION, metadata_key - ) - ) - george_script_parts.append( - "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' result" - ) - - # Execute the script - george_script = "\n".join(george_script_parts) - execute_george_through_file(george_script) - - # Load data from temp file - with open(output_filepath, "r") as stream: - file_content = stream.read() - - # Remove `\n` from content - output_string = file_content.replace("\n", "") - - # Delete temp file - os.remove(output_filepath) - - return output_string - - -def get_workfile_metadata_string(metadata_key): - """Read metadata for specific key from current project workfile.""" - result = get_workfile_metadata_string_for_keys([metadata_key]) - if not result: - return None - - stripped_result = result.strip() - if not stripped_result: - return None - - # NOTE Backwards compatibility when metadata key did not store range of key - # indexes but the value itself - # NOTE We don't have to care about negative values with `isdecimal` check - if not stripped_result.isdecimal(): - metadata_string = result - else: - keys = [] - for idx in range(int(stripped_result)): - keys.append("{}{}".format(metadata_key, idx)) - metadata_string = get_workfile_metadata_string_for_keys(keys) - - # Replace quotes plaholders with their values - metadata_string = ( - metadata_string - .replace("{__sq__}", "'") - .replace("{__dq__}", "\"") - ) - return metadata_string - - -def get_workfile_metadata(metadata_key, default=None): - """Read and parse metadata for specific key from current project workfile. - - Pipeline use function to store loaded and created instances within keys - stored in `SECTION_NAME_INSTANCES` and `SECTION_NAME_CONTAINERS` - constants. - - Args: - metadata_key (str): Key defying which key should read. It is expected - value contain json serializable string. - """ - if default is None: - default = [] - - json_string = get_workfile_metadata_string(metadata_key) - if json_string: - try: - return json.loads(json_string) - except json.decoder.JSONDecodeError: - # TODO remove when backwards compatibility of storing metadata - # will be removed - print(( - "Fixed invalid metadata in workfile." - " Not serializable string was: {}" - ).format(json_string)) - write_workfile_metadata(metadata_key, default) - return default - - -def write_workfile_metadata(metadata_key, value): - """Write metadata for specific key into current project workfile. - - George script has specific way how to work with quotes which should be - solved automatically with this function. - - Args: - metadata_key (str): Key defying under which key value will be stored. - value (dict,list,str): Data to store they must be json serializable. 
- """ - if isinstance(value, (dict, list)): - value = json.dumps(value) - - if not value: - value = "" - - # Handle quotes in dumped json string - # - replace single and double quotes with placeholders - value = ( - value - .replace("'", "{__sq__}") - .replace("\"", "{__dq__}") - ) - chunks = split_metadata_string(value) - chunks_len = len(chunks) - - write_template = "tv_writeprojectstring \"{}\" \"{}\" \"{}\"" - george_script_parts = [] - # Add information about chunks length to metadata key itself - george_script_parts.append( - write_template.format(METADATA_SECTION, metadata_key, chunks_len) - ) - # Add chunk values to indexed metadata keys - for idx, chunk_value in enumerate(chunks): - sub_key = "{}{}".format(metadata_key, idx) - george_script_parts.append( - write_template.format(METADATA_SECTION, sub_key, chunk_value) - ) - - george_script = "\n".join(george_script_parts) - - return execute_george_through_file(george_script) - - -def get_current_workfile_context(): - """Return context in which was workfile saved.""" - return get_workfile_metadata(SECTION_NAME_CONTEXT, {}) - - -def save_current_workfile_context(context): - """Save context which was used to create a workfile.""" - return write_workfile_metadata(SECTION_NAME_CONTEXT, context) - - -def list_instances(): - """List all created instances from current workfile.""" - return get_workfile_metadata(SECTION_NAME_INSTANCES) - - -def write_instances(data): - return write_workfile_metadata(SECTION_NAME_INSTANCES, data) - - -def get_containers(): - output = get_workfile_metadata(SECTION_NAME_CONTAINERS) - if output: - for item in output: - if "objectName" not in item and "members" in item: - members = item["members"] - if isinstance(members, list): - members = "|".join([str(member) for member in members]) - item["objectName"] = members - return output - - -def set_context_settings(project_name, folder_entity): - """Set workfile settings by folder entity attributes. - - Change fps, resolution and frame start/end. - - Args: - project_name (str): Project name. - folder_entity (dict[str, Any]): Folder entity. 
- - """ - - if not folder_entity: - return - - folder_attributes = folder_entity["attrib"] - - width = folder_attributes.get("resolutionWidth") - height = folder_attributes.get("resolutionHeight") - if width is None or height is None: - print("Resolution was not found!") - else: - execute_george( - "tv_resizepage {} {} 0".format(width, height) - ) - - framerate = folder_attributes.get("fps") - - if framerate is not None: - execute_george( - "tv_framerate {} \"timestretch\"".format(framerate) - ) - else: - print("Framerate was not found!") - - frame_start = folder_attributes.get("frameStart") - frame_end = folder_attributes.get("frameEnd") - - if frame_start is None or frame_end is None: - print("Frame range was not found!") - return - - handle_start = folder_attributes.get("handleStart") - handle_end = folder_attributes.get("handleEnd") - - # Always start from 0 Mark In and set only Mark Out - mark_in = 0 - mark_out = mark_in + (frame_end - frame_start) + handle_start + handle_end - - execute_george("tv_markin {} set".format(mark_in)) - execute_george("tv_markout {} set".format(mark_out)) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/api/plugin.py b/server_addon/tvpaint/client/ayon_tvpaint/api/plugin.py deleted file mode 100644 index 9dd6ae530a..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/api/plugin.py +++ /dev/null @@ -1,205 +0,0 @@ -import re - -from ayon_core.pipeline import LoaderPlugin -from ayon_core.pipeline.create import ( - CreatedInstance, - get_product_name, - AutoCreator, - Creator, -) -from ayon_core.pipeline.create.creator_plugins import cache_and_get_instances - -from .lib import get_layers_data - - -SHARED_DATA_KEY = "ayon.tvpaint.instances" - - -class TVPaintCreatorCommon: - @property - def product_template_product_type(self): - return self.product_type - - def _cache_and_get_instances(self): - return cache_and_get_instances( - self, SHARED_DATA_KEY, self.host.list_instances - ) - - def _collect_create_instances(self): - instances_by_identifier = self._cache_and_get_instances() - for instance_data in instances_by_identifier[self.identifier]: - instance = CreatedInstance.from_existing(instance_data, self) - self._add_instance_to_context(instance) - - def _update_create_instances(self, update_list): - if not update_list: - return - - cur_instances = self.host.list_instances() - cur_instances_by_id = {} - for instance_data in cur_instances: - instance_id = instance_data.get("instance_id") - if instance_id: - cur_instances_by_id[instance_id] = instance_data - - for instance, changes in update_list: - instance_data = changes.new_value - cur_instance_data = cur_instances_by_id.get(instance.id) - if cur_instance_data is None: - cur_instances.append(instance_data) - continue - for key in set(cur_instance_data) - set(instance_data): - cur_instance_data.pop(key) - cur_instance_data.update(instance_data) - self.host.write_instances(cur_instances) - - def _custom_get_product_name( - self, - project_name, - folder_entity, - task_entity, - variant, - host_name=None, - instance=None - ): - dynamic_data = self.get_dynamic_data( - project_name, - folder_entity, - task_entity, - variant, - host_name, - instance - ) - task_name = task_type = None - if task_entity: - task_name = task_entity["name"] - task_type = task_entity["taskType"] - - return get_product_name( - project_name, - task_name, - task_type, - host_name, - self.product_type, - variant, - dynamic_data=dynamic_data, - project_settings=self.project_settings, - 
product_type_filter=self.product_template_product_type - ) - - -class TVPaintCreator(Creator, TVPaintCreatorCommon): - settings_category = "tvpaint" - - def collect_instances(self): - self._collect_create_instances() - - def update_instances(self, update_list): - self._update_create_instances(update_list) - - def remove_instances(self, instances): - ids_to_remove = { - instance.id - for instance in instances - } - cur_instances = self.host.list_instances() - changed = False - new_instances = [] - for instance_data in cur_instances: - if instance_data.get("instance_id") in ids_to_remove: - changed = True - else: - new_instances.append(instance_data) - - if changed: - self.host.write_instances(new_instances) - - for instance in instances: - self._remove_instance_from_context(instance) - - def get_dynamic_data(self, *args, **kwargs): - # Change folder and name by current workfile context - create_context = self.create_context - folder_path = create_context.get_current_folder_path() - task_name = create_context.get_current_task_name() - output = {} - if folder_path: - folder_name = folder_path.rsplit("/")[-1] - output["asset"] = folder_name - output["folder"] = {"name": folder_name} - if task_name: - output["task"] = task_name - return output - - def get_product_name(self, *args, **kwargs): - return self._custom_get_product_name(*args, **kwargs) - - def _store_new_instance(self, new_instance): - instances_data = self.host.list_instances() - instances_data.append(new_instance.data_to_store()) - self.host.write_instances(instances_data) - self._add_instance_to_context(new_instance) - - -class TVPaintAutoCreator(AutoCreator, TVPaintCreatorCommon): - settings_category = "tvpaint" - - def collect_instances(self): - self._collect_create_instances() - - def update_instances(self, update_list): - self._update_create_instances(update_list) - - def get_product_name(self, *args, **kwargs): - return self._custom_get_product_name(*args, **kwargs) - - -class Loader(LoaderPlugin): - hosts = ["tvpaint"] - settings_category = "tvpaint" - - @staticmethod - def get_members_from_container(container): - if "members" not in container and "objectName" in container: - # Backwards compatibility - layer_ids_str = container.get("objectName") - return [ - int(layer_id) for layer_id in layer_ids_str.split("|") - ] - return container["members"] - - def get_unique_layer_name(self, namespace, name): - """Layer name with counter as suffix. - - Find higher 3 digit suffix from all layer names in scene matching regex - `{namespace}_{name}_{suffix}`. Higher 3 digit suffix is used - as base for next number if scene does not contain layer matching regex - `0` is used ase base. - - Args: - namespace (str): Usually folder name. - name (str): Name of loaded product. 
- - Returns: - str: `{namespace}_{name}_{higher suffix + 1}` - """ - layer_name_base = "{}_{}".format(namespace, name) - - counter_regex = re.compile(r"_(\d{3})$") - - higher_counter = 0 - for layer in get_layers_data(): - layer_name = layer["name"] - if not layer_name.startswith(layer_name_base): - continue - number_subpart = layer_name[len(layer_name_base):] - groups = counter_regex.findall(number_subpart) - if len(groups) != 1: - continue - - counter = int(groups[0]) - if counter > higher_counter: - higher_counter = counter - continue - - return "{}_{:0>3d}".format(layer_name_base, higher_counter + 1) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/hooks/pre_launch_args.py b/server_addon/tvpaint/client/ayon_tvpaint/hooks/pre_launch_args.py deleted file mode 100644 index 8ee91aa0e7..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/hooks/pre_launch_args.py +++ /dev/null @@ -1,42 +0,0 @@ -from ayon_core.lib import get_ayon_launcher_args -from ayon_applications import PreLaunchHook, LaunchTypes - - -class TvpaintPrelaunchHook(PreLaunchHook): - """Launch arguments preparation. - - Hook add python executable and script path to tvpaint implementation before - tvpaint executable and add last workfile path to launch arguments. - - Existence of last workfile is checked. If workfile does not exists tries - to copy templated workfile from predefined path. - """ - app_groups = {"tvpaint"} - launch_types = {LaunchTypes.local} - - def execute(self): - # Pop tvpaint executable - executable_path = self.launch_context.launch_args.pop(0) - - # Pop rest of launch arguments - There should not be other arguments! - remainders = [] - while self.launch_context.launch_args: - remainders.append(self.launch_context.launch_args.pop(0)) - - new_launch_args = get_ayon_launcher_args( - "run", self.launch_script_path(), executable_path - ) - - # Append as whole list as these areguments should not be separated - self.launch_context.launch_args.append(new_launch_args) - - if remainders: - self.log.warning(( - "There are unexpected launch arguments in TVPaint launch. {}" - ).format(str(remainders))) - self.launch_context.launch_args.extend(remainders) - - def launch_script_path(self): - from ayon_tvpaint import get_launch_script_path - - return get_launch_script_path() diff --git a/server_addon/tvpaint/client/ayon_tvpaint/lib.py b/server_addon/tvpaint/client/ayon_tvpaint/lib.py deleted file mode 100644 index 97cf8d3633..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/lib.py +++ /dev/null @@ -1,684 +0,0 @@ -import os -import shutil -import collections -from PIL import Image, ImageDraw - - -def backwards_id_conversion(data_by_layer_id): - """Convert layer ids to strings from integers.""" - for key in tuple(data_by_layer_id.keys()): - if not isinstance(key, str): - data_by_layer_id[str(key)] = data_by_layer_id.pop(key) - - -def get_frame_filename_template(frame_end, filename_prefix=None, ext=None): - """Get file template with frame key for rendered files. - - This is simple template contains `{frame}{ext}` for sequential outputs - and `single_file{ext}` for single file output. Output is rendered to - temporary folder so filename should not matter as integrator change - them. 
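A quick illustration of what the two template helpers below produce (values derived from the implementation; default padding is 4 digits, widened only when the end frame needs more):

```python
# Frame template pads to 4 digits unless frame_end has more digits.
template = get_frame_filename_template(frame_end=150)
# template == "{frame:0>4}.png"
print(template.format(frame=25))  # -> "0025.png"

# The per-layer variant prepends a position prefix.
layer_template = get_layer_pos_filename_template(150)
# layer_template == "pos_{pos}.{frame:0>4}.png"
print(layer_template.format(pos=2, frame=25))  # -> "pos_2.0025.png"
```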
- """ - frame_padding = 4 - frame_end_str_len = len(str(frame_end)) - if frame_end_str_len > frame_padding: - frame_padding = frame_end_str_len - - ext = ext or ".png" - filename_prefix = filename_prefix or "" - - return "{}{{frame:0>{}}}{}".format(filename_prefix, frame_padding, ext) - - -def get_layer_pos_filename_template(range_end, filename_prefix=None, ext=None): - filename_prefix = filename_prefix or "" - new_filename_prefix = filename_prefix + "pos_{pos}." - return get_frame_filename_template(range_end, new_filename_prefix, ext) - - -def _calculate_pre_behavior_copy( - range_start, exposure_frames, pre_beh, - layer_frame_start, layer_frame_end, - output_idx_by_frame_idx -): - """Calculate frames before first exposure frame based on pre behavior. - - Function may skip whole processing if first exposure frame is before - layer's first frame. In that case pre behavior does not make sense. - - Args: - range_start(int): First frame of range which should be rendered. - exposure_frames(list): List of all exposure frames on layer. - pre_beh(str): Pre behavior of layer (enum of 4 strings). - layer_frame_start(int): First frame of layer. - layer_frame_end(int): Last frame of layer. - output_idx_by_frame_idx(dict): References to already prepared frames - and where result will be stored. - """ - # Check if last layer frame is after range end - if layer_frame_start < range_start: - return - - first_exposure_frame = min(exposure_frames) - # Skip if last exposure frame is after range end - if first_exposure_frame < range_start: - return - - # Calculate frame count of layer - frame_count = layer_frame_end - layer_frame_start + 1 - - if pre_beh == "none": - # Just fill all frames from last exposure frame to range end with None - for frame_idx in range(range_start, layer_frame_start): - output_idx_by_frame_idx[frame_idx] = None - - elif pre_beh == "hold": - # Keep first frame for whole time - for frame_idx in range(range_start, layer_frame_start): - output_idx_by_frame_idx[frame_idx] = first_exposure_frame - - elif pre_beh == "repeat": - # Loop backwards from last frame of layer - for frame_idx in reversed(range(range_start, layer_frame_start)): - eq_frame_idx_offset = ( - (layer_frame_end - frame_idx) % frame_count - ) - eq_frame_idx = layer_frame_start + ( - layer_frame_end - eq_frame_idx_offset - ) - output_idx_by_frame_idx[frame_idx] = eq_frame_idx - - elif pre_beh == "pingpong": - half_seq_len = frame_count - 1 - seq_len = half_seq_len * 2 - for frame_idx in reversed(range(range_start, layer_frame_start)): - eq_frame_idx_offset = (layer_frame_start - frame_idx) % seq_len - if eq_frame_idx_offset > half_seq_len: - eq_frame_idx_offset = (seq_len - eq_frame_idx_offset) - eq_frame_idx = layer_frame_start + eq_frame_idx_offset - output_idx_by_frame_idx[frame_idx] = eq_frame_idx - - -def _calculate_post_behavior_copy( - range_end, exposure_frames, post_beh, - layer_frame_start, layer_frame_end, - output_idx_by_frame_idx -): - """Calculate frames after last frame of layer based on post behavior. - - Function may skip whole processing if last layer frame is after range_end. - In that case post behavior does not make sense. - - Args: - range_end(int): Last frame of range which should be rendered. - exposure_frames(list): List of all exposure frames on layer. - post_beh(str): Post behavior of layer (enum of 4 strings). - layer_frame_start(int): First frame of layer. - layer_frame_end(int): Last frame of layer. 
- output_idx_by_frame_idx(dict): References to already prepared frames - and where result will be stored. - """ - # Check if last layer frame is after range end - if layer_frame_end >= range_end: - return - - last_exposure_frame = max(exposure_frames) - # Skip if last exposure frame is after range end - # - this is probably irrelevant with layer frame end check? - if last_exposure_frame >= range_end: - return - - # Calculate frame count of layer - frame_count = layer_frame_end - layer_frame_start + 1 - - if post_beh == "none": - # Just fill all frames from last exposure frame to range end with None - for frame_idx in range(layer_frame_end + 1, range_end + 1): - output_idx_by_frame_idx[frame_idx] = None - - elif post_beh == "hold": - # Keep last exposure frame to the end - for frame_idx in range(layer_frame_end + 1, range_end + 1): - output_idx_by_frame_idx[frame_idx] = last_exposure_frame - - elif post_beh == "repeat": - # Loop backwards from last frame of layer - for frame_idx in range(layer_frame_end + 1, range_end + 1): - eq_frame_idx = layer_frame_start + (frame_idx % frame_count) - output_idx_by_frame_idx[frame_idx] = eq_frame_idx - - elif post_beh == "pingpong": - half_seq_len = frame_count - 1 - seq_len = half_seq_len * 2 - for frame_idx in range(layer_frame_end + 1, range_end + 1): - eq_frame_idx_offset = (frame_idx - layer_frame_end) % seq_len - if eq_frame_idx_offset > half_seq_len: - eq_frame_idx_offset = seq_len - eq_frame_idx_offset - eq_frame_idx = layer_frame_end - eq_frame_idx_offset - output_idx_by_frame_idx[frame_idx] = eq_frame_idx - - -def _calculate_in_range_frames( - range_start, range_end, - exposure_frames, layer_frame_end, - output_idx_by_frame_idx -): - """Calculate frame references in defined range. - - Function may skip whole processing if last layer frame is after range_end. - In that case post behavior does not make sense. - - Args: - range_start(int): First frame of range which should be rendered. - range_end(int): Last frame of range which should be rendered. - exposure_frames(list): List of all exposure frames on layer. - layer_frame_end(int): Last frame of layer. - output_idx_by_frame_idx(dict): References to already prepared frames - and where result will be stored. 
- """ - # Calculate in range frames - in_range_frames = [] - for frame_idx in exposure_frames: - if range_start <= frame_idx <= range_end: - output_idx_by_frame_idx[frame_idx] = frame_idx - in_range_frames.append(frame_idx) - - if in_range_frames: - first_in_range_frame = min(in_range_frames) - # Calculate frames from first exposure frames to range end or last - # frame of layer (post behavior should be calculated since that time) - previous_exposure = first_in_range_frame - for frame_idx in range(first_in_range_frame, range_end + 1): - if frame_idx > layer_frame_end: - break - - if frame_idx in exposure_frames: - previous_exposure = frame_idx - else: - output_idx_by_frame_idx[frame_idx] = previous_exposure - - # There can be frames before first exposure frame in range - # First check if we don't alreade have first range frame filled - if range_start in output_idx_by_frame_idx: - return - - first_exposure_frame = max(exposure_frames) - last_exposure_frame = max(exposure_frames) - # Check if is first exposure frame smaller than defined range - # if not then skip - if first_exposure_frame >= range_start: - return - - # Check is if last exposure frame is also before range start - # in that case we can't use fill frames before out range - if last_exposure_frame < range_start: - return - - closest_exposure_frame = first_exposure_frame - for frame_idx in exposure_frames: - if frame_idx >= range_start: - break - if frame_idx > closest_exposure_frame: - closest_exposure_frame = frame_idx - - output_idx_by_frame_idx[closest_exposure_frame] = closest_exposure_frame - for frame_idx in range(range_start, range_end + 1): - if frame_idx in output_idx_by_frame_idx: - break - output_idx_by_frame_idx[frame_idx] = closest_exposure_frame - - -def _cleanup_frame_references(output_idx_by_frame_idx): - """Cleanup frame references to frame reference. - - Cleanup not direct references to rendered frame. - ``` - // Example input - { - 1: 1, - 2: 1, - 3: 2 - } - // Result - { - 1: 1, - 2: 1, - 3: 1 // Changed reference to final rendered frame - } - ``` - Result is dictionary where keys leads to frame that should be rendered. - """ - for frame_idx in tuple(output_idx_by_frame_idx.keys()): - reference_idx = output_idx_by_frame_idx[frame_idx] - # Skip transparent frames - if reference_idx is None or reference_idx == frame_idx: - continue - - real_reference_idx = reference_idx - _tmp_reference_idx = reference_idx - while True: - _temp = output_idx_by_frame_idx[_tmp_reference_idx] - if _temp == _tmp_reference_idx: - real_reference_idx = _tmp_reference_idx - break - _tmp_reference_idx = _temp - - if real_reference_idx != reference_idx: - output_idx_by_frame_idx[frame_idx] = real_reference_idx - - -def _cleanup_out_range_frames(output_idx_by_frame_idx, range_start, range_end): - """Cleanup frame references to frames out of passed range. - - First available frame in range is used - ``` - // Example input. Range 2-3 - { - 1: 1, - 2: 1, - 3: 1 - } - // Result - { - 2: 2, // Redirect to self as is first that reference out range - 3: 2 // Redirect to first redirected frame - } - ``` - Result is dictionary where keys leads to frame that should be rendered. 
- """ - in_range_frames_by_out_frames = collections.defaultdict(set) - out_range_frames = set() - for frame_idx in tuple(output_idx_by_frame_idx.keys()): - # Skip frames that are already out of range - if frame_idx < range_start or frame_idx > range_end: - out_range_frames.add(frame_idx) - continue - - reference_idx = output_idx_by_frame_idx[frame_idx] - # Skip transparent frames - if reference_idx is None: - continue - - # Skip references in range - if reference_idx < range_start or reference_idx > range_end: - in_range_frames_by_out_frames[reference_idx].add(frame_idx) - - for reference_idx in tuple(in_range_frames_by_out_frames.keys()): - frame_indexes = in_range_frames_by_out_frames.pop(reference_idx) - new_reference = None - for frame_idx in frame_indexes: - if new_reference is None: - new_reference = frame_idx - output_idx_by_frame_idx[frame_idx] = new_reference - - # Finally remove out of range frames - for frame_idx in out_range_frames: - output_idx_by_frame_idx.pop(frame_idx) - - -def calculate_layer_frame_references( - range_start, range_end, - layer_frame_start, - layer_frame_end, - exposure_frames, - pre_beh, post_beh -): - """Calculate frame references for one layer based on it's data. - - Output is dictionary where key is frame index referencing to rendered frame - index. If frame index should be rendered then is referencing to self. - - ``` - // Example output - { - 1: 1, // Reference to self - will be rendered - 2: 1, // Reference to frame 1 - will be copied - 3: 1, // Reference to frame 1 - will be copied - 4: 4, // Reference to self - will be rendered - ... - 20: 4 // Reference to frame 4 - will be copied - 21: None // Has reference to None - transparent image - } - ``` - - Args: - range_start(int): First frame of range which should be rendered. - range_end(int): Last frame of range which should be rendered. - layer_frame_start(int)L First frame of layer. - layer_frame_end(int): Last frame of layer. - exposure_frames(list): List of all exposure frames on layer. - pre_beh(str): Pre behavior of layer (enum of 4 strings). - post_beh(str): Post behavior of layer (enum of 4 strings). - """ - # Output variable - output_idx_by_frame_idx = {} - # Skip if layer does not have any exposure frames - if not exposure_frames: - return output_idx_by_frame_idx - - # First calculate in range frames - _calculate_in_range_frames( - range_start, range_end, - exposure_frames, layer_frame_end, - output_idx_by_frame_idx - ) - # Calculate frames by pre behavior of layer - _calculate_pre_behavior_copy( - range_start, exposure_frames, pre_beh, - layer_frame_start, layer_frame_end, - output_idx_by_frame_idx - ) - # Calculate frames by post behavior of layer - _calculate_post_behavior_copy( - range_end, exposure_frames, post_beh, - layer_frame_start, layer_frame_end, - output_idx_by_frame_idx - ) - # Cleanup of referenced frames - _cleanup_frame_references(output_idx_by_frame_idx) - - # Remove frames out of range - _cleanup_out_range_frames(output_idx_by_frame_idx, range_start, range_end) - - return output_idx_by_frame_idx - - -def calculate_layers_extraction_data( - layers_data, - exposure_frames_by_layer_id, - behavior_by_layer_id, - range_start, - range_end, - skip_not_visible=True, - filename_prefix=None, - ext=None -): - """Calculate extraction data for passed layers data. - - ``` - { - : { - "frame_references": {...}, - "filenames_by_frame_index": {...} - }, - ... - } - ``` - - Frame references contains frame index reference to rendered frame index. 
- - Filename by frame index represents filename under which should be frame - stored. Directory is not handled here because each usage may need different - approach. - - Args: - layers_data(list): Layers data loaded from TVPaint. - exposure_frames_by_layer_id(dict): Exposure frames of layers stored by - layer id. - behavior_by_layer_id(dict): Pre and Post behavior of layers stored by - layer id. - range_start(int): First frame of rendered range. - range_end(int): Last frame of rendered range. - skip_not_visible(bool): Skip calculations for hidden layers (Skipped - by default). - filename_prefix(str): Prefix before filename. - ext(str): Extension which filenames will have ('.png' is default). - - Returns: - dict: Prepared data for rendering by layer position. - """ - # Make sure layer ids are strings - # backwards compatibility when layer ids were integers - backwards_id_conversion(exposure_frames_by_layer_id) - backwards_id_conversion(behavior_by_layer_id) - - layer_template = get_layer_pos_filename_template( - range_end, filename_prefix, ext - ) - output = {} - for layer_data in layers_data: - if skip_not_visible and not layer_data["visible"]: - continue - - orig_layer_id = layer_data["layer_id"] - layer_id = str(orig_layer_id) - - # Skip if does not have any exposure frames (empty layer) - exposure_frames = exposure_frames_by_layer_id[layer_id] - if not exposure_frames: - continue - - layer_position = layer_data["position"] - layer_frame_start = layer_data["frame_start"] - layer_frame_end = layer_data["frame_end"] - - layer_behavior = behavior_by_layer_id[layer_id] - - pre_behavior = layer_behavior["pre"] - post_behavior = layer_behavior["post"] - - frame_references = calculate_layer_frame_references( - range_start, range_end, - layer_frame_start, - layer_frame_end, - exposure_frames, - pre_behavior, post_behavior - ) - # All values in 'frame_references' reference to a frame that must be - # rendered out - frames_to_render = set(frame_references.values()) - # Remove 'None' reference (transparent image) - if None in frames_to_render: - frames_to_render.remove(None) - - # Skip layer if has nothing to render - if not frames_to_render: - continue - - # All filenames that should be as output (not final output) - filename_frames = ( - set(range(range_start, range_end + 1)) - | frames_to_render - ) - filenames_by_frame_index = {} - for frame_idx in filename_frames: - filenames_by_frame_index[frame_idx] = layer_template.format( - pos=layer_position, - frame=frame_idx - ) - - # Store objects under the layer id - output[orig_layer_id] = { - "frame_references": frame_references, - "filenames_by_frame_index": filenames_by_frame_index - } - return output - - -def create_transparent_image_from_source(src_filepath, dst_filepath): - """Create transparent image of same type and size as source image.""" - img_obj = Image.open(src_filepath) - painter = ImageDraw.Draw(img_obj) - painter.rectangle((0, 0, *img_obj.size), fill=(0, 0, 0, 0)) - img_obj.save(dst_filepath) - - -def fill_reference_frames(frame_references, filepaths_by_frame): - # Store path to first transparent image if there is any - for frame_idx, ref_idx in frame_references.items(): - # Frame referencing to self should be rendered and used as source - # and reference indexes with None can't be filled - if ref_idx is None or frame_idx == ref_idx: - continue - - # Get destination filepath - src_filepath = filepaths_by_frame[ref_idx] - dst_filepath = filepaths_by_frame[frame_idx] - - if hasattr(os, "link"): - os.link(src_filepath, dst_filepath) - 
-
-
-def fill_reference_frames(frame_references, filepaths_by_frame):
- # Fill frames that only reference another rendered frame
- for frame_idx, ref_idx in frame_references.items():
- # Frames referencing themselves are rendered and used as sources,
- # and 'None' references can't be filled here
- if ref_idx is None or frame_idx == ref_idx:
- continue
-
- # Get source and destination filepaths
- src_filepath = filepaths_by_frame[ref_idx]
- dst_filepath = filepaths_by_frame[frame_idx]
-
- if hasattr(os, "link"):
- os.link(src_filepath, dst_filepath)
- else:
- shutil.copy(src_filepath, dst_filepath)
-
-
-def copy_render_file(src_path, dst_path):
- """Create a copy of an image file, preferring a hardlink."""
- if hasattr(os, "link"):
- os.link(src_path, dst_path)
- else:
- shutil.copy(src_path, dst_path)
-
-
-def cleanup_rendered_layers(filepaths_by_layer_id):
- """Delete the files of individual layers after compositing."""
- # Collect all filepaths from data
- all_filepaths = []
- for filepaths_by_frame in filepaths_by_layer_id.values():
- all_filepaths.extend(filepaths_by_frame.values())
-
- # Remove each unique filepath once
- for filepath in set(all_filepaths):
- if filepath is not None and os.path.exists(filepath):
- os.remove(filepath)
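Note that 'os.link' raises OSError for cross-device links or filesystems without hardlink support, which 'copy_render_file' above does not guard against. A hedged sketch of a fallback variant, not part of the original module:

```
import errno
import os
import shutil


def link_or_copy(src_path, dst_path):
    """Hardlink 'src_path' to 'dst_path', or copy when linking can't work."""
    try:
        os.link(src_path, dst_path)
    except OSError as exc:
        # EXDEV: cross-device link, EINVAL: filesystem without hardlinks
        if exc.errno not in (errno.EXDEV, errno.EINVAL):
            raise
        shutil.copy(src_path, dst_path)
```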
-
-
-def composite_rendered_layers(
- layers_data, filepaths_by_layer_id,
- range_start, range_end,
- dst_filepaths_by_frame, cleanup=True
-):
- """Composite multiple rendered layers by their position.
-
- The result is a single frame sequence whose transparency matches the
- content created in TVPaint. Missing source filepaths are replaced with
- transparent images, but at least one image must be rendered and exist.
-
- The function can be used even when only a single layer was rendered, to
- fill in the transparent filepaths.
-
- Args:
- layers_data(list): Layers data loaded from TVPaint.
- filepaths_by_layer_id(dict): Rendered filepaths stored by frame index
- per layer id. Used as source for compositing.
- range_start(int): First frame of rendered range.
- range_end(int): Last frame of rendered range.
- dst_filepaths_by_frame(dict): Output filepaths by frame where final
- image after compositing will be stored. Path must not clash with
- source filepaths.
- cleanup(bool): Remove all source filepaths when done with compositing.
- """
- # Prepare layers by their position
- # - position tells in which order compositing happens
- layer_ids_by_position = {}
- for layer in layers_data:
- layer_position = layer["position"]
- layer_ids_by_position[layer_position] = layer["layer_id"]
-
- # Sort layer positions
- sorted_positions = tuple(reversed(sorted(layer_ids_by_position.keys())))
- # Collect filepaths without any rendered content
- # - transparent images will be created for them
- transparent_filepaths = set()
- # Store first final filepath
- first_dst_filepath = None
- for frame_idx in range(range_start, range_end + 1):
- dst_filepath = dst_filepaths_by_frame[frame_idx]
- src_filepaths = []
- for layer_position in sorted_positions:
- layer_id = layer_ids_by_position[layer_position]
- filepaths_by_frame = filepaths_by_layer_id[layer_id]
- src_filepath = filepaths_by_frame.get(frame_idx)
- if src_filepath is not None:
- src_filepaths.append(src_filepath)
-
- if not src_filepaths:
- transparent_filepaths.add(dst_filepath)
- continue
-
- # Store first destination filepath to be used for transparent images
- if first_dst_filepath is None:
- first_dst_filepath = dst_filepath
-
- if len(src_filepaths) == 1:
- src_filepath = src_filepaths[0]
- if cleanup:
- os.rename(src_filepath, dst_filepath)
- else:
- copy_render_file(src_filepath, dst_filepath)
-
- else:
- composite_images(src_filepaths, dst_filepath)
-
- # Store first transparent filepath to be able to copy it
- transparent_filepath = None
- for dst_filepath in transparent_filepaths:
- if transparent_filepath is None:
- create_transparent_image_from_source(
- first_dst_filepath, dst_filepath
- )
- transparent_filepath = dst_filepath
- else:
- copy_render_file(transparent_filepath, dst_filepath)
-
- # Remove all files that were used as source for compositing
- if cleanup:
- cleanup_rendered_layers(filepaths_by_layer_id)
-
-
-def composite_images(input_image_paths, output_filepath):
- """Composite images in order from the passed list.
-
- Raises:
- ValueError: When the passed list is empty.
- """
- if not input_image_paths:
- raise ValueError("Nothing to composite.")
-
- img_obj = None
- for image_filepath in input_image_paths:
- _img_obj = Image.open(image_filepath)
- if img_obj is None:
- img_obj = _img_obj
- else:
- img_obj.alpha_composite(_img_obj)
- img_obj.save(output_filepath)
-
-
-def rename_filepaths_by_frame_start(
- filepaths_by_frame, range_start, range_end, new_frame_start
-):
- """Change frames in filenames of finished images to new frame start."""
-
- # Calculate frame end
- new_frame_end = range_end + (new_frame_start - range_start)
- # Create filename template
- filename_template = get_frame_filename_template(
- max(range_end, new_frame_end)
- )
-
- # Use different ranges based on Mark In and output Frame Start values
- # - this is to make sure that filename renaming won't affect files that
- # are not renamed yet
- if range_start < new_frame_start:
- source_range = range(range_end, range_start - 1, -1)
- output_range = range(new_frame_end, new_frame_start - 1, -1)
- else:
- # This is the less likely situation as the output frame start is in
- # most cases higher than Mark In.
- source_range = range(range_start, range_end + 1)
- output_range = range(new_frame_start, new_frame_end + 1)
-
- # Rename files only when source and destination filenames differ
- new_dst_filepaths = {}
- for src_frame, dst_frame in zip(source_range, output_range):
- src_filepath = os.path.normpath(filepaths_by_frame[src_frame])
- dirpath, src_filename = os.path.split(src_filepath)
- dst_filename = filename_template.format(frame=dst_frame)
- dst_filepath = os.path.join(dirpath, dst_filename)
-
- if src_filename != dst_filename:
- os.rename(src_filepath, dst_filepath)
-
- new_dst_filepaths[dst_frame] = dst_filepath
-
- return new_dst_filepaths
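Since 'composite_images' leans entirely on Pillow's in-place 'alpha_composite', the ordering rule is worth a standalone example; the filenames are hypothetical and both images are assumed to be RGBA of equal size. Each later image is drawn over the accumulated result:

```
from PIL import Image

# Hypothetical RGBA images of equal size
base = Image.open("bottom_layer.png").convert("RGBA")
over = Image.open("top_layer.png").convert("RGBA")

# Draws 'over' on top of 'base' in place, respecting alpha
base.alpha_composite(over)
base.save("merged.png")
```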
- """ - - identifier = "tvpaint.legacy.converter" - - def find_instances(self): - instances_by_identifier = cache_and_get_instances( - self, SHARED_DATA_KEY, self.host.list_instances - ) - if instances_by_identifier[None]: - self.add_convertor_item("Convert legacy instances") - - def convert(self): - current_instances = self.host.list_instances() - to_convert = collections.defaultdict(list) - converted = False - for instance in current_instances: - if instance.get("creator_identifier") is not None: - continue - converted = True - - family = instance.get("family") - if family in ( - "renderLayer", - "renderPass", - "renderScene", - "review", - "workfile", - ): - to_convert[family].append(instance) - else: - instance["keep"] = False - - # Skip if nothing was changed - if not converted: - self.remove_convertor_item() - return - - self._convert_render_layers( - to_convert["renderLayer"], current_instances) - self._convert_render_passes( - to_convert["renderPass"], current_instances) - self._convert_render_scenes( - to_convert["renderScene"], current_instances) - self._convert_workfiles( - to_convert["workfile"], current_instances) - self._convert_reviews( - to_convert["review"], current_instances) - - new_instances = [ - instance - for instance in current_instances - if instance.get("keep") is not False - ] - self.host.write_instances(new_instances) - # remove legacy item if all is fine - self.remove_convertor_item() - - def _convert_render_layers(self, render_layers, current_instances): - if not render_layers: - return - - # Look for possible existing render layers in scene - render_layers_by_group_id = {} - for instance in current_instances: - if instance.get("creator_identifier") == "render.layer": - group_id = instance["creator_identifier"]["group_id"] - render_layers_by_group_id[group_id] = instance - - groups_by_id = { - group["group_id"]: group - for group in get_groups_data() - } - for render_layer in render_layers: - group_id = render_layer.pop("group_id") - # Just remove legacy instance if group is already occupied - if group_id in render_layers_by_group_id: - render_layer["keep"] = False - continue - # Add identifier - render_layer["creator_identifier"] = "render.layer" - # Change 'uuid' to 'instance_id' - render_layer["instance_id"] = render_layer.pop("uuid") - # Fill creator attributes - render_layer["creator_attributes"] = { - "group_id": group_id - } - render_layer["productType"] = "render" - group = groups_by_id[group_id] - # Use group name for variant - group["variant"] = group["name"] - - def _convert_render_passes(self, render_passes, current_instances): - if not render_passes: - return - - # Render passes must have available render layers so we look for render - # layers first - # - '_convert_render_layers' must be called before this method - render_layers_by_group_id = {} - for instance in current_instances: - if instance.get("creator_identifier") == "render.layer": - group_id = instance["creator_attributes"]["group_id"] - render_layers_by_group_id[group_id] = instance - - for render_pass in render_passes: - group_id = render_pass.pop("group_id") - render_layer = render_layers_by_group_id.get(group_id) - if not render_layer: - render_pass["keep"] = False - continue - - render_pass["creator_identifier"] = "render.pass" - render_pass["instance_id"] = render_pass.pop("uuid") - render_pass["productType"] = "render" - - render_pass["creator_attributes"] = { - "render_layer_instance_id": render_layer["instance_id"] - } - render_pass["variant"] = render_pass.pop("pass") - 
render_pass.pop("renderlayer") - - # Rest of instances are just marked for deletion - def _convert_render_scenes(self, render_scenes, current_instances): - for render_scene in render_scenes: - render_scene["keep"] = False - - def _convert_workfiles(self, workfiles, current_instances): - for render_scene in workfiles: - render_scene["keep"] = False - - def _convert_reviews(self, reviews, current_instances): - for render_scene in reviews: - render_scene["keep"] = False diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/create/create_render.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/create/create_render.py deleted file mode 100644 index 2286a4417a..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/create/create_render.py +++ /dev/null @@ -1,1208 +0,0 @@ -"""Render Layer and Passes creators. - -Render layer is main part which is represented by group in TVPaint. All TVPaint -layers marked with that group color are part of the render layer. To be more -specific about some parts of layer it is possible to create sub-sets of layer -which are named passes. Render pass consist of layers in same color group as -render layer but define more specific part. - -For example render layer could be 'Bob' which consist of 5 TVPaint layers. -- Bob has 'head' which consist of 2 TVPaint layers -> Render pass 'head' -- Bob has 'body' which consist of 1 TVPaint layer -> Render pass 'body' -- Bob has 'arm' which consist of 1 TVPaint layer -> Render pass 'arm' -- Last layer does not belong to render pass at all - -Bob will be rendered as 'beauty' of bob (all visible layers in group). -His head will be rendered too but without any other parts. The same for body -and arm. - -What is this good for? Compositing has more power how the renders are used. -Can do transforms on each render pass without need to modify a re-render them -using TVPaint. - -The workflow may hit issues when there are used other blending modes than -default 'color' blend more. In that case it is not recommended to use this -workflow at all as other blend modes may affect all layers in clip which can't -be done. - -There is special case for simple publishing of scene which is called -'render.scene'. That will use all visible layers and render them as one big -sequence. - -Todos: - Add option to extract marked layers and passes as json output format for - AfterEffects. -""" - -import collections -from typing import Any, Optional, Union - -import ayon_api - -from ayon_core.lib import ( - prepare_template_data, - AbstractAttrDef, - UILabelDef, - UISeparatorDef, - EnumDef, - TextDef, - BoolDef, -) -from ayon_core.pipeline.create import ( - CreatedInstance, - CreatorError, -) -from ayon_tvpaint.api.plugin import ( - TVPaintCreator, - TVPaintAutoCreator, -) -from ayon_tvpaint.api.lib import ( - get_layers_data, - get_groups_data, - execute_george_through_file, -) - -RENDER_LAYER_DETAILED_DESCRIPTIONS = ( - """Render Layer is "a group of TVPaint layers" - -Be aware Render Layer is not TVPaint layer. - -All TVPaint layers in the scene with the color group id are rendered in the -beauty pass. To create sub passes use Render Pass creator which is -dependent on existence of render layer instance. - -The group can represent an asset (tree) or different part of scene that consist -of one or more TVPaint layers that can be used as single item during -compositing (for example). - -In some cases may be needed to have sub parts of the layer. 
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/create/create_render.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/create/create_render.py
deleted file mode 100644
index 2286a4417a..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/create/create_render.py
+++ /dev/null
@@ -1,1208 +0,0 @@
-"""Render Layer and Passes creators.
-
-Render Layer is the main part and is represented by a color group in TVPaint.
-All TVPaint layers marked with that group color are part of the Render Layer.
-To single out specific parts of a layer it is possible to create sub-sets of
-the layer, which are named passes. A Render Pass consists of layers in the
-same color group as the Render Layer but defines a more specific part.
-
-For example a Render Layer could be 'Bob', which consists of 5 TVPaint layers.
-- Bob has a 'head' which consists of 2 TVPaint layers -> Render pass 'head'
-- Bob has a 'body' which consists of 1 TVPaint layer -> Render pass 'body'
-- Bob has an 'arm' which consists of 1 TVPaint layer -> Render pass 'arm'
-- The last layer does not belong to any render pass
-
-Bob will be rendered as the 'beauty' pass of Bob (all visible layers in the
-group). His head will be rendered too, but without any other parts. The same
-applies to the body and arm.
-
-What is this good for? Compositing gets more control over how the renders are
-used. It can transform each render pass without the need to modify and
-re-render them using TVPaint.
-
-The workflow may hit issues when blending modes other than the default 'color'
-blend mode are used. In that case it is not recommended to use this workflow
-at all, as such blend modes may affect all layers in the clip, which cannot be
-reproduced by separate passes.
-
-There is a special case for simple publishing of the scene, called
-'render.scene'. It uses all visible layers and renders them as one big
-sequence.
-
-Todos:
- Add option to extract marked layers and passes as json output format for
- AfterEffects.
-"""
-
-import collections
-from typing import Any, Optional, Union
-
-import ayon_api
-
-from ayon_core.lib import (
- prepare_template_data,
- AbstractAttrDef,
- UILabelDef,
- UISeparatorDef,
- EnumDef,
- TextDef,
- BoolDef,
-)
-from ayon_core.pipeline.create import (
- CreatedInstance,
- CreatorError,
-)
-from ayon_tvpaint.api.plugin import (
- TVPaintCreator,
- TVPaintAutoCreator,
-)
-from ayon_tvpaint.api.lib import (
- get_layers_data,
- get_groups_data,
- execute_george_through_file,
-)
-
-RENDER_LAYER_DETAILED_DESCRIPTIONS = (
- """Render Layer is "a group of TVPaint layers"
-
-Be aware Render Layer is not a TVPaint layer.
-
-All TVPaint layers in the scene with the color group id are rendered in the
-beauty pass. To create sub passes use the Render Pass creator, which depends
-on the existence of a Render Layer instance.
-
-The group can represent an asset (e.g. a tree) or a different part of the
-scene that consists of one or more TVPaint layers and can be used as a single
-item during compositing (for example).
-
-In some cases it may be needed to have sub parts of the layer. For example
-'Bob' could be a Render Layer which has 'Arm', 'Head' and 'Body' as Render
-Passes.
-"""
-)
-
-
-RENDER_PASS_DETAILED_DESCRIPTIONS = (
- """Render Pass is a sub part of a Render Layer.
-
-A Render Pass can consist of one or more TVPaint layers. A Render Pass must
-belong to a Render Layer. Marked TVPaint layers will change their group color
-to match the group color of the Render Layer.
-"""
-)
-
-
-AUTODETECT_RENDER_DETAILED_DESCRIPTION = (
- """Semi-automated Render Layer and Render Pass creation.
-
-Render Layers and Render Passes are created based on information in the
-TVPaint scene. All color groups used in the scene are used for Render Layer
-creation. The name of the group is used as the variant.
-
-Each TVPaint layer under a color group is created as a Render Pass, where the
-layer name is used as the variant.
-
-The plugin will use all used color groups and layers, or can skip those that
-are not visible.
-
-There is an option to auto-rename color groups before Render Layer creation.
-It is based on a settings template into which the index of the used group is
-filled, counting from bottom to top.
-"""
-)
-
-class CreateRenderlayer(TVPaintCreator):
- """Mark layer group as Render layer instance.
-
- All TVPaint layers in the scene with the color group id are rendered in the
- beauty pass. To create sub passes use the Render Pass creator, which
- depends on the existence of a render layer instance.
- """
-
- label = "Render Layer"
- product_type = "render"
- product_template_product_type = "renderLayer"
- identifier = "render.layer"
- icon = "fa5.images"
-
- # George script to change color group
- rename_script_template = (
- "tv_layercolor \"setcolor\""
- " {clip_id} {group_id} {r} {g} {b} \"{name}\""
- )
- # Order to be executed before Render Pass creator
- order = 90
- description = "Mark TVPaint color group as one Render Layer."
- detailed_description = RENDER_LAYER_DETAILED_DESCRIPTIONS - - # Settings - # - Default render pass name for beauty - default_pass_name = "beauty" - # - Mark by default instance for review - mark_for_review = True - - def apply_settings(self, project_settings): - plugin_settings = ( - project_settings["tvpaint"]["create"]["create_render_layer"] - ) - self.default_variant = plugin_settings["default_variant"] - self.default_variants = plugin_settings["default_variants"] - self.default_pass_name = plugin_settings["default_pass_name"] - self.mark_for_review = plugin_settings["mark_for_review"] - - def get_dynamic_data( - self, - project_name, - folder_entity, - task_entity, - variant, - host_name, - instance - ): - dynamic_data = super().get_dynamic_data( - project_name, - folder_entity, - task_entity, - variant, - host_name, - instance - ) - dynamic_data["renderpass"] = self.default_pass_name - dynamic_data["renderlayer"] = variant - return dynamic_data - - def _get_selected_group_ids(self): - return { - layer["group_id"] - for layer in get_layers_data() - if layer["selected"] - } - - def create(self, product_name, instance_data, pre_create_data): - self.log.debug("Query data from workfile.") - - group_name = instance_data["variant"] - group_id = pre_create_data.get("group_id") - # This creator should run only on one group - if group_id is None or group_id == -1: - selected_groups = self._get_selected_group_ids() - selected_groups.discard(0) - if len(selected_groups) > 1: - raise CreatorError("You have selected more than one group") - - if len(selected_groups) == 0: - raise CreatorError("You don't have selected any group") - group_id = tuple(selected_groups)[0] - - self.log.debug("Querying groups data from workfile.") - groups_data = get_groups_data() - group_item = None - for group_data in groups_data: - if group_data["group_id"] == group_id: - group_item = group_data - - for instance in self.create_context.instances: - if ( - instance.creator_identifier == self.identifier - and instance["creator_attributes"]["group_id"] == group_id - ): - raise CreatorError(( - f"Group \"{group_item.get('name')}\" is already used" - f" by another render layer \"{instance['productName']}\"" - )) - - self.log.debug(f"Selected group id is \"{group_id}\".") - if "creator_attributes" not in instance_data: - instance_data["creator_attributes"] = {} - creator_attributes = instance_data["creator_attributes"] - mark_for_review = pre_create_data.get("mark_for_review") - if mark_for_review is None: - mark_for_review = self.mark_for_review - creator_attributes["group_id"] = group_id - creator_attributes["mark_for_review"] = mark_for_review - - self.log.info(f"Product name is {product_name}") - new_instance = CreatedInstance( - self.product_type, - product_name, - instance_data, - self - ) - self._store_new_instance(new_instance) - - if not group_id or group_item["name"] == group_name: - return new_instance - - self.log.debug("Changing name of the group.") - # Rename TVPaint group (keep color same) - # - groups can't contain spaces - rename_script = self.rename_script_template.format( - clip_id=group_item["clip_id"], - group_id=group_item["group_id"], - r=group_item["red"], - g=group_item["green"], - b=group_item["blue"], - name=group_name - ) - execute_george_through_file(rename_script) - - self.log.info(( - f"Name of group with index {group_id}" - f" was changed to \"{group_name}\"." 
- )) - return new_instance - - def _get_groups_enum(self): - groups_enum = [] - empty_groups = [] - for group in get_groups_data(): - group_name = group["name"] - item = { - "label": group_name, - "value": group["group_id"] - } - # TVPaint have defined how many color groups is available, but - # the count is not consistent across versions. It is not possible - # to know how many groups there is. - # - if group_name and group_name != "0": - if empty_groups: - groups_enum.extend(empty_groups) - empty_groups = [] - groups_enum.append(item) - else: - empty_groups.append(item) - return groups_enum - - def get_pre_create_attr_defs(self): - groups_enum = self._get_groups_enum() - groups_enum.insert(0, {"label": "", "value": -1}) - - return [ - EnumDef( - "group_id", - label="Group", - items=groups_enum - ), - BoolDef( - "mark_for_review", - label="Review", - default=self.mark_for_review - ) - ] - - def get_instance_attr_defs(self): - groups_enum = self._get_groups_enum() - return [ - EnumDef( - "group_id", - label="Group", - items=groups_enum - ), - BoolDef( - "mark_for_review", - label="Review", - default=self.mark_for_review - ) - ] - - def update_instances(self, update_list): - self._update_color_groups() - self._update_renderpass_groups() - - super().update_instances(update_list) - - def _update_color_groups(self): - render_layer_instances = [] - for instance in self.create_context.instances: - if instance.creator_identifier == self.identifier: - render_layer_instances.append(instance) - - if not render_layer_instances: - return - - groups_by_id = { - group["group_id"]: group - for group in get_groups_data() - } - grg_script_lines = [] - for instance in render_layer_instances: - group_id = instance["creator_attributes"]["group_id"] - variant = instance["variant"] - group = groups_by_id[group_id] - if group["name"] == variant: - continue - - grg_script_lines.append(self.rename_script_template.format( - clip_id=group["clip_id"], - group_id=group["group_id"], - r=group["red"], - g=group["green"], - b=group["blue"], - name=variant - )) - - if grg_script_lines: - execute_george_through_file("\n".join(grg_script_lines)) - - def _update_renderpass_groups(self): - render_layer_instances = {} - render_pass_instances = collections.defaultdict(list) - - for instance in self.create_context.instances: - if instance.creator_identifier == CreateRenderPass.identifier: - render_layer_id = ( - instance["creator_attributes"]["render_layer_instance_id"] - ) - render_pass_instances[render_layer_id].append(instance) - elif instance.creator_identifier == self.identifier: - render_layer_instances[instance.id] = instance - - if not render_pass_instances or not render_layer_instances: - return - - layers_data = get_layers_data() - layers_by_name = collections.defaultdict(list) - for layer in layers_data: - layers_by_name[layer["name"]].append(layer) - - george_lines = [] - for render_layer_id, instances in render_pass_instances.items(): - render_layer_inst = render_layer_instances.get(render_layer_id) - if render_layer_inst is None: - continue - group_id = render_layer_inst["creator_attributes"]["group_id"] - layer_names = set() - for instance in instances: - layer_names |= set(instance["layer_names"]) - - for layer_name in layer_names: - george_lines.extend( - f"tv_layercolor \"set\" {layer['layer_id']} {group_id}" - for layer in layers_by_name[layer_name] - if layer["group_id"] != group_id - ) - if george_lines: - execute_george_through_file("\n".join(george_lines)) - - -class CreateRenderPass(TVPaintCreator): - 
product_type = "render" - product_template_product_type = "renderPass" - identifier = "render.pass" - label = "Render Pass" - icon = "fa5.image" - description = "Mark selected TVPaint layers as pass of Render Layer." - detailed_description = RENDER_PASS_DETAILED_DESCRIPTIONS - - order = CreateRenderlayer.order + 10 - - # Settings - mark_for_review = True - - def apply_settings(self, project_settings): - plugin_settings = ( - project_settings["tvpaint"]["create"]["create_render_pass"] - ) - self.default_variant = plugin_settings["default_variant"] - self.default_variants = plugin_settings["default_variants"] - self.mark_for_review = plugin_settings["mark_for_review"] - - def collect_instances(self): - instances_by_identifier = self._cache_and_get_instances() - render_layers = { - instance_data["instance_id"]: { - "variant": instance_data["variant"], - "template_data": prepare_template_data({ - "renderlayer": instance_data["variant"] - }) - } - for instance_data in ( - instances_by_identifier[CreateRenderlayer.identifier] - ) - } - - for instance_data in instances_by_identifier[self.identifier]: - render_layer_instance_id = ( - instance_data - .get("creator_attributes", {}) - .get("render_layer_instance_id") - ) - render_layer_info = render_layers.get(render_layer_instance_id, {}) - self.update_instance_labels( - instance_data, - render_layer_info.get("variant"), - render_layer_info.get("template_data") - ) - instance = CreatedInstance.from_existing(instance_data, self) - self._add_instance_to_context(instance) - - def get_dynamic_data( - self, - project_name, - folder_entity, - task_entity, - variant, - host_name, - instance - ): - dynamic_data = super().get_dynamic_data( - project_name, - folder_entity, - task_entity, - variant, - host_name, - instance - ) - dynamic_data["renderpass"] = variant - dynamic_data["renderlayer"] = "{renderlayer}" - return dynamic_data - - def update_instance_labels( - self, instance, render_layer_variant, render_layer_data=None - ): - old_label = instance.get("label") - old_group = instance.get("group") - new_label = None - new_group = None - if render_layer_variant is not None: - if render_layer_data is None: - render_layer_data = prepare_template_data({ - "renderlayer": render_layer_variant - }) - try: - new_label = instance["productName"].format(**render_layer_data) - except (KeyError, ValueError): - pass - - new_group = f"{self.get_group_label()} ({render_layer_variant})" - - instance["label"] = new_label - instance["group"] = new_group - return old_group != new_group or old_label != new_label - - def create(self, product_name, instance_data, pre_create_data): - render_layer_instance_id = pre_create_data.get( - "render_layer_instance_id" - ) - if not render_layer_instance_id: - raise CreatorError(( - "You cannot create a Render Pass without a Render Layer." 
- " Please select one first" - )) - - render_layer_instance = self.create_context.instances_by_id.get( - render_layer_instance_id - ) - if render_layer_instance is None: - raise CreatorError(( - "RenderLayer instance was not found" - f" by id \"{render_layer_instance_id}\"" - )) - - group_id = render_layer_instance["creator_attributes"]["group_id"] - self.log.debug("Query data from workfile.") - layers_data = get_layers_data() - - self.log.debug("Checking selection.") - # Get all selected layers and their group ids - marked_layer_names = pre_create_data.get("layer_names") - if marked_layer_names is not None: - layers_by_name = {layer["name"]: layer for layer in layers_data} - marked_layers = [] - for layer_name in marked_layer_names: - layer = layers_by_name.get(layer_name) - if layer is None: - raise CreatorError( - f"Layer with name \"{layer_name}\" was not found") - marked_layers.append(layer) - - else: - marked_layers = [ - layer - for layer in layers_data - if layer["selected"] - ] - - # Raise if nothing is selected - if not marked_layers: - raise CreatorError( - "Nothing is selected. Please select layers.") - - marked_layer_names = {layer["name"] for layer in marked_layers} - - marked_layer_names = set(marked_layer_names) - - instances_to_remove = [] - for instance in self.create_context.instances: - if instance.creator_identifier != self.identifier: - continue - cur_layer_names = set(instance["layer_names"]) - if not cur_layer_names.intersection(marked_layer_names): - continue - new_layer_names = cur_layer_names - marked_layer_names - if new_layer_names: - instance["layer_names"] = list(new_layer_names) - else: - instances_to_remove.append(instance) - - render_layer = render_layer_instance["variant"] - product_name_fill_data = {"renderlayer": render_layer} - - # Format dynamic keys in product name - label = product_name - try: - label = label.format( - **prepare_template_data(product_name_fill_data) - ) - except (KeyError, ValueError): - pass - - self.log.info(f"New product name is \"{label}\".") - instance_data["label"] = label - instance_data["group"] = f"{self.get_group_label()} ({render_layer})" - instance_data["layer_names"] = list(marked_layer_names) - if "creator_attributes" not in instance_data: - instance_data["creator_attributes"] = {} - - creator_attributes = instance_data["creator_attributes"] - mark_for_review = pre_create_data.get("mark_for_review") - if mark_for_review is None: - mark_for_review = self.mark_for_review - creator_attributes["mark_for_review"] = mark_for_review - creator_attributes["render_layer_instance_id"] = ( - render_layer_instance_id - ) - - new_instance = CreatedInstance( - self.product_type, - product_name, - instance_data, - self - ) - instances_data = self._remove_and_filter_instances( - instances_to_remove - ) - instances_data.append(new_instance.data_to_store()) - - self.host.write_instances(instances_data) - self._add_instance_to_context(new_instance) - self._change_layers_group(marked_layers, group_id) - - return new_instance - - def _change_layers_group(self, layers, group_id): - filtered_layers = [ - layer - for layer in layers - if layer["group_id"] != group_id - ] - if filtered_layers: - self.log.info(( - "Changing group of " - f"{','.join([layer['name'] for layer in filtered_layers])}" - f" to {group_id}" - )) - george_lines = [ - f"tv_layercolor \"set\" {layer['layer_id']} {group_id}" - for layer in filtered_layers - ] - execute_george_through_file("\n".join(george_lines)) - - def _remove_and_filter_instances(self, 
instances_to_remove): - instances_data = self.host.list_instances() - if not instances_to_remove: - return instances_data - - removed_ids = set() - for instance in instances_to_remove: - removed_ids.add(instance.id) - self._remove_instance_from_context(instance) - - return [ - instance_data - for instance_data in instances_data - if instance_data.get("instance_id") not in removed_ids - ] - - def get_pre_create_attr_defs(self): - # Find available Render Layers - # - instances are created after creators reset - current_instances = self.host.list_instances() - render_layers = [ - { - "value": inst["instance_id"], - "label": inst["productName"] - } - for inst in current_instances - if inst.get("creator_identifier") == CreateRenderlayer.identifier - ] - if not render_layers: - render_layers.append({"value": None, "label": "N/A"}) - - return [ - EnumDef( - "render_layer_instance_id", - label="Render Layer", - items=render_layers - ), - UILabelDef( - "NOTE: Try to hit refresh if you don't see a Render Layer" - ), - BoolDef( - "mark_for_review", - label="Review", - default=self.mark_for_review - ) - ] - - def get_instance_attr_defs(self): - # Find available Render Layers - current_instances = self.create_context.instances - render_layers = [ - { - "value": instance.id, - "label": instance.label - } - for instance in current_instances - if instance.creator_identifier == CreateRenderlayer.identifier - ] - if not render_layers: - render_layers.append({"value": None, "label": "N/A"}) - - return [ - EnumDef( - "render_layer_instance_id", - label="Render Layer", - items=render_layers - ), - UILabelDef( - "NOTE: Try to hit refresh if you don't see a Render Layer" - ), - BoolDef( - "mark_for_review", - label="Review", - default=self.mark_for_review - ) - ] - - -class TVPaintAutoDetectRenderCreator(TVPaintCreator): - """Create Render Layer and Render Pass instances based on scene data. - - This is auto-detection creator which can be triggered by user to create - instances based on information in scene. Each used color group in scene - will be created as Render Layer where group name is used as variant and - each TVPaint layer as Render Pass where layer name is used as variant. - - Never will have any instances, all instances belong to different creators. 
- """ - - product_type = "render" - label = "Render Layer/Passes" - identifier = "render.auto.detect.creator" - order = CreateRenderPass.order + 10 - description = ( - "Create Render Layers and Render Passes based on scene setup" - ) - detailed_description = AUTODETECT_RENDER_DETAILED_DESCRIPTION - - # Settings - enabled = False - allow_group_rename = True - group_name_template = "L{group_index}" - group_idx_offset = 10 - group_idx_padding = 3 - - def apply_settings(self, project_settings): - plugin_settings = ( - project_settings - ["tvpaint"] - ["create"] - ["auto_detect_render"] - ) - self.enabled = plugin_settings.get("enabled", False) - self.allow_group_rename = plugin_settings["allow_group_rename"] - self.group_name_template = plugin_settings["group_name_template"] - self.group_idx_offset = plugin_settings["group_idx_offset"] - self.group_idx_padding = plugin_settings["group_idx_padding"] - - def _rename_groups( - self, - groups_order: list[int], - scene_groups: list[dict[str, Any]] - ): - new_group_name_by_id: dict[int, str] = {} - groups_by_id: dict[int, dict[str, Any]] = { - group["group_id"]: group - for group in scene_groups - } - # Count only renamed groups - for idx, group_id in enumerate(groups_order): - group_index_value: str = ( - "{{:0>{}}}" - .format(self.group_idx_padding) - .format((idx + 1) * self.group_idx_offset) - ) - group_name_fill_values: dict[str, str] = { - "groupIdx": group_index_value, - "groupidx": group_index_value, - "group_idx": group_index_value, - "group_index": group_index_value, - } - - group_name: str = self.group_name_template.format( - **group_name_fill_values - ) - group: dict[str, Any] = groups_by_id[group_id] - if group["name"] != group_name: - new_group_name_by_id[group_id] = group_name - - grg_lines: list[str] = [] - for group_id, group_name in new_group_name_by_id.items(): - group: dict[str, Any] = groups_by_id[group_id] - grg_line: str = ( - "tv_layercolor \"setcolor\" {} {} {} {} {} \"{}\"" - ).format( - group["clip_id"], - group_id, - group["red"], - group["green"], - group["blue"], - group_name - ) - grg_lines.append(grg_line) - group["name"] = group_name - - if grg_lines: - execute_george_through_file("\n".join(grg_lines)) - - def _prepare_render_layer( - self, - project_name: str, - folder_entity: dict[str, Any], - task_entity: dict[str, Any], - group_id: int, - groups: list[dict[str, Any]], - mark_for_review: bool, - existing_instance: Optional[CreatedInstance] = None, - ) -> Union[CreatedInstance, None]: - match_group: Union[dict[str, Any], None] = next( - ( - group - for group in groups - if group["group_id"] == group_id - ), - None - ) - if not match_group: - return None - - task_name = task_entity["name"] - variant: str = match_group["name"] - creator: CreateRenderlayer = ( - self.create_context.creators[CreateRenderlayer.identifier] - ) - - product_name: str = creator.get_product_name( - project_name, - folder_entity, - task_entity, - variant, - host_name=self.create_context.host_name, - ) - if existing_instance is not None: - existing_instance["folderPath"] = folder_entity["path"] - existing_instance["task"] = task_name - existing_instance["productName"] = product_name - return existing_instance - - instance_data: dict[str, str] = { - "folderPath": folder_entity["path"], - "task": task_name, - "productType": creator.product_type, - "variant": variant, - } - pre_create_data: dict[str, str] = { - "group_id": group_id, - "mark_for_review": mark_for_review - } - return creator.create(product_name, instance_data, pre_create_data) - - 
- def _prepare_render_passes(
- self,
- project_name: str,
- folder_entity: dict[str, Any],
- task_entity: dict[str, Any],
- render_layer_instance: CreatedInstance,
- layers: list[dict[str, Any]],
- mark_for_review: bool,
- existing_render_passes: list[CreatedInstance]
- ):
- task_name = task_entity["name"]
- creator: CreateRenderPass = (
- self.create_context.creators[CreateRenderPass.identifier]
- )
- render_pass_by_layer_name = {}
- for render_pass in existing_render_passes:
- for layer_name in render_pass["layer_names"]:
- render_pass_by_layer_name[layer_name] = render_pass
-
- for layer in layers:
- layer_name = layer["name"]
- variant = layer_name
- render_pass = render_pass_by_layer_name.get(layer_name)
- if render_pass is not None:
- # Keep the variant of an existing render pass that covers
- # more than one layer
- if len(render_pass["layer_names"]) > 1:
- variant = render_pass["variant"]
-
- product_name = creator.get_product_name(
- project_name,
- folder_entity,
- task_entity,
- variant,
- host_name=self.create_context.host_name,
- instance=render_pass
- )
-
- if render_pass is not None:
- render_pass["folderPath"] = folder_entity["path"]
- render_pass["task"] = task_name
- render_pass["productName"] = product_name
- continue
-
- instance_data: dict[str, str] = {
- "folderPath": folder_entity["path"],
- "task": task_name,
- "productType": creator.product_type,
- "variant": variant
- }
-
- pre_create_data: dict[str, Any] = {
- "render_layer_instance_id": render_layer_instance.id,
- "layer_names": [layer_name],
- "mark_for_review": mark_for_review
- }
- creator.create(product_name, instance_data, pre_create_data)
-
- def _filter_groups(
- self,
- layers_by_group_id,
- groups_order,
- only_visible_groups
- ):
- new_groups_order = []
- for group_id in groups_order:
- layers: list[dict[str, Any]] = layers_by_group_id[group_id]
- if not layers:
- continue
-
- if (
- only_visible_groups
- and not any(
- layer
- for layer in layers
- if layer["visible"]
- )
- ):
- continue
- new_groups_order.append(group_id)
- return new_groups_order
-
- def create(self, product_name, instance_data, pre_create_data):
- project_name: str = self.create_context.get_current_project_name()
- folder_path: str = instance_data["folderPath"]
- task_name: str = instance_data["task"]
- folder_entity: dict[str, Any] = ayon_api.get_folder_by_path(
- project_name, folder_path)
- task_entity: dict[str, Any] = ayon_api.get_task_by_name(
- project_name, folder_entity["id"], task_name
- )
-
- render_layers_by_group_id: dict[int, CreatedInstance] = {}
- render_passes_by_render_layer_id: dict[int, list[CreatedInstance]] = (
- collections.defaultdict(list)
- )
- for instance in self.create_context.instances:
- if instance.creator_identifier == CreateRenderlayer.identifier:
- group_id = instance["creator_attributes"]["group_id"]
- render_layers_by_group_id[group_id] = instance
- elif instance.creator_identifier == CreateRenderPass.identifier:
- render_layer_id = (
- instance
- ["creator_attributes"]
- ["render_layer_instance_id"]
- )
- render_passes_by_render_layer_id[render_layer_id].append(
- instance
- )
-
- layers_by_group_id: dict[int, list[dict[str, Any]]] = (
- collections.defaultdict(list)
- )
- scene_layers: list[dict[str, Any]] = get_layers_data()
- scene_groups: list[dict[str, Any]] = get_groups_data()
- groups_order: list[int] = []
- for layer in scene_layers:
- group_id: int = layer["group_id"]
- # Skip 'default' group
- if group_id == 0:
- continue
-
- layers_by_group_id[group_id].append(layer)
- if group_id not in groups_order:
- groups_order.append(group_id)
-
- groups_order.reverse()
-
- 
mark_layers_for_review = pre_create_data.get( - "mark_layers_for_review", False - ) - mark_passes_for_review = pre_create_data.get( - "mark_passes_for_review", False - ) - rename_groups = pre_create_data.get("rename_groups", False) - only_visible_groups = pre_create_data.get("only_visible_groups", False) - groups_order = self._filter_groups( - layers_by_group_id, - groups_order, - only_visible_groups - ) - if not groups_order: - return - - if rename_groups: - self._rename_groups(groups_order, scene_groups) - - # Make sure all render layers are created - for group_id in groups_order: - instance: Union[CreatedInstance, None] = ( - self._prepare_render_layer( - project_name, - folder_entity, - task_entity, - group_id, - scene_groups, - mark_layers_for_review, - render_layers_by_group_id.get(group_id), - ) - ) - if instance is not None: - render_layers_by_group_id[group_id] = instance - - for group_id in groups_order: - layers: list[dict[str, Any]] = layers_by_group_id[group_id] - render_layer_instance: Union[CreatedInstance, None] = ( - render_layers_by_group_id.get(group_id) - ) - if not layers or render_layer_instance is None: - continue - - self._prepare_render_passes( - project_name, - folder_entity, - task_entity, - render_layer_instance, - layers, - mark_passes_for_review, - render_passes_by_render_layer_id[render_layer_instance.id] - ) - - def get_pre_create_attr_defs(self) -> list[AbstractAttrDef]: - render_layer_creator: CreateRenderlayer = ( - self.create_context.creators[CreateRenderlayer.identifier] - ) - render_pass_creator: CreateRenderPass = ( - self.create_context.creators[CreateRenderPass.identifier] - ) - output = [] - if self.allow_group_rename: - output.extend([ - BoolDef( - "rename_groups", - label="Rename color groups", - tooltip="Will rename color groups using studio template", - default=True - ), - BoolDef( - "only_visible_groups", - label="Only visible color groups", - tooltip=( - "Render Layers and rename will happen only on color" - " groups with visible layers." 
- ), - default=True - ), - UISeparatorDef() - ]) - output.extend([ - BoolDef( - "mark_layers_for_review", - label="Mark RenderLayers for review", - default=render_layer_creator.mark_for_review - ), - BoolDef( - "mark_passes_for_review", - label="Mark RenderPasses for review", - default=render_pass_creator.mark_for_review - ) - ]) - return output - - -class TVPaintSceneRenderCreator(TVPaintAutoCreator): - product_type = "render" - product_template_product_type = "renderScene" - identifier = "render.scene" - label = "Scene Render" - icon = "fa.file-image-o" - - # Settings - default_pass_name = "beauty" - mark_for_review = True - active_on_create = False - - def apply_settings(self, project_settings): - plugin_settings = ( - project_settings["tvpaint"]["create"]["create_render_scene"] - ) - self.default_variant = plugin_settings["default_variant"] - self.default_variants = plugin_settings["default_variants"] - self.mark_for_review = plugin_settings["mark_for_review"] - self.active_on_create = plugin_settings["active_on_create"] - self.default_pass_name = plugin_settings["default_pass_name"] - - def get_dynamic_data( - self, - project_name, - folder_entity, - task_entity, - variant, - host_name, - instance - ): - dynamic_data = super().get_dynamic_data( - project_name, - folder_entity, - task_entity, - variant, - host_name, - instance - ) - dynamic_data["renderpass"] = "{renderpass}" - dynamic_data["renderlayer"] = variant - return dynamic_data - - def _create_new_instance(self): - create_context = self.create_context - host_name = create_context.host_name - project_name = create_context.get_current_project_name() - folder_path = create_context.get_current_folder_path() - task_name = create_context.get_current_task_name() - - folder_entity = ayon_api.get_folder_by_path(project_name, folder_path) - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) - product_name = self.get_product_name( - project_name, - folder_entity, - task_entity, - self.default_variant, - host_name, - ) - data = { - "folderPath": folder_path, - "task": task_name, - "variant": self.default_variant, - "creator_attributes": { - "render_pass_name": self.default_pass_name, - "mark_for_review": True - }, - "label": self._get_label( - product_name, - self.default_pass_name - ) - } - if not self.active_on_create: - data["active"] = False - - new_instance = CreatedInstance( - self.product_type, product_name, data, self - ) - instances_data = self.host.list_instances() - instances_data.append(new_instance.data_to_store()) - self.host.write_instances(instances_data) - self._add_instance_to_context(new_instance) - return new_instance - - def create(self): - existing_instance = None - for instance in self.create_context.instances: - if instance.creator_identifier == self.identifier: - existing_instance = instance - break - - if existing_instance is None: - return self._create_new_instance() - - create_context = self.create_context - host_name = create_context.host_name - project_name = create_context.get_current_project_name() - folder_path = create_context.get_current_folder_path() - task_name = create_context.get_current_task_name() - - existing_name = existing_instance.get("folderPath") - if ( - existing_name != folder_path - or existing_instance["task"] != task_name - ): - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path - ) - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) - product_name = self.get_product_name( - 
project_name, - folder_entity, - task_entity, - existing_instance["variant"], - host_name, - existing_instance - ) - existing_instance["folderPath"] = folder_path - existing_instance["task"] = task_name - existing_instance["productName"] = product_name - - existing_instance["label"] = self._get_label( - existing_instance["productName"], - existing_instance["creator_attributes"]["render_pass_name"] - ) - - def _get_label(self, product_name, render_pass_name): - try: - product_name = product_name.format(**prepare_template_data({ - "renderpass": render_pass_name - })) - except (KeyError, ValueError): - pass - - return product_name - - def get_instance_attr_defs(self): - return [ - TextDef( - "render_pass_name", - label="Pass Name", - default=self.default_pass_name, - tooltip=( - "Value is calculated during publishing and UI will update" - " label after refresh." - ) - ), - BoolDef( - "mark_for_review", - label="Review", - default=self.mark_for_review - ) - ] diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/create/create_review.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/create/create_review.py deleted file mode 100644 index 6068ffa1d8..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/create/create_review.py +++ /dev/null @@ -1,92 +0,0 @@ -import ayon_api - -from ayon_core.pipeline import CreatedInstance -from ayon_tvpaint.api.plugin import TVPaintAutoCreator - - -class TVPaintReviewCreator(TVPaintAutoCreator): - product_type = "review" - identifier = "scene.review" - label = "Review" - icon = "ei.video" - - # Settings - active_on_create = True - - def apply_settings(self, project_settings): - plugin_settings = ( - project_settings["tvpaint"]["create"]["create_review"] - ) - self.default_variant = plugin_settings["default_variant"] - self.default_variants = plugin_settings["default_variants"] - self.active_on_create = plugin_settings["active_on_create"] - - def create(self): - existing_instance = None - for instance in self.create_context.instances: - if instance.creator_identifier == self.identifier: - existing_instance = instance - break - - create_context = self.create_context - host_name = create_context.host_name - project_name = create_context.get_current_project_name() - folder_path = create_context.get_current_folder_path() - task_name = create_context.get_current_task_name() - - existing_folder_path = None - if existing_instance is not None: - existing_folder_path = existing_instance["folderPath"] - - if existing_instance is None: - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path - ) - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) - product_name = self.get_product_name( - project_name, - folder_entity, - task_entity, - self.default_variant, - host_name - ) - data = { - "folderPath": folder_path, - "task": task_name, - "variant": self.default_variant, - } - - if not self.active_on_create: - data["active"] = False - - new_instance = CreatedInstance( - self.product_type, product_name, data, self - ) - instances_data = self.host.list_instances() - instances_data.append(new_instance.data_to_store()) - self.host.write_instances(instances_data) - self._add_instance_to_context(new_instance) - - elif ( - existing_folder_path != folder_path - or existing_instance["task"] != task_name - ): - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path - ) - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) - product_name = 
self.get_product_name( - project_name, - folder_entity, - task_entity, - existing_instance["variant"], - host_name, - existing_instance - ) - existing_instance["folderPath"] = folder_path - existing_instance["task"] = task_name - existing_instance["productName"] = product_name diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/create/create_workfile.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/create/create_workfile.py deleted file mode 100644 index b08f731869..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/create/create_workfile.py +++ /dev/null @@ -1,85 +0,0 @@ -import ayon_api - -from ayon_core.pipeline import CreatedInstance -from ayon_tvpaint.api.plugin import TVPaintAutoCreator - - -class TVPaintWorkfileCreator(TVPaintAutoCreator): - product_type = "workfile" - identifier = "workfile" - label = "Workfile" - icon = "fa.file-o" - - def apply_settings(self, project_settings): - plugin_settings = ( - project_settings["tvpaint"]["create"]["create_workfile"] - ) - self.default_variant = plugin_settings["default_variant"] - self.default_variants = plugin_settings["default_variants"] - - def create(self): - existing_instance = None - for instance in self.create_context.instances: - if instance.creator_identifier == self.identifier: - existing_instance = instance - break - - create_context = self.create_context - host_name = create_context.host_name - project_name = create_context.get_current_project_name() - folder_path = create_context.get_current_folder_path() - task_name = create_context.get_current_task_name() - - existing_folder_path = None - if existing_instance is not None: - existing_folder_path = existing_instance["folderPath"] - - if existing_instance is None: - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path - ) - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) - product_name = self.get_product_name( - project_name, - folder_entity, - task_entity, - self.default_variant, - host_name - ) - data = { - "folderPath": folder_path, - "task": task_name, - "variant": self.default_variant - } - - new_instance = CreatedInstance( - self.product_type, product_name, data, self - ) - instances_data = self.host.list_instances() - instances_data.append(new_instance.data_to_store()) - self.host.write_instances(instances_data) - self._add_instance_to_context(new_instance) - - elif ( - existing_folder_path != folder_path - or existing_instance["task"] != task_name - ): - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path - ) - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) - product_name = self.get_product_name( - project_name, - folder_entity, - task_entity, - existing_instance["variant"], - host_name, - existing_instance - ) - existing_instance["folderPath"] = folder_path - existing_instance["task"] = task_name - existing_instance["productName"] = product_name diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/load/load_image.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/load/load_image.py deleted file mode 100644 index 18b06c9632..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/load/load_image.py +++ /dev/null @@ -1,87 +0,0 @@ -from ayon_core.lib.attribute_definitions import BoolDef -from ayon_tvpaint.api import plugin -from ayon_tvpaint.api.lib import execute_george_through_file - - -class ImportImage(plugin.Loader): - """Load image or image sequence to TVPaint as new layer.""" - - 
product_types = {"render", "image", "background", "plate", "review"} - representations = {"*"} - settings_category = "tvpaint" - - label = "Import Image" - order = 1 - icon = "image" - color = "white" - - import_script = ( - "filepath = \"{}\"\n" - "layer_name = \"{}\"\n" - "tv_loadsequence filepath {}PARSE layer_id\n" - "tv_layerrename layer_id layer_name" - ) - - defaults = { - "stretch": True, - "timestretch": True, - "preload": True - } - - @classmethod - def get_options(cls, contexts): - return [ - BoolDef( - "stretch", - label="Stretch to project size", - default=cls.defaults["stretch"], - tooltip="Stretch loaded image/s to project resolution?" - ), - BoolDef( - "timestretch", - label="Stretch to timeline length", - default=cls.defaults["timestretch"], - tooltip="Clip loaded image/s to timeline length?" - ), - BoolDef( - "preload", - label="Preload loaded image/s", - default=cls.defaults["preload"], - tooltip="Preload image/s?" - ) - ] - - def load(self, context, name, namespace, options): - stretch = options.get("stretch", self.defaults["stretch"]) - timestretch = options.get("timestretch", self.defaults["timestretch"]) - preload = options.get("preload", self.defaults["preload"]) - - load_options = [] - if stretch: - load_options.append("\"STRETCH\"") - if timestretch: - load_options.append("\"TIMESTRETCH\"") - if preload: - load_options.append("\"PRELOAD\"") - - load_options_str = "" - for load_option in load_options: - load_options_str += (load_option + " ") - - # Prepare layer name - folder_name = context["folder"]["name"] - version_name = context["version"]["name"] - layer_name = "{}_{}_v{:0>3}".format( - folder_name, - name, - version_name - ) - # Fill import script with filename and layer name - # - filename mus not contain backwards slashes - path = self.filepath_from_context(context).replace("\\", "/") - george_script = self.import_script.format( - path, - layer_name, - load_options_str - ) - return execute_george_through_file(george_script) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/load/load_reference_image.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/load/load_reference_image.py deleted file mode 100644 index 88bf738999..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/load/load_reference_image.py +++ /dev/null @@ -1,319 +0,0 @@ -import collections - -from ayon_core.lib.attribute_definitions import BoolDef -from ayon_core.pipeline import registered_host -from ayon_tvpaint.api import plugin -from ayon_tvpaint.api.lib import ( - get_layers_data, - execute_george_through_file, -) -from ayon_tvpaint.api.pipeline import ( - write_workfile_metadata, - SECTION_NAME_CONTAINERS, - containerise, -) - - -class LoadImage(plugin.Loader): - """Load image or image sequence to TVPaint as new layer.""" - - product_types = {"render", "image", "background", "plate", "review"} - representations = {"*"} - settings_category = "tvpaint" - - label = "Load Image" - order = 1 - icon = "image" - color = "white" - - import_script = ( - "filepath = '\"'\"{}\"'\"'\n" - "layer_name = \"{}\"\n" - "tv_loadsequence filepath {}PARSE layer_id\n" - "tv_layerrename layer_id layer_name" - ) - - defaults = { - "stretch": True, - "timestretch": True, - "preload": True - } - - @classmethod - def get_options(cls, contexts): - return [ - BoolDef( - "stretch", - label="Stretch to project size", - default=cls.defaults["stretch"], - tooltip="Stretch loaded image/s to project resolution?" 
- ), - BoolDef( - "timestretch", - label="Stretch to timeline length", - default=cls.defaults["timestretch"], - tooltip="Clip loaded image/s to timeline length?" - ), - BoolDef( - "preload", - label="Preload loaded image/s", - default=cls.defaults["preload"], - tooltip="Preload image/s?" - ) - ] - - def load(self, context, name, namespace, options): - stretch = options.get("stretch", self.defaults["stretch"]) - timestretch = options.get("timestretch", self.defaults["timestretch"]) - preload = options.get("preload", self.defaults["preload"]) - - load_options = [] - if stretch: - load_options.append("\"STRETCH\"") - if timestretch: - load_options.append("\"TIMESTRETCH\"") - if preload: - load_options.append("\"PRELOAD\"") - - load_options_str = "" - for load_option in load_options: - load_options_str += (load_option + " ") - - # Prepare layer name - folder_name = context["folder"]["name"] - product_name = context["product"]["name"] - layer_name = self.get_unique_layer_name(folder_name, product_name) - - path = self.filepath_from_context(context) - - # Fill import script with filename and layer name - # - filename mus not contain backwards slashes - george_script = self.import_script.format( - path.replace("\\", "/"), - layer_name, - load_options_str - ) - - execute_george_through_file(george_script) - - loaded_layer = None - layers = get_layers_data() - for layer in layers: - if layer["name"] == layer_name: - loaded_layer = layer - break - - if loaded_layer is None: - raise AssertionError( - "Loading probably failed during execution of george script." - ) - - layer_names = [loaded_layer["name"]] - namespace = namespace or layer_name - return containerise( - name=name, - namespace=namespace, - members=layer_names, - context=context, - loader=self.__class__.__name__ - ) - - def _remove_layers(self, layer_names=None, layer_ids=None, layers=None): - if not layer_names and not layer_ids: - self.log.warning("Got empty layer names list.") - return - - if layers is None: - layers = get_layers_data() - - available_ids = set(layer["layer_id"] for layer in layers) - - if layer_ids is None: - # Backwards compatibility (layer ids were stored instead of names) - layer_names_are_ids = True - for layer_name in layer_names: - if ( - not isinstance(layer_name, int) - and not layer_name.isnumeric() - ): - layer_names_are_ids = False - break - - if layer_names_are_ids: - layer_ids = layer_names - - layer_ids_to_remove = [] - if layer_ids is not None: - for layer_id in layer_ids: - if layer_id in available_ids: - layer_ids_to_remove.append(layer_id) - - else: - layers_by_name = collections.defaultdict(list) - for layer in layers: - layers_by_name[layer["name"]].append(layer) - - for layer_name in layer_names: - layers = layers_by_name[layer_name] - if len(layers) == 1: - layer_ids_to_remove.append(layers[0]["layer_id"]) - - if not layer_ids_to_remove: - self.log.warning("No layers to delete.") - return - - george_script_lines = [] - for layer_id in layer_ids_to_remove: - line = "tv_layerkill {}".format(layer_id) - george_script_lines.append(line) - george_script = "\n".join(george_script_lines) - execute_george_through_file(george_script) - - def _remove_container(self, container): - if not container: - return - representation = container["representation"] - members = self.get_members_from_container(container) - host = registered_host() - current_containers = host.get_containers() - pop_idx = None - for idx, cur_con in enumerate(current_containers): - cur_members = self.get_members_from_container(cur_con) - if ( 
- cur_members == members - and cur_con["representation"] == representation - ): - pop_idx = idx - break - - if pop_idx is None: - self.log.warning( - "Didn't find container in workfile containers. {}".format( - container - ) - ) - return - - current_containers.pop(pop_idx) - write_workfile_metadata( - SECTION_NAME_CONTAINERS, current_containers - ) - - def remove(self, container): - members = self.get_members_from_container(container) - self.log.warning("Layers to delete {}".format(members)) - self._remove_layers(members) - self._remove_container(container) - - def switch(self, container, representation): - self.update(container, representation) - - def update(self, container, context): - """Replace container with different version. - - New layers are loaded as first step. Then is tried to change data in - new layers with data from old layers. When that is done old layers are - removed. - """ - - # Create new containers first - # Get layer ids from previous container - old_layer_names = self.get_members_from_container(container) - - # Backwards compatibility (layer ids were stored instead of names) - old_layers_are_ids = True - for name in old_layer_names: - if isinstance(name, int) or name.isnumeric(): - continue - old_layers_are_ids = False - break - - old_layers = [] - layers = get_layers_data() - previous_layer_ids = set(layer["layer_id"] for layer in layers) - if old_layers_are_ids: - for layer in layers: - if layer["layer_id"] in old_layer_names: - old_layers.append(layer) - else: - layers_by_name = collections.defaultdict(list) - for layer in layers: - layers_by_name[layer["name"]].append(layer) - - for layer_name in old_layer_names: - layers = layers_by_name[layer_name] - if len(layers) == 1: - old_layers.append(layers[0]) - - # Prepare few data - new_start_position = None - new_group_id = None - layer_ids_to_remove = set() - for layer in old_layers: - layer_ids_to_remove.add(layer["layer_id"]) - position = layer["position"] - group_id = layer["group_id"] - if new_start_position is None: - new_start_position = position - elif new_start_position > position: - new_start_position = position - - if new_group_id is None: - new_group_id = group_id - elif new_group_id < 0: - continue - elif new_group_id != group_id: - new_group_id = -1 - - # Remove old container - self._remove_container(container) - # Remove old layers - self._remove_layers(layer_ids=layer_ids_to_remove) - - name = container["name"] - namespace = container["namespace"] - new_container = self.load(context, name, namespace, {}) - new_layer_names = self.get_members_from_container(new_container) - - layers = get_layers_data() - - new_layers = [] - for layer in layers: - if layer["layer_id"] in previous_layer_ids: - continue - if layer["name"] in new_layer_names: - new_layers.append(layer) - - george_script_lines = [] - # Group new layers to same group as previous container layers had - # - all old layers must be under same group - if new_group_id is not None and new_group_id > 0: - for layer in new_layers: - line = "tv_layercolor \"set\" {} {}".format( - layer["layer_id"], new_group_id - ) - george_script_lines.append(line) - - # Rename new layer to have same name - # - only if both old and new have one layer - if len(old_layers) == 1 and len(new_layers) == 1: - layer_name = old_layers[0]["name"] - george_script_lines.append( - "tv_layerrename {} \"{}\"".format( - new_layers[0]["layer_id"], layer_name - ) - ) - - # Change position of new layer - # - this must be done before remove old layers - if len(new_layers) == 1 and 
new_start_position is not None: - new_layer = new_layers[0] - george_script_lines.extend([ - "tv_layerset {}".format(new_layer["layer_id"]), - "tv_layermove {}".format(new_start_position) - ]) - - # Execute george scripts if there are any - if george_script_lines: - george_script = "\n".join(george_script_lines) - execute_george_through_file(george_script) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/load/load_sound.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/load/load_sound.py deleted file mode 100644 index 086afba079..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/load/load_sound.py +++ /dev/null @@ -1,123 +0,0 @@ -import os -import tempfile -from ayon_tvpaint.api import plugin -from ayon_tvpaint.api.lib import ( - execute_george_through_file, -) - - -class ImportSound(plugin.Loader): - """Load sound to TVPaint. - - Sound layers does not have ids but only position index so we can't - reference them as we can't say which is which input. - - We might do that (in future) by input path. Which may be identifier if - we'll allow only one loaded instance of the representation as an audio. - - This plugin does not work for all version of TVPaint. Known working - version is TVPaint 11.0.10 . - - It is allowed to load video files as sound but it does not check if video - file contain any audio. - """ - - product_types = {"audio", "review", "plate"} - representations = {"*"} - - label = "Import Sound" - order = 1 - icon = "image" - color = "white" - - import_script_lines = ( - "sound_path = '\"'\"{}\"'\"'", - "output_path = \"{}\"", - # Try to get sound clip info to check if we are in TVPaint that can - # load sound - "tv_clipcurrentid", - "clip_id = result", - "tv_soundclipinfo clip_id 0", - "IF CMP(result,\"\")==1", - ( - "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"'" - " 'success|'" - ), - "EXIT", - "END", - - "tv_soundclipnew sound_path", - "line = 'success|'result", - "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' line" - ) - - def load(self, context, name, namespace, options): - # Create temp file for output - output_file = tempfile.NamedTemporaryFile( - mode="w", prefix="ayon_tvp_", suffix=".txt", delete=False - ) - output_file.close() - output_filepath = output_file.name.replace("\\", "/") - - # Prepare george script - path = self.filepath_from_context(context).replace("\\", "/") - import_script = "\n".join(self.import_script_lines) - george_script = import_script.format( - path, - output_filepath - ) - self.log.info("*** George script:\n{}\n***".format(george_script)) - # Execute geoge script - execute_george_through_file(george_script) - - # Read output file - lines = [] - with open(output_filepath, "r") as file_stream: - for line in file_stream: - line = line.rstrip() - if line: - lines.append(line) - - # Clean up temp file - os.remove(output_filepath) - - output = {} - for line in lines: - key, value = line.split("|") - output[key] = value - - success = output.get("success") - # Successfully loaded sound - if success == "0": - return - - if success == "": - raise ValueError( - "Your TVPaint version does not support loading of" - " sound through George script. Please use manual load." - ) - - if success is None: - raise ValueError( - "Unknown error happened during load." - " Please report and try to use manual load." 
- ) - - # Possible errors by TVPaint documentation - # https://www.tvpaint.com/doc/tvpaint-animation-11/george-commands#tv_soundclipnew - if success == "-1": - raise ValueError( - "BUG: George command did not get enough arguments." - ) - - if success == "-2": - # Who know what does that mean? - raise ValueError("No current clip without mixer.") - - if success == "-3": - raise ValueError("TVPaint couldn't read the file.") - - if success == "-4": - raise ValueError("TVPaint couldn't add the track.") - - raise ValueError("BUG: Unknown success value {}.".format(success)) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/load/load_workfile.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/load/load_workfile.py deleted file mode 100644 index 045e22f188..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/load/load_workfile.py +++ /dev/null @@ -1,115 +0,0 @@ -import os - -from ayon_core.pipeline import ( - registered_host, - get_current_context, - Anatomy, -) -from ayon_core.pipeline.workfile import ( - get_workfile_template_key_from_context, - get_last_workfile_with_version, -) -from ayon_core.pipeline.template_data import get_template_data_with_names -from ayon_tvpaint.api import plugin -from ayon_tvpaint.api.lib import ( - execute_george_through_file, -) -from ayon_tvpaint.api.pipeline import ( - get_current_workfile_context, -) -from ayon_core.pipeline.version_start import get_versioning_start - - -class LoadWorkfile(plugin.Loader): - """Load workfile.""" - - product_types = {"workfile"} - representations = {"tvpp"} - - label = "Load Workfile" - - def load(self, context, name, namespace, options): - # Load context of current workfile as first thing - # - which context and extension has - filepath = self.filepath_from_context(context) - filepath = filepath.replace("\\", "/") - - if not os.path.exists(filepath): - raise FileExistsError( - "The loaded file does not exist. Try downloading it first." - ) - - host = registered_host() - current_file = host.get_current_workfile() - work_context = get_current_workfile_context() - - george_script = "tv_LoadProject '\"'\"{}\"'\"'".format( - filepath - ) - execute_george_through_file(george_script) - - # Save workfile. - host_name = "tvpaint" - if "project_name" in work_context: - project_name = context["project_name"] - folder_path = context["folder_path"] - task_name = context["task_name"] - else: - project_name = work_context.get("project") - folder_path = work_context.get("asset") - task_name = work_context.get("task") - - # Far cases when there is workfile without work_context - if not folder_path: - context = get_current_context() - project_name = context["project_name"] - folder_path = context["folder_path"] - task_name = context["task_name"] - - template_key = get_workfile_template_key_from_context( - project_name, - folder_path, - task_name, - host_name, - ) - anatomy = Anatomy(project_name) - - data = get_template_data_with_names( - project_name, folder_path, task_name, host_name - ) - data["root"] = anatomy.roots - - work_template = anatomy.get_template_item("work", template_key) - - # Define saving file extension - extensions = host.get_workfile_extensions() - if current_file: - # Match the extension of current file - _, extension = os.path.splitext(current_file) - else: - # Fall back to the first extension supported for this host. 
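- # (assumption: for the TVPaint host the first extension is
- # typically ".tvpp"; the leading dot is stripped below)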
- extension = extensions[0] - - data["ext"] = extension.lstrip(".") - - work_root = work_template["directory"].format_strict(data) - version = get_last_workfile_with_version( - work_root, work_template["file"].template, data, extensions - )[1] - - if version is None: - version = get_versioning_start( - project_name, - "tvpaint", - task_name=task_name, - task_type=data["task"]["type"], - product_type="workfile" - ) - else: - version += 1 - - data["version"] = version - - filename = work_template["file"].format_strict(data) - path = os.path.join(work_root, filename) - host.save_workfile(path) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/collect_instance_frames.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/collect_instance_frames.py deleted file mode 100644 index a9e69166d7..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/collect_instance_frames.py +++ /dev/null @@ -1,38 +0,0 @@ -import pyblish.api - - -class CollectOutputFrameRange(pyblish.api.InstancePlugin): - """Collect frame start/end from context. - - When instances are collected context does not contain `frameStart` and - `frameEnd` keys yet. They are collected in global plugin - `CollectContextEntities`. - """ - - label = "Collect output frame range" - order = pyblish.api.CollectorOrder + 0.4999 - hosts = ["tvpaint"] - families = ["review", "render"] - - settings_category = "tvpaint" - - def process(self, instance): - folder_entity = instance.data.get("folderEntity") - if not folder_entity: - return - - context = instance.context - - frame_start = folder_entity["attrib"]["frameStart"] - fps = folder_entity["attrib"]["fps"] - frame_end = frame_start + ( - context.data["sceneMarkOut"] - context.data["sceneMarkIn"] - ) - instance.data["fps"] = fps - instance.data["frameStart"] = frame_start - instance.data["frameEnd"] = frame_end - self.log.info( - "Set frames {}-{} on instance {} ".format( - frame_start, frame_end, instance.data["productName"] - ) - ) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/collect_render_instances.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/collect_render_instances.py deleted file mode 100644 index 00af624700..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/collect_render_instances.py +++ /dev/null @@ -1,115 +0,0 @@ -import copy -import pyblish.api -from ayon_core.lib import prepare_template_data - - -class CollectRenderInstances(pyblish.api.InstancePlugin): - label = "Collect Render Instances" - order = pyblish.api.CollectorOrder - 0.4 - hosts = ["tvpaint"] - families = ["render", "review"] - - settings_category = "tvpaint" - ignore_render_pass_transparency = False - - def process(self, instance): - context = instance.context - creator_identifier = instance.data["creator_identifier"] - if creator_identifier == "render.layer": - self._collect_data_for_render_layer(instance) - - elif creator_identifier == "render.pass": - self._collect_data_for_render_pass(instance) - - elif creator_identifier == "render.scene": - self._collect_data_for_render_scene(instance) - - else: - if creator_identifier == "scene.review": - self._collect_data_for_review(instance) - return - - product_name = instance.data["productName"] - instance.data["name"] = product_name - instance.data["label"] = "{} [{}-{}]".format( - product_name, - context.data["sceneMarkIn"] + 1, - context.data["sceneMarkOut"] + 1 - ) - - def _collect_data_for_render_layer(self, instance): - 
instance.data["families"].append("renderLayer") - creator_attributes = instance.data["creator_attributes"] - group_id = creator_attributes["group_id"] - if creator_attributes["mark_for_review"]: - instance.data["families"].append("review") - - layers_data = instance.context.data["layersData"] - instance.data["layers"] = [ - copy.deepcopy(layer) - for layer in layers_data - if layer["group_id"] == group_id - ] - - def _collect_data_for_render_pass(self, instance): - instance.data["families"].append("renderPass") - - layer_names = set(instance.data["layer_names"]) - layers_data = instance.context.data["layersData"] - - creator_attributes = instance.data["creator_attributes"] - if creator_attributes["mark_for_review"]: - instance.data["families"].append("review") - - instance.data["layers"] = [ - copy.deepcopy(layer) - for layer in layers_data - if layer["name"] in layer_names - ] - instance.data["ignoreLayersTransparency"] = ( - self.ignore_render_pass_transparency - ) - - render_layer_data = None - render_layer_id = creator_attributes["render_layer_instance_id"] - for in_data in instance.context.data["workfileInstances"]: - if ( - in_data.get("creator_identifier") == "render.layer" - and in_data["instance_id"] == render_layer_id - ): - render_layer_data = in_data - break - - instance.data["renderLayerData"] = copy.deepcopy(render_layer_data) - # Invalid state - if render_layer_data is None: - return - render_layer_name = render_layer_data["variant"] - product_name = instance.data["productName"] - instance.data["productName"] = product_name.format( - **prepare_template_data({"renderlayer": render_layer_name}) - ) - - def _collect_data_for_render_scene(self, instance): - instance.data["families"].append("renderScene") - - creator_attributes = instance.data["creator_attributes"] - if creator_attributes["mark_for_review"]: - instance.data["families"].append("review") - - instance.data["layers"] = copy.deepcopy( - instance.context.data["layersData"] - ) - - render_pass_name = ( - instance.data["creator_attributes"]["render_pass_name"] - ) - product_name = instance.data["productName"] - instance.data["productName"] = product_name.format( - **prepare_template_data({"renderpass": render_pass_name}) - ) - - def _collect_data_for_review(self, instance): - instance.data["layers"] = copy.deepcopy( - instance.context.data["layersData"] - ) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/collect_workfile.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/collect_workfile.py deleted file mode 100644 index 27de086a46..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/collect_workfile.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import json -import pyblish.api - - -class CollectWorkfile(pyblish.api.InstancePlugin): - label = "Collect Workfile" - order = pyblish.api.CollectorOrder - 0.4 - hosts = ["tvpaint"] - families = ["workfile"] - - settings_category = "tvpaint" - - def process(self, instance): - context = instance.context - current_file = context.data["currentFile"] - - self.log.info( - "Workfile path used for workfile product: {}".format(current_file) - ) - - dirpath, filename = os.path.split(current_file) - basename, ext = os.path.splitext(filename) - - instance.data["representations"].append({ - "name": ext.lstrip("."), - "ext": ext.lstrip("."), - "files": filename, - "stagingDir": dirpath - }) - - self.log.info("Collected workfile instance: {}".format( - json.dumps(instance.data, indent=4) - )) diff --git 
a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/collect_workfile_data.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/collect_workfile_data.py deleted file mode 100644 index a34a718ff5..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/collect_workfile_data.py +++ /dev/null @@ -1,221 +0,0 @@ -import os -import json -import tempfile - -import pyblish.api - -from ayon_tvpaint.api.lib import ( - execute_george, - execute_george_through_file, - get_layers_data, - get_groups_data, -) -from ayon_tvpaint.api.pipeline import ( - SECTION_NAME_CONTEXT, - SECTION_NAME_INSTANCES, - SECTION_NAME_CONTAINERS, - - get_workfile_metadata_string, - write_workfile_metadata, - get_current_workfile_context, - list_instances, -) - - -class ResetTVPaintWorkfileMetadata(pyblish.api.Action): - """Fix invalid metadata in workfile.""" - label = "Reset invalid workfile metadata" - on = "failed" - - def process(self, context, plugin): - metadata_keys = { - SECTION_NAME_CONTEXT: {}, - SECTION_NAME_INSTANCES: [], - SECTION_NAME_CONTAINERS: [] - } - for metadata_key, default in metadata_keys.items(): - json_string = get_workfile_metadata_string(metadata_key) - if not json_string: - continue - - try: - return json.loads(json_string) - except Exception: - self.log.warning( - ( - "Couldn't parse metadata from key \"{}\"." - " Will reset to default value \"{}\"." - " Loaded value was: {}" - ).format(metadata_key, default, json_string), - exc_info=True - ) - write_workfile_metadata(metadata_key, default) - - -class CollectWorkfileData(pyblish.api.ContextPlugin): - label = "Collect Workfile Data" - order = pyblish.api.CollectorOrder - 0.45 - hosts = ["tvpaint"] - actions = [ResetTVPaintWorkfileMetadata] - - settings_category = "tvpaint" - - def process(self, context): - current_project_id = execute_george("tv_projectcurrentid") - execute_george("tv_projectselect {}".format(current_project_id)) - - # Collect and store current context to have reference - current_context = { - "project_name": context.data["projectName"], - "folder_path": context.data["folderPath"], - "task_name": context.data["task"] - } - self.log.debug("Current context is: {}".format(current_context)) - - # Collect context from workfile metadata - self.log.info("Collecting workfile context") - - workfile_context = get_current_workfile_context() - if "project" in workfile_context: - workfile_context = { - "project_name": workfile_context.get("project"), - "folder_path": workfile_context.get("asset"), - "task_name": workfile_context.get("task"), - } - # Store workfile context to pyblish context - context.data["workfile_context"] = workfile_context - if workfile_context: - # Change current context with context from workfile - key_map = ( - ("AYON_FOLDER_PATH", "folder_path"), - ("AYON_TASK_NAME", "task_name") - ) - for env_key, key in key_map: - os.environ[env_key] = workfile_context[key] - self.log.info("Context changed to: {}".format(workfile_context)) - - folder_path = workfile_context["folder_path"] - task_name = workfile_context["task_name"] - - else: - folder_path = current_context["folder_path"] - task_name = current_context["task_name"] - # Handle older workfiles or workfiles without metadata - self.log.warning(( - "Workfile does not contain information about context." - " Using current Session context." 
- )) - - # Store context folder path - context.data["folderPath"] = folder_path - context.data["task"] = task_name - self.log.info( - "Context is set to Folder: \"{}\" and Task: \"{}\"".format( - folder_path, task_name - ) - ) - - # Collect instances - self.log.info("Collecting instance data from workfile") - instance_data = list_instances() - context.data["workfileInstances"] = instance_data - self.log.debug( - "Instance data:\"{}".format(json.dumps(instance_data, indent=4)) - ) - - # Collect information about layers - self.log.info("Collecting layers data from workfile") - layers_data = get_layers_data() - layers_by_name = {} - for layer in layers_data: - layer_name = layer["name"] - if layer_name not in layers_by_name: - layers_by_name[layer_name] = [] - layers_by_name[layer_name].append(layer) - context.data["layersData"] = layers_data - context.data["layersByName"] = layers_by_name - - self.log.debug( - "Layers data:\"{}".format(json.dumps(layers_data, indent=4)) - ) - - # Collect information about groups - self.log.info("Collecting groups data from workfile") - group_data = get_groups_data() - context.data["groupsData"] = group_data - self.log.debug( - "Group data:\"{}".format(json.dumps(group_data, indent=4)) - ) - - self.log.info("Collecting scene data from workfile") - workfile_info_parts = execute_george("tv_projectinfo").split(" ") - - # Project frame start - not used - workfile_info_parts.pop(-1) - field_order = workfile_info_parts.pop(-1) - frame_rate = float(workfile_info_parts.pop(-1)) - pixel_apsect = float(workfile_info_parts.pop(-1)) - height = int(workfile_info_parts.pop(-1)) - width = int(workfile_info_parts.pop(-1)) - workfile_path = " ".join(workfile_info_parts).replace("\"", "") - - # Marks return as "{frame - 1} {state} ", example "0 set". - result = execute_george("tv_markin") - mark_in_frame, mark_in_state, _ = result.split(" ") - - result = execute_george("tv_markout") - mark_out_frame, mark_out_state, _ = result.split(" ") - - scene_data = { - "currentFile": workfile_path, - "sceneWidth": width, - "sceneHeight": height, - "scenePixelAspect": pixel_apsect, - "sceneFps": frame_rate, - "sceneFieldOrder": field_order, - "sceneMarkIn": int(mark_in_frame), - "sceneMarkInState": mark_in_state == "set", - "sceneMarkOut": int(mark_out_frame), - "sceneMarkOutState": mark_out_state == "set", - "sceneStartFrame": int(execute_george("tv_startframe")), - "sceneBgColor": self._get_bg_color() - } - self.log.debug( - "Scene data: {}".format(json.dumps(scene_data, indent=4)) - ) - context.data.update(scene_data) - - def _get_bg_color(self): - """Background color set on scene. - - Is important for review exporting where scene bg color is used as - background. 
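-
- Returns:
- Union[list[str], None]: Split result of `tv_background`
- (e.g. ["color", "255", "255", "255"]), or None when the
- output file was empty.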
- """ - output_file = tempfile.NamedTemporaryFile( - mode="w", prefix="a_tvp_", suffix=".txt", delete=False - ) - output_file.close() - output_filepath = output_file.name.replace("\\", "/") - george_script_lines = [ - # Variable containing full path to output file - "output_path = \"{}\"".format(output_filepath), - "tv_background", - "bg_color = result", - # Write data to output file - ( - "tv_writetextfile" - " \"strict\" \"append\" '\"'output_path'\"' bg_color" - ) - ] - - george_script = "\n".join(george_script_lines) - execute_george_through_file(george_script) - - with open(output_filepath, "r") as stream: - data = stream.read() - - os.remove(output_filepath) - data = data.strip() - if not data: - return None - return data.split(" ") diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/extract_convert_to_exr.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/extract_convert_to_exr.py deleted file mode 100644 index 020ebc1a89..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/extract_convert_to_exr.py +++ /dev/null @@ -1,103 +0,0 @@ -"""Plugin converting png files from ExtractSequence into exrs. - -Requires: - ExtractSequence - source of PNG - ExtractReview - review was already created so we can convert to any exr -""" -import os -import json - -import pyblish.api -from ayon_core.lib import ( - get_oiio_tool_args, - ToolNotFoundError, - run_subprocess, -) -from ayon_core.pipeline import KnownPublishError - - -class ExtractConvertToEXR(pyblish.api.InstancePlugin): - # Offset to get after ExtractSequence plugin. - order = pyblish.api.ExtractorOrder + 0.1 - label = "Extract Sequence EXR" - hosts = ["tvpaint"] - families = ["render"] - - settings_category = "tvpaint" - - enabled = False - - # Replace source PNG files or just add - replace_pngs = True - # EXR compression - exr_compression = "ZIP" - - def process(self, instance): - repres = instance.data.get("representations") - if not repres: - return - - try: - oiio_args = get_oiio_tool_args("oiiotool") - except ToolNotFoundError: - # Raise an exception when oiiotool is not available - # - this can currently happen on MacOS machines - raise KnownPublishError( - "OpenImageIO tool is not available on this machine." - ) - - new_repres = [] - for repre in repres: - if repre["name"] != "png": - continue - - self.log.info( - "Processing representation: {}".format( - json.dumps(repre, sort_keys=True, indent=4) - ) - ) - - src_filepaths = set() - new_filenames = [] - for src_filename in repre["files"]: - dst_filename = os.path.splitext(src_filename)[0] + ".exr" - new_filenames.append(dst_filename) - - src_filepath = os.path.join(repre["stagingDir"], src_filename) - dst_filepath = os.path.join(repre["stagingDir"], dst_filename) - - src_filepaths.add(src_filepath) - - args = oiio_args + [ - src_filepath, - "--compression", self.exr_compression, - # TODO how to define color conversion? 
- "--colorconvert", "sRGB", "linear", - "-o", dst_filepath - ] - run_subprocess(args) - - new_repres.append( - { - "name": "exr", - "ext": "exr", - "files": new_filenames, - "stagingDir": repre["stagingDir"], - "tags": list(repre["tags"]) - } - ) - - if self.replace_pngs: - instance.data["representations"].remove(repre) - - for filepath in src_filepaths: - instance.context.data["cleanupFullPaths"].append(filepath) - - instance.data["representations"].extend(new_repres) - self.log.info( - "Representations: {}".format( - json.dumps( - instance.data["representations"], sort_keys=True, indent=4 - ) - ) - ) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/extract_sequence.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/extract_sequence.py deleted file mode 100644 index 86c20c6528..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/extract_sequence.py +++ /dev/null @@ -1,449 +0,0 @@ -import os -import copy -import tempfile - -from PIL import Image - -import pyblish.api - -from ayon_core.pipeline.publish import ( - KnownPublishError, - get_publish_instance_families, -) -from ayon_tvpaint.api.lib import ( - execute_george, - execute_george_through_file, - get_layers_pre_post_behavior, - get_layers_exposure_frames, -) -from ayon_tvpaint.lib import ( - calculate_layers_extraction_data, - get_frame_filename_template, - fill_reference_frames, - composite_rendered_layers, - rename_filepaths_by_frame_start, -) - - -class ExtractSequence(pyblish.api.InstancePlugin): - label = "Extract Sequence" - order = pyblish.api.ExtractorOrder - hosts = ["tvpaint"] - families = ["review", "render"] - - settings_category = "tvpaint" - - # Modifiable with settings - review_bg = [255, 255, 255, 1.0] - - def process(self, instance): - self.log.info( - "* Processing instance \"{}\"".format(instance.data["label"]) - ) - - # Get all layers and filter out not visible - layers = instance.data["layers"] - filtered_layers = [ - layer - for layer in layers - if layer["visible"] - ] - layer_names = [str(layer["name"]) for layer in filtered_layers] - if not layer_names: - self.log.info( - "None of the layers from the instance" - " are visible. Extraction skipped." - ) - return - - joined_layer_names = ", ".join( - ["\"{}\"".format(name) for name in layer_names] - ) - self.log.debug( - "Instance has {} layers with names: {}".format( - len(layer_names), joined_layer_names - ) - ) - - ignore_layers_transparency = instance.data.get( - "ignoreLayersTransparency", False - ) - - mark_in = instance.context.data["sceneMarkIn"] - mark_out = instance.context.data["sceneMarkOut"] - - # Change scene Start Frame to 0 to prevent frame index issues - # - issue is that TVPaint versions deal with frame indexes in a - # different way when Start Frame is not `0` - # NOTE It will be set back after rendering - scene_start_frame = instance.context.data["sceneStartFrame"] - execute_george("tv_startframe 0") - - # Frame start/end may be stored as float - frame_start = int(instance.data["frameStart"]) - - # Handles are not stored per instance but on Context - handle_start = instance.context.data["handleStart"] - - scene_bg_color = instance.context.data["sceneBgColor"] - - # Prepare output frames - output_frame_start = frame_start - handle_start - - # Change output frame start to 0 if handles cause it's negative number - if output_frame_start < 0: - self.log.warning(( - "Frame start with handles has negative value." - " Changed to \"0\". 
Frames start: {}, Handle Start: {}" - ).format(frame_start, handle_start)) - output_frame_start = 0 - - # Calculate frame end - output_frame_end = output_frame_start + (mark_out - mark_in) - - # Save to staging dir - output_dir = instance.data.get("stagingDir") - if not output_dir: - # Create temp folder if staging dir is not set - output_dir = ( - tempfile.mkdtemp(prefix="tvpaint_render_") - ).replace("\\", "/") - instance.data["stagingDir"] = output_dir - - self.log.debug( - "Files will be rendered to folder: {}".format(output_dir) - ) - - if instance.data["productType"] == "review": - result = self.render_review( - output_dir, mark_in, mark_out, scene_bg_color - ) - else: - # Render output - result = self.render( - output_dir, - mark_in, - mark_out, - filtered_layers, - ignore_layers_transparency - ) - - output_filepaths_by_frame_idx, thumbnail_fullpath = result - - # Change scene frame Start back to previous value - execute_george("tv_startframe {}".format(scene_start_frame)) - - # Sequence of one frame - if not output_filepaths_by_frame_idx: - self.log.warning("Extractor did not create any output.") - return - - repre_files = self._rename_output_files( - output_filepaths_by_frame_idx, - mark_in, - mark_out, - output_frame_start - ) - - # Fill tags and new families from project settings - instance_families = get_publish_instance_families(instance) - tags = [] - if "review" in instance_families: - tags.append("review") - - # Sequence of one frame - single_file = len(repre_files) == 1 - if single_file: - repre_files = repre_files[0] - - # Extension is hardcoded - # - changing extension would require change code - new_repre = { - "name": "png", - "ext": "png", - "files": repre_files, - "stagingDir": output_dir, - "tags": tags - } - - if not single_file: - new_repre["frameStart"] = output_frame_start - new_repre["frameEnd"] = output_frame_end - - self.log.debug("Creating new representation: {}".format(new_repre)) - - instance.data["representations"].append(new_repre) - - if not thumbnail_fullpath: - return - - thumbnail_ext = os.path.splitext( - thumbnail_fullpath - )[1].replace(".", "") - # Create thumbnail representation - thumbnail_repre = { - "name": "thumbnail", - "ext": thumbnail_ext, - "outputName": "thumb", - "files": os.path.basename(thumbnail_fullpath), - "stagingDir": output_dir, - "tags": ["thumbnail"] - } - instance.data["representations"].append(thumbnail_repre) - - def _rename_output_files( - self, filepaths_by_frame, mark_in, mark_out, output_frame_start - ): - new_filepaths_by_frame = rename_filepaths_by_frame_start( - filepaths_by_frame, mark_in, mark_out, output_frame_start - ) - - repre_filenames = [] - for filepath in new_filepaths_by_frame.values(): - repre_filenames.append(os.path.basename(filepath)) - - if mark_in < output_frame_start: - repre_filenames = list(reversed(repre_filenames)) - - return repre_filenames - - def render_review( - self, output_dir, mark_in, mark_out, scene_bg_color - ): - """ Export images from TVPaint using `tv_savesequence` command. - - Args: - output_dir (str): Directory where files will be stored. - mark_in (int): Starting frame index from which export will begin. - mark_out (int): On which frame index export will end. - scene_bg_color (list): Bg color set in scene. Result of george - script command `tv_background`. - - Returns: - tuple: With 2 items first is list of filenames second is path to - thumbnail. 
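- (In practice the first item is a dict mapping frame index
- to the rendered filepath.)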
- """ - filename_template = get_frame_filename_template(mark_out) - - self.log.debug("Preparing data for rendering.") - first_frame_filepath = os.path.join( - output_dir, - filename_template.format(frame=mark_in) - ) - - bg_color = self._get_review_bg_color() - - george_script_lines = [ - # Change bg color to color from settings - "tv_background \"color\" {} {} {}".format(*bg_color), - "tv_SaveMode \"PNG\"", - "export_path = \"{}\"".format( - first_frame_filepath.replace("\\", "/") - ), - "tv_savesequence '\"'export_path'\"' {} {}".format( - mark_in, mark_out - ) - ] - if scene_bg_color: - # Change bg color back to previous scene bg color - _scene_bg_color = copy.deepcopy(scene_bg_color) - bg_type = _scene_bg_color.pop(0) - orig_color_command = [ - "tv_background", - "\"{}\"".format(bg_type) - ] - orig_color_command.extend(_scene_bg_color) - - george_script_lines.append(" ".join(orig_color_command)) - - execute_george_through_file("\n".join(george_script_lines)) - - first_frame_filepath = None - output_filepaths_by_frame_idx = {} - for frame_idx in range(mark_in, mark_out + 1): - filename = filename_template.format(frame=frame_idx) - filepath = os.path.join(output_dir, filename) - - output_filepaths_by_frame_idx[frame_idx] = filepath - - if not os.path.exists(filepath): - raise KnownPublishError( - "Output was not rendered. File was not found {}".format( - filepath - ) - ) - - if first_frame_filepath is None: - first_frame_filepath = filepath - - thumbnail_filepath = None - if first_frame_filepath and os.path.exists(first_frame_filepath): - thumbnail_filepath = os.path.join(output_dir, "thumbnail.jpg") - source_img = Image.open(first_frame_filepath) - if source_img.mode.lower() != "rgb": - source_img = source_img.convert("RGB") - source_img.save(thumbnail_filepath) - - return output_filepaths_by_frame_idx, thumbnail_filepath - - def render( - self, output_dir, mark_in, mark_out, layers, ignore_layer_opacity - ): - """ Export images from TVPaint. - - Args: - output_dir (str): Directory where files will be stored. - mark_in (int): Starting frame index from which export will begin. - mark_out (int): On which frame index export will end. - layers (list): List of layers to be exported. - ignore_layer_opacity (bool): Layer's opacity will be ignored. - - Returns: - tuple: With 2 items first is list of filenames second is path to - thumbnail. 
- """ - self.log.debug("Preparing data for rendering.") - - # Map layers by position - layers_by_position = {} - layers_by_id = {} - layer_ids = [] - for layer in layers: - layer_id = layer["layer_id"] - position = layer["position"] - layers_by_position[position] = layer - layers_by_id[layer_id] = layer - - layer_ids.append(layer_id) - - # Sort layer positions in reverse order - sorted_positions = list(reversed(sorted(layers_by_position.keys()))) - if not sorted_positions: - return [], None - - self.log.debug("Collecting pre/post behavior of individual layers.") - behavior_by_layer_id = get_layers_pre_post_behavior(layer_ids) - exposure_frames_by_layer_id = get_layers_exposure_frames( - layer_ids, layers - ) - extraction_data_by_layer_id = calculate_layers_extraction_data( - layers, - exposure_frames_by_layer_id, - behavior_by_layer_id, - mark_in, - mark_out - ) - # Render layers - filepaths_by_layer_id = {} - for layer_id, render_data in extraction_data_by_layer_id.items(): - layer = layers_by_id[layer_id] - filepaths_by_layer_id[layer_id] = self._render_layer( - render_data, layer, output_dir, ignore_layer_opacity - ) - - # Prepare final filepaths where compositing should store result - output_filepaths_by_frame = {} - thumbnail_src_filepath = None - finale_template = get_frame_filename_template(mark_out) - for frame_idx in range(mark_in, mark_out + 1): - filename = finale_template.format(frame=frame_idx) - - filepath = os.path.join(output_dir, filename) - output_filepaths_by_frame[frame_idx] = filepath - - if thumbnail_src_filepath is None: - thumbnail_src_filepath = filepath - - self.log.info("Started compositing of layer frames.") - composite_rendered_layers( - layers, filepaths_by_layer_id, - mark_in, mark_out, - output_filepaths_by_frame - ) - - self.log.info("Compositing finished") - thumbnail_filepath = None - if thumbnail_src_filepath and os.path.exists(thumbnail_src_filepath): - source_img = Image.open(thumbnail_src_filepath) - thumbnail_filepath = os.path.join(output_dir, "thumbnail.jpg") - # Composite background only on rgba images - # - just making sure - if source_img.mode.lower() == "rgba": - bg_color = self._get_review_bg_color() - self.log.debug("Adding thumbnail background color {}.".format( - " ".join([str(val) for val in bg_color]) - )) - bg_image = Image.new("RGBA", source_img.size, bg_color) - thumbnail_obj = Image.alpha_composite(bg_image, source_img) - thumbnail_obj.convert("RGB").save(thumbnail_filepath) - - else: - self.log.info(( - "Source for thumbnail has mode \"{}\" (Expected: RGBA)." - " Can't use thubmanail background color." 
- ).format(source_img.mode))
- source_img.save(thumbnail_filepath)
-
- return output_filepaths_by_frame, thumbnail_filepath
-
- def _get_review_bg_color(self):
- red = green = blue = 255
- if self.review_bg:
- if len(self.review_bg) == 4:
- red, green, blue, _ = self.review_bg
- elif len(self.review_bg) == 3:
- red, green, blue = self.review_bg
- return (red, green, blue)
-
- def _render_layer(
- self, render_data, layer, output_dir, ignore_layer_opacity
- ):
- frame_references = render_data["frame_references"]
- filenames_by_frame_index = render_data["filenames_by_frame_index"]
-
- layer_id = layer["layer_id"]
- george_script_lines = [
- "tv_layerset {}".format(layer_id),
- "tv_SaveMode \"PNG\""
- ]
- # Set density to 100 and store the previous opacity
- if ignore_layer_opacity:
- george_script_lines.extend([
- "tv_layerdensity 100",
- "orig_opacity = result",
- ])
-
- filepaths_by_frame = {}
- frames_to_render = []
- for frame_idx, ref_idx in frame_references.items():
- # None reference is skipped because it does not have a source
- if ref_idx is None:
- filepaths_by_frame[frame_idx] = None
- continue
- filename = filenames_by_frame_index[frame_idx]
- dst_path = "/".join([output_dir, filename])
- filepaths_by_frame[frame_idx] = dst_path
- if frame_idx != ref_idx:
- continue
-
- frames_to_render.append(str(frame_idx))
- # Go to frame
- george_script_lines.append("tv_layerImage {}".format(frame_idx))
- # Store image to output
- george_script_lines.append("tv_saveimage \"{}\"".format(dst_path))
-
- # Set density back to the original opacity
- if ignore_layer_opacity:
- george_script_lines.append("tv_layerdensity orig_opacity")
-
- self.log.debug("Rendering exposure frames {} of layer {} ({})".format(
- ",".join(frames_to_render), layer_id, layer["name"]
- ))
- # Let TVPaint render layer's image
- execute_george_through_file("\n".join(george_script_lines))
-
- # Fill frames between `frame_start_index` and `frame_end_index`
- self.log.debug("Filling frames that were not rendered.")
- fill_reference_frames(frame_references, filepaths_by_frame)
-
- return filepaths_by_frame
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_asset_name.xml b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_asset_name.xml
deleted file mode 100644
index bba0104c54..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_asset_name.xml
+++ /dev/null
@@ -1,22 +0,0 @@
-
-
-
-Product context
-## Invalid product context
-
-Context of the given product doesn't match your current scene.
-
-### How to repair?
-
-You can fix this with the "Repair" button on the right. This will use the '{expected_folder}' folder path and overwrite the '{found_folder}' folder path in scene metadata.
-
-After that, restart publishing with the Reload button.
-
-
-### How could this happen?
-
-The product was created in a different scene with a different context,
-or the scene file was copy-pasted from a different context.
-
-
-
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_duplicated_layer_names.xml b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_duplicated_layer_names.xml
deleted file mode 100644
index 23c899cfc6..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_duplicated_layer_names.xml
+++ /dev/null
@@ -1,22 +0,0 @@
-
-
-
-Layer names
-## Duplicated layer names
-
-Can't determine which layers should be published because there are duplicated layer names in the scene.
-
-### Duplicated layer names
-
-{layer_names}
-
-*Check layer names for all products in the list on the left side.*
-
-### How to repair?
-
-Hide/rename/remove the layers that should not be published.
-
-If all of them should be published, then you have duplicated product names in the scene. In that case you have to recreate them and use a different variant name.
-
-
-
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_layers_visibility.xml b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_layers_visibility.xml
deleted file mode 100644
index 5013f38eca..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_layers_visibility.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-
-
-
-Layers visibility
-## No layers are visible
-
-Layers visibility was changed during publishing, which caused all layers for product "{instance_name}" to be hidden.
-
-### Layer names for **{instance_name}**
-
-{layer_names}
-
-*Check layer names for all products in the list on the left side.*
-
-### How to repair?
-
-Reset publishing and do not change the visibility of layers after hitting the publish button.
-
-
-
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_marks.xml b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_marks.xml
deleted file mode 100644
index f0e01ebaa7..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_marks.xml
+++ /dev/null
@@ -1,21 +0,0 @@
-
-
-
-Frame range
-## Invalid render frame range
-
-The scene frame range that will be rendered is defined by MarkIn and MarkOut. The expected frame range is {expected_frame_range} and the current frame range is {current_frame_range}.
-
-It is also required that MarkIn and MarkOut are enabled in the scene. Their color is highlighted on the timeline when they are enabled.
-
-- MarkIn is {mark_in_enable_state}
-- MarkOut is {mark_out_enable_state}
-
-### How to repair?
-
-You can fix this with the "Repair" button on the right. That will change MarkOut to {expected_mark_out}.
-
-Or you can manually modify MarkIn and MarkOut in the scene timeline.
-
-
-
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_missing_layer_names.xml b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_missing_layer_names.xml
deleted file mode 100644
index 000fe84844..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_missing_layer_names.xml
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
-
-Missing layers
-## Missing layers for render pass
-
-Render pass product "{instance_name}" has stored layer names that belong to its rendering scope, but the layers were not found in the scene.
-
-### Missing layer names
-
-{layer_names}
-
-### How to repair?
-
-Find the layers that belong to product {instance_name} and rename them back to the expected layer names, or remove the product and create a new one with the right layers.
-
-
-
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_render_layer_group.xml b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_render_layer_group.xml
deleted file mode 100644
index a95387356f..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_render_layer_group.xml
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
-
-Overused Color group
-## One Color group is used by multiple Render Layers
-
-A single color group used by multiple Render Layers would cause clashes of rendered TVPaint layers. The same layers would be used for output files of both groups.
-
-### Overused color groups
-
-{groups_information}
-
-### How to repair?
-
-Refresh, go to the 'Publish' tab, go through the Render Layers and change their groups so they do not clash with each other. If you reach the limit of TVPaint color groups, there is nothing you can do to fix the issue.
-
-
-
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_render_pass_group.xml b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_render_pass_group.xml
deleted file mode 100644
index df7bdf36e5..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_render_pass_group.xml
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
-
-Render pass group
-## Invalid group of Render Pass layers
-
-Layers of Render Pass {instance_name} belong to a Render Group which is defined by TVPaint color group {expected_group}, but the layers are not in that group.
-
-### How to repair?
-
-Change the color group to {expected_group} on layers {layer_names}.
-
-
-
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_scene_settings.xml b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_scene_settings.xml
deleted file mode 100644
index f741c71456..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_scene_settings.xml
+++ /dev/null
@@ -1,26 +0,0 @@
-
-
-
-Scene settings
-## Invalid scene settings
-
-Scene settings do not match the expected values.
-
-**FPS**
-- Expected value: {expected_fps}
-- Current value: {current_fps}
-
-**Resolution**
-- Expected value: {expected_width}x{expected_height}
-- Current value: {current_width}x{current_height}
-
-**Pixel ratio**
-- Expected value: {expected_pixel_ratio}
-- Current value: {current_pixel_ratio}
-
-### How to repair?
-
-FPS and Pixel ratio can be modified in the scene settings. A wrong resolution can be fixed by changing the resolution of the scene, but due to TVPaint limitations it is possible that you will need to create a new scene.
-
-
-
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_start_frame.xml b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_start_frame.xml
deleted file mode 100644
index 9052abf66c..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_start_frame.xml
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
-
-First frame
-## MarkIn is not set to 0
-
-MarkIn in your scene must start at frame index 0, but MarkIn is set to {current_start_frame}.
-
-### How to repair?
-
-You can modify MarkIn manually or hit the "Repair" button on the right, which will change MarkIn to 0 (it does not change MarkOut).
-
-
-
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_workfile_metadata.xml b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_workfile_metadata.xml
deleted file mode 100644
index 0fc03c2948..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_workfile_metadata.xml
+++ /dev/null
@@ -1,19 +0,0 @@
-
-
-
-Missing metadata
-## Your scene is missing context metadata
-
-Your scene does not contain metadata about {missing_metadata}.
-
-### How to repair?
-
-Resave the scene using the Workfiles tool or hit the "Repair" button on the right.
-
-
-### How could this happen?
-
-You're using a scene file that was not created using the Workfiles tool.
-
-
-
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_workfile_project_name.xml b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_workfile_project_name.xml
deleted file mode 100644
index bb57e93bf2..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/help/validate_workfile_project_name.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-
-Project name
-## Your scene is from a different project
-
-It is not possible to publish into project "{workfile_project_name}" when TVPaint was opened with project "{env_project_name}" in context.
-
-### How to repair?
-
-If the workfile belongs to project "{env_project_name}", then use the Workfiles tool to resave it.
-
-Otherwise close TVPaint and launch it again from the project you want to publish in.
-
-
-### How could this happen?
-
-You've opened a workfile from a different project: you opened TVPaint on a task from "{env_project_name}" and then opened TVPaint again on a task from "{workfile_project_name}" without closing TVPaint. Because only one instance of TVPaint can run at a time, the project did not change.
-
-### Why is it important?
-Because the project may affect how TVPaint works or change publishing behavior, it is dangerous to allow changing the project context this way. For example, publishing will not run as expected.
-
-
-
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/increment_workfile_version.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/increment_workfile_version.py
deleted file mode 100644
index 601d276b97..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/increment_workfile_version.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import pyblish.api
-
-from ayon_core.lib import version_up
-from ayon_core.pipeline import registered_host
-
-
-class IncrementWorkfileVersion(pyblish.api.ContextPlugin):
- """Increment current workfile version."""
-
- order = pyblish.api.IntegratorOrder + 1
- label = "Increment Workfile Version"
- optional = True
- hosts = ["tvpaint"]
-
- settings_category = "tvpaint"
-
- def process(self, context):
-
- assert all(result["success"] for result in context.data["results"]), (
- "Publishing was not successful, so the version is not increased.")
-
- host = registered_host()
- path = context.data["currentFile"]
- host.save_workfile(version_up(path))
- self.log.info('Incrementing workfile version')
diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_asset_name.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_asset_name.py
deleted file mode 100644
index 8763c005dc..0000000000
--- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_asset_name.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import pyblish.api
-from ayon_core.pipeline import (
- PublishXmlValidationError,
- OptionalPyblishPluginMixin,
-)
-from ayon_tvpaint.api.pipeline import (
- list_instances,
- write_instances,
-)
-
-
-class FixFolderPaths(pyblish.api.Action):
- """Repair the folder paths.
-
- Change instance metadata in the workfile.
- """ - - label = "Repair" - icon = "wrench" - on = "failed" - - def process(self, context, plugin): - context_folder_path = context.data["folderPath"] - old_instance_items = list_instances() - new_instance_items = [] - for instance_item in old_instance_items: - instance_folder_path = instance_item.get("folderPath") - if ( - instance_folder_path - and instance_folder_path != context_folder_path - ): - instance_item["folderPath"] = context_folder_path - new_instance_items.append(instance_item) - write_instances(new_instance_items) - - -class ValidateAssetName( - OptionalPyblishPluginMixin, - pyblish.api.ContextPlugin -): - """Validate folder path present on instance. - - Folder path on instance should be the same as context's. - """ - - label = "Validate Folder Paths" - order = pyblish.api.ValidatorOrder - hosts = ["tvpaint"] - actions = [FixFolderPaths] - - settings_category = "tvpaint" - - def process(self, context): - if not self.is_active(context.data): - return - context_folder_path = context.data["folderPath"] - for instance in context: - folder_path = instance.data.get("folderPath") - if folder_path and folder_path == context_folder_path: - continue - - instance_label = ( - instance.data.get("label") or instance.data["name"] - ) - - raise PublishXmlValidationError( - self, - ( - "Different folder path on instance then context's." - " Instance \"{}\" has folder path: \"{}\"" - " Context folder path is: \"{}\"" - ).format( - instance_label, folder_path, context_folder_path - ), - formatting_data={ - "expected_folder": context_folder_path, - "found_folder": folder_path - } - ) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_duplicated_layer_names.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_duplicated_layer_names.py deleted file mode 100644 index be4dc0f123..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_duplicated_layer_names.py +++ /dev/null @@ -1,55 +0,0 @@ -import pyblish.api -from ayon_core.pipeline import PublishXmlValidationError - - -class ValidateLayersGroup(pyblish.api.InstancePlugin): - """Validate layer names for publishing are unique for whole workfile.""" - - label = "Validate Duplicated Layers Names" - order = pyblish.api.ValidatorOrder - families = ["renderPass"] - - settings_category = "tvpaint" - - def process(self, instance): - # Prepare layers - layers_by_name = instance.context.data["layersByName"] - - # Layers ids of an instance - layer_names = instance.data["layer_names"] - - # Check if all layers from render pass are in right group - duplicated_layer_names = [] - for layer_name in layer_names: - layers = layers_by_name.get(layer_name) - # It is not job of this validator to handle missing layers - if layers is None: - continue - if len(layers) > 1: - duplicated_layer_names.append(layer_name) - - # Everything is OK and skip exception - if not duplicated_layer_names: - return - - layers_msg = ", ".join([ - "\"{}\"".format(layer_name) - for layer_name in duplicated_layer_names - ]) - detail_lines = [ - "- {}".format(layer_name) - for layer_name in set(duplicated_layer_names) - ] - raise PublishXmlValidationError( - self, - ( - "Layers have duplicated names for instance {}." - # Description what's wrong - " There are layers with same name and one of them is marked" - " for publishing so it is not possible to know which should" - " be published. Please look for layers with names: {}" - ).format(instance.data["label"], layers_msg), - formatting_data={ - "layer_names": "
".join(detail_lines) - } - ) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_layers_visibility.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_layers_visibility.py deleted file mode 100644 index f58b8a6973..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_layers_visibility.py +++ /dev/null @@ -1,43 +0,0 @@ -import pyblish.api -from ayon_core.pipeline import PublishXmlValidationError - - -# TODO @iLLiCiTiT add repair action to disable instances? -class ValidateLayersVisiblity(pyblish.api.InstancePlugin): - """Validate existence of renderPass layers.""" - - label = "Validate Layers Visibility" - order = pyblish.api.ValidatorOrder - families = ["review", "render"] - - settings_category = "tvpaint" - - def process(self, instance): - layers = instance.data.get("layers") - # Instance have empty layers - # - it is not job of this validator to check that - if not layers: - return - layer_names = set() - for layer in layers: - layer_names.add(layer["name"]) - if layer["visible"]: - return - - instance_label = ( - instance.data.get("label") or instance.data["name"] - ) - - raise PublishXmlValidationError( - self, - "All layers of instance \"{}\" are not visible.".format( - instance_label - ), - formatting_data={ - "instance_name": instance_label, - "layer_names": "
".join([ - "- {}".format(layer_name) - for layer_name in layer_names - ]) - } - ) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_marks.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_marks.py deleted file mode 100644 index 0911beb4e8..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_marks.py +++ /dev/null @@ -1,118 +0,0 @@ -import json - -import pyblish.api -from ayon_core.pipeline import ( - PublishXmlValidationError, - OptionalPyblishPluginMixin, -) -from ayon_tvpaint.api.lib import execute_george - - -class ValidateMarksRepair(pyblish.api.Action): - """Repair the marks.""" - - label = "Repair" - icon = "wrench" - on = "failed" - - def process(self, context, plugin): - expected_data = ValidateMarks.get_expected_data(context) - - execute_george( - "tv_markin {} set".format(expected_data["markIn"]) - ) - execute_george( - "tv_markout {} set".format(expected_data["markOut"]) - ) - - -class ValidateMarks( - OptionalPyblishPluginMixin, - pyblish.api.ContextPlugin -): - """Validate mark in and out are enabled and it's duration. - - Mark In/Out does not have to match frameStart and frameEnd but duration is - important. - """ - - label = "Validate Mark In/Out" - order = pyblish.api.ValidatorOrder - optional = True - actions = [ValidateMarksRepair] - - settings_category = "tvpaint" - - @staticmethod - def get_expected_data(context): - scene_mark_in = context.data["sceneMarkIn"] - - # Data collected in `CollectContextEntities` - frame_end = context.data["frameEnd"] - frame_start = context.data["frameStart"] - handle_start = context.data["handleStart"] - handle_end = context.data["handleEnd"] - - # Calculate expected Mark out (Mark In + duration - 1) - expected_mark_out = ( - scene_mark_in - + (frame_end - frame_start) - + handle_start + handle_end - ) - return { - "markIn": scene_mark_in, - "markInState": True, - "markOut": expected_mark_out, - "markOutState": True - } - - def process(self, context): - if not self.is_active(context.data): - return - - current_data = { - "markIn": context.data["sceneMarkIn"], - "markInState": context.data["sceneMarkInState"], - "markOut": context.data["sceneMarkOut"], - "markOutState": context.data["sceneMarkOutState"] - } - expected_data = self.get_expected_data(context) - invalid = {} - for k in current_data.keys(): - if current_data[k] != expected_data[k]: - invalid[k] = { - "current": current_data[k], - "expected": expected_data[k] - } - - # Validation ends - if not invalid: - return - - current_frame_range = ( - (current_data["markOut"] - current_data["markIn"]) + 1 - ) - expected_frame_range = ( - (expected_data["markOut"] - expected_data["markIn"]) + 1 - ) - mark_in_enable_state = "disabled" - if current_data["markInState"]: - mark_in_enable_state = "enabled" - - mark_out_enable_state = "disabled" - if current_data["markOutState"]: - mark_out_enable_state = "enabled" - - raise PublishXmlValidationError( - self, - "Marks does not match database:\n{}".format( - json.dumps(invalid, sort_keys=True, indent=4) - ), - formatting_data={ - "current_frame_range": str(current_frame_range), - "expected_frame_range": str(expected_frame_range), - "mark_in_enable_state": mark_in_enable_state, - "mark_out_enable_state": mark_out_enable_state, - "expected_mark_out": expected_data["markOut"] - } - ) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_missing_layer_names.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_missing_layer_names.py 
deleted file mode 100644 index f340d3c10d..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_missing_layer_names.py +++ /dev/null @@ -1,57 +0,0 @@ -import pyblish.api -from ayon_core.pipeline import PublishXmlValidationError - - -class ValidateMissingLayers(pyblish.api.InstancePlugin): - """Validate existence of renderPass layers.""" - - label = "Validate Missing Layer Names" - order = pyblish.api.ValidatorOrder - families = ["renderPass"] - - settings_category = "tvpaint" - - def process(self, instance): - # Prepare layers - layers_by_name = instance.context.data["layersByName"] - - # Layer names of an instance - layer_names = instance.data["layer_names"] - - # Check that all layers marked for publishing exist - missing_layer_names = [] - for layer_name in layer_names: - layers = layers_by_name.get(layer_name) - if not layers: - missing_layer_names.append(layer_name) - - # Everything is OK, no exception needed - if not missing_layer_names: - return - - layers_msg = ", ".join([ - "\"{}\"".format(layer_name) - for layer_name in missing_layer_names - ]) - instance_label = ( - instance.data.get("label") or instance.data["name"] - ) - description_layer_names = "
".join([ - "- {}".format(layer_name) - for layer_name in missing_layer_names - ]) - - # Raise an error - raise PublishXmlValidationError( - self, - ( - "Layers were not found by name for instance \"{}\"." - # Description what's wrong - " Layer names marked for publishing are not available" - " in layers list. Missing layer names: {}" - ).format(instance.data["label"], layers_msg), - formatting_data={ - "instance_name": instance_label, - "layer_names": description_layer_names - } - ) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_render_layer_group.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_render_layer_group.py deleted file mode 100644 index b20ea3cac6..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_render_layer_group.py +++ /dev/null @@ -1,78 +0,0 @@ -import collections -import pyblish.api -from ayon_core.pipeline import PublishXmlValidationError - - -class ValidateRenderLayerGroups(pyblish.api.ContextPlugin): - """Validate group ids of renderLayer products. - - Validate that there are not 2 render layers using the same group. - """ - - label = "Validate Render Layers Group" - order = pyblish.api.ValidatorOrder + 0.1 - - settings_category = "tvpaint" - - def process(self, context): - # Prepare layers - render_layers_by_group_id = collections.defaultdict(list) - for instance in context: - families = instance.data.get("families") - if not families or "renderLayer" not in families: - continue - - group_id = instance.data["creator_attributes"]["group_id"] - render_layers_by_group_id[group_id].append(instance) - - duplicated_instances = [] - for group_id, instances in render_layers_by_group_id.items(): - if len(instances) > 1: - duplicated_instances.append((group_id, instances)) - - if not duplicated_instances: - return - - # Exception message preparations - groups_data = context.data["groupsData"] - groups_by_id = { - group["group_id"]: group - for group in groups_data - } - - per_group_msgs = [] - groups_information_lines = [] - for group_id, instances in duplicated_instances: - group = groups_by_id[group_id] - group_label = "Group \"{}\" ({})".format( - group["name"], - group["group_id"], - ) - line_join_product_names = "\n".join([ - f" - {instance['productName']}" - for instance in instances - ]) - joined_product_names = ", ".join([ - f"\"{instance['productName']}\"" - for instance in instances - ]) - per_group_msgs.append( - "{} < {} >".format(group_label, joined_product_names) - ) - groups_information_lines.append( - "{}\n{}".format( - group_label, line_join_product_names - ) - ) - - # Raise an error - raise PublishXmlValidationError( - self, - ( - "More than one Render Layer is using the same TVPaint" - " group color. {}" - ).format(" | ".join(per_group_msgs)), - formatting_data={ - "groups_information": "\n".join(groups_information_lines) - } - ) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_render_pass_group.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_render_pass_group.py deleted file mode 100644 index 3d00fd031f..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_render_pass_group.py +++ /dev/null @@ -1,91 +0,0 @@ -import collections -import pyblish.api -from ayon_core.pipeline import PublishXmlValidationError - - -class ValidateLayersGroup(pyblish.api.InstancePlugin): - """Validate group ids of renderPass layers. - - Validates that all layers are in same group as they were during creation. 
- """ - - label = "Validate Layers Group" - order = pyblish.api.ValidatorOrder + 0.1 - families = ["renderPass"] - - settings_category = "tvpaint" - - def process(self, instance): - # Prepare layers - layers_data = instance.context.data["layersData"] - layers_by_name = { - layer["name"]: layer - for layer in layers_data - } - - # Expected group id for instance layers - group_id = instance.data["group_id"] - # Layers ids of an instance - layer_names = instance.data["layer_names"] - # Check if all layers from render pass are in right group - invalid_layers_by_group_id = collections.defaultdict(list) - invalid_layer_names = set() - for layer_name in layer_names: - layer = layers_by_name.get(layer_name) - _group_id = layer["group_id"] - if _group_id != group_id: - invalid_layers_by_group_id[_group_id].append(layer) - invalid_layer_names.add(layer_name) - - # Everything is OK and skip exception - if not invalid_layers_by_group_id: - return - - # Exception message preparations - groups_data = instance.context.data["groupsData"] - groups_by_id = { - group["group_id"]: group - for group in groups_data - } - correct_group = groups_by_id[group_id] - - per_group_msgs = [] - for _group_id, layers in invalid_layers_by_group_id.items(): - _group = groups_by_id[_group_id] - layers_msgs = [] - for layer in layers: - layers_msgs.append( - "\"{}\" (id: {})".format(layer["name"], layer["layer_id"]) - ) - per_group_msgs.append( - "Group \"{}\" (id: {}) < {} >".format( - _group["name"], - _group["group_id"], - ", ".join(layers_msgs) - ) - ) - - # Raise an error - raise PublishXmlValidationError( - self, - ( - # Short message - "Layers in wrong group." - # Description what's wrong - " Layers from render pass \"{}\" must be in group {} (id: {})." - # Detailed message - " Layers in wrong group: {}" - ).format( - instance.data["label"], - correct_group["name"], - correct_group["group_id"], - " | ".join(per_group_msgs) - ), - formatting_data={ - "instance_name": ( - instance.data.get("label") or instance.data["name"] - ), - "expected_group": correct_group["name"], - "layer_names": ", ".join(invalid_layer_names) - } - ) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_scene_settings.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_scene_settings.py deleted file mode 100644 index 8bad5c43c8..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_scene_settings.py +++ /dev/null @@ -1,60 +0,0 @@ -import json - -import pyblish.api -from ayon_core.pipeline import ( - PublishXmlValidationError, - OptionalPyblishPluginMixin, -) - - -# TODO @iLliCiTiT add fix action for fps -class ValidateProjectSettings( - OptionalPyblishPluginMixin, - pyblish.api.ContextPlugin -): - """Validate scene settings against database.""" - - label = "Validate Scene Settings" - order = pyblish.api.ValidatorOrder - - settings_category = "tvpaint" - optional = True - - def process(self, context): - if not self.is_active(context.data): - return - - folder_attributes = context.data["folderEntity"]["attrib"] - scene_data = { - "fps": context.data.get("sceneFps"), - "resolutionWidth": context.data.get("sceneWidth"), - "resolutionHeight": context.data.get("sceneHeight"), - "pixelAspect": context.data.get("scenePixelAspect") - } - invalid = {} - for k in scene_data.keys(): - expected_value = folder_attributes[k] - if scene_data[k] != expected_value: - invalid[k] = { - "current": scene_data[k], "expected": expected_value - } - - if not invalid: - return - - raise 
PublishXmlValidationError( - self, - "Scene settings do not match the database:\n{}".format( - json.dumps(invalid, sort_keys=True, indent=4) - ), - formatting_data={ - "expected_fps": folder_attributes["fps"], - "current_fps": scene_data["fps"], - "expected_width": folder_attributes["resolutionWidth"], - "expected_height": folder_attributes["resolutionHeight"], - "current_width": scene_data["resolutionWidth"], - "current_height": scene_data["resolutionHeight"], - "expected_pixel_ratio": folder_attributes["pixelAspect"], - "current_pixel_ratio": scene_data["pixelAspect"] - } - ) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_start_frame.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_start_frame.py deleted file mode 100644 index 9669acf1b5..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_start_frame.py +++ /dev/null @@ -1,48 +0,0 @@ -import pyblish.api -from ayon_core.pipeline import ( - PublishXmlValidationError, - OptionalPyblishPluginMixin, -) -from ayon_tvpaint.api.lib import execute_george - - -class RepairStartFrame(pyblish.api.Action): - """Repair start frame.""" - - label = "Repair" - icon = "wrench" - on = "failed" - - def process(self, context, plugin): - execute_george("tv_startframe 0") - - -class ValidateStartFrame( - OptionalPyblishPluginMixin, - pyblish.api.ContextPlugin -): - """Validate that the start frame is frame 0.""" - - label = "Validate Start Frame" - order = pyblish.api.ValidatorOrder - hosts = ["tvpaint"] - actions = [RepairStartFrame] - - settings_category = "tvpaint" - optional = True - - def process(self, context): - if not self.is_active(context.data): - return - - start_frame = execute_george("tv_startframe") - if start_frame == 0: - return - - raise PublishXmlValidationError( - self, - "Start frame has to be frame 0.", - formatting_data={ - "current_start_frame": start_frame - } - ) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_workfile_metadata.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_workfile_metadata.py deleted file mode 100644 index 34c02c78ed..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_workfile_metadata.py +++ /dev/null @@ -1,65 +0,0 @@ -import pyblish.api -from ayon_core.pipeline import ( - PublishXmlValidationError, - PublishValidationError, - registered_host, -) - - -class ValidateWorkfileMetadataRepair(pyblish.api.Action): - """Store current context into workfile metadata.""" - - label = "Use current context" - icon = "wrench" - on = "failed" - - def process(self, context, _plugin): - """Save current workfile which should trigger storing of metadata.""" - current_file = context.data["currentFile"] - host = registered_host() - # Saving the file triggers storing of metadata - host.save_workfile(current_file) - - -class ValidateWorkfileMetadata(pyblish.api.ContextPlugin): - """Validate that the workfile contains required metadata for publishing.""" - - label = "Validate Workfile Metadata" - order = pyblish.api.ValidatorOrder - - families = ["workfile"] - - actions = [ValidateWorkfileMetadataRepair] - - settings_category = "tvpaint" - - required_keys = {"project_name", "folder_path", "task_name"} - - def process(self, context): - workfile_context = context.data["workfile_context"] - if not workfile_context: - raise PublishValidationError( - "Current workfile is missing all context metadata.", - "Missing context", - ( - "Current workfile is missing metadata about task."
- " To fix this issue save the file using Workfiles tool." - ) - ) - - missing_keys = [] - for key in self.required_keys: - value = workfile_context.get(key) - if not value: - missing_keys.append(key) - - if missing_keys: - raise PublishXmlValidationError( - self, - "Current workfile is missing metadata about {}.".format( - ", ".join(missing_keys) - ), - formatting_data={ - "missing_metadata": ", ".join(missing_keys) - } - ) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_workfile_project_name.py b/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_workfile_project_name.py deleted file mode 100644 index 868c7d44fc..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/plugins/publish/validate_workfile_project_name.py +++ /dev/null @@ -1,55 +0,0 @@ -import pyblish.api -from ayon_core.pipeline import PublishXmlValidationError - - -class ValidateWorkfileProjectName(pyblish.api.ContextPlugin): - """Validate project name stored in workfile metadata. - - It is not possible to publish from different project than is set in - environment variable "AYON_PROJECT_NAME". - """ - - label = "Validate Workfile Project Name" - order = pyblish.api.ValidatorOrder - - settings_category = "tvpaint" - - def process(self, context): - workfile_context = context.data.get("workfile_context") - # If workfile context is missing than project is matching to - # global project - if not workfile_context: - self.log.info( - "Workfile context (\"workfile_context\") is not filled." - ) - return - - workfile_project_name = workfile_context["project_name"] - env_project_name = context.data["projectName"] - if workfile_project_name == env_project_name: - self.log.info(( - "Both workfile project and environment project are same. {}" - ).format(env_project_name)) - return - - # Raise an error - raise PublishXmlValidationError( - self, - ( - # Short message - "Workfile from different Project ({})." - # Description what's wrong - " It is not possible to publish when TVPaint was launched in" - "context of different project. Current context project is" - " \"{}\". Launch TVPaint in context of project \"{}\"" - " and then publish." 
- ).format( - workfile_project_name, - env_project_name, - workfile_project_name, - ), - formatting_data={ - "workfile_project_name": workfile_project_name, - "expected_project_name": env_project_name - } - ) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/resources/template.tvpp b/server_addon/tvpaint/client/ayon_tvpaint/resources/template.tvpp deleted file mode 100644 index 4bf05d3595..0000000000 Binary files a/server_addon/tvpaint/client/ayon_tvpaint/resources/template.tvpp and /dev/null differ diff --git a/server_addon/tvpaint/client/ayon_tvpaint/tvpaint_plugin/__init__.py b/server_addon/tvpaint/client/ayon_tvpaint/tvpaint_plugin/__init__.py deleted file mode 100644 index 59a7aaf99b..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/tvpaint_plugin/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -import os - - -def get_plugin_files_path(): - current_dir = os.path.dirname(os.path.abspath(__file__)) - return os.path.join(current_dir, "plugin_files") diff --git a/server_addon/tvpaint/client/ayon_tvpaint/tvpaint_plugin/plugin_code/CMakeLists.txt b/server_addon/tvpaint/client/ayon_tvpaint/tvpaint_plugin/plugin_code/CMakeLists.txt deleted file mode 100644 index c221eb0431..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/tvpaint_plugin/plugin_code/CMakeLists.txt +++ /dev/null @@ -1,56 +0,0 @@ -cmake_minimum_required(VERSION 3.17) -project(OpenPypePlugin C CXX) - -set(CMAKE_CXX_STANDARD 17) -set(CMAKE_CXX_EXTENSIONS OFF) - -set(IP_ENABLE_UNICODE OFF) -set(IP_ENABLE_DOCTEST OFF) - -if(MSVC) - set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON) - add_definitions(-D_CRT_SECURE_NO_WARNINGS) - # Define WIN64 or WIN32 for TVPaint SDK - if(CMAKE_SIZEOF_VOID_P EQUAL 8) - message("64bit") - add_definitions(-DWIN64) - elseif(CMAKE_SIZEOF_VOID_P EQUAL 4) - message("32bit") - add_definitions(-DWIN32) - endif() -endif() - -# TODO better options -option(BOOST_ROOT "Path to root of Boost" "") - -option(OPENSSL_INCLUDE "OpenSSL include path" "") -option(OPENSSL_LIB_DIR "OpenSSL lib path" "") - -option(WEBSOCKETPP_INCLUDE "Websocketpp include path" "") - -option(JSONRPCPP_INCLUDE "Jsonrpcpp include path" "") - -# Use static boost libraries -set(Boost_USE_STATIC_LIBS ON) - -find_package(Boost COMPONENTS random chrono date_time regex REQUIRED) - -include_directories( - "${TVPAINT_SDK_INCLUDE}" - "${OPENSSL_INCLUDE}" - "${WEBSOCKETPP_INCLUDE}" - "${JSONRPCPP_INCLUDE}" - "${Boost_INCLUDE_DIRS}" -) - -link_directories( - "${OPENSSL_LIB_DIR}" - "${Boost_LIBRARY_DIRS}" -) - -add_library(jsonrpcpp INTERFACE) - -add_library(${PROJECT_NAME} SHARED library.cpp library.def "${TVPAINT_SDK_LIB}/dllx.c") - -target_link_libraries(${PROJECT_NAME} ${Boost_LIBRARIES}) -target_link_libraries(${PROJECT_NAME} jsonrpcpp) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/tvpaint_plugin/plugin_code/README.md b/server_addon/tvpaint/client/ayon_tvpaint/tvpaint_plugin/plugin_code/README.md deleted file mode 100644 index 70a96b2919..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/tvpaint_plugin/plugin_code/README.md +++ /dev/null @@ -1,34 +0,0 @@ -README for TVPaint Avalon plugin -================================ -Introduction ------------- -This project is dedicated to integrate Avalon functionality to TVPaint. -This implementation is using TVPaint plugin (C/C++) which can communicate with python process. The communication should allow to trigger tools or pipeline functions from TVPaint and accept requests from python process at the same time. 
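Both directions of this communication use json-rpc 2.0 envelopes (described just below), so the Python process can call into the plugin and vice versa over one socket. A sketch of the three message shapes involved; "execute_george" matches a callback registered in the plugin code further down, while "log_message" and all payload values are made up:

```
import json

# Request: the sender supplies an id so the response can be matched.
request = {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "execute_george",
    "params": ["tv_version"],
}
# Response: carries the same id and either "result" or "error".
response = {
    "jsonrpc": "2.0",
    "id": 1,
    "result": "example george output",
}
# Notification: no id, so no response is expected.
notification = {
    "jsonrpc": "2.0",
    "method": "log_message",
    "params": {"message": "hello"},
}
print(json.dumps(request))
```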
- -Current implementation is based on websocket protocol, using json-rpc communication (specification 2.0). Project is in beta stage, tested only on Windows. - -To be able to load plugin, environment variable `WEBSOCKET_URL` must be set otherwise plugin won't load at all. Plugin should not affect TVPaint if python server crash, but buttons won't work. - -## Requirements - Python server -- python >= 3.6 -- aiohttp -- aiohttp-json-rpc - -### Windows -- pywin32 - required only for plugin installation - -## Requirements - Plugin compilation -- TVPaint SDK - Ask for SDK on TVPaint support. -- Boost 1.72.0 - Boost is used across other plugins (Should be possible to use different version with CMakeLists modification) -- Websocket++/Websocketpp - Websocket library (https://github.com/zaphoyd/websocketpp) -- OpenSSL library - Required by Websocketpp -- jsonrpcpp - C++ library handling json-rpc 2.0 (https://github.com/badaix/jsonrpcpp) -- nlohmann/json - Required for jsonrpcpp (https://github.com/nlohmann/json) - -### jsonrpcpp -This library has `nlohmann/json` as it's part, but current `master` has old version which has bug and probably won't be possible to use library on windows without using last `nlohmann/json`. - -## TODO -- modify code and CMake to be able to compile on MacOS/Linux -- separate websocket logic from plugin logic -- hide buttons and show error message if server is closed diff --git a/server_addon/tvpaint/client/ayon_tvpaint/tvpaint_plugin/plugin_code/library.cpp b/server_addon/tvpaint/client/ayon_tvpaint/tvpaint_plugin/plugin_code/library.cpp deleted file mode 100644 index c6c8ff244e..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/tvpaint_plugin/plugin_code/library.cpp +++ /dev/null @@ -1,807 +0,0 @@ -#ifdef _WIN32 -// Include before -#include -#endif - -#include -#include -#include -#include -#include -#include -#include - -#include "plugdllx.h" - -#include - -#include -#include - -#include "json.hpp" -#include "jsonrpcpp.hpp" - - -// All functions not exported should be static. -// All global variables should be static. - -// mReq Identification of the requester. (=0 closed, !=0 requester ID) -static struct { - bool firstParams; - DWORD mReq; - void* mLocalFile; - PIFilter *current_filter; - // Id counter for client requests - int client_request_id; - // There are new menu items - bool newMenuItems; - // Menu item definitions received from connection - nlohmann::json menuItems; - // Menu items used in requester by their ID - nlohmann::json menuItemsById; - std::list menuItemsIds; - // Messages from server before processing. 
- // - messages can't be process at the moment of receive as client is running in thread - std::queue messages; - // Responses to requests mapped by request id - std::map responses; - -} Data = { - true, - 0, - nullptr, - nullptr, - 1, - false, - nlohmann::json::object(), - nlohmann::json::object() -}; - -// Json rpc 2.0 parser - for handling messages and callbacks -jsonrpcpp::Parser parser; -typedef websocketpp::client client; - - -class connection_metadata { -private: - websocketpp::connection_hdl m_hdl; - client *m_endpoint; - std::string m_status; -public: - typedef websocketpp::lib::shared_ptr ptr; - - connection_metadata(websocketpp::connection_hdl hdl, client *endpoint) - : m_hdl(hdl), m_status("Connecting") { - m_endpoint = endpoint; - } - - void on_open(client *c, websocketpp::connection_hdl hdl) { - m_status = "Open"; - } - - void on_fail(client *c, websocketpp::connection_hdl hdl) { - m_status = "Failed"; - } - - void on_close(client *c, websocketpp::connection_hdl hdl) { - m_status = "Closed"; - } - - void on_message(websocketpp::connection_hdl, client::message_ptr msg) { - std::string json_str; - if (msg->get_opcode() == websocketpp::frame::opcode::text) { - json_str = msg->get_payload(); - } else { - json_str = websocketpp::utility::to_hex(msg->get_payload()); - } - process_message(json_str); - } - - void process_message(std::string msg) { - std::cout << "--> " << msg << "\n"; - try { - jsonrpcpp::entity_ptr entity = parser.do_parse(msg); - if (!entity) { - // Return error code? - - } else if (entity->is_response()) { - jsonrpcpp::Response response = jsonrpcpp::Response(entity->to_json()); - Data.responses[response.id().int_id()] = response; - - } else if (entity->is_request() || entity->is_notification()) { - Data.messages.push(msg); - } - } - catch (const jsonrpcpp::RequestException &e) { - std::string message = e.to_json().dump(); - std::cout << "<-- " << e.to_json().dump() << "\n"; - send(message); - } - catch (const jsonrpcpp::ParseErrorException &e) { - std::string message = e.to_json().dump(); - std::cout << "<-- " << message << "\n"; - send(message); - } - catch (const jsonrpcpp::RpcException &e) { - std::cerr << "RpcException: " << e.what() << "\n"; - std::string message = jsonrpcpp::ParseErrorException(e.what()).to_json().dump(); - std::cout << "<-- " << message << "\n"; - send(message); - } - catch (const std::exception &e) { - std::cerr << "Exception: " << e.what() << "\n"; - } - } - - void send(std::string message) { - if (get_status() != "Open") { - return; - } - websocketpp::lib::error_code ec; - - m_endpoint->send(m_hdl, message, websocketpp::frame::opcode::text, ec); - if (ec) { - std::cout << "> Error sending message: " << ec.message() << std::endl; - return; - } - } - - void send_notification(jsonrpcpp::Notification *notification) { - send(notification->to_json().dump()); - } - - void send_response(jsonrpcpp::Response *response) { - send(response->to_json().dump()); - } - - void send_request(jsonrpcpp::Request *request) { - send(request->to_json().dump()); - } - - websocketpp::connection_hdl get_hdl() const { - return m_hdl; - } - - std::string get_status() const { - return m_status; - } -}; - - -class websocket_endpoint { -private: - client m_endpoint; - connection_metadata::ptr client_metadata; - websocketpp::lib::shared_ptr m_thread; - bool thread_is_running = false; - -public: - websocket_endpoint() { - m_endpoint.clear_access_channels(websocketpp::log::alevel::all); - m_endpoint.clear_error_channels(websocketpp::log::elevel::all); - } - - 
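Further down, Communicator::call_method shows how request/response pairs are correlated: each outgoing request gets an incrementing id, responses land in a shared map keyed by that id (Data.responses), and the caller polls until its entry appears. The same pattern in a rough Python sketch, where send_json stands in for the websocket send:

```
import itertools
import time

_responses = {}                  # request id -> response payload
_request_ids = itertools.count(1)

def on_message_from_reader(response):
    # Called from the socket reader thread when a response arrives.
    _responses[response["id"]] = response

def call_method_blocking(send_json, method, params, poll=0.1):
    request_id = next(_request_ids)
    send_json({"jsonrpc": "2.0", "id": request_id,
               "method": method, "params": params})
    # Block until the reader thread stores the matching response.
    while request_id not in _responses:
        time.sleep(poll)
    return _responses.pop(request_id)
```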
~websocket_endpoint() { - close_connection(); - } - - void close_connection() { - m_endpoint.stop_perpetual(); - if (connected()) - { - // Close client - close(websocketpp::close::status::normal, ""); - } - if (thread_is_running) { - // Join thread - m_thread->join(); - thread_is_running = false; - } - } - - bool connected() - { - return (client_metadata && client_metadata->get_status() == "Open"); - } - int connect(std::string const &uri) { - if (client_metadata && client_metadata->get_status() == "Open") { - std::cout << "> Already connected" << std::endl; - return 0; - } - - m_endpoint.init_asio(); - m_endpoint.start_perpetual(); - - m_thread.reset(new websocketpp::lib::thread(&client::run, &m_endpoint)); - thread_is_running = true; - - websocketpp::lib::error_code ec; - - client::connection_ptr con = m_endpoint.get_connection(uri, ec); - - if (ec) { - std::cout << "> Connect initialization error: " << ec.message() << std::endl; - return -1; - } - - client_metadata = websocketpp::lib::make_shared(con->get_handle(), &m_endpoint); - - con->set_open_handler(websocketpp::lib::bind( - &connection_metadata::on_open, - client_metadata, - &m_endpoint, - websocketpp::lib::placeholders::_1 - )); - con->set_fail_handler(websocketpp::lib::bind( - &connection_metadata::on_fail, - client_metadata, - &m_endpoint, - websocketpp::lib::placeholders::_1 - )); - con->set_close_handler(websocketpp::lib::bind( - &connection_metadata::on_close, - client_metadata, - &m_endpoint, - websocketpp::lib::placeholders::_1 - )); - con->set_message_handler(websocketpp::lib::bind( - &connection_metadata::on_message, - client_metadata, - websocketpp::lib::placeholders::_1, - websocketpp::lib::placeholders::_2 - )); - - m_endpoint.connect(con); - - return 1; - } - - void close(websocketpp::close::status::value code, std::string reason) { - if (!client_metadata || client_metadata->get_status() != "Open") { - std::cout << "> Not connected yet" << std::endl; - return; - } - - websocketpp::lib::error_code ec; - - m_endpoint.close(client_metadata->get_hdl(), code, reason, ec); - if (ec) { - std::cout << "> Error initiating close: " << ec.message() << std::endl; - } - } - - void send(std::string message) { - if (!client_metadata || client_metadata->get_status() != "Open") { - std::cout << "> Not connected yet" << std::endl; - return; - } - - client_metadata->send(message); - } - - void send_notification(jsonrpcpp::Notification *notification) { - client_metadata->send_notification(notification); - } - - void send_response(jsonrpcpp::Response *response) { - client_metadata->send(response->to_json().dump()); - } - - void send_response(std::shared_ptr response) { - client_metadata->send(response->to_json().dump()); - } - - void send_request(jsonrpcpp::Request *request) { - client_metadata->send_request(request); - } -}; - -class Communicator { -private: - // URL to websocket server - std::string websocket_url; - // Should be avalon plugin available? 
- // - this may change during processing if websocketet url is not set or server is down - bool server_available; -public: - Communicator(std::string url); - Communicator(); - websocket_endpoint endpoint; - bool is_connected(); - bool is_usable(); - void connect(); - void process_requests(); - jsonrpcpp::Response call_method(std::string method_name, nlohmann::json params); - void call_notification(std::string method_name, nlohmann::json params); -}; - - -Communicator::Communicator(std::string url) { - // URL to websocket server - websocket_url = url; - // Should be avalon plugin available? - // - this may change during processing if websocketet url is not set or server is down - if (url == "") { - server_available = false; - } else { - server_available = true; - } -} - - -bool Communicator::is_connected(){ - return endpoint.connected(); -} - -bool Communicator::is_usable(){ - return server_available; -} - -void Communicator::connect() -{ - if (!server_available) { - return; - } - int con_result; - con_result = endpoint.connect(websocket_url); - if (con_result == -1) - { - server_available = false; - } else { - server_available = true; - } -} - -void Communicator::call_notification(std::string method_name, nlohmann::json params) { - if (!server_available || !is_connected()) {return;} - - jsonrpcpp::Notification notification = {method_name, params}; - endpoint.send_notification(¬ification); -} - -jsonrpcpp::Response Communicator::call_method(std::string method_name, nlohmann::json params) { - jsonrpcpp::Response response; - if (!server_available || !is_connected()) - { - return response; - } - int request_id = Data.client_request_id++; - jsonrpcpp::Request request = {request_id, method_name, params}; - endpoint.send_request(&request); - - bool found = false; - while (!found) { - std::map::iterator iter = Data.responses.find(request_id); - if (iter != Data.responses.end()) { - //element found == was found response - response = iter->second; - Data.responses.erase(request_id); - found = true; - } else { - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - } - } - return response; -} - -void Communicator::process_requests() { - if (!server_available || !is_connected() || Data.messages.empty()) {return;} - - std::string msg = Data.messages.front(); - Data.messages.pop(); - std::cout << "Parsing: " << msg << std::endl; - // TODO: add try->except block - auto response = parser.parse(msg); - if (response->is_response()) { - endpoint.send_response(response); - } else { - jsonrpcpp::request_ptr request = std::dynamic_pointer_cast(response); - jsonrpcpp::Error error("Method \"" + request->method() + "\" not found", -32601); - jsonrpcpp::Response _response(request->id(), error); - endpoint.send_response(&_response); - } -} - -jsonrpcpp::response_ptr define_menu(const jsonrpcpp::Id &id, const jsonrpcpp::Parameter ¶ms) { - /* Define plugin menu. - - Menu is defined with json with "title" and "menu_items". - Each item in "menu_items" must have keys: - - "callback" - callback called with RPC when button is clicked - - "label" - label of button - - "help" - tooltip of button - ``` - { - "title": "< Menu title>", - "menu_items": [ - { - "callback": "workfiles_tool", - "label": "Workfiles", - "help": "Open workfiles tool" - }, - ... 
- ] - } - ``` - */ - Data.menuItems = params.to_json()[0]; - Data.newMenuItems = true; - - std::string output; - - return std::make_shared(id, output); -} - -jsonrpcpp::response_ptr execute_george(const jsonrpcpp::Id &id, const jsonrpcpp::Parameter ¶ms) { - const char *george_script; - char cmd_output[1024] = {0}; - char empty_char = {0}; - std::string std_george_script; - std::string output; - - nlohmann::json json_params = params.to_json(); - std_george_script = json_params[0]; - george_script = std_george_script.c_str(); - - // Result of `TVSendCmd` is int with length of output string - TVSendCmd(Data.current_filter, george_script, cmd_output); - - for (int i = 0; i < sizeof(cmd_output); i++) - { - if (cmd_output[i] == empty_char){ - break; - } - output += cmd_output[i]; - } - return std::make_shared(id, output); -} - -void register_callbacks(){ - parser.register_request_callback("define_menu", define_menu); - parser.register_request_callback("execute_george", execute_george); -} - -Communicator* communication = nullptr; - -//////////////////////////////////////////////////////////////////////////////////////// - -static char* GetLocalString( PIFilter* iFilter, int iNum, char* iDefault ) -{ - char* str; - - if( Data.mLocalFile == NULL ) - return iDefault; - - str = TVGetLocalString( iFilter, Data.mLocalFile, iNum ); - if( str == NULL || strlen( str ) == 0 ) - return iDefault; - - return str; -} - -/**************************************************************************************/ -// Localisation - -// numbers (like 10011) are IDs in the localized file. -// strings are the default values to use when the ID is not found -// in the localized file (or the localized file doesn't exist). -std::string label_from_evn() -{ - std::string _plugin_label = "OpenPype"; - if (std::getenv("AYON_MENU_LABEL") && std::getenv("AYON_MENU_LABEL") != "") - { - _plugin_label = std::getenv("AYON_MENU_LABEL"); - } - return _plugin_label; -} -std::string plugin_label = label_from_evn(); - -#define TXT_REQUESTER GetLocalString( iFilter, 100, "OpenPype Tools" ) - -#define TXT_REQUESTER_ERROR GetLocalString( iFilter, 30001, "Can't Open Requester !" ) - -//////////////////////////////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////////////////////////////// - -// The functions directly called by Aura through the plugin interface - - - -/**************************************************************************************/ -// "About" function. - - -void FAR PASCAL PI_About( PIFilter* iFilter ) -{ - char text[256]; - - sprintf( text, "%s %d,%d", iFilter->PIName, iFilter->PIVersion, iFilter->PIRevision ); - - // Just open a warning popup with the filter name and version. - // You can open a much nicer requester if you want. - TVWarning( iFilter, text ); -} - - -/**************************************************************************************/ -// Function called at Aura startup, when the filter is loaded. -// Should do as little as possible to keep Aura's startup time small. 
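PI_Open below creates the Communicator only when the WEBSOCKET_URL environment variable is present; without it the plugin loads but never connects, so its buttons stay inert. Roughly this guard, sketched in Python with placeholder prints:

```
import os

# Mirror of the WEBSOCKET_URL guard in PI_Open; prints are placeholders.
websocket_url = os.environ.get("WEBSOCKET_URL")
if websocket_url:
    print("Would connect to", websocket_url, "and register callbacks")
else:
    print("WEBSOCKET_URL not set; plugin stays idle")
```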
- -int FAR PASCAL PI_Open( PIFilter* iFilter ) -{ - Data.current_filter = iFilter; - char tmp[256]; - - strcpy( iFilter->PIName, plugin_label.c_str() ); - iFilter->PIVersion = 1; - iFilter->PIRevision = 0; - - // If this plugin was the one open at Aura shutdown, re-open it - TVReadUserString( iFilter, iFilter->PIName, "Open", tmp, "0", 255 ); - if( atoi( tmp ) ) - { - PI_Parameters( iFilter, NULL ); // NULL as iArg means "open the requester" - } - char *env_value = std::getenv("WEBSOCKET_URL"); - if (env_value != NULL) { - communication = new Communicator(env_value); - communication->connect(); - register_callbacks(); - } - return 1; // OK -} - - -/**************************************************************************************/ -// Aura shutdown: we make all the necessary cleanup - -void FAR PASCAL PI_Close( PIFilter* iFilter ) -{ - if( Data.mLocalFile ) - { - TVCloseLocalFile( iFilter, Data.mLocalFile ); - } - if( Data.mReq ) - { - TVCloseReq( iFilter, Data.mReq ); - } - if (communication != nullptr) { - communication->endpoint.close_connection(); - delete communication; - } -} - - -int newMenuItemsProcess(PIFilter* iFilter) { - // Menu items defined with `define_menu` should be propagated. - - // Change flag that there are new menu items (avoid infinite loop) - Data.newMenuItems = false; - // Skip if requester does not exists - if (Data.mReq == 0) { - return 0; - } - // Remove all previous menu items - for (int menu_id : Data.menuItemsIds) - { - TVRemoveButtonReq(iFilter, Data.mReq, menu_id); - } - // Clear caches - Data.menuItemsById.clear(); - Data.menuItemsIds.clear(); - - // We use a variable to contains the vertical position of the buttons. - // Each time we create a button, we add its size to this variable. - // This makes it very easy to add/remove/displace buttons in a requester. - int x_pos = 9; - int y_pos = 5; - - // Menu width - int menu_width = 185; - // Single menu item width - int btn_width = menu_width - 19; - // Single row height (btn height is 18) - int row_height = 20; - // Additional height to menu - int height_offset = 5; - - // This is a very simple requester, so we create it's content right here instead - // of waiting for the PICBREQ_OPEN message... - // Not recommended for more complex requesters. (see the other examples) - - const char *menu_title = TXT_REQUESTER; - if (Data.menuItems.contains("title")) - { - menu_title = Data.menuItems["title"].get()->c_str(); - } - // Sets the title of the requester. 
- TVSetReqTitle( iFilter, Data.mReq, menu_title ); - - // Resize menu - // First get current position and sizes (we only need the position) - int current_x = 0; - int current_y = 0; - int current_width = 0; - int current_height = 0; - TVInfoReq(iFilter, Data.mReq, ¤t_x, ¤t_y, ¤t_width, ¤t_height); - - // Calculate new height - int menu_height = (row_height * Data.menuItems["menu_items"].size()) + height_offset; - // Resize - TVResizeReq(iFilter, Data.mReq, current_x, current_y, menu_width, menu_height); - - // Add menu items - int item_counter = 1; - for (auto& item : Data.menuItems["menu_items"].items()) - { - int item_id = item_counter * 10; - item_counter ++; - std::string item_id_str = std::to_string(item_id); - nlohmann::json item_data = item.value(); - const char *item_label = item_data["label"].get()->c_str(); - const char *help_text = item_data["help"].get()->c_str(); - std::string item_callback = item_data["callback"].get(); - TVAddButtonReq(iFilter, Data.mReq, x_pos, y_pos, btn_width, 0, item_id, PIRBF_BUTTON_NORMAL|PIRBF_BUTTON_ACTION, item_label); - TVSetButtonInfoText( iFilter, Data.mReq, item_id, help_text ); - y_pos += row_height; - - Data.menuItemsById[std::to_string(item_id)] = item_callback; - Data.menuItemsIds.push_back(item_id); - } - - return 1; -} - -/**************************************************************************************/ -// we have something to do ! - -int FAR PASCAL PI_Parameters( PIFilter* iFilter, char* iArg ) -{ - if( !iArg ) - { - - // If the requester is not open, we open it. - if( Data.mReq == 0) - { - // Create empty requester because menu items are defined with - // `define_menu` callback - DWORD req = TVOpenFilterReqEx( - iFilter, - 185, - 20, - NULL, - NULL, - PIRF_STANDARD_REQ | PIRF_COLLAPSABLE_REQ, - FILTERREQ_NO_TBAR - ); - if( req == 0 ) - { - TVWarning( iFilter, TXT_REQUESTER_ERROR ); - return 0; - } - - Data.mReq = req; - - // This is a very simple requester, so we create it's content right here instead - // of waiting for the PICBREQ_OPEN message... - // Not recommended for more complex requesters. (see the other examples) - - // Sets the title of the requester. - TVSetReqTitle( iFilter, Data.mReq, TXT_REQUESTER ); - // Request to listen to ticks - TVGrabTicks(iFilter, req, PITICKS_FLAG_ON); - - if ( Data.firstParams == true ) { - Data.firstParams = false; - } else { - newMenuItemsProcess(iFilter); - } - } - else - { - // If it is already open, we just put it on front of all other requesters. - TVReqToFront( iFilter, Data.mReq ); - } - } - - return 1; -} - -/**************************************************************************************/ -// something happened that needs our attention. -// Global variable where current button up data are stored -std::string button_up_item_id_str; -int FAR PASCAL PI_Msg( PIFilter* iFilter, INTPTR iEvent, INTPTR iReq, INTPTR* iArgs ) -{ - Data.current_filter = iFilter; - // what did happen ? - switch( iEvent ) - { - // The user just 'clicked' on a normal button - case PICBREQ_BUTTON_UP: - button_up_item_id_str = std::to_string(iArgs[0]); - if (Data.menuItemsById.contains(button_up_item_id_str)) - { - std::string callback_name = Data.menuItemsById[button_up_item_id_str].get(); - communication->call_method(callback_name, nlohmann::json::array()); - } - TVExecute( iFilter ); - break; - - // The requester was just closed. 
- case PICBREQ_CLOSE: - // requester doesn't exists anymore - Data.mReq = 0; - - char tmp[256]; - // Save the requester state (opened or closed) - // iArgs[4] contains a flag which tells us if the requester - // has been closed by the user (flag=0) or by Aura's shutdown (flag=1). - // If it was by Aura's shutdown, that means this requester was the - // last one open, so we should reopen this one the next time Aura - // is started. Else we won't open it next time. - sprintf( tmp, "%d", (int)(iArgs[4]) ); - - // Save it in Aura's init file. - TVWriteUserString( iFilter, iFilter->PIName, "Open", tmp ); - break; - - case PICBREQ_TICKS: - if (Data.newMenuItems) - { - newMenuItemsProcess(iFilter); - } - if (communication != nullptr) { - communication->process_requests(); - } - } - - return 1; -} - - -/**************************************************************************************/ -// Start of the 'execution' of the filter for a new sequence. -// - iNumImages contains the total number of frames to be processed. -// Here you should allocate memory that is used for all frames, -// and precompute all the stuff that doesn't change from frame to frame. - - -int FAR PASCAL PI_SequenceStart( PIFilter* iFilter, int iNumImages ) -{ - // In this simple example we don't have anything to allocate/precompute. - - // 1 means 'continue', 0 means 'error, abort' (like 'not enough memory') - return 1; -} - - -// Here you should cleanup what you've done in PI_SequenceStart - -void FAR PASCAL PI_SequenceFinish( PIFilter* iFilter ) -{} - - -/**************************************************************************************/ -// This is called before each frame. -// Here you should allocate memory and precompute all the stuff you can. - -int FAR PASCAL PI_Start( PIFilter* iFilter, double iPos, double iSize ) -{ - return 1; -} - - -void FAR PASCAL PI_Finish( PIFilter* iFilter ) -{ - // nothing special to cleanup -} - - -/**************************************************************************************/ -// 'Execution' of the filter. 
int FAR PASCAL PI_Work( PIFilter* iFilter ) -{ - return 1; -} diff --git a/server_addon/tvpaint/client/ayon_tvpaint/tvpaint_plugin/plugin_code/library.def b/server_addon/tvpaint/client/ayon_tvpaint/tvpaint_plugin/plugin_code/library.def deleted file mode 100644 index 882f2b4719..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/tvpaint_plugin/plugin_code/library.def +++ /dev/null @@ -1,10 +0,0 @@ -LIBRARY Avalonplugin -EXPORTS - PI_Msg - PI_Open - PI_About - PI_Parameters - PI_Start - PI_Work - PI_Finish - PI_Close diff --git a/server_addon/tvpaint/client/ayon_tvpaint/tvpaint_plugin/plugin_files/windows_x64/plugin/OpenPypePlugin.dll b/server_addon/tvpaint/client/ayon_tvpaint/tvpaint_plugin/plugin_files/windows_x64/plugin/OpenPypePlugin.dll deleted file mode 100644 index 9c6e969e24..0000000000 Binary files a/server_addon/tvpaint/client/ayon_tvpaint/tvpaint_plugin/plugin_files/windows_x64/plugin/OpenPypePlugin.dll and /dev/null differ diff --git a/server_addon/tvpaint/client/ayon_tvpaint/tvpaint_plugin/plugin_files/windows_x86/plugin/OpenPypePlugin.dll b/server_addon/tvpaint/client/ayon_tvpaint/tvpaint_plugin/plugin_files/windows_x86/plugin/OpenPypePlugin.dll deleted file mode 100644 index b573476a21..0000000000 Binary files a/server_addon/tvpaint/client/ayon_tvpaint/tvpaint_plugin/plugin_files/windows_x86/plugin/OpenPypePlugin.dll and /dev/null differ diff --git a/server_addon/tvpaint/client/ayon_tvpaint/version.py b/server_addon/tvpaint/client/ayon_tvpaint/version.py deleted file mode 100644 index cbd6a19229..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/version.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -"""Package declaring AYON addon 'tvpaint' version.""" -__version__ = "0.2.2" diff --git a/server_addon/tvpaint/client/ayon_tvpaint/worker/__init__.py b/server_addon/tvpaint/client/ayon_tvpaint/worker/__init__.py deleted file mode 100644 index 69208a7566..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/worker/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -from .worker_job import ( - JobFailed, - ExecuteSimpleGeorgeScript, - ExecuteGeorgeScript, - CollectSceneData, - SenderTVPaintCommands, - ProcessTVPaintCommands -) - -from .worker import main - -__all__ = ( - "JobFailed", - "ExecuteSimpleGeorgeScript", - "ExecuteGeorgeScript", - "CollectSceneData", - "SenderTVPaintCommands", - "ProcessTVPaintCommands", - - "main" -) diff --git a/server_addon/tvpaint/client/ayon_tvpaint/worker/init_file.tvpp b/server_addon/tvpaint/client/ayon_tvpaint/worker/init_file.tvpp deleted file mode 100644 index 22170b45bc..0000000000 Binary files a/server_addon/tvpaint/client/ayon_tvpaint/worker/init_file.tvpp and /dev/null differ diff --git a/server_addon/tvpaint/client/ayon_tvpaint/worker/worker.py b/server_addon/tvpaint/client/ayon_tvpaint/worker/worker.py deleted file mode 100644 index 3a03b54eae..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/worker/worker.py +++ /dev/null @@ -1,156 +0,0 @@ -import os -import signal -import time -import tempfile -import shutil -import asyncio - -from ayon_tvpaint.api.communication_server import ( - BaseCommunicator, - CommunicationWrapper -) -from ayon_jobqueue.job_workers import WorkerJobsConnection - -from .worker_job import ProcessTVPaintCommands - - -class TVPaintWorkerCommunicator(BaseCommunicator): - """Modified communicator which takes care of processing jobs. - - Received jobs are sent to TVPaint by parsing 'ProcessTVPaintCommands'.
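A job handed to this worker is a plain dict: a workfile path relative to the job queue root, a function name, and serialized commands; _process_job below only understands the "commands" function. All values here are illustrative (the id would normally be a uuid4 string):

```
# Shape of a job as consumed by _process_job; all values illustrative.
job = {
    "data": {
        "workfile": "project/work/scene_v001.tvpp",
        "function": "commands",
        "commands": [
            {"id": "hypothetical-uuid",
             "command": "execute_george_simple",
             "script": "tv_version"},
        ],
    }
}

job_data = job["data"]
if job_data.get("function") == "commands":
    # ProcessTVPaintCommands(workfile, commands, communicator).execute()
    print("Would execute", len(job_data["commands"]), "command(s)")
```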
- """ - def __init__(self, server_url): - super().__init__() - - self.return_code = 1 - self._server_url = server_url - self._worker_connection = None - - def _start_webserver(self): - """Create connection to workers server before TVPaint server.""" - loop = self.websocket_server.loop - self._worker_connection = WorkerJobsConnection( - self._server_url, "tvpaint", loop - ) - asyncio.ensure_future( - self._worker_connection.main_loop(register_worker=False), - loop=loop - ) - - super()._start_webserver() - - def _open_init_file(self): - """Open init TVPaint file. - - File triggers dialog missing path to audio file which must be closed - once and is ignored for rest of running process. - """ - current_dir = os.path.dirname(os.path.abspath(__file__)) - init_filepath = os.path.join(current_dir, "init_file.tvpp") - with tempfile.NamedTemporaryFile( - mode="w", prefix="a_tvp_", suffix=".tvpp" - ) as tmp_file: - tmp_filepath = tmp_file.name.replace("\\", "/") - - shutil.copy(init_filepath, tmp_filepath) - george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(tmp_filepath) - self.execute_george_through_file(george_script) - self.execute_george("tv_projectclose") - os.remove(tmp_filepath) - - def _on_client_connect(self, *args, **kwargs): - super()._on_client_connect(*args, **kwargs) - self._open_init_file() - # Register as "ready to work" worker - self._worker_connection.register_as_worker() - - def stop(self): - """Stop worker connection and TVPaint server.""" - self._worker_connection.stop() - self.return_code = 0 - super().stop() - - @property - def current_job(self): - """Retrieve job which should be processed.""" - if self._worker_connection: - return self._worker_connection.current_job - return None - - def _check_process(self): - if self.process is None: - return True - - if self.process.poll() is not None: - asyncio.ensure_future( - self._worker_connection.disconnect(), - loop=self.websocket_server.loop - ) - self._exit() - return False - return True - - def _process_job(self): - job = self.current_job - if job is None: - return - - # Prepare variables used for sendig - success = False - message = "Unknown function" - data = None - job_data = job["data"] - workfile = job_data["workfile"] - # Currently can process only "commands" function - if job_data.get("function") == "commands": - try: - commands = ProcessTVPaintCommands( - workfile, job_data["commands"], self - ) - commands.execute() - data = commands.response_data() - success = True - message = "Executed" - - except Exception as exc: - message = "Error on worker: {}".format(str(exc)) - - self._worker_connection.finish_job(success, message, data) - - def main_loop(self): - """Main loop where jobs are processed. - - Server is stopped by killing this process or TVPaint process. - """ - while self.server_is_running: - if self._check_process(): - self._process_job() - time.sleep(1) - - return self.return_code - - -def _start_tvpaint(tvpaint_executable_path, server_url): - communicator = TVPaintWorkerCommunicator(server_url) - CommunicationWrapper.set_communicator(communicator) - communicator.launch([tvpaint_executable_path]) - - -def main(tvpaint_executable_path, server_url): - # Register terminal signal handler - def signal_handler(*_args): - print("Termination signal received. 
Stopping.") - if CommunicationWrapper.communicator is not None: - CommunicationWrapper.communicator.stop() - - signal.signal(signal.SIGINT, signal_handler) - signal.signal(signal.SIGTERM, signal_handler) - - _start_tvpaint(tvpaint_executable_path, server_url) - - communicator = CommunicationWrapper.communicator - if communicator is None: - print("Communicator is not set") - return 1 - - return communicator.main_loop() diff --git a/server_addon/tvpaint/client/ayon_tvpaint/worker/worker_job.py b/server_addon/tvpaint/client/ayon_tvpaint/worker/worker_job.py deleted file mode 100644 index db91010c47..0000000000 --- a/server_addon/tvpaint/client/ayon_tvpaint/worker/worker_job.py +++ /dev/null @@ -1,537 +0,0 @@ -import os -import tempfile -import inspect -import copy -import json -import time -from uuid import uuid4 -from abc import ABCMeta, abstractmethod, abstractproperty - -import six - -from ayon_core.lib import Logger -from ayon_core.addons import AddonsManger - - -TMP_FILE_PREFIX = "opw_tvp_" - - -class JobFailed(Exception): - """Raised when job was sent and finished unsuccessfully.""" - def __init__(self, job_status): - job_state = job_status["state"] - job_message = job_status["message"] or "Unknown issue" - error_msg = ( - "Job didn't finish properly." - " Job state: \"{}\" | Job message: \"{}\"" - ).format(job_state, job_message) - - self.job_status = job_status - - super().__init__(error_msg) - - -@six.add_metaclass(ABCMeta) -class BaseCommand: - """Abstract TVPaint command which can be executed through worker. - - Each command must have unique name and implemented 'execute' and - 'from_existing' methods. - - Command also have id which is created on command creation. - - The idea is that command is just a data container on sender side send - through server to a worker where is replicated one by one, executed and - result sent back to sender through server. - """ - @abstractproperty - def name(self): - """Command name (must be unique).""" - pass - - def __init__(self, data=None): - if data is None: - data = {} - else: - data = copy.deepcopy(data) - - # Use 'id' from data when replicating on process side - command_id = data.get("id") - if command_id is None: - command_id = str(uuid4()) - data["id"] = command_id - data["command"] = self.name - - self._parent = None - self._result = None - self._command_data = data - self._done = False - - def job_queue_root(self): - """Access to job queue root. - - Job queue root is shared access point to files shared across senders - and workers. - """ - if self._parent is None: - return None - return self._parent.job_queue_root() - - def set_parent(self, parent): - self._parent = parent - - @property - def id(self): - """Command id.""" - return self._command_data["id"] - - @property - def parent(self): - """Parent of command expected type of 'TVPaintCommands'.""" - return self._parent - - @property - def communicator(self): - """TVPaint communicator. - - Available only on worker side. 
- """ - return self._parent.communicator - - @property - def done(self): - """Is command done.""" - return self._done - - def set_done(self): - """Change state of done.""" - self._done = True - - def set_result(self, result): - """Set result of executed command.""" - self._result = result - - def result(self): - """Result of command.""" - return copy.deepcopy(self._result) - - def response_data(self): - """Data send as response to sender.""" - return { - "id": self.id, - "result": self._result, - "done": self._done - } - - def command_data(self): - """Raw command data.""" - return copy.deepcopy(self._command_data) - - @abstractmethod - def execute(self): - """Execute command on worker side.""" - pass - - @classmethod - @abstractmethod - def from_existing(cls, data): - """Recreate object based on passed data.""" - pass - - def execute_george(self, george_script): - """Execute george script in TVPaint.""" - return self.parent.execute_george(george_script) - - def execute_george_through_file(self, george_script): - """Execute george script through temp file in TVPaint.""" - return self.parent.execute_george_through_file(george_script) - - -class ExecuteSimpleGeorgeScript(BaseCommand): - """Execute simple george script in TVPaint. - - Args: - script(str): Script that will be executed. - """ - name = "execute_george_simple" - - def __init__(self, script, data=None): - data = data or {} - data["script"] = script - self._script = script - super().__init__(data) - - def execute(self): - self._result = self.execute_george(self._script) - - @classmethod - def from_existing(cls, data): - script = data.pop("script") - return cls(script, data) - - -class ExecuteGeorgeScript(BaseCommand): - """Execute multiline george script in TVPaint. - - Args: - script_lines(list): Lines that will be executed in george script - through temp george file. - tmp_file_keys(list): List of formatting keys in george script that - require replacement with path to a temp file where result will be - stored. The content of file is stored to result by the key. - root_dir_key(str): Formatting key that will be replaced in george - script with job queue root which can be different on worker side. - data(dict): Raw data about command. 
- """ - name = "execute_george_through_file" - - def __init__( - self, script_lines, tmp_file_keys=None, root_dir_key=None, data=None - ): - data = data or {} - if not tmp_file_keys: - tmp_file_keys = data.get("tmp_file_keys") or [] - - data["script_lines"] = script_lines - data["tmp_file_keys"] = tmp_file_keys - data["root_dir_key"] = root_dir_key - self._script_lines = script_lines - self._tmp_file_keys = tmp_file_keys - self._root_dir_key = root_dir_key - super().__init__(data) - - def execute(self): - filepath_by_key = {} - script = self._script_lines - if isinstance(script, list): - script = "\n".join(script) - - # Replace temporary files in george script - for key in self._tmp_file_keys: - output_file = tempfile.NamedTemporaryFile( - mode="w", prefix=TMP_FILE_PREFIX, suffix=".txt", delete=False - ) - output_file.close() - format_key = "{" + key + "}" - output_path = output_file.name.replace("\\", "/") - script = script.replace(format_key, output_path) - filepath_by_key[key] = output_path - - # Replace job queue root in script - if self._root_dir_key: - job_queue_root = self.job_queue_root() - format_key = "{" + self._root_dir_key + "}" - script = script.replace( - format_key, job_queue_root.replace("\\", "/") - ) - - # Execute the script - self.execute_george_through_file(script) - - # Store result of temporary files - result = {} - for key, filepath in filepath_by_key.items(): - with open(filepath, "r") as stream: - data = stream.read() - result[key] = data - os.remove(filepath) - - self._result = result - - @classmethod - def from_existing(cls, data): - """Recreate the object from data.""" - script_lines = data.pop("script_lines") - tmp_file_keys = data.pop("tmp_file_keys", None) - root_dir_key = data.pop("root_dir_key", None) - return cls(script_lines, tmp_file_keys, root_dir_key, data) - - -class CollectSceneData(BaseCommand): - """Helper command which will collect all useful info about workfile. - - Result is dictionary with all layers data, exposure frames by layer ids - pre/post behavior of layers by their ids, group information and scene data. - """ - name = "collect_scene_data" - - def execute(self): - from ayon_tvpaint.api.lib import ( - get_layers_data, - get_groups_data, - get_layers_pre_post_behavior, - get_layers_exposure_frames, - get_scene_data - ) - - groups_data = get_groups_data(communicator=self.communicator) - layers_data = get_layers_data(communicator=self.communicator) - layer_ids = [ - layer_data["layer_id"] - for layer_data in layers_data - ] - pre_post_beh_by_layer_id = get_layers_pre_post_behavior( - layer_ids, communicator=self.communicator - ) - exposure_frames_by_layer_id = get_layers_exposure_frames( - layer_ids, layers_data, communicator=self.communicator - ) - - self._result = { - "layers_data": layers_data, - "exposure_frames_by_layer_id": exposure_frames_by_layer_id, - "pre_post_beh_by_layer_id": pre_post_beh_by_layer_id, - "groups_data": groups_data, - "scene_data": get_scene_data(self.communicator) - } - - @classmethod - def from_existing(cls, data): - return cls(data) - - -@six.add_metaclass(ABCMeta) -class TVPaintCommands: - """Wrapper around TVPaint commands to be able send multiple commands. - - Commands may send one or multiple commands at once. Also gives api access - for commands info. - - Base for sender and receiver which are extending the logic for their - purposes. One of differences is preparation of workfile path. - - Args: - workfile(str): Path to workfile. - job_queue_module(JobQueueModule): Object of OpenPype module JobQueue. 
- """ - def __init__(self, workfile, job_queue_module=None): - self._log = None - self._commands = [] - self._command_classes_by_name = None - if job_queue_module is None: - manager = AddonsManger() - job_queue_module = manager["job_queue"] - self._job_queue_module = job_queue_module - - self._workfile = self._prepare_workfile(workfile) - - @abstractmethod - def _prepare_workfile(self, workfile): - """Modification of workfile path on initialization to match platorm.""" - pass - - def job_queue_root(self): - """Job queue root for current platform using current settings.""" - return self._job_queue_module.get_jobs_root_from_settings() - - @property - def log(self): - """Access to logger object.""" - if self._log is None: - self._log = Logger.get_logger(self.__class__.__name__) - return self._log - - @property - def classes_by_name(self): - """Prepare commands classes for validation and recreation of commands. - - It is expected that all commands are defined in this python file so - we're looking for all implementation of BaseCommand in globals. - """ - if self._command_classes_by_name is None: - command_classes_by_name = {} - for attr in globals().values(): - if ( - not inspect.isclass(attr) - or not issubclass(attr, BaseCommand) - or attr is BaseCommand - ): - continue - - if inspect.isabstract(attr): - self.log.debug( - "Skipping abstract class {}".format(attr.__name__) - ) - command_classes_by_name[attr.name] = attr - self._command_classes_by_name = command_classes_by_name - - return self._command_classes_by_name - - def add_command(self, command): - """Add command to process.""" - command.set_parent(self) - self._commands.append(command) - - def result(self): - """Result of commands in list in which they were processed.""" - return [ - command.result() - for command in self._commands - ] - - def response_data(self): - """Data which should be send from worker.""" - return [ - command.response_data() - for command in self._commands - ] - - -class SenderTVPaintCommands(TVPaintCommands): - """Sender implementation of TVPaint Commands.""" - def _prepare_workfile(self, workfile): - """Remove job queue root from workfile path. - - It is expected that worker will add it's root before passed workfile. - """ - new_workfile = workfile.replace("\\", "/") - job_queue_root = self.job_queue_root().replace("\\", "/") - if job_queue_root not in new_workfile: - raise ValueError(( - "Workfile is not located in JobQueue root." - " Workfile path: \"{}\". JobQueue root: \"{}\"" - ).format(workfile, job_queue_root)) - return new_workfile.replace(job_queue_root, "") - - def commands_data(self): - """Commands data to be able recreate them.""" - return [ - command.command_data() - for command in self._commands - ] - - def to_job_data(self): - """Convert commands to job data before sending to workers server.""" - return { - "workfile": self._workfile, - "function": "commands", - "commands": self.commands_data() - } - - def set_result(self, result): - commands_by_id = { - command.id: command - for command in self._commands - } - - for item in result: - command = commands_by_id[item["id"]] - command.set_result(item["result"]) - command.set_done() - - def _send_job(self): - """Send job to a workers server.""" - # Send job data to job queue server - job_data = self.to_job_data() - self.log.debug("Sending job to JobQueue server.\n{}".format( - json.dumps(job_data, indent=4) - )) - job_id = self._job_queue_module.send_job("tvpaint", job_data) - self.log.info(( - "Job sent to JobQueue server and got id \"{}\"." 
-            " Waiting for the job to finish."
-        ).format(job_id))
-
-        return job_id
-
-    def send_job_and_wait(self):
-        """Send job to the workers server and wait for the response.
-
-        Result of the job is stored on the object.
-
-        Raises:
-            JobFailed: When job was finished but not successfully.
-        """
-        job_id = self._send_job()
-        while True:
-            job_status = self._job_queue_module.get_job_status(job_id)
-            if job_status["done"]:
-                break
-            time.sleep(1)
-
-        # Check if job state is done
-        if job_status["state"] != "done":
-            raise JobFailed(job_status)
-
-        self.set_result(job_status["result"])
-
-        self.log.debug("Job is done and result is stored.")
-
-
-class ProcessTVPaintCommands(TVPaintCommands):
-    """Worker side of TVPaint Commands.
-
-    It is expected this object is created only on the worker's side, from
-    existing data loaded from a job.
-
-    Workfile path logic is based on 'SenderTVPaintCommands'.
-    """
-    def __init__(self, workfile, commands, communicator):
-        super(ProcessTVPaintCommands, self).__init__(workfile)
-
-        self._communicator = communicator
-
-        self.commands_from_data(commands)
-
-    def _prepare_workfile(self, workfile):
-        """Prepend job queue root before the passed workfile."""
-        workfile = workfile.replace("\\", "/")
-        job_queue_root = self.job_queue_root().replace("\\", "/")
-        new_workfile = "/".join([job_queue_root, workfile])
-        while "//" in new_workfile:
-            new_workfile = new_workfile.replace("//", "/")
-        return os.path.normpath(new_workfile)
-
-    @property
-    def communicator(self):
-        """Access to TVPaint communicator."""
-        return self._communicator
-
-    def commands_from_data(self, commands_data):
-        """Recreate commands from passed data."""
-        for command_data in commands_data:
-            command_name = command_data["command"]
-
-            klass = self.classes_by_name[command_name]
-            command = klass.from_existing(command_data)
-            self.add_command(command)
-
-    def execute_george(self, george_script):
-        """Helper method to execute george script."""
-        return self.communicator.execute_george(george_script)
-
-    def execute_george_through_file(self, george_script):
-        """Helper method to execute george script through temp file."""
-        temporary_file = tempfile.NamedTemporaryFile(
-            mode="w", prefix=TMP_FILE_PREFIX, suffix=".grg", delete=False
-        )
-        temporary_file.write(george_script)
-        temporary_file.close()
-        temp_file_path = temporary_file.name.replace("\\", "/")
-        self.execute_george("tv_runscript {}".format(temp_file_path))
-        os.remove(temp_file_path)
-
-    def _open_workfile(self):
-        """Open workfile in TVPaint."""
-        workfile = self._workfile
-        print("Opening workfile {}".format(workfile))
-        george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(workfile)
-        self.execute_george_through_file(george_script)
-
-    def _close_workfile(self):
-        """Close workfile in TVPaint."""
-        print("Closing workfile")
-        self.execute_george_through_file("tv_projectclose")
-
-    def execute(self):
-        """Execute commands."""
-        # First open the workfile
-        self._open_workfile()
-        # Execute commands one by one
-        # TODO maybe stop processing when command fails?
-        print("Commands execution started ({})".format(len(self._commands)))
-        for command in self._commands:
-            command.execute()
-            command.set_done()
-        # Finally close workfile
-        self._close_workfile()
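The round trip removed here is easiest to read from the caller's side: the sender strips the JobQueue root from the workfile path, serializes the commands with to_job_data(), and polls until the worker (which prepends its own root and recreates the commands via the classes_by_name registry) reports the job as done. A minimal sender-side sketch; the import path is hypothetical and it assumes CollectSceneData can be constructed without arguments:

    # Hypothetical import path for the deleted worker module.
    from ayon_tvpaint.worker import (
        CollectSceneData,
        JobFailed,
        SenderTVPaintCommands,
    )

    # The workfile must live under the configured JobQueue root; the sender
    # strips the root and the worker prepends its own platform root.
    commands = SenderTVPaintCommands("/jobs_root/project/scene.tvpp")
    commands.add_command(CollectSceneData())  # assumes the data arg is optional

    try:
        # Polls 'get_job_status' once per second until the job reports done
        commands.send_job_and_wait()
    except JobFailed as exc:
        print("Job did not finish successfully: {}".format(exc))
    else:
        # Results come back in the order the commands were added
        scene_data = commands.result()[0]
        print(scene_data["groups_data"])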
diff --git a/server_addon/tvpaint/package.py b/server_addon/tvpaint/package.py
deleted file mode 100644
index c6c7194312..0000000000
--- a/server_addon/tvpaint/package.py
+++ /dev/null
@@ -1,11 +0,0 @@
-name = "tvpaint"
-title = "TVPaint"
-version = "0.2.2"
-client_dir = "ayon_tvpaint"
-
-ayon_required_addons = {
-    "core": ">0.3.2",
-}
-ayon_compatible_addons = {
-    "jobqueue": ">=1.1.0",
-}
diff --git a/server_addon/tvpaint/pyproject.toml b/server_addon/tvpaint/pyproject.toml
deleted file mode 100644
index 46d0611d74..0000000000
--- a/server_addon/tvpaint/pyproject.toml
+++ /dev/null
@@ -1,6 +0,0 @@
-[project]
-name="tvpaint"
-description="AYON TVPaint addon."
-
-[ayon.runtimeDependencies]
-aiohttp_json_rpc = "*"
diff --git a/server_addon/tvpaint/server/__init__.py b/server_addon/tvpaint/server/__init__.py
deleted file mode 100644
index 658dcf0bb6..0000000000
--- a/server_addon/tvpaint/server/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from typing import Type
-
-from ayon_server.addons import BaseServerAddon
-
-from .settings import TvpaintSettings, DEFAULT_VALUES
-
-
-class TvpaintAddon(BaseServerAddon):
-    settings_model: Type[TvpaintSettings] = TvpaintSettings
-
-    async def get_default_settings(self):
-        settings_model_cls = self.get_settings_model()
-        return settings_model_cls(**DEFAULT_VALUES)
diff --git a/server_addon/tvpaint/server/settings/__init__.py b/server_addon/tvpaint/server/settings/__init__.py
deleted file mode 100644
index abee32e897..0000000000
--- a/server_addon/tvpaint/server/settings/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from .main import (
-    TvpaintSettings,
-    DEFAULT_VALUES,
-)
-
-
-__all__ = (
-    "TvpaintSettings",
-    "DEFAULT_VALUES",
-)
diff --git a/server_addon/tvpaint/server/settings/create_plugins.py b/server_addon/tvpaint/server/settings/create_plugins.py
deleted file mode 100644
index b3351dca28..0000000000
--- a/server_addon/tvpaint/server/settings/create_plugins.py
+++ /dev/null
@@ -1,136 +0,0 @@
-from ayon_server.settings import BaseSettingsModel, SettingsField
-
-
-class CreateWorkfileModel(BaseSettingsModel):
-    enabled: bool = SettingsField(True)
-    default_variant: str = SettingsField(title="Default variant")
-    default_variants: list[str] = SettingsField(
-        default_factory=list, title="Default variants")
-
-
-class CreateReviewModel(BaseSettingsModel):
-    enabled: bool = SettingsField(True)
-    active_on_create: bool = SettingsField(True, title="Active by default")
-    default_variant: str = SettingsField(title="Default variant")
-    default_variants: list[str] = SettingsField(
-        default_factory=list, title="Default variants")
-
-
-class CreateRenderSceneModel(BaseSettingsModel):
-    enabled: bool = SettingsField(True)
-    active_on_create: bool = SettingsField(True, title="Active by default")
-    mark_for_review: bool = SettingsField(True, title="Review by default")
-    default_pass_name: str = SettingsField(title="Default beauty pass")
-    default_variant: str = SettingsField(title="Default variant")
-    default_variants: list[str] = SettingsField(
-        default_factory=list, title="Default variants")
-
-
-class CreateRenderLayerModel(BaseSettingsModel):
-    mark_for_review: bool = SettingsField(True, title="Review by default")
-    default_pass_name: str = SettingsField(title="Default beauty pass")
-    default_variant: str = SettingsField(title="Default variant")
-    default_variants: list[str] = SettingsField(
-        default_factory=list, title="Default variants")
-
-
-class CreateRenderPassModel(BaseSettingsModel):
-    mark_for_review: bool = SettingsField(True, title="Review by default")
-    default_variant: str = SettingsField(title="Default variant")
-    default_variants: list[str] = SettingsField(
-        default_factory=list, title="Default variants")
-
-
-class AutoDetectCreateRenderModel(BaseSettingsModel):
-    """The creator tries to auto-detect Render Layers and Render Passes in scene.
-
-    The group name is used as the variant of a Render Layer and the TVPaint
-    layer name is used as the variant of a Render Pass.
-
-    Groups can be renamed by their order of use in the scene. The renaming
-    template may contain a '{group_index}' formatting key, which is filled
-    with the position index of the group.
-    - Template: 'L{group_index}'
-    - Group offset: '10'
-    - Group padding: '3'
-
-    Would create group names "L010", "L020", ...
-    """
-
-    enabled: bool = SettingsField(True)
-    allow_group_rename: bool = SettingsField(title="Allow group rename")
-    group_name_template: str = SettingsField(title="Group name template")
-    group_idx_offset: int = SettingsField(
-        1, title="Group index Offset", ge=1
-    )
-    group_idx_padding: int = SettingsField(
-        4, title="Group index Padding", ge=1
-    )
-
-
-class CreatePluginsModel(BaseSettingsModel):
-    create_workfile: CreateWorkfileModel = SettingsField(
-        default_factory=CreateWorkfileModel,
-        title="Create Workfile"
-    )
-    create_review: CreateReviewModel = SettingsField(
-        default_factory=CreateReviewModel,
-        title="Create Review"
-    )
-    create_render_scene: CreateRenderSceneModel = SettingsField(
-        default_factory=CreateRenderSceneModel,
-        title="Create Render Scene"
-    )
-    create_render_layer: CreateRenderLayerModel = SettingsField(
-        default_factory=CreateRenderLayerModel,
-        title="Create Render Layer"
-    )
-    create_render_pass: CreateRenderPassModel = SettingsField(
-        default_factory=CreateRenderPassModel,
-        title="Create Render Pass"
-    )
-    auto_detect_render: AutoDetectCreateRenderModel = SettingsField(
-        default_factory=AutoDetectCreateRenderModel,
-        title="Auto-Detect Create Render",
-    )
-
-
-DEFAULT_CREATE_SETTINGS = {
-    "create_workfile": {
-        "enabled": True,
-        "default_variant": "Main",
-        "default_variants": []
-    },
-    "create_review": {
-        "enabled": True,
-        "active_on_create": True,
-        "default_variant": "Main",
-        "default_variants": []
-    },
-    "create_render_scene": {
-        "enabled": True,
-        "active_on_create": False,
-        "mark_for_review": True,
-        "default_pass_name": "beauty",
-        "default_variant": "Main",
-        "default_variants": []
-    },
-    "create_render_layer": {
-        "mark_for_review": False,
-        "default_pass_name": "beauty",
-        "default_variant": "Main",
-        "default_variants": []
-    },
-    "create_render_pass": {
-        "mark_for_review": False,
-        "default_variant": "Main",
-        "default_variants": []
-    },
-    "auto_detect_render": {
-        "enabled": False,
-        "allow_group_rename": True,
-        "group_name_template": "L{group_index}",
-        "group_idx_offset": 10,
-        "group_idx_padding": 3
-    }
-}
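The auto-detect defaults above make the renaming rule concrete: with template "L{group_index}", offset 10 and padding 3, the first groups used in the scene become "L010", "L020", and so on. A small sketch of one plausible reading of that rule; the helper and its exact arithmetic are assumptions, not code from the deleted addon:

    def group_name(position, template="L{group_index}", offset=10, padding=3):
        # 'position' is the group's order of use in the scene, starting at 0
        group_index = str((position + 1) * offset).zfill(padding)
        return template.format(group_index=group_index)

    # group_name(0) == "L010", group_name(1) == "L020", ...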
diff --git a/server_addon/tvpaint/server/settings/filters.py b/server_addon/tvpaint/server/settings/filters.py
deleted file mode 100644
index 9720e82281..0000000000
--- a/server_addon/tvpaint/server/settings/filters.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from ayon_server.settings import BaseSettingsModel, SettingsField
-
-
-class FiltersSubmodel(BaseSettingsModel):
-    _layout = "compact"
-    name: str = SettingsField(title="Name")
-    value: str = SettingsField(
-        "",
-        title="Textarea",
-        widget="textarea",
-    )
-
-
-class PublishFiltersModel(BaseSettingsModel):
-    env_search_replace_values: list[FiltersSubmodel] = SettingsField(
-        default_factory=list
-    )
diff --git a/server_addon/tvpaint/server/settings/imageio.py b/server_addon/tvpaint/server/settings/imageio.py
deleted file mode 100644
index ec7ee19e13..0000000000
--- a/server_addon/tvpaint/server/settings/imageio.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from pydantic import validator
-from ayon_server.settings import BaseSettingsModel, SettingsField
-from ayon_server.settings.validators import ensure_unique_names
-
-
-class ImageIOConfigModel(BaseSettingsModel):
-    """[DEPRECATED] Addon OCIO config settings. Please set the OCIO config
-    path in the Core addon profiles here
-    (ayon+settings://core/imageio/ocio_config_profiles).
-    """
-
-    override_global_config: bool = SettingsField(
-        False,
-        title="Override global OCIO config",
-        description=(
-            "DEPRECATED functionality. Please set the OCIO config path in the "
-            "Core addon profiles here (ayon+settings://core/imageio/"
-            "ocio_config_profiles)."
-        ),
-    )
-    filepath: list[str] = SettingsField(
-        default_factory=list,
-        title="Config path",
-        description=(
-            "DEPRECATED functionality. Please set the OCIO config path in the "
-            "Core addon profiles here (ayon+settings://core/imageio/"
-            "ocio_config_profiles)."
-        ),
-    )
-
-
-class ImageIOFileRuleModel(BaseSettingsModel):
-    name: str = SettingsField("", title="Rule name")
-    pattern: str = SettingsField("", title="Regex pattern")
-    colorspace: str = SettingsField("", title="Colorspace name")
-    ext: str = SettingsField("", title="File extension")
-
-
-class ImageIOFileRulesModel(BaseSettingsModel):
-    activate_host_rules: bool = SettingsField(False)
-    rules: list[ImageIOFileRuleModel] = SettingsField(
-        default_factory=list,
-        title="Rules"
-    )
-
-    @validator("rules")
-    def validate_unique_outputs(cls, value):
-        ensure_unique_names(value)
-        return value
-
-
-class TVPaintImageIOModel(BaseSettingsModel):
-    activate_host_color_management: bool = SettingsField(
-        True, title="Enable Color Management"
-    )
-    ocio_config: ImageIOConfigModel = SettingsField(
-        default_factory=ImageIOConfigModel,
-        title="OCIO config"
-    )
-    file_rules: ImageIOFileRulesModel = SettingsField(
-        default_factory=ImageIOFileRulesModel,
-        title="File Rules"
-    )
diff --git a/server_addon/tvpaint/server/settings/main.py b/server_addon/tvpaint/server/settings/main.py
deleted file mode 100644
index f20e9ecc9c..0000000000
--- a/server_addon/tvpaint/server/settings/main.py
+++ /dev/null
@@ -1,64 +0,0 @@
-from ayon_server.settings import (
-    BaseSettingsModel,
-    SettingsField,
-)
-
-from .imageio import TVPaintImageIOModel
-from .workfile_builder import WorkfileBuilderPlugin
-from .create_plugins import CreatePluginsModel, DEFAULT_CREATE_SETTINGS
-from .publish_plugins import (
-    PublishPluginsModel,
-    LoadPluginsModel,
-    DEFAULT_PUBLISH_SETTINGS,
-)
-
-
-class TvpaintSettings(BaseSettingsModel):
-    imageio: TVPaintImageIOModel = SettingsField(
-        default_factory=TVPaintImageIOModel,
-        title="Color Management (ImageIO)"
-    )
-    stop_timer_on_application_exit: bool = SettingsField(
-        title="Stop timer on application exit")
-    create: CreatePluginsModel = SettingsField(
-        default_factory=CreatePluginsModel,
-        title="Create plugins"
-    )
-    publish: PublishPluginsModel = SettingsField(
-        default_factory=PublishPluginsModel,
-        title="Publish plugins")
-    load: LoadPluginsModel = SettingsField(
-        default_factory=LoadPluginsModel,
-        title="Load plugins")
-    workfile_builder: WorkfileBuilderPlugin = SettingsField(
-        default_factory=WorkfileBuilderPlugin,
-        title="Workfile Builder"
-    )
-
-
-DEFAULT_VALUES = {
-    "stop_timer_on_application_exit": False,
-    "create": DEFAULT_CREATE_SETTINGS,
-    "publish": DEFAULT_PUBLISH_SETTINGS,
-    "load": {
-        "LoadImage": {
-            "defaults": {
-                "stretch": True,
-                "timestretch": True,
-                "preload": True
-            }
-        },
-        "ImportImage": {
-            "defaults": {
-                "stretch": True,
-                "timestretch": True,
-                "preload": True
-            }
-        }
-    },
-    "workfile_builder": {
-        "create_first_version": False,
-        "custom_templates": []
-    },
-    "filters": []
-}
diff --git a/server_addon/tvpaint/server/settings/publish_plugins.py b/server_addon/tvpaint/server/settings/publish_plugins.py
deleted file mode 100644
index db1c7bd11a..0000000000
--- a/server_addon/tvpaint/server/settings/publish_plugins.py
+++ /dev/null
@@ -1,133 +0,0 @@
-from ayon_server.settings import BaseSettingsModel, SettingsField
-from ayon_server.types import ColorRGBA_uint8
-
-
-class CollectRenderInstancesModel(BaseSettingsModel):
-    ignore_render_pass_transparency: bool = SettingsField(
-        title="Ignore Render Pass opacity"
-    )
-
-
-class ExtractSequenceModel(BaseSettingsModel):
-    """Review BG color is used for whole scene review and for thumbnails."""
-    review_bg: ColorRGBA_uint8 = SettingsField(
-        (255, 255, 255, 1.0),
-        title="Review BG color")
-    # review_bg: ColorRGB_uint8 = SettingsField(
-    #     (255, 255, 255),
-    #     title="Review BG color")
-
-
-class ValidatePluginModel(BaseSettingsModel):
-    enabled: bool = True
-    optional: bool = SettingsField(True, title="Optional")
-    active: bool = SettingsField(True, title="Active")
-
-
-def compression_enum():
-    return [
-        {"value": "ZIP", "label": "ZIP"},
-        {"value": "ZIPS", "label": "ZIPS"},
-        {"value": "DWAA", "label": "DWAA"},
-        {"value": "DWAB", "label": "DWAB"},
-        {"value": "PIZ", "label": "PIZ"},
-        {"value": "RLE", "label": "RLE"},
-        {"value": "PXR24", "label": "PXR24"},
-        {"value": "B44", "label": "B44"},
-        {"value": "B44A", "label": "B44A"},
-        {"value": "none", "label": "None"}
-    ]
-
-
-class ExtractConvertToEXRModel(BaseSettingsModel):
-    """WARNING: This plugin does not work on macOS (it uses the OIIO tool)."""
-    enabled: bool = False
-    replace_pngs: bool = True
-
-    exr_compression: str = SettingsField(
-        "ZIP",
-        enum_resolver=compression_enum,
-        title="EXR Compression"
-    )
-
-
-class LoadImageDefaultModel(BaseSettingsModel):
-    _layout = "expanded"
-    stretch: bool = SettingsField(title="Stretch")
-    timestretch: bool = SettingsField(title="TimeStretch")
-    preload: bool = SettingsField(title="Preload")
-
-
-class LoadImageModel(BaseSettingsModel):
-    defaults: LoadImageDefaultModel = SettingsField(
-        default_factory=LoadImageDefaultModel
-    )
-
-
-class PublishPluginsModel(BaseSettingsModel):
-    CollectRenderInstances: CollectRenderInstancesModel = SettingsField(
-        default_factory=CollectRenderInstancesModel,
-        title="Collect Render Instances")
-    ExtractSequence: ExtractSequenceModel = SettingsField(
-        default_factory=ExtractSequenceModel,
-        title="Extract Sequence")
-    ValidateProjectSettings: ValidatePluginModel = SettingsField(
-        default_factory=ValidatePluginModel,
-        title="Validate Project Settings")
-    ValidateMarks: ValidatePluginModel = SettingsField(
-        default_factory=ValidatePluginModel,
-        title="Validate MarkIn/Out")
-    ValidateStartFrame: ValidatePluginModel = SettingsField(
-        default_factory=ValidatePluginModel,
-        title="Validate Scene Start Frame")
-    ValidateAssetName: ValidatePluginModel = SettingsField(
-        default_factory=ValidatePluginModel,
-        title="Validate Folder Name")
-    ExtractConvertToEXR: ExtractConvertToEXRModel = SettingsField(
-        default_factory=ExtractConvertToEXRModel,
-        title="Extract Convert To EXR")
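As with the other AYON settings models, the field names on PublishPluginsModel match publish plugin class names so the values can be applied to the plugins. A hedged sketch of how the enabled/optional/active trio typically lands on a pyblish plugin; the 'apply_settings' hook shown here is the ayon-core convention as I understand it and is not part of this diff:

    import pyblish.api


    class ValidateMarks(pyblish.api.ContextPlugin):
        """Example target plugin; the settings key matches this class name."""
        label = "Validate MarkIn/Out"
        optional = True

        @classmethod
        def apply_settings(cls, project_settings):
            # Hypothetical sketch: values are looked up by addon name and
            # plugin class name, then applied onto the plugin attributes.
            settings = project_settings["tvpaint"]["publish"]["ValidateMarks"]
            cls.enabled = settings["enabled"]
            cls.optional = settings["optional"]
            cls.active = settings["active"]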
-
-
-class LoadPluginsModel(BaseSettingsModel):
-    LoadImage: LoadImageModel = SettingsField(
-        default_factory=LoadImageModel,
-        title="Load Image")
-    ImportImage: LoadImageModel = SettingsField(
-        default_factory=LoadImageModel,
-        title="Import Image")
-
-
-DEFAULT_PUBLISH_SETTINGS = {
-    "CollectRenderInstances": {
-        "ignore_render_pass_transparency": False
-    },
-    "ExtractSequence": {
-        # "review_bg": [255, 255, 255]
-        "review_bg": [255, 255, 255, 1.0]
-    },
-    "ValidateProjectSettings": {
-        "enabled": True,
-        "optional": True,
-        "active": True
-    },
-    "ValidateMarks": {
-        "enabled": True,
-        "optional": True,
-        "active": True
-    },
-    "ValidateStartFrame": {
-        "enabled": False,
-        "optional": True,
-        "active": True
-    },
-    "ValidateAssetName": {
-        "enabled": True,
-        "optional": True,
-        "active": True
-    },
-    "ExtractConvertToEXR": {
-        "enabled": False,
-        "replace_pngs": True,
-        "exr_compression": "ZIP"
-    }
-}
diff --git a/server_addon/tvpaint/server/settings/workfile_builder.py b/server_addon/tvpaint/server/settings/workfile_builder.py
deleted file mode 100644
index 0799497bf9..0000000000
--- a/server_addon/tvpaint/server/settings/workfile_builder.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from ayon_server.settings import (
-    BaseSettingsModel,
-    SettingsField,
-    MultiplatformPathModel,
-    task_types_enum,
-)
-
-
-class CustomBuilderTemplate(BaseSettingsModel):
-    task_types: list[str] = SettingsField(
-        default_factory=list,
-        title="Task types",
-        enum_resolver=task_types_enum
-    )
-    template_path: MultiplatformPathModel = SettingsField(
-        default_factory=MultiplatformPathModel
-    )
-
-
-class WorkfileBuilderPlugin(BaseSettingsModel):
-    _title = "Workfile Builder"
-    create_first_version: bool = SettingsField(
-        False,
-        title="Create first workfile"
-    )
-
-    custom_templates: list[CustomBuilderTemplate] = SettingsField(
-        default_factory=list
-    )
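A closing note on the 'default_factory' arguments used throughout these settings models: pydantic calls the factory with no arguments to build the default value, so a 'list[...]' field needs a callable that returns a list (plain 'list' is the usual choice), while a single-submodel field takes the model class itself. A minimal illustration in plain pydantic, assuming ayon_server's SettingsField forwards 'default_factory' to pydantic's Field as the usage above suggests:

    from pydantic import BaseModel, Field


    class Item(BaseModel):
        name: str = "x"


    class Settings(BaseModel):
        # Called with no arguments; every instance gets a fresh empty list
        items: list[Item] = Field(default_factory=list)
        # For a single submodel, the model class itself is the factory
        item: Item = Field(default_factory=Item)


    assert Settings().items == []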