From 2ebbefdec052f5fa88a7a9237da84103fb3be5fa Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 9 Mar 2021 14:27:11 +0100 Subject: [PATCH 001/295] reversed condition of raising exception --- .../plugins/publish/collect_matching_asset.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py b/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py index 48065c46620..089ca32561e 100644 --- a/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py +++ b/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py @@ -31,20 +31,20 @@ def process(self, instance): matching_asset_doc = asset_doc break - if matching_asset_doc: - instance.data["asset"] = matching_asset_doc["name"] - instance.data["assetEntity"] = matching_asset_doc - self.log.info( - f"Matching asset found: {pformat(matching_asset_doc)}" - ) - - else: + if not matching_asset_doc: # TODO better error message raise AssertionError(( "Filename \"{}\" does not match" " any name of asset documents in database for your selection." 
).format(instance.data["source"])) + instance.data["asset"] = matching_asset_doc["name"] + instance.data["assetEntity"] = matching_asset_doc + + self.log.info( + f"Matching asset found: {pformat(matching_asset_doc)}" + ) + def selection_children_by_name(self, instance): storing_key = "childrenDocsForSelection" From dc4757b38f710f3125ed7fc2857f0b202ebc0244 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 9 Mar 2021 14:27:33 +0100 Subject: [PATCH 002/295] added regex for version check in source filename --- .../plugins/publish/collect_matching_asset.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py b/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py index 089ca32561e..cdb5403caa3 100644 --- a/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py +++ b/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py @@ -1,4 +1,5 @@ import os +import re import collections import pyblish.api from avalon import io @@ -16,6 +17,9 @@ class CollectMatchingAssetToInstance(pyblish.api.InstancePlugin): hosts = ["standalonepublisher"] families = ["background_batch"] + # Version regex to parse asset name and version from filename + version_regex = re.compile(r"^(.+)_v([0-9]+)$") + def process(self, instance): source_file = os.path.basename(instance.data["source"]).lower() self.log.info("Looking for asset document for file \"{}\"".format( From 0769eda05f21e2007a8a4c17b9a8efa31ee260e1 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 9 Mar 2021 14:29:06 +0100 Subject: [PATCH 003/295] use the regex to try check if filename contain asset name --- .../plugins/publish/collect_matching_asset.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py b/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py index cdb5403caa3..93551f1c4ca 100644 --- 
a/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py +++ b/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py @@ -28,7 +28,18 @@ def process(self, instance): asset_docs_by_name = self.selection_children_by_name(instance) + version_number = None + # Always first check if source filename is in assets matching_asset_doc = asset_docs_by_name.get(source_file) + if matching_asset_doc is None: + # Check if source file contain version in name + regex_result = self.version_regex.findall(source_file) + if regex_result: + asset_name, _version_number = regex_result[0] + matching_asset_doc = asset_docs_by_name.get(asset_name) + if matching_asset_doc: + version_number = int(_version_number) + if matching_asset_doc is None: for asset_name_low, asset_doc in asset_docs_by_name.items(): if asset_name_low in source_file: From ba4f62a3fba13d203267b73671eaa7c5f49c6053 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 9 Mar 2021 14:29:17 +0100 Subject: [PATCH 004/295] store version to instance data if is found --- .../plugins/publish/collect_matching_asset.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py b/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py index 93551f1c4ca..8a845e60d7f 100644 --- a/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py +++ b/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py @@ -55,6 +55,8 @@ def process(self, instance): instance.data["asset"] = matching_asset_doc["name"] instance.data["assetEntity"] = matching_asset_doc + if version_number is not None: + instance.data["version"] = version_number self.log.info( f"Matching asset found: {pformat(matching_asset_doc)}" From 406c665d1d36a4deb129b09397c7854ed1c0e230 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 9 Mar 2021 14:46:12 +0100 Subject: [PATCH 005/295] created copy of psb bulk plugin but for render mov batch # 
Conflicts: # pype/hosts/standalonepublisher/plugins/publish/collect_mov_instances.py --- .../plugins/publish/collect_mov_instances.py | 54 +++++++++++++++++++ .../plugins/publish/collect_psd_instances.py | 1 - 2 files changed, 54 insertions(+), 1 deletion(-) create mode 100644 pype/hosts/standalonepublisher/plugins/publish/collect_mov_instances.py diff --git a/pype/hosts/standalonepublisher/plugins/publish/collect_mov_instances.py b/pype/hosts/standalonepublisher/plugins/publish/collect_mov_instances.py new file mode 100644 index 00000000000..0dcbd119d91 --- /dev/null +++ b/pype/hosts/standalonepublisher/plugins/publish/collect_mov_instances.py @@ -0,0 +1,54 @@ +import copy +import pyblish.api +from pprint import pformat + + +class CollectMovInstances(pyblish.api.InstancePlugin): + """Collect all available instances from render mov batch.""" + + label = "Collect Mov Instances" + order = pyblish.api.CollectorOrder + 0.489 + hosts = ["standalonepublisher"] + families = ["render_mov_batch"] + + # presets + subsets = { + "render": { + "task": "compositing", + "family": "render" + } + } + unchecked_by_default = [] + + def process(self, instance): + context = instance.context + asset_name = instance.data["asset"] + for subset_name, subset_data in self.subsets.items(): + instance_name = f"{asset_name}_{subset_name}" + task_name = subset_data["task"] + + # create new instance + new_instance = context.create_instance(instance_name) + + # add original instance data except name key + for key, value in instance.data.items(): + if key not in ["name"]: + # Make sure value is copy since value may be object which + # can be shared across all new created objects + new_instance.data[key] = copy.deepcopy(value) + + # add subset data from preset + new_instance.data.update(subset_data) + + new_instance.data["label"] = instance_name + new_instance.data["subset"] = subset_name + new_instance.data["task"] = task_name + + if subset_name in self.unchecked_by_default: + 
new_instance.data["publish"] = False + + self.log.info(f"Created new instance: {instance_name}") + self.log.debug(f"New instance data: {pformat(new_instance.data)}") + + # delete original instance + context.remove(instance) diff --git a/pype/hosts/standalonepublisher/plugins/publish/collect_psd_instances.py b/pype/hosts/standalonepublisher/plugins/publish/collect_psd_instances.py index b5db4374738..11cedc30b9f 100644 --- a/pype/hosts/standalonepublisher/plugins/publish/collect_psd_instances.py +++ b/pype/hosts/standalonepublisher/plugins/publish/collect_psd_instances.py @@ -55,7 +55,6 @@ def process(self, instance): new_instance.data["subset"] = subset_name new_instance.data["task"] = task - if subset_name in self.unchecked_by_default: new_instance.data["publish"] = False From 82646f712625bb091bebba775896562f14b23b12 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 9 Mar 2021 14:52:15 +0100 Subject: [PATCH 006/295] added render_mov_batch to collect matching asset --- .../plugins/publish/collect_matching_asset.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py b/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py index 8a845e60d7f..16147dc7384 100644 --- a/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py +++ b/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py @@ -15,7 +15,7 @@ class CollectMatchingAssetToInstance(pyblish.api.InstancePlugin): label = "Collect Matching Asset to Instance" order = pyblish.api.CollectorOrder - 0.05 hosts = ["standalonepublisher"] - families = ["background_batch"] + families = ["background_batch", "render_mov_batch"] # Version regex to parse asset name and version from filename version_regex = re.compile(r"^(.+)_v([0-9]+)$") From 6ec18b00ea8dca105b3e8ae9141b3355f2f70342 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 9 Mar 2021 15:29:54 +0100 Subject: [PATCH 007/295] single 
collector for batch instances # Conflicts: # pype/hosts/standalonepublisher/plugins/publish/collect_mov_instances.py --- .../plugins/publish/collect_mov_instances.py | 54 ------------------- .../plugins/publish/collect_psd_instances.py | 34 +++++++----- 2 files changed, 22 insertions(+), 66 deletions(-) delete mode 100644 pype/hosts/standalonepublisher/plugins/publish/collect_mov_instances.py diff --git a/pype/hosts/standalonepublisher/plugins/publish/collect_mov_instances.py b/pype/hosts/standalonepublisher/plugins/publish/collect_mov_instances.py deleted file mode 100644 index 0dcbd119d91..00000000000 --- a/pype/hosts/standalonepublisher/plugins/publish/collect_mov_instances.py +++ /dev/null @@ -1,54 +0,0 @@ -import copy -import pyblish.api -from pprint import pformat - - -class CollectMovInstances(pyblish.api.InstancePlugin): - """Collect all available instances from render mov batch.""" - - label = "Collect Mov Instances" - order = pyblish.api.CollectorOrder + 0.489 - hosts = ["standalonepublisher"] - families = ["render_mov_batch"] - - # presets - subsets = { - "render": { - "task": "compositing", - "family": "render" - } - } - unchecked_by_default = [] - - def process(self, instance): - context = instance.context - asset_name = instance.data["asset"] - for subset_name, subset_data in self.subsets.items(): - instance_name = f"{asset_name}_{subset_name}" - task_name = subset_data["task"] - - # create new instance - new_instance = context.create_instance(instance_name) - - # add original instance data except name key - for key, value in instance.data.items(): - if key not in ["name"]: - # Make sure value is copy since value may be object which - # can be shared across all new created objects - new_instance.data[key] = copy.deepcopy(value) - - # add subset data from preset - new_instance.data.update(subset_data) - - new_instance.data["label"] = instance_name - new_instance.data["subset"] = subset_name - new_instance.data["task"] = task_name - - if subset_name in 
self.unchecked_by_default: - new_instance.data["publish"] = False - - self.log.info(f"Created new instance: {instance_name}") - self.log.debug(f"New instance data: {pformat(new_instance.data)}") - - # delete original instance - context.remove(instance) diff --git a/pype/hosts/standalonepublisher/plugins/publish/collect_psd_instances.py b/pype/hosts/standalonepublisher/plugins/publish/collect_psd_instances.py index 11cedc30b9f..09ec78af39b 100644 --- a/pype/hosts/standalonepublisher/plugins/publish/collect_psd_instances.py +++ b/pype/hosts/standalonepublisher/plugins/publish/collect_psd_instances.py @@ -11,21 +11,29 @@ class CollectPsdInstances(pyblish.api.InstancePlugin): label = "Collect Psd Instances" order = pyblish.api.CollectorOrder + 0.489 hosts = ["standalonepublisher"] - families = ["background_batch"] + families = ["background_batch", "render_mov_batch"] # presets subsets = { - "backgroundLayout": { - "task": "background", - "family": "backgroundLayout" + "background_batch": { + "backgroundLayout": { + "task": "background", + "family": "backgroundLayout" + }, + "backgroundComp": { + "task": "background", + "family": "backgroundComp" + }, + "workfileBackground": { + "task": "background", + "family": "workfile" + } }, - "backgroundComp": { - "task": "background", - "family": "backgroundComp" - }, - "workfileBackground": { - "task": "background", - "family": "workfile" + "render_mov_batch": { + "renderCompositingDefault": { + "task": "Compositing", + "family": "render" + } } } unchecked_by_default = [] @@ -34,7 +42,9 @@ def process(self, instance): context = instance.context asset_data = instance.data["assetEntity"] asset_name = instance.data["asset"] - for subset_name, subset_data in self.subsets.items(): + family = instance.data["family"] + + for subset_name, subset_data in self.subsets[family].items(): instance_name = f"{asset_name}_{subset_name}" task = subset_data.get("task", "background") From d119373dc12441f6a59900060467401e9c387cdf Mon Sep 17 00:00:00 
2001 From: iLLiCiTiT Date: Tue, 9 Mar 2021 15:30:15 +0100 Subject: [PATCH 008/295] renamed collect psd instances to collect batch instances # Conflicts: # pype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py # pype/hosts/standalonepublisher/plugins/publish/collect_psd_instances.py # pype/plugins/standalonepublisher/publish/collect_psd_instances.py --- .../{collect_psd_instances.py => collect_batch_instances.py} | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename pype/hosts/standalonepublisher/plugins/publish/{collect_psd_instances.py => collect_batch_instances.py} (96%) diff --git a/pype/hosts/standalonepublisher/plugins/publish/collect_psd_instances.py b/pype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py similarity index 96% rename from pype/hosts/standalonepublisher/plugins/publish/collect_psd_instances.py rename to pype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py index 09ec78af39b..3e83a7dcace 100644 --- a/pype/hosts/standalonepublisher/plugins/publish/collect_psd_instances.py +++ b/pype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py @@ -3,12 +3,12 @@ from pprint import pformat -class CollectPsdInstances(pyblish.api.InstancePlugin): +class CollectBatchInstances(pyblish.api.InstancePlugin): """ Collect all available instances from psd batch. 
""" - label = "Collect Psd Instances" + label = "Collect Batch Instances" order = pyblish.api.CollectorOrder + 0.489 hosts = ["standalonepublisher"] families = ["background_batch", "render_mov_batch"] From 0257e04192b40fb3df7b65f12a15fcbcf76cb753 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 9 Mar 2021 16:15:03 +0100 Subject: [PATCH 009/295] fixed asset name lookup --- .../plugins/publish/collect_matching_asset.py | 38 ++++++++++++++++--- 1 file changed, 32 insertions(+), 6 deletions(-) diff --git a/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py b/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py index 16147dc7384..f1686dc42f5 100644 --- a/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py +++ b/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py @@ -21,19 +21,23 @@ class CollectMatchingAssetToInstance(pyblish.api.InstancePlugin): version_regex = re.compile(r"^(.+)_v([0-9]+)$") def process(self, instance): - source_file = os.path.basename(instance.data["source"]).lower() + source_filename = self.get_source_filename(instance) self.log.info("Looking for asset document for file \"{}\"".format( - instance.data["source"] + source_filename )) + asset_name = os.path.splitext(source_filename)[0].lower() asset_docs_by_name = self.selection_children_by_name(instance) version_number = None # Always first check if source filename is in assets - matching_asset_doc = asset_docs_by_name.get(source_file) + matching_asset_doc = asset_docs_by_name.get(asset_name) if matching_asset_doc is None: # Check if source file contain version in name - regex_result = self.version_regex.findall(source_file) + self.log.debug(( + "Asset doc by \"{}\" was not found trying version regex." 
+ ).format(asset_name)) + regex_result = self.version_regex.findall(asset_name) if regex_result: asset_name, _version_number = regex_result[0] matching_asset_doc = asset_docs_by_name.get(asset_name) @@ -42,16 +46,19 @@ def process(self, instance): if matching_asset_doc is None: for asset_name_low, asset_doc in asset_docs_by_name.items(): - if asset_name_low in source_file: + if asset_name_low in asset_name: matching_asset_doc = asset_doc break if not matching_asset_doc: + self.log.debug("Available asset names {}".format( + str(list(asset_docs_by_name.keys())) + )) # TODO better error message raise AssertionError(( "Filename \"{}\" does not match" " any name of asset documents in database for your selection." - ).format(instance.data["source"])) + ).format(source_filename)) instance.data["asset"] = matching_asset_doc["name"] instance.data["assetEntity"] = matching_asset_doc @@ -62,6 +69,25 @@ def process(self, instance): f"Matching asset found: {pformat(matching_asset_doc)}" ) + def get_source_filename(self, instance): + if instance.data["family"] == "background_batch": + return os.path.basename(instance.data["source"]) + + if len(instance.data["representations"]) != 1: + raise ValueError(( + "Implementation bug: Instance data contain" + " more than one representation." + )) + + repre = instance.data["representations"][0] + repre_files = repre["files"] + if not isinstance(repre_files, str): + raise ValueError(( + "Implementation bug: Instance's representation contain" + " unexpected value (expected single file). 
{}" + ).format(str(repre_files))) + return repre_files + def selection_children_by_name(self, instance): storing_key = "childrenDocsForSelection" From 256f28f3a74ca94e7e61129b90632de6ca696815 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 9 Mar 2021 16:15:21 +0100 Subject: [PATCH 010/295] added mov batch specific method # Conflicts: # pype/plugins/standalonepublisher/publish/collect_context.py --- .../plugins/publish/collect_context.py | 101 +++++++++++++++--- 1 file changed, 84 insertions(+), 17 deletions(-) diff --git a/pype/hosts/standalonepublisher/plugins/publish/collect_context.py b/pype/hosts/standalonepublisher/plugins/publish/collect_context.py index b40c081fccf..f6c102f2cb9 100644 --- a/pype/hosts/standalonepublisher/plugins/publish/collect_context.py +++ b/pype/hosts/standalonepublisher/plugins/publish/collect_context.py @@ -48,8 +48,12 @@ def process(self, context): self.log.debug(f"_ in_data: {pformat(in_data)}") # exception for editorial - if in_data["family"] in ["editorial", "background_batch"]: + if in_data["family"] == "render_mov_batch": + in_data_list = self.prepare_mov_batch_instances(context, in_data) + + elif in_data["family"] in ["editorial", "background_batch"]: in_data_list = self.multiple_instances(context, in_data) + else: in_data_list = [in_data] @@ -66,38 +70,38 @@ def multiple_instances(self, context, in_data): in_data_list = list() representations = in_data.pop("representations") - for repre in representations: + for repr in representations: in_data_copy = copy.deepcopy(in_data) - ext = repre["ext"][1:] + ext = repr["ext"][1:] subset = in_data_copy["subset"] # filter out non editorial files if ext not in self.batch_extensions: - in_data_copy["representations"] = [repre] + in_data_copy["representations"] = [repr] in_data_copy["subset"] = f"{ext}{subset}" in_data_list.append(in_data_copy) - files = repre.get("files") + files = repr.get("files") # delete unneeded keys delete_repr_keys = ["frameStart", "frameEnd"] for k in 
delete_repr_keys: - if repre.get(k): - repre.pop(k) + if repr.get(k): + repr.pop(k) # convert files to list if it isnt if not isinstance(files, (tuple, list)): files = [files] self.log.debug(f"_ files: {files}") - for index, _file in enumerate(files): + for index, f in enumerate(files): index += 1 # copy dictionaries in_data_copy = copy.deepcopy(in_data_copy) - new_repre = copy.deepcopy(repre) + repr_new = copy.deepcopy(repr) - new_repre["files"] = _file - new_repre["name"] = ext - in_data_copy["representations"] = [new_repre] + repr_new["files"] = f + repr_new["name"] = ext + in_data_copy["representations"] = [repr_new] # create subset Name new_subset = f"{ext}{index}{subset}" @@ -112,6 +116,73 @@ def multiple_instances(self, context, in_data): return in_data_list + def prepare_mov_batch_instances(self, context, in_data): + """Copy of `multiple_instances` method. + + Method was copied because `batch_extensions` is used in + `multiple_instances` but without any family filtering. Since usage + of the filtering is unknown and modification of that part may break + editorial or PSD batch publishing it was decided to create a copy with + this family specific filtering. + + TODO: + - Merge logic with `multiple_instances` method. + """ + self.log.info("Preparing data for mov batch processing.") + in_data_list = [] + + representations = in_data.pop("representations") + for repre in representations: + self.log.debug("Processing representation with files {}".format( + str(repre["files"]) + )) + ext = repre["ext"][1:] + # Skip files that are not available for mov batch publishing + # TODO add dynamic expected extensions by family from `in_data` + # - with this modification it would be possible to use only + # `multiple_instances` method + expected_exts = ["mov"] + if ext not in expected_exts: + self.log.warning(( + "Skipping representation." + " Does not match expected extensions <{}>. 
{}" + ).format(", ".join(expected_exts), str(repre))) + continue + + # Delete key from representation + # QUESTION is this needed in mov batch processing? + delete_repr_keys = ["frameStart", "frameEnd"] + for key in delete_repr_keys: + repre.pop(key, None) + + files = repre["files"] + # Convert files to list if it isnt + if not isinstance(files, (tuple, list)): + files = [files] + + # Loop through files and create new instance per each file + for filename in files: + # Create copy of representation and change it's files and name + new_repre = copy.deepcopy(repre) + new_repre["files"] = filename + new_repre["name"] = ext + + # Prepare new subset name (temporary name) + # - subset name will be changed in batch specific plugins + new_subset_name = "{}{}".format( + in_data["subset"], + os.path.basename(filename) + ) + # Create copy of instance data as new instance and pass in new + # representation + in_data_copy = copy.deepcopy(in_data) + in_data_copy["representations"] = [new_repre] + in_data_copy["subset"] = new_subset_name + + in_data_list.append(in_data_copy) + + return in_data_list + def create_instance(self, context, in_data): subset = in_data["subset"] @@ -145,16 +216,12 @@ def create_instance(self, context, in_data): component["stagingDir"] = component["stagingDir"] if isinstance(component["files"], list): - collections, _remainder = clique.assemble(component["files"]) + collections, remainder = clique.assemble(component["files"]) self.log.debug("collecting sequence: {}".format(collections)) instance.data["frameStart"] = int(component["frameStart"]) instance.data["frameEnd"] = int(component["frameEnd"]) instance.data["fps"] = int(component["fps"]) - ext = component["ext"] - if ext.startswith("."): - component["ext"] = ext[1:] - if component["preview"]: instance.data["families"].append("review") instance.data["repreProfiles"] = ["h264"] From a351c54e2740582a43614f70272bc9e0587d77ee Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 9 Mar 2021 16:31:05 +0100 
Subject: [PATCH 011/295] dont remove frame start and frame end from instance data --- .../plugins/publish/collect_context.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/pype/hosts/standalonepublisher/plugins/publish/collect_context.py b/pype/hosts/standalonepublisher/plugins/publish/collect_context.py index f6c102f2cb9..318335a6d20 100644 --- a/pype/hosts/standalonepublisher/plugins/publish/collect_context.py +++ b/pype/hosts/standalonepublisher/plugins/publish/collect_context.py @@ -123,7 +123,8 @@ def prepare_mov_batch_instances(self, context, in_data): `multiple_instances` but without any family filtering. Since usage of the filtering is unknown and modification of that part may break editorial or PSD batch publishing it was decided to create a copy with - this family specific filtering. + this family specific filtering. Also "frameStart" and "frameEnd" keys + are removed from instance which is needed for this processing. TODO: - Merge logic with `multiple_instances` method. @@ -149,12 +150,6 @@ def prepare_mov_batch_instances(self, context, in_data): ).format(", ".join(expected_exts), str(repre))) continue - # Delete key from representation - # QUESTION is this needed in mov batch processing? 
- delete_repr_keys = ["frameStart", "frameEnd"] - for key in delete_repr_keys: - repre.pop(key, None) - files = repre["files"] # Convert files to list if it isnt if not isinstance(files, (tuple, list)): From b44f8c40dae99be9d42e14dcc6b7f25e8eb81ad8 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 9 Mar 2021 16:31:17 +0100 Subject: [PATCH 012/295] removed unused variable --- .../plugins/publish/collect_batch_instances.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py b/pype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py index 3e83a7dcace..5820fc62477 100644 --- a/pype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py +++ b/pype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py @@ -31,7 +31,7 @@ class CollectBatchInstances(pyblish.api.InstancePlugin): }, "render_mov_batch": { "renderCompositingDefault": { - "task": "Compositing", + "task": "compositing", "family": "render" } } @@ -40,7 +40,6 @@ class CollectBatchInstances(pyblish.api.InstancePlugin): def process(self, instance): context = instance.context - asset_data = instance.data["assetEntity"] asset_name = instance.data["asset"] family = instance.data["family"] From c20261693a4ce0f85181278098c8e6283c87f27e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 9 Mar 2021 16:59:25 +0100 Subject: [PATCH 013/295] added message before logged values # Conflicts: # pype/plugins/global/publish/integrate_new.py --- pype/plugins/publish/integrate_new.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pype/plugins/publish/integrate_new.py b/pype/plugins/publish/integrate_new.py index 14b25b9c46b..3fb9f668deb 100644 --- a/pype/plugins/publish/integrate_new.py +++ b/pype/plugins/publish/integrate_new.py @@ -812,7 +812,9 @@ def template_name_from_instance(self, instance): matching_profiles = {} highest_value = -1 - 
self.log.info(self.template_name_profiles) + self.log.debug( + "Template name profiles:\n{}".format(self.template_name_profiles) + ) for name, filters in self.template_name_profiles.items(): value = 0 families = filters.get("families") From a898e5bd2b3eae8b32b496d120f9428ec24b3f75 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 9 Mar 2021 16:59:51 +0100 Subject: [PATCH 014/295] added definitions of default task names --- .../plugins/publish/collect_batch_instances.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/pype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py b/pype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py index 5820fc62477..94574ad19cf 100644 --- a/pype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py +++ b/pype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py @@ -14,6 +14,10 @@ class CollectBatchInstances(pyblish.api.InstancePlugin): families = ["background_batch", "render_mov_batch"] # presets + default_subset_task = { + "background_batch": "background", + "render_mov_batch": "compositing" + } subsets = { "background_batch": { "backgroundLayout": { @@ -43,9 +47,10 @@ def process(self, instance): asset_name = instance.data["asset"] family = instance.data["family"] + default_task_name = self.default_subset_task.get(family) for subset_name, subset_data in self.subsets[family].items(): instance_name = f"{asset_name}_{subset_name}" - task = subset_data.get("task", "background") + task_name = subset_data.get("task") or default_task_name # create new instance new_instance = context.create_instance(instance_name) @@ -62,7 +67,7 @@ def process(self, instance): new_instance.data["label"] = f"{instance_name}" new_instance.data["subset"] = subset_name - new_instance.data["task"] = task + new_instance.data["task"] = task_name if subset_name in self.unchecked_by_default: new_instance.data["publish"] = False From 
5ee3e848747295307dbb0e4050ab7747517603b5 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 9 Mar 2021 18:13:35 +0100 Subject: [PATCH 015/295] fix variable override --- .../plugins/publish/collect_matching_asset.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py b/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py index f1686dc42f5..0d629b1b449 100644 --- a/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py +++ b/pype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py @@ -39,8 +39,8 @@ def process(self, instance): ).format(asset_name)) regex_result = self.version_regex.findall(asset_name) if regex_result: - asset_name, _version_number = regex_result[0] - matching_asset_doc = asset_docs_by_name.get(asset_name) + _asset_name, _version_number = regex_result[0] + matching_asset_doc = asset_docs_by_name.get(_asset_name) if matching_asset_doc: version_number = int(_version_number) From d866e819d0152369636c571d6697142fb0f5b55e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 10 Mar 2021 15:37:36 +0100 Subject: [PATCH 016/295] cleanup plugin can skip filpaths defined in context data --- pype/plugins/publish/cleanup.py | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/pype/plugins/publish/cleanup.py b/pype/plugins/publish/cleanup.py index 5fded85ccbc..b8104078d9f 100644 --- a/pype/plugins/publish/cleanup.py +++ b/pype/plugins/publish/cleanup.py @@ -37,9 +37,16 @@ def process(self, instance): ) ) + _skip_cleanup_filepaths = instance.context.data.get( + "skipCleanupFilepaths" + ) or [] + skip_cleanup_filepaths = set() + for path in _skip_cleanup_filepaths: + skip_cleanup_filepaths.add(os.path.normpath(path)) + if self.remove_temp_renders: self.log.info("Cleaning renders new...") - self.clean_renders(instance) + self.clean_renders(instance, skip_cleanup_filepaths) if [ef for ef in 
self.exclude_families if instance.data["family"] in ef]: @@ -65,7 +72,7 @@ def process(self, instance): self.log.info("Removing staging directory {}".format(staging_dir)) shutil.rmtree(staging_dir) - def clean_renders(self, instance): + def clean_renders(self, instance, skip_cleanup_filepaths): transfers = instance.data.get("transfers", list()) current_families = instance.data.get("families", list()) @@ -84,6 +91,12 @@ def clean_renders(self, instance): # add dest dir into clearing dir paths (regex paterns) transfers_dirs.append(os.path.dirname(dest)) + if src in skip_cleanup_filepaths: + self.log.debug(( + "Source file is marked to be skipped in cleanup. {}" + ).format(src)) + continue + if os.path.normpath(src) != os.path.normpath(dest): if instance_family == 'render' or 'render' in current_families: self.log.info("Removing src: `{}`...".format(src)) @@ -116,6 +129,9 @@ def clean_renders(self, instance): # remove all files which match regex patern for f in files: + if os.path.normpath(f) in skip_cleanup_filepaths: + continue + for p in self.paterns: patern = re.compile(p) if not patern.findall(f): From edb868a64dcf5c8627979f7d290efde9c29ac80c Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 10 Mar 2021 15:38:20 +0100 Subject: [PATCH 017/295] all filepaths from standalone publisher are added to list of filepaths that won't be deleted during cleanup --- .../plugins/publish/collect_context.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/pype/hosts/standalonepublisher/plugins/publish/collect_context.py b/pype/hosts/standalonepublisher/plugins/publish/collect_context.py index 318335a6d20..8930bedab8e 100644 --- a/pype/hosts/standalonepublisher/plugins/publish/collect_context.py +++ b/pype/hosts/standalonepublisher/plugins/publish/collect_context.py @@ -45,8 +45,9 @@ def process(self, context): with open(input_json_path, "r") as f: in_data = json.load(f) - self.log.debug(f"_ in_data: {pformat(in_data)}") + self.log.debug(f"_ 
in_data: {pformat(in_data)}") + self.add_files_to_ignore_cleanup(in_data, context) # exception for editorial if in_data["family"] == "render_mov_batch": in_data_list = self.prepare_mov_batch_instances(context, in_data) @@ -63,6 +64,21 @@ def process(self, context): # create instance self.create_instance(context, in_data) + def add_files_to_ignore_cleanup(self, in_data, context): + all_filepaths = context.data.get("skipCleanupFilepaths") or [] + for repre in in_data["representations"]: + files = repre["files"] + if not isinstance(files, list): + files = [files] + + dirpath = repre["stagingDir"] + for filename in files: + filepath = os.path.normpath(os.path.join(dirpath, filename)) + if filepath not in all_filepaths: + all_filepaths.append(filepath) + + context.data["skipCleanupFilepaths"] = all_filepaths + def multiple_instances(self, context, in_data): # avoid subset name duplicity if not context.data.get("subsetNamesCheck"): From a794398b23c2f52bbfade3d6099e6aa28b838887 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 10 Mar 2021 16:12:16 +0100 Subject: [PATCH 018/295] added back pype 3 changes --- .../standalonepublisher/plugins/publish/collect_context.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pype/hosts/standalonepublisher/plugins/publish/collect_context.py b/pype/hosts/standalonepublisher/plugins/publish/collect_context.py index 8930bedab8e..f7f3f00ebe5 100644 --- a/pype/hosts/standalonepublisher/plugins/publish/collect_context.py +++ b/pype/hosts/standalonepublisher/plugins/publish/collect_context.py @@ -227,12 +227,16 @@ def create_instance(self, context, in_data): component["stagingDir"] = component["stagingDir"] if isinstance(component["files"], list): - collections, remainder = clique.assemble(component["files"]) + collections, _remainder = clique.assemble(component["files"]) self.log.debug("collecting sequence: {}".format(collections)) instance.data["frameStart"] = int(component["frameStart"]) instance.data["frameEnd"] = 
int(component["frameEnd"]) instance.data["fps"] = int(component["fps"]) + ext = component["ext"] + if ext.startswith("."): + component["ext"] = ext[1:] + if component["preview"]: instance.data["families"].append("review") instance.data["repreProfiles"] = ["h264"] From 664e62ec29a0c1edc3507cd9fce9e6ced22122ed Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 12 Mar 2021 18:48:37 +0100 Subject: [PATCH 019/295] removed addition ftrack paths from project settings --- .../defaults/project_settings/ftrack.json | 2 -- .../schema_project_ftrack.json | 22 ------------------- 2 files changed, 24 deletions(-) diff --git a/pype/settings/defaults/project_settings/ftrack.json b/pype/settings/defaults/project_settings/ftrack.json index 17607b6bc1f..03ac8f309f5 100644 --- a/pype/settings/defaults/project_settings/ftrack.json +++ b/pype/settings/defaults/project_settings/ftrack.json @@ -1,6 +1,4 @@ { - "ftrack_actions_path": [], - "ftrack_events_path": [], "events": { "sync_to_avalon": { "enabled": true, diff --git a/pype/settings/entities/schemas/projects_schema/schema_project_ftrack.json b/pype/settings/entities/schemas/projects_schema/schema_project_ftrack.json index 39cd79e8519..eefc0e12b77 100644 --- a/pype/settings/entities/schemas/projects_schema/schema_project_ftrack.json +++ b/pype/settings/entities/schemas/projects_schema/schema_project_ftrack.json @@ -5,28 +5,6 @@ "collapsible": true, "is_file": true, "children": [ - { - "type": "splitter" - }, - { - "type": "label", - "label": "Additional Ftrack paths" - }, - { - "type": "list", - "key": "ftrack_actions_path", - "label": "Action paths", - "object_type": "text" - }, - { - "type": "list", - "key": "ftrack_events_path", - "label": "Event paths", - "object_type": "text" - }, - { - "type": "splitter" - }, { "type": "dict", "key": "events", From a6ed0d65c36c7e15d2492ebc0079217f1c60ade8 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 12 Mar 2021 18:51:18 +0100 Subject: [PATCH 020/295] renamed type "path-widget" to "path" 
--- pype/settings/entities/item_entities.py | 2 +- pype/settings/entities/schemas/README.md | 4 ++-- .../entities/schemas/projects_schema/schema_main.json | 2 +- .../projects_schema/schemas/schema_anatomy_imageio.json | 4 ++-- .../projects_schema/schemas/schema_maya_publish.json | 2 +- .../entities/schemas/system_schema/example_schema.json | 8 ++++---- .../entities/schemas/system_schema/example_template.json | 2 +- .../host_settings/template_host_variant.json | 2 +- .../entities/schemas/system_schema/schema_general.json | 4 ++-- .../entities/schemas/system_schema/schema_modules.json | 2 +- 10 files changed, 16 insertions(+), 16 deletions(-) diff --git a/pype/settings/entities/item_entities.py b/pype/settings/entities/item_entities.py index 5986d685d69..2f2573721b1 100644 --- a/pype/settings/entities/item_entities.py +++ b/pype/settings/entities/item_entities.py @@ -10,7 +10,7 @@ class PathEntity(ItemEntity): - schema_types = ["path-widget", "path-item"] + schema_types = ["path"] platforms = ("windows", "darwin", "linux") platform_labels_mapping = { "windows": "Windows", diff --git a/pype/settings/entities/schemas/README.md b/pype/settings/entities/schemas/README.md index 65d73a07286..80125d4b1bb 100644 --- a/pype/settings/entities/schemas/README.md +++ b/pype/settings/entities/schemas/README.md @@ -47,7 +47,7 @@ "key": "{host_name}_environments", "env_group_key": "{host_name}" }, { - "type": "path-widget", + "type": "path", "key": "{host_name}_executables", "label": "{host_label} - Full paths to executables", "multiplatform": "{multipath_executables}", @@ -361,7 +361,7 @@ ``` { - "type": "path-widget", + "type": "path", "key": "ffmpeg_path", "label": "FFmpeg path", "multiplatform": true, diff --git a/pype/settings/entities/schemas/projects_schema/schema_main.json b/pype/settings/entities/schemas/projects_schema/schema_main.json index 31d73738733..828739e0468 100644 --- a/pype/settings/entities/schemas/projects_schema/schema_main.json +++ 
b/pype/settings/entities/schemas/projects_schema/schema_main.json @@ -15,7 +15,7 @@ "is_group": true, "expandable": false, "object_type": { - "type": "path-widget", + "type": "path", "multiplatform": true } }, diff --git a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json index 7070e4b54f4..b48f90bd918 100644 --- a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json +++ b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json @@ -39,7 +39,7 @@ ] }, { - "type": "path-widget", + "type": "path", "key": "ocioconfigpath", "label": "Custom OCIO path", "multiplatform": true, @@ -175,7 +175,7 @@ ] }, { - "type": "path-widget", + "type": "path", "key": "customOCIOConfigPath", "label": "Custom OCIO config path", "multiplatform": true, diff --git a/pype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json b/pype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json index 6ecda224eaf..bb0e162c045 100644 --- a/pype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json +++ b/pype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json @@ -93,7 +93,7 @@ "label": "Path to material file defining list of material names to check. This is material name per line simple text file.
It will be checked against named group shader in your Validation regex.

For example:
^.*(?P=<shader>.+)_GEO

" }, { - "type": "path-widget", + "type": "path", "key": "material_file", "label": "Material File", "multiplatform": true, diff --git a/pype/settings/entities/schemas/system_schema/example_schema.json b/pype/settings/entities/schemas/system_schema/example_schema.json index c8920db88af..6e7a47d1bfa 100644 --- a/pype/settings/entities/schemas/system_schema/example_schema.json +++ b/pype/settings/entities/schemas/system_schema/example_schema.json @@ -298,28 +298,28 @@ } }, { - "type": "path-widget", + "type": "path", "key": "single_path_input", "label": "Single path input", "multiplatform": false, "multipath": false }, { - "type": "path-widget", + "type": "path", "key": "multi_path_input", "label": "Multi path input", "multiplatform": false, "multipath": true }, { - "type": "path-widget", + "type": "path", "key": "single_os_specific_path_input", "label": "Single OS specific path input", "multiplatform": true, "multipath": false }, { - "type": "path-widget", + "type": "path", "key": "multi_os_specific_path_input", "label": "Multi OS specific path input", "multiplatform": true, diff --git a/pype/settings/entities/schemas/system_schema/example_template.json b/pype/settings/entities/schemas/system_schema/example_template.json index d9e69354075..ff78c78e8f9 100644 --- a/pype/settings/entities/schemas/system_schema/example_template.json +++ b/pype/settings/entities/schemas/system_schema/example_template.json @@ -11,7 +11,7 @@ "env_group_key": "{host_name}" }, { - "type": "path-widget", + "type": "path", "key": "{host_name}_executables", "label": "{host_label} - Full paths to executables", "multiplatform": "{multipath_executables}", diff --git a/pype/settings/entities/schemas/system_schema/host_settings/template_host_variant.json b/pype/settings/entities/schemas/system_schema/host_settings/template_host_variant.json index 06f78591178..244b9c1f56a 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/template_host_variant.json +++ 
b/pype/settings/entities/schemas/system_schema/host_settings/template_host_variant.json @@ -39,7 +39,7 @@ "roles": ["developer"] }, { - "type": "path-widget", + "type": "path", "key": "executables", "label": "Executables", "multiplatform": "{multiplatform}", diff --git a/pype/settings/entities/schemas/system_schema/schema_general.json b/pype/settings/entities/schemas/system_schema/schema_general.json index f75b584c1bd..b029081c7c6 100644 --- a/pype/settings/entities/schemas/system_schema/schema_general.json +++ b/pype/settings/entities/schemas/system_schema/schema_general.json @@ -20,14 +20,14 @@ }, { "key": "project_plugins", - "type": "path-widget", + "type": "path", "label": "Additional Project Plugins Path", "multiplatform": true, "multipath": false }, { "key": "studio_soft", - "type": "path-widget", + "type": "path", "label": "Studio Software Location", "multiplatform": true, "multipath": false diff --git a/pype/settings/entities/schemas/system_schema/schema_modules.json b/pype/settings/entities/schemas/system_schema/schema_modules.json index 2ee316f29ef..8bfb0e90dcd 100644 --- a/pype/settings/entities/schemas/system_schema/schema_modules.json +++ b/pype/settings/entities/schemas/system_schema/schema_modules.json @@ -18,7 +18,7 @@ "label": "Avalon Mongo Timeout (ms)" }, { - "type": "path-widget", + "type": "path", "label": "Thumbnail Storage Location", "key": "AVALON_THUMBNAIL_ROOT", "multiplatform": true, From abb3223a1ed0c3ee40f09c81585912b9bd66d26e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 12 Mar 2021 20:19:51 +0100 Subject: [PATCH 021/295] path widget can handle `use_label_wrap` --- .../settings/settings/widgets/item_widgets.py | 104 ++++++++++++++---- 1 file changed, 84 insertions(+), 20 deletions(-) diff --git a/pype/tools/settings/settings/widgets/item_widgets.py b/pype/tools/settings/settings/widgets/item_widgets.py index bdc96840f2f..7cfcd844883 100644 --- a/pype/tools/settings/settings/widgets/item_widgets.py +++ 
b/pype/tools/settings/settings/widgets/item_widgets.py @@ -489,16 +489,54 @@ def _on_value_change(self): class PathWidget(BaseWidget): def create_ui(self): - self.content_widget = self - self.content_layout = QtWidgets.QGridLayout(self) - self.content_layout.setContentsMargins(0, 0, 0, 0) - self.content_layout.setSpacing(5) + self._child_style_state = "" + + if self.entity.use_label_wrap: + entity_label = None + self._create_label_wrapper() + else: + entity_label = self.entity.label + self.content_widget = self + self.content_layout = QtWidgets.QGridLayout(self) + self.content_layout.setContentsMargins(0, 0, 0, 0) + self.content_layout.setSpacing(5) + self.body_widget = None + + self.setAttribute(QtCore.Qt.WA_TranslucentBackground) self.input_field = self.create_ui_for_entity( self.category_widget, self.entity.child_obj, self ) + self.entity_widget.add_widget_to_layout(self, entity_label) - self.entity_widget.add_widget_to_layout(self, self.entity.label) + def _create_label_wrapper(self): + main_layout = QtWidgets.QHBoxLayout(self) + main_layout.setContentsMargins(0, 0, 0, 0) + main_layout.setSpacing(0) + + body_widget = ExpandingWidget(self.entity.label, self) + main_layout.addWidget(body_widget) + self.label_widget = body_widget.label_widget + + self.body_widget = body_widget + + content_widget = QtWidgets.QWidget(body_widget) + content_widget.setObjectName("ContentWidget") + content_widget.setProperty("content_state", "") + content_layout = QtWidgets.QGridLayout(content_widget) + content_layout.setContentsMargins(CHILD_OFFSET, 5, 0, 5) + + body_widget.set_content_widget(content_widget) + + self.body_widget = body_widget + self.content_widget = content_widget + self.content_layout = content_layout + + if not self.entity.collapsible: + body_widget.hide_toolbox(hide_content=False) + + elif self.entity.collapsed: + body_widget.toggle_content() def add_widget_to_layout(self, widget, label=None): row = self.content_layout.rowCount() @@ -523,26 +561,52 @@ def 
_on_entity_change(self): pass def update_style(self): - if not self.label_widget: + if not self.body_widget and not self.label_widget: return - has_unsaved_changes = self.entity.has_unsaved_changes - if not has_unsaved_changes and self.entity.group_item: - has_unsaved_changes = self.entity.group_item.has_unsaved_changes + if self.entity.group_item: + group_item = self.entity.group_item + has_unsaved_changes = group_item.has_unsaved_changes + has_project_override = group_item.has_project_override + has_studio_override = group_item.has_studio_override + else: + has_unsaved_changes = self.entity.has_unsaved_changes + has_project_override = self.entity.has_project_override + has_studio_override = self.entity.has_studio_override + + child_invalid = self.is_invalid - state = self.get_style_state( - self.is_invalid, - has_unsaved_changes, - self.entity.has_project_override, - self.entity.has_studio_override - ) - if self._style_state == state: - return + if self.body_widget: + child_style_state = self.get_style_state( + child_invalid, + has_unsaved_changes, + has_project_override, + has_studio_override + ) + if child_style_state: + child_style_state = "child-{}".format(child_style_state) - self._style_state = state + if child_style_state != self._child_style_state: + self.body_widget.side_line_widget.setProperty( + "state", child_style_state + ) + self.body_widget.side_line_widget.style().polish( + self.body_widget.side_line_widget + ) + self._child_style_state = child_style_state + + if self.label_widget: + style_state = self.get_style_state( + child_invalid, + has_unsaved_changes, + has_project_override, + has_studio_override + ) + if self._style_state != style_state: + self.label_widget.setProperty("state", style_state) + self.label_widget.style().polish(self.label_widget) - self.label_widget.setProperty("state", state) - self.label_widget.style().polish(self.label_widget) + self._style_state = style_state @property def is_invalid(self): From 
c18c87e2e2776536ff5a91e7374f0fc7bc7564b4 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 15 Mar 2021 14:31:04 +0100 Subject: [PATCH 022/295] fix inheritance of CreateBackdrop creator --- pype/hosts/nuke/plugins/create/create_backdrop.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/hosts/nuke/plugins/create/create_backdrop.py b/pype/hosts/nuke/plugins/create/create_backdrop.py index 6d8e6a07101..00539ceeaaf 100644 --- a/pype/hosts/nuke/plugins/create/create_backdrop.py +++ b/pype/hosts/nuke/plugins/create/create_backdrop.py @@ -3,7 +3,7 @@ import nuke -class CreateBackdrop(plugin.Creator): +class CreateBackdrop(plugin.PypeCreator): """Add Publishable Backdrop""" name = "nukenodes" From 493280b2749531208e7d28d0d73ef55a84b2bde9 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 16 Mar 2021 10:46:26 +0100 Subject: [PATCH 023/295] added `copy_workspace_mel` function to maya.lib --- pype/hosts/maya/lib.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 pype/hosts/maya/lib.py diff --git a/pype/hosts/maya/lib.py b/pype/hosts/maya/lib.py new file mode 100644 index 00000000000..6c142053e67 --- /dev/null +++ b/pype/hosts/maya/lib.py @@ -0,0 +1,26 @@ +import os +import shutil + + +def copy_workspace_mel(workdir): + # Check that source mel exists + current_dir = os.path.dirname(os.path.abspath(__file__)) + src_filepath = os.path.join(current_dir, "resources", "workspace.mel") + if not os.path.exists(src_filepath): + print("Source mel file does not exist. 
{}".format(src_filepath)) + return + + # Skip if workspace.mel already exists + dst_filepath = os.path.join(workdir, "workspace.mel") + if os.path.exists(dst_filepath): + return + + # Create workdir if does not exists yet + if not os.path.exists(workdir): + os.makedirs(workdir) + + # Copy file + print("Copying workspace mel \"{}\" -> \"{}\"".format( + src_filepath, dst_filepath + )) + shutil.copy(src_filepath, dst_filepath) From 1852161892203f298364b6629c6727b08d4f1c02 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 16 Mar 2021 10:48:33 +0100 Subject: [PATCH 024/295] added prelaunch hook that cares about creation of mel before host is launched --- pype/hosts/maya/hooks/pre_copy_mel.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 pype/hosts/maya/hooks/pre_copy_mel.py diff --git a/pype/hosts/maya/hooks/pre_copy_mel.py b/pype/hosts/maya/hooks/pre_copy_mel.py new file mode 100644 index 00000000000..a56f3f71b2a --- /dev/null +++ b/pype/hosts/maya/hooks/pre_copy_mel.py @@ -0,0 +1,18 @@ +from pype.lib import PreLaunchHook +from pype.hosts.maya.lib import copy_workspace_mel + + +class PreCopyMel(PreLaunchHook): + """Copy workspace.mel to workdir. + + Hook `GlobalHostDataHook` must be executed before this hook. 
+ """ + app_groups = ["maya"] + + def execute(self): + workdir = self.launch_context.env.get("AVALON_WORKDIR") + if not workdir: + self.log.warning("BUG: Workdir is not filled.") + return + + copy_workspace_mel(workdir) From 7cb129e5a3471b5d384eddf047cd794ace5b56f3 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 16 Mar 2021 10:51:25 +0100 Subject: [PATCH 025/295] workfiles tool emmit's 'before.workfile.save' and 'after.workfile.save' signals --- pype/tools/workfiles/app.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pype/tools/workfiles/app.py b/pype/tools/workfiles/app.py index e6b211152aa..d058841462a 100644 --- a/pype/tools/workfiles/app.py +++ b/pype/tools/workfiles/app.py @@ -695,11 +695,15 @@ def on_save_as_pressed(self): file_path = os.path.join(self.root, work_file) + pipeline.emit("before.workfile.save", file_path) + self._enter_session() # Make sure we are in the right session self.host.save_file(file_path) self.set_asset_task(self._asset, self._task) + pipeline.emit("after.workfile.save", file_path) + self.workfile_created.emit(file_path) self.refresh() From 66ce310e8db1f43c7a75476a85e501b7f5cbba35 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 16 Mar 2021 10:52:58 +0100 Subject: [PATCH 026/295] maya capture 'before.workfile.save' signal and triggers copy_workspace_mel --- pype/hosts/maya/api/__init__.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pype/hosts/maya/api/__init__.py b/pype/hosts/maya/api/__init__.py index ea100dda0f6..9d4418e58d4 100644 --- a/pype/hosts/maya/api/__init__.py +++ b/pype/hosts/maya/api/__init__.py @@ -12,6 +12,7 @@ from pyblish import api as pyblish from pype.lib import any_outdated import pype.hosts.maya +from pype.hosts.maya.lib import copy_workspace_mel from . 
import menu, lib log = logging.getLogger("pype.hosts.maya") @@ -46,6 +47,7 @@ def install(): avalon.on("new", on_new) avalon.before("save", on_before_save) avalon.on("taskChanged", on_task_changed) + avalon.on("before.workfile.save", before_workfile_save) log.info("Setting default family states for loader..") avalon.data["familiesStateToggled"] = ["imagesequence"] @@ -203,3 +205,11 @@ def on_task_changed(*args): "Context was changed", ("Context was changed to {}".format(avalon.Session["AVALON_ASSET"])), ) + + +def before_workfile_save(workfile_path): + if not workfile_path: + return + + workdir = os.path.dirname(workfile_path) + copy_workspace_mel(workdir) From 2fbe1d80e45c13a9846c971805121effe699a089 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 16 Mar 2021 10:59:02 +0100 Subject: [PATCH 027/295] removed tvpaint's prelaunch hook that install pywin32 module with pip --- pype/hosts/tvpaint/hooks/pre_install_pywin.py | 35 ------------------- 1 file changed, 35 deletions(-) delete mode 100644 pype/hosts/tvpaint/hooks/pre_install_pywin.py diff --git a/pype/hosts/tvpaint/hooks/pre_install_pywin.py b/pype/hosts/tvpaint/hooks/pre_install_pywin.py deleted file mode 100644 index 7abab33757b..00000000000 --- a/pype/hosts/tvpaint/hooks/pre_install_pywin.py +++ /dev/null @@ -1,35 +0,0 @@ -from pype.lib import ( - PreLaunchHook, - ApplicationLaunchFailed, - run_subprocess -) - - -class PreInstallPyWin(PreLaunchHook): - """Hook makes sure there is installed python module pywin32 on windows.""" - # WARNING This hook will probably be deprecated in Pype 3 - kept for test - order = 10 - app_groups = ["tvpaint"] - platforms = ["windows"] - - def execute(self): - installed = False - try: - from win32com.shell import shell - self.log.debug("Python module `pywin32` already installed.") - installed = True - except Exception: - pass - - if installed: - return - - try: - output = run_subprocess( - ["pip", "install", "pywin32==227"] - ) - self.log.debug("Pip install pywin32 
output:\n{}'".format(output)) - except RuntimeError: - msg = "Installation of python module `pywin32` crashed." - self.log.warning(msg, exc_info=True) - raise ApplicationLaunchFailed(msg) From b35d8d3a924624d2ccf213df40b4e0223e5dc2cd Mon Sep 17 00:00:00 2001 From: Petr Kalis Date: Tue, 16 Mar 2021 12:22:09 +0100 Subject: [PATCH 028/295] #21 - Change timers after task change --- pype/__init__.py | 9 ++- pype/lib/__init__.py | 6 +- pype/lib/avalon_context.py | 26 +++++++ pype/modules/timers_manager/timers_manager.py | 73 ++++++++++++++++++- 4 files changed, 110 insertions(+), 4 deletions(-) diff --git a/pype/__init__.py b/pype/__init__.py index 400bf4c7bbd..fd0ba321ed4 100644 --- a/pype/__init__.py +++ b/pype/__init__.py @@ -5,7 +5,8 @@ import logging from .settings import get_project_settings -from .lib import Anatomy, filter_pyblish_plugins +from .lib import Anatomy, filter_pyblish_plugins, \ + change_timer_to_current_context pyblish = avalon = _original_discover = None @@ -132,6 +133,12 @@ def install(): log.info("Patching discovery") avalon.discover = patched_discover + avalon.on("taskChanged", _on_task_change) + + +def _on_task_change(*args): + change_timer_to_current_context() + @import_wrapper def uninstall(): diff --git a/pype/lib/__init__.py b/pype/lib/__init__.py index 62cd363d521..27dd93c1a1b 100644 --- a/pype/lib/__init__.py +++ b/pype/lib/__init__.py @@ -63,7 +63,9 @@ BuildWorkfile, - get_creator_by_name + get_creator_by_name, + + change_timer_to_current_context ) from .applications import ( @@ -160,6 +162,8 @@ "get_creator_by_name", + "change_timer_to_current_context", + "ApplicationLaunchFailed", "ApplictionExecutableNotFound", "ApplicationNotFound", diff --git a/pype/lib/avalon_context.py b/pype/lib/avalon_context.py index 3d3a288b10f..6a85baa756a 100644 --- a/pype/lib/avalon_context.py +++ b/pype/lib/avalon_context.py @@ -1,3 +1,4 @@ +"""Should be used only inside of hosts.""" import os import json import re @@ -1147,3 +1148,28 @@ def 
get_creator_by_name(creator_name, case_sensitive=False): if _creator_name == creator_name: return creator_plugin return None + + +@with_avalon +def change_timer_to_current_context(): + """Called after context change to change timers""" + webserver_url = os.environ.get("PYPE_WEBSERVER_URL") + if not webserver_url: + log.warning("Couldn't find webserver url") + return + + rest_api_url = "{}/timers_manager/start_timer".format(webserver_url) + try: + import requests + except Exception: + log.warning("Couldn't start timer") + return + data = { + "project_name": avalon.io.Session["AVALON_PROJECT"], + "asset_name": avalon.io.Session["AVALON_ASSET"], + "task_name": avalon.io.Session["AVALON_TASK"] + } + + requests.post(rest_api_url, json=data) + + diff --git a/pype/modules/timers_manager/timers_manager.py b/pype/modules/timers_manager/timers_manager.py index d5b65a7bd74..282aacdd2e9 100644 --- a/pype/modules/timers_manager/timers_manager.py +++ b/pype/modules/timers_manager/timers_manager.py @@ -1,6 +1,8 @@ +import os from abc import ABCMeta, abstractmethod import six -from .. import PypeModule, ITrayService, IIdleManager +from .. import PypeModule, ITrayService, IIdleManager, IWebServerRoutes +from avalon.api import AvalonMongoDB @six.add_metaclass(ABCMeta) @@ -28,7 +30,7 @@ def timer_stopped(self): self.timer_manager_module.timer_stopped(self.id) -class TimersManager(PypeModule, ITrayService, IIdleManager): +class TimersManager(PypeModule, ITrayService, IIdleManager, IWebServerRoutes): """ Handles about Timers. Should be able to start/stop all timers at once. 
@@ -72,6 +74,52 @@ def tray_exit(self): """Nothing special for TimersManager.""" return + def webserver_initialization(self, server_manager): + """Implementation of IWebServerRoutes interface.""" + if self.tray_initialized: + from .rest_api import TimersManagerModuleRestApi + self.rest_api_obj = TimersManagerModuleRestApi(self, + server_manager) + + def start_timer(self, project_name, asset_name, task_name): + """ + Start timer for 'project_name', 'asset_name' and 'task_name' + + Called from REST api by hosts. + + Args: + project_name (string) + asset_name (string) + task_name (string) + """ + dbconn = AvalonMongoDB() + dbconn.install() + dbconn.Session["AVALON_PROJECT"] = project_name + + asset_doc = dbconn.find_one({ + "type": "asset", "name": asset_name + }) + if not asset_doc: + raise ValueError("Uknown asset {}".format(asset_name)) + + task_type = '' + try: + task_type = asset_doc["data"]["tasks"][task_name]["type"] + except KeyError: + self.log.warning("Couldn't find task_type for {}".\ + format(task_name)) + + hierarchy = asset_doc["data"]["hierarchy"].split("\\") + hierarchy.append(asset_name) + + data = { + "project_name": project_name, + "task_name": task_name, + "task_type": task_type, + "hierarchy": hierarchy + } + self.timer_started(None, data) + def timer_started(self, source_id, data): for module in self.modules: if module.id != source_id: @@ -169,3 +217,24 @@ def show_message(self): return if self.widget_user_idle.bool_is_showed is False: self.widget_user_idle.show() + + def change_timer_from_host(self, project_name, asset_name, task_name): + """Prepared method for calling change timers on REST api""" + webserver_url = os.environ.get("PYPE_WEBSERVER_URL") + if not webserver_url: + self.log.warning("Couldn't find webserver url") + return + + rest_api_url = "{}/timers_manager/start_timer".format(webserver_url) + try: + import requests + except Exception: + self.log.warning("Couldn't start timer") + return + data = { + "project_name": project_name, + 
"asset_name": asset_name, + "task_name": task_name + } + + requests.post(rest_api_url, json=data) From b138bdd2022d13c61ff2e534e820790ccbfaa7a0 Mon Sep 17 00:00:00 2001 From: Petr Kalis Date: Tue, 16 Mar 2021 12:26:44 +0100 Subject: [PATCH 029/295] Hound --- pype/lib/avalon_context.py | 2 -- pype/modules/timers_manager/timers_manager.py | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/pype/lib/avalon_context.py b/pype/lib/avalon_context.py index 6a85baa756a..9df5754126b 100644 --- a/pype/lib/avalon_context.py +++ b/pype/lib/avalon_context.py @@ -1171,5 +1171,3 @@ def change_timer_to_current_context(): } requests.post(rest_api_url, json=data) - - diff --git a/pype/modules/timers_manager/timers_manager.py b/pype/modules/timers_manager/timers_manager.py index 282aacdd2e9..44264042d5a 100644 --- a/pype/modules/timers_manager/timers_manager.py +++ b/pype/modules/timers_manager/timers_manager.py @@ -106,7 +106,7 @@ def start_timer(self, project_name, asset_name, task_name): try: task_type = asset_doc["data"]["tasks"][task_name]["type"] except KeyError: - self.log.warning("Couldn't find task_type for {}".\ + self.log.warning("Couldn't find task_type for {}". 
format(task_name)) hierarchy = asset_doc["data"]["hierarchy"].split("\\") From 1675d6690bf502f5daeaa2bbb94334e984da3b46 Mon Sep 17 00:00:00 2001 From: Petr Kalis Date: Tue, 16 Mar 2021 13:23:01 +0100 Subject: [PATCH 030/295] #21 - Safer handling of hierarchy 'hierarchy' in asset might not be used everywhere Added missed file with route definition --- pype/lib/avalon_context.py | 3 +- pype/modules/timers_manager/rest_api.py | 40 +++++++++++++++++++ pype/modules/timers_manager/timers_manager.py | 5 ++- 3 files changed, 45 insertions(+), 3 deletions(-) create mode 100644 pype/modules/timers_manager/rest_api.py diff --git a/pype/lib/avalon_context.py b/pype/lib/avalon_context.py index 9df5754126b..dc0ce9a8733 100644 --- a/pype/lib/avalon_context.py +++ b/pype/lib/avalon_context.py @@ -1167,7 +1167,8 @@ def change_timer_to_current_context(): data = { "project_name": avalon.io.Session["AVALON_PROJECT"], "asset_name": avalon.io.Session["AVALON_ASSET"], - "task_name": avalon.io.Session["AVALON_TASK"] + "task_name": avalon.io.Session["AVALON_TASK"], + "hierarchy": get_hierarchy() } requests.post(rest_api_url, json=data) diff --git a/pype/modules/timers_manager/rest_api.py b/pype/modules/timers_manager/rest_api.py new file mode 100644 index 00000000000..2247a6f7696 --- /dev/null +++ b/pype/modules/timers_manager/rest_api.py @@ -0,0 +1,40 @@ +from aiohttp.web_response import Response +from pype.api import Logger + +log = Logger().get_logger("Event processor") + +class TimersManagerModuleRestApi: + """ + REST API endpoint used for calling from hosts when context change + happens in Workfile app. 
+ """ + def __init__(self, user_module, server_manager): + self.module = user_module + self.server_manager = server_manager + + self.prefix = "/timers_manager" + + self.register() + + def register(self): + self.server_manager.add_route( + "POST", + self.prefix + "/start_timer", + self.start_timer + ) + + async def start_timer(self, request): + data = await request.json() + try: + project_name = data['project_name'] + asset_name = data['asset_name'] + task_name = data['task_name'] + hierarchy = data['hierarchy'] + except KeyError: + log.error("Payload must contain fields 'project_name, " + + "'asset_name', 'task_name', 'hierarchy'") + return Response(status=400) + + self.module.stop_timers() + self.module.start_timer(project_name, asset_name, task_name, hierarchy) + return Response(status=200) diff --git a/pype/modules/timers_manager/timers_manager.py b/pype/modules/timers_manager/timers_manager.py index 44264042d5a..68890640b32 100644 --- a/pype/modules/timers_manager/timers_manager.py +++ b/pype/modules/timers_manager/timers_manager.py @@ -81,7 +81,7 @@ def webserver_initialization(self, server_manager): self.rest_api_obj = TimersManagerModuleRestApi(self, server_manager) - def start_timer(self, project_name, asset_name, task_name): + def start_timer(self, project_name, asset_name, task_name, hierarchy): """ Start timer for 'project_name', 'asset_name' and 'task_name' @@ -91,6 +91,7 @@ def start_timer(self, project_name, asset_name, task_name): project_name (string) asset_name (string) task_name (string) + hierarchy (string) """ dbconn = AvalonMongoDB() dbconn.install() @@ -109,7 +110,7 @@ def start_timer(self, project_name, asset_name, task_name): self.log.warning("Couldn't find task_type for {}". 
format(task_name)) - hierarchy = asset_doc["data"]["hierarchy"].split("\\") + hierarchy = hierarchy.split("\\") hierarchy.append(asset_name) data = { From 351df91ce1cc02ba8414e7ae120ab38ac921e167 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 16 Mar 2021 16:11:36 +0100 Subject: [PATCH 031/295] Duplicated keys in dictionary are not handled during initialization but schema validations --- pype/settings/entities/dict_immutable_keys_entity.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/pype/settings/entities/dict_immutable_keys_entity.py b/pype/settings/entities/dict_immutable_keys_entity.py index af0ddcb7589..208f9763146 100644 --- a/pype/settings/entities/dict_immutable_keys_entity.py +++ b/pype/settings/entities/dict_immutable_keys_entity.py @@ -10,6 +10,7 @@ M_OVERRIDEN_KEY ) from . import ( + BaseItemEntity, ItemEntity, BoolEntity, GUIEntity @@ -75,6 +76,15 @@ def set(self, value): def schema_validations(self): """Validation of schema data.""" + children_keys = set() + for child_entity in self.children: + if not isinstance(child_entity, BaseItemEntity): + continue + elif child_entity.key not in children_keys: + children_keys.add(child_entity.key) + else: + raise SchemaDuplicatedKeys(self.path, child_entity.key) + if self.checkbox_key: checkbox_child = self.non_gui_children.get(self.checkbox_key) if not checkbox_child: @@ -134,8 +144,6 @@ def _add_children(self, schema_data, first=True): if isinstance(child_obj, GUIEntity): continue - if child_obj.key in self.non_gui_children: - raise SchemaDuplicatedKeys("", child_obj.key) self.non_gui_children[child_obj.key] = child_obj if not first: From c01a22147cb5f1016d27222ad0940658ab3e6d35 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 16 Mar 2021 22:38:55 +0100 Subject: [PATCH 032/295] moved model roles to constants --- pype/tools/launcher/constants.py | 12 ++++++++++++ pype/tools/launcher/models.py | 20 +++++++++++--------- pype/tools/launcher/widgets.py | 19 
++++++++++++++----- 3 files changed, 37 insertions(+), 14 deletions(-) create mode 100644 pype/tools/launcher/constants.py diff --git a/pype/tools/launcher/constants.py b/pype/tools/launcher/constants.py new file mode 100644 index 00000000000..e6dbbb6e192 --- /dev/null +++ b/pype/tools/launcher/constants.py @@ -0,0 +1,12 @@ +from Qt import QtCore + + +ACTION_ROLE = QtCore.Qt.UserRole +GROUP_ROLE = QtCore.Qt.UserRole + 1 +VARIANT_GROUP_ROLE = QtCore.Qt.UserRole + 2 +ACTION_ID_ROLE = QtCore.Qt.UserRole + 3 +ANIMATION_START_ROLE = QtCore.Qt.UserRole + 4 +ANIMATION_STATE_ROLE = QtCore.Qt.UserRole + 5 + + +ANIMATION_LEN = 10 diff --git a/pype/tools/launcher/models.py b/pype/tools/launcher/models.py index 631f6ddc988..b07321e4d6a 100644 --- a/pype/tools/launcher/models.py +++ b/pype/tools/launcher/models.py @@ -3,6 +3,12 @@ import collections from . import lib +from .constants import ( + ACTION_ROLE, + GROUP_ROLE, + VARIANT_GROUP_ROLE, + ACTION_ID_ROLE +) from .actions import ApplicationAction from Qt import QtCore, QtGui from avalon.vendor import qtawesome @@ -109,10 +115,6 @@ def headerData(self, section, orientation, role): class ActionModel(QtGui.QStandardItemModel): - ACTION_ROLE = QtCore.Qt.UserRole - GROUP_ROLE = QtCore.Qt.UserRole + 1 - VARIANT_GROUP_ROLE = QtCore.Qt.UserRole + 2 - def __init__(self, dbcon, parent=None): super(ActionModel, self).__init__(parent=parent) self.dbcon = dbcon @@ -235,8 +237,8 @@ def filter_actions(self): item = QtGui.QStandardItem(icon, label) item.setData(label, QtCore.Qt.ToolTipRole) - item.setData(actions, self.ACTION_ROLE) - item.setData(True, self.VARIANT_GROUP_ROLE) + item.setData(actions, ACTION_ROLE) + item.setData(True, VARIANT_GROUP_ROLE) items_by_order[order].append(item) for action in single_actions: @@ -244,7 +246,7 @@ def filter_actions(self): label = lib.get_action_label(action) item = QtGui.QStandardItem(icon, label) item.setData(label, QtCore.Qt.ToolTipRole) - item.setData(action, self.ACTION_ROLE) + 
item.setData(action, ACTION_ROLE) items_by_order[action.order].append(item) for group_name, actions in grouped_actions.items(): @@ -263,8 +265,8 @@ def filter_actions(self): icon = self.default_icon item = QtGui.QStandardItem(icon, group_name) - item.setData(actions, self.ACTION_ROLE) - item.setData(True, self.GROUP_ROLE) + item.setData(actions, ACTION_ROLE) + item.setData(True, GROUP_ROLE) items_by_order[order].append(item) diff --git a/pype/tools/launcher/widgets.py b/pype/tools/launcher/widgets.py index 42b24de8cd2..06c2daef9c6 100644 --- a/pype/tools/launcher/widgets.py +++ b/pype/tools/launcher/widgets.py @@ -7,6 +7,15 @@ from . import lib from .models import TaskModel, ActionModel, ProjectModel from .flickcharm import FlickCharm +from .constants import ( + ACTION_ROLE, + GROUP_ROLE, + VARIANT_GROUP_ROLE, + ACTION_ID_ROLE, + ANIMATION_START_ROLE, + ANIMATION_STATE_ROLE, + ANIMATION_LEN +) class ProjectBar(QtWidgets.QWidget): @@ -105,7 +114,7 @@ def __init__(self, dbcon, parent=None): # TODO better group delegate delegate = ActionDelegate( - [model.GROUP_ROLE, model.VARIANT_GROUP_ROLE], + [GROUP_ROLE, VARIANT_GROUP_ROLE], self ) view.setItemDelegate(delegate) @@ -136,14 +145,14 @@ def on_clicked(self, index): if not index.isValid(): return - is_group = index.data(self.model.GROUP_ROLE) - is_variant_group = index.data(self.model.VARIANT_GROUP_ROLE) + is_group = index.data(GROUP_ROLE) + is_variant_group = index.data(VARIANT_GROUP_ROLE) if not is_group and not is_variant_group: - action = index.data(self.model.ACTION_ROLE) + action = index.data(ACTION_ROLE) self.action_clicked.emit(action) return - actions = index.data(self.model.ACTION_ROLE) + actions = index.data(ACTION_ROLE) menu = QtWidgets.QMenu(self) actions_mapping = {} From ec7304414f589367a186822308e6c0386c40f697 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 16 Mar 2021 22:39:17 +0100 Subject: [PATCH 033/295] model store items by id --- pype/tools/launcher/models.py | 7 +++++++ 1 file changed, 7 
insertions(+) diff --git a/pype/tools/launcher/models.py b/pype/tools/launcher/models.py index b07321e4d6a..d1742014ef8 100644 --- a/pype/tools/launcher/models.py +++ b/pype/tools/launcher/models.py @@ -1,3 +1,4 @@ +import uuid import copy import logging import collections @@ -125,6 +126,7 @@ def __init__(self, dbcon, parent=None): self.default_icon = qtawesome.icon("fa.cube", color="white") # Cache of available actions self._registered_actions = list() + self.items_by_id = {} def discover(self): """Set up Actions cache. Run this for each new project.""" @@ -136,6 +138,7 @@ def discover(self): actions.extend(app_actions) self._registered_actions = actions + self.items_by_id.clear() def get_application_actions(self): actions = [] @@ -182,6 +185,7 @@ def filter_actions(self): # Validate actions based on compatibility self.clear() + self.items_by_id.clear() self._groups.clear() actions = self.filter_compatible_actions(self._registered_actions) @@ -272,6 +276,9 @@ def filter_actions(self): for order in sorted(items_by_order.keys()): for item in items_by_order[order]: + item_id = str(uuid.uuid4()) + item.setData(item_id, ACTION_ID_ROLE) + self.items_by_id[item_id] = item self.appendRow(item) self.endResetModel() From 745507f8d692c5ddcb8ff5e70be3ed3eff30f043 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 16 Mar 2021 22:40:37 +0100 Subject: [PATCH 034/295] implemented animation itself --- pype/tools/launcher/delegates.py | 58 ++++++++++++++++++++++++++++++++ pype/tools/launcher/widgets.py | 39 ++++++++++++++++++++- 2 files changed, 96 insertions(+), 1 deletion(-) diff --git a/pype/tools/launcher/delegates.py b/pype/tools/launcher/delegates.py index e2eecc6ad5f..9b88903bc14 100644 --- a/pype/tools/launcher/delegates.py +++ b/pype/tools/launcher/delegates.py @@ -1,4 +1,10 @@ +import time from Qt import QtCore, QtWidgets, QtGui +from .constants import ( + ANIMATION_LEN, + ANIMATION_START_ROLE, + ANIMATION_STATE_ROLE +) class 
ActionDelegate(QtWidgets.QStyledItemDelegate): @@ -9,8 +15,60 @@ class ActionDelegate(QtWidgets.QStyledItemDelegate): def __init__(self, group_roles, *args, **kwargs): super(ActionDelegate, self).__init__(*args, **kwargs) self.group_roles = group_roles + self._anim_start_color = QtGui.QColor(178, 255, 246) + self._anim_end_color = QtGui.QColor(5, 44, 50) + + def _draw_animation(self, painter, option, index): + grid_size = option.widget.gridSize() + x_offset = int( + (grid_size.width() / 2) + - (option.rect.width() / 2) + ) + item_x = option.rect.x() - x_offset + rect_offset = grid_size.width() / 20 + size = grid_size.width() - (rect_offset * 2) + anim_rect = QtCore.QRect( + item_x + rect_offset, + option.rect.y() + rect_offset, + size, + size + ) + + painter.save() + + painter.setBrush(QtCore.Qt.transparent) + painter.setRenderHint(QtGui.QPainter.Antialiasing) + + gradient = QtGui.QConicalGradient() + gradient.setCenter(anim_rect.center()) + gradient.setColorAt(0, self._anim_start_color) + gradient.setColorAt(1, self._anim_end_color) + + time_diff = time.time() - index.data(ANIMATION_START_ROLE) + + # Repeat 4 times + part_anim = ANIMATION_LEN / 4 + part_time = time_diff % part_anim + offset = (part_time / part_anim) * 360 + angle = (offset + 90) % 360 + + gradient.setAngle(-angle) + + pen = QtGui.QPen(QtGui.QBrush(gradient), rect_offset) + pen.setCapStyle(QtCore.Qt.RoundCap) + painter.setPen(pen) + painter.drawArc( + anim_rect, + -16 * (angle + 10), + -16 * offset + ) + + painter.restore() def paint(self, painter, option, index): + if index.data(ANIMATION_STATE_ROLE): + self._draw_animation(painter, option, index) + super(ActionDelegate, self).paint(painter, option, index) is_group = False for group_role in self.group_roles: diff --git a/pype/tools/launcher/widgets.py b/pype/tools/launcher/widgets.py index 06c2daef9c6..62545fb9660 100644 --- a/pype/tools/launcher/widgets.py +++ b/pype/tools/launcher/widgets.py @@ -1,4 +1,5 @@ import copy +import time import 
collections from Qt import QtWidgets, QtCore, QtGui from avalon.vendor import qtawesome @@ -124,6 +125,13 @@ def __init__(self, dbcon, parent=None): self.model = model self.view = view + self._animated_items = set() + + animation_timer = QtCore.QTimer() + animation_timer.setInterval(50) + animation_timer.timeout.connect(self._on_animation) + self._animation_timer = animation_timer + # Make view flickable flick = FlickCharm(parent=view) flick.activateOn(view) @@ -141,8 +149,35 @@ def filter_actions(self): def set_row_height(self, rows): self.setMinimumHeight(rows * 75) + def _on_animation(self): + time_now = time.time() + for action_id in tuple(self._animated_items): + item = self.model.items_by_id.get(action_id) + if not item: + self._animated_items.remove(action_id) + continue + + start_time = item.data(ANIMATION_START_ROLE) + if (time_now - start_time) > ANIMATION_LEN: + item.setData(0, ANIMATION_STATE_ROLE) + self._animated_items.remove(action_id) + + if not self._animated_items: + self._animation_timer.stop() + + self.update() + + def _start_animation(self, index): + action_id = index.data(ACTION_ID_ROLE) + item = self.model.items_by_id.get(action_id) + if item: + item.setData(time.time(), ANIMATION_START_ROLE) + item.setData(1, ANIMATION_STATE_ROLE) + self._animated_items.add(action_id) + self._animation_timer.start() + def on_clicked(self, index): - if not index.isValid(): + if not index or not index.isValid(): return is_group = index.data(GROUP_ROLE) @@ -150,6 +185,7 @@ def on_clicked(self, index): if not is_group and not is_variant_group: action = index.data(ACTION_ROLE) self.action_clicked.emit(action) + self._start_animation(index) return actions = index.data(ACTION_ROLE) @@ -213,6 +249,7 @@ def on_clicked(self, index): if result: action = actions_mapping[result] self.action_clicked.emit(action) + self._start_animation(index) class TasksWidget(QtWidgets.QWidget): From 66b82c39cd0afe50dd7cfb86f8476006630bf3eb Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: 
Wed, 17 Mar 2021 09:44:14 +0100 Subject: [PATCH 035/295] changed order of execution --- pype/tools/launcher/widgets.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/tools/launcher/widgets.py b/pype/tools/launcher/widgets.py index 62545fb9660..9a7d8ca772c 100644 --- a/pype/tools/launcher/widgets.py +++ b/pype/tools/launcher/widgets.py @@ -184,8 +184,8 @@ def on_clicked(self, index): is_variant_group = index.data(VARIANT_GROUP_ROLE) if not is_group and not is_variant_group: action = index.data(ACTION_ROLE) - self.action_clicked.emit(action) self._start_animation(index) + self.action_clicked.emit(action) return actions = index.data(ACTION_ROLE) @@ -248,8 +248,8 @@ def on_clicked(self, index): result = menu.exec_(QtGui.QCursor.pos()) if result: action = actions_mapping[result] - self.action_clicked.emit(action) self._start_animation(index) + self.action_clicked.emit(action) class TasksWidget(QtWidgets.QWidget): From 34c8a113da4de302d084ccbfd5f58240692c9f6a Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 10:20:50 +0100 Subject: [PATCH 036/295] length of single orbit is not defined by animation length --- pype/tools/launcher/delegates.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pype/tools/launcher/delegates.py b/pype/tools/launcher/delegates.py index 9b88903bc14..cef0f5e1a2b 100644 --- a/pype/tools/launcher/delegates.py +++ b/pype/tools/launcher/delegates.py @@ -1,7 +1,6 @@ import time from Qt import QtCore, QtWidgets, QtGui from .constants import ( - ANIMATION_LEN, ANIMATION_START_ROLE, ANIMATION_STATE_ROLE ) @@ -47,7 +46,7 @@ def _draw_animation(self, painter, option, index): time_diff = time.time() - index.data(ANIMATION_START_ROLE) # Repeat 4 times - part_anim = ANIMATION_LEN / 4 + part_anim = 2.5 part_time = time_diff % part_anim offset = (part_time / part_anim) * 360 angle = (offset + 90) % 360 From 72150df3131e87c2ec4b4bdfcb7f03972ee0df4d Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 
Mar 2021 12:38:47 +0100 Subject: [PATCH 037/295] Allow more executables --- pype/lib/applications.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/pype/lib/applications.py b/pype/lib/applications.py index d20b01c3d20..a3acba31de6 100644 --- a/pype/lib/applications.py +++ b/pype/lib/applications.py @@ -227,7 +227,7 @@ def __init__(self, executable): self.default_launch_args = default_launch_args def __iter__(self): - yield distutils.spawn.find_executable(self.executable_path) + yield self._realpath() for arg in self.default_launch_args: yield arg @@ -237,10 +237,23 @@ def __str__(self): def as_args(self): return list(self) + def _realpath(self): + """Check if path is valid executable path.""" + # Check for executable in PATH + result = distutils.spawn.find_executable(self.executable_path) + if result is not None: + return result + + # This is not 100% validation but it is better than remove ability to + # launch .bat, .sh or extentionless files + if os.path.exists(self.executable_path): + return self.executable_path + return None + def exists(self): if not self.executable_path: return False - return bool(distutils.spawn.find_executable(self.executable_path)) + return bool(self._realpath()) class Application: From 8a2a40e6ae7918a491555f2fab2fc5a192320615 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 13:22:44 +0100 Subject: [PATCH 038/295] moved task short names outside of attribute group --- .../entities/schemas/projects_schema/schema_main.json | 6 ++++++ .../projects_schema/schemas/schema_anatomy_attributes.json | 6 ------ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pype/settings/entities/schemas/projects_schema/schema_main.json b/pype/settings/entities/schemas/projects_schema/schema_main.json index 828739e0468..81beaa1e396 100644 --- a/pype/settings/entities/schemas/projects_schema/schema_main.json +++ b/pype/settings/entities/schemas/projects_schema/schema_main.json @@ -27,6 +27,12 @@ 
"type": "schema", "name": "schema_anatomy_attributes" }, + { + "type": "dict-modifiable", + "key": "task_short_names", + "label": "Task short names (by Task type)", + "object_type": "text" + }, { "type": "schema", "name": "schema_anatomy_imageio" diff --git a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json index 8925233bb87..f06e5d5dace 100644 --- a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json +++ b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json @@ -69,12 +69,6 @@ { "blender_2.91": "Blender 2.91" }, { "aftereffects_2021": "After Effects 2021" } ] - }, - { - "type": "dict-modifiable", - "key": "task_short_names", - "label": "Task short names (by Task type)", - "object_type": "text" } ] } From 498cd76888c2dffdf62315d93f20ef5969360fa7 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 13:23:22 +0100 Subject: [PATCH 039/295] Changed task shor names to Task types with defined possible keys for each task type --- .../schemas/projects_schema/schema_main.json | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/pype/settings/entities/schemas/projects_schema/schema_main.json b/pype/settings/entities/schemas/projects_schema/schema_main.json index 81beaa1e396..2ac6678d722 100644 --- a/pype/settings/entities/schemas/projects_schema/schema_main.json +++ b/pype/settings/entities/schemas/projects_schema/schema_main.json @@ -29,9 +29,20 @@ }, { "type": "dict-modifiable", - "key": "task_short_names", - "label": "Task short names (by Task type)", - "object_type": "text" + "key": "tasks", + "label": "Task types", + "is_file": true, + "is_group": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "short_name", + "label": "Short name" + } + ] + } }, { "type": "schema", From 
1f8f0286fa5dbc48652e43393fe75f74085c1585 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 13:23:35 +0100 Subject: [PATCH 040/295] modified defaults of task types --- .../defaults/project_anatomy/attributes.json | 18 +------- .../defaults/project_anatomy/tasks.json | 44 +++++++++++++++++++ 2 files changed, 45 insertions(+), 17 deletions(-) create mode 100644 pype/settings/defaults/project_anatomy/tasks.json diff --git a/pype/settings/defaults/project_anatomy/attributes.json b/pype/settings/defaults/project_anatomy/attributes.json index 8f35e415335..fbf02189991 100644 --- a/pype/settings/defaults/project_anatomy/attributes.json +++ b/pype/settings/defaults/project_anatomy/attributes.json @@ -9,21 +9,5 @@ "resolutionWidth": 1920, "resolutionHeight": 1080, "pixelAspect": 1, - "applications": [], - "task_short_names": { - "Generic": "gener", - "Art": "art", - "Modeling": "mdl", - "Texture": "tex", - "Lookdev": "look", - "Rigging": "rig", - "Edit": "edit", - "Layout": "lay", - "Setdress": "dress", - "Animation": "anim", - "FX": "fx", - "Lighting": "lgt", - "Paint": "paint", - "Compositing": "comp" - } + "applications": [] } \ No newline at end of file diff --git a/pype/settings/defaults/project_anatomy/tasks.json b/pype/settings/defaults/project_anatomy/tasks.json new file mode 100644 index 00000000000..74504cc4d7c --- /dev/null +++ b/pype/settings/defaults/project_anatomy/tasks.json @@ -0,0 +1,44 @@ +{ + "Generic": { + "short_name": "gener" + }, + "Art": { + "short_name": "art" + }, + "Modeling": { + "short_name": "mdl" + }, + "Texture": { + "short_name": "tex" + }, + "Lookdev": { + "short_name": "look" + }, + "Rigging": { + "short_name": "rig" + }, + "Edit": { + "short_name": "edit" + }, + "Layout": { + "short_name": "lay" + }, + "Setdress": { + "short_name": "dress" + }, + "Animation": { + "short_name": "anim" + }, + "FX": { + "short_name": "fx" + }, + "Lighting": { + "short_name": "lgt" + }, + "Paint": { + "short_name": "paint" + }, + "Compositing": { 
+ "short_name": "comp" + } +} \ No newline at end of file From c97b6cb39a239edcc2eff995d197806f10a945c9 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 16 Mar 2021 15:32:01 +0100 Subject: [PATCH 041/295] fix python 2 compatibility of setting schemas --- pype/settings/entities/input_entities.py | 5 +++-- pype/settings/entities/item_entities.py | 3 ++- pype/settings/entities/lib.py | 6 +++++- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/pype/settings/entities/input_entities.py b/pype/settings/entities/input_entities.py index 0eaafb6c25a..0c104e3ce76 100644 --- a/pype/settings/entities/input_entities.py +++ b/pype/settings/entities/input_entities.py @@ -4,6 +4,7 @@ from .base_entity import ItemEntity from .lib import ( NOT_SET, + STRING_TYPE, OverrideState ) from .exceptions import ( @@ -421,7 +422,7 @@ class TextEntity(InputEntity): schema_types = ["text"] def _item_initalization(self): - self.valid_value_types = (str, ) + self.valid_value_types = (STRING_TYPE, ) self.value_on_not_set = "" # GUI attributes @@ -438,7 +439,7 @@ def _item_initalization(self): self.valid_value_types = (list, ) self.value_on_not_set = ["", ""] else: - self.valid_value_types = (str, ) + self.valid_value_types = (STRING_TYPE, ) self.value_on_not_set = "" diff --git a/pype/settings/entities/item_entities.py b/pype/settings/entities/item_entities.py index 2f2573721b1..11e43e4fa6e 100644 --- a/pype/settings/entities/item_entities.py +++ b/pype/settings/entities/item_entities.py @@ -1,5 +1,6 @@ from .lib import ( NOT_SET, + STRING_TYPE, OverrideState ) from .exceptions import ( @@ -56,7 +57,7 @@ def _item_initalization(self): # Create child object if not self.multiplatform and not self.multipath: - valid_value_types = (str, ) + valid_value_types = (STRING_TYPE, ) item_schema = { "type": "path-input", "key": self.key, diff --git a/pype/settings/entities/lib.py b/pype/settings/entities/lib.py index 1ca3f9efe04..42164fbce9d 100644 --- a/pype/settings/entities/lib.py +++ 
b/pype/settings/entities/lib.py @@ -8,6 +8,10 @@ SchemaDuplicatedEnvGroupKeys ) +try: + STRING_TYPE = basestring +except Exception: + STRING_TYPE = str WRAPPER_TYPES = ["form", "collapsible-wrap"] NOT_SET = type("NOT_SET", (), {"__bool__": lambda obj: False})() @@ -55,7 +59,7 @@ def _fill_schema_template_data( value, template_data, required_keys, missing_keys ) - elif isinstance(template, str): + elif isinstance(template, STRING_TYPE): # TODO find much better way how to handle filling template data for replacement_string in template_key_pattern.findall(template): key = str(replacement_string[1:-1]) From bf313d4d08d519cfd4f332c2cc7ca33ef47a035e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 16 Mar 2021 15:32:52 +0100 Subject: [PATCH 042/295] system and project schemas have their methods for getting schema data --- pype/settings/entities/lib.py | 10 +++++++++- pype/settings/entities/root_entities.py | 7 ++++--- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/pype/settings/entities/lib.py b/pype/settings/entities/lib.py index 42164fbce9d..7eec3225a1a 100644 --- a/pype/settings/entities/lib.py +++ b/pype/settings/entities/lib.py @@ -237,7 +237,7 @@ def validate_schema(schema_data): validate_environment_groups_uniquenes(schema_data) -def gui_schema(subfolder, main_schema_name): +def get_gui_schema(subfolder, main_schema_name): dirpath = os.path.join( os.path.dirname(__file__), "schemas", @@ -273,6 +273,14 @@ def gui_schema(subfolder, main_schema_name): return main_schema +def get_studio_settings_schema(): + return get_gui_schema("projects_schema", "schema_main") + + +def get_project_settings_schema(): + return get_gui_schema("projects_schema", "schema_main") + + class OverrideStateItem: """Object used as item for `OverrideState` enum. 
diff --git a/pype/settings/entities/root_entities.py b/pype/settings/entities/root_entities.py index 88b9da2428f..6e804cb2864 100644 --- a/pype/settings/entities/root_entities.py +++ b/pype/settings/entities/root_entities.py @@ -10,7 +10,8 @@ NOT_SET, WRAPPER_TYPES, OverrideState, - gui_schema + get_studio_settings_schema, + get_project_settings_schema ) from pype.settings.constants import ( SYSTEM_SETTINGS_KEY, @@ -450,7 +451,7 @@ def __init__( ): if schema_data is None: # Load system schemas - schema_data = gui_schema("system_schema", "schema_main") + schema_data = get_studio_settings_schema() super(SystemSettings, self).__init__(schema_data, reset) @@ -565,7 +566,7 @@ def __init__( if schema_data is None: # Load system schemas - schema_data = gui_schema("projects_schema", "schema_main") + schema_data = get_project_settings_schema() super(ProjectSettings, self).__init__(schema_data, reset) From c1852d8a2aeb4f81773849de9d804fa60a595797 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 16 Mar 2021 16:10:23 +0100 Subject: [PATCH 043/295] fix system settings schema usage --- pype/settings/entities/lib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/settings/entities/lib.py b/pype/settings/entities/lib.py index 7eec3225a1a..ed3d7aed84e 100644 --- a/pype/settings/entities/lib.py +++ b/pype/settings/entities/lib.py @@ -274,7 +274,7 @@ def get_gui_schema(subfolder, main_schema_name): def get_studio_settings_schema(): - return get_gui_schema("projects_schema", "schema_main") + return get_gui_schema("system_schema", "schema_main") def get_project_settings_schema(): From fa6f5092ddc73032050f5f055100cb5b60b6e3bc Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 13:43:06 +0100 Subject: [PATCH 044/295] added tools to anatomy attributes --- .../defaults/project_anatomy/attributes.json | 3 ++- .../schemas/schema_anatomy_attributes.json | 12 ++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git 
a/pype/settings/defaults/project_anatomy/attributes.json b/pype/settings/defaults/project_anatomy/attributes.json index fbf02189991..cc5516fd1f3 100644 --- a/pype/settings/defaults/project_anatomy/attributes.json +++ b/pype/settings/defaults/project_anatomy/attributes.json @@ -9,5 +9,6 @@ "resolutionWidth": 1920, "resolutionHeight": 1080, "pixelAspect": 1, - "applications": [] + "applications": [], + "tools": [] } \ No newline at end of file diff --git a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json index f06e5d5dace..fbe12daa423 100644 --- a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json +++ b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json @@ -69,6 +69,18 @@ { "blender_2.91": "Blender 2.91" }, { "aftereffects_2021": "After Effects 2021" } ] + }, + { + "type": "enum", + "key": "tools", + "label": "Tools", + "multiselection": true, + "enum_items": [ + {"mtoa_3.2": "mtoa_3.2"}, + {"mtoa_3.1": "mtoa_3.1"}, + {"mtoa_3.1.1": "mtoa_3.1.1"}, + {"vray_4300": "vray_4300"} + ] } ] } From 1a5cb96ba3ad43860f67c5ff8ef783b54e2880ef Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 13:43:18 +0100 Subject: [PATCH 045/295] added are current applications to applications enum --- .../schemas/schema_anatomy_attributes.json | 47 ++++++++++++++++--- 1 file changed, 41 insertions(+), 6 deletions(-) diff --git a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json index fbe12daa423..1f746f7e548 100644 --- a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json +++ b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json @@ -62,12 +62,47 @@ "label": "Applications", "multiselection": true, 
"enum_items": [ - { "maya_2020": "Maya 2020" }, - { "nuke_12.2": "Nuke 12.2" }, - { "hiero_12.2": "Hiero 12.2" }, - { "houdini_18": "Houdini 18" }, - { "blender_2.91": "Blender 2.91" }, - { "aftereffects_2021": "After Effects 2021" } + {"harmony_20": "Harmony 20"}, + {"photoshop_2021": "Photoshop 2021"}, + {"photoshop_2020": "Photoshop 2020"}, + {"unreal_4.24": "Unreal Editor 4.24"}, + {"nuke_11.3": "Nuke 11.3"}, + {"nuke_11.2": "Nuke 11.2"}, + {"fusion_9": "Fusion 9"}, + {"celation_Publish": "CelAction 2D Pulblish"}, + {"nuke_12.0": "Nuke 12.0"}, + {"nuke_12.2": "Nuke 12.2"}, + {"maya_2018": "Maya 2018"}, + {"mayabatch_2018": "MayaBatch 2018"}, + {"mayabatch_2019": "MayaBatch 2019"}, + {"aftereffects_2020": "AfterEffects 2020"}, + {"aftereffects_2021": "AfterEffects 2021"}, + {"blender_2.83": "Blender 2.83"}, + {"tvpaint_Animation 11 (64bits)": "TVPaint 11 (64bits)"}, + {"celation_Local": "CelAction 2D Local"}, + {"tvpaint_Animation 11 (32bits)": "TVPaint 11 (32bits)"}, + {"harmony_17": "Harmony 17"}, + {"resolve_16": "Resolve 16"}, + {"fusion_16": "Fusion 16"}, + {"maya_2019": "Maya 2019"}, + {"djvview_1.1": "DJV View 1.1"}, + {"nukestudio_12.0": "Nuke Studio 12.0"}, + {"nukestudio_12.2": "Nuke Studio 12.2"}, + {"hiero_12.2": "Hiero 12.2"}, + {"hiero_12.0": "Hiero 12.0"}, + {"nukestudio_11.3": "Nuke Studio 11.3"}, + {"nukestudio_11.2": "Nuke Studio 11.2"}, + {"houdini_18": "Houdini 18"}, + {"mayabatch_2020": "MayaBatch 2020"}, + {"hiero_11.2": "Hiero 11.2"}, + {"hiero_11.3": "Hiero 11.3"}, + {"houdini_17": "Houdini 17"}, + {"blender_2.90": "Blender 2.90"}, + {"nukex_12.0": "Nuke X 12.0"}, + {"maya_2020": "Maya 2020"}, + {"nukex_12.2": "Nuke X 12.2"}, + {"nukex_11.2": "Nuke X 11.2"}, + {"nukex_11.3": "Nuke X 11.3"} ] }, { From 721314d744fcf2e50d656721bf21005936bb593b Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 13:51:10 +0100 Subject: [PATCH 046/295] fps and pixel aspect can be decimal --- .../schemas/schema_anatomy_attributes.json | 8 ++++++-- 
1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json index 1f746f7e548..adda6b26f59 100644 --- a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json +++ b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json @@ -9,7 +9,9 @@ { "type": "number", "key": "fps", - "label": "Frame Rate" + "label": "Frame Rate", + "decimal": 2, + "minimum": 0 }, { "type": "number", @@ -54,7 +56,9 @@ { "type": "number", "key": "pixelAspect", - "label": "Pixel Aspect Ratio" + "label": "Pixel Aspect Ratio", + "decimal": 2, + "minimum": 0 }, { "type": "enum", From 6859207eb340572ce5df713de668545b33c436c7 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 14:03:32 +0100 Subject: [PATCH 047/295] Others in anatomy templates is dictionary that can store different template groups --- .../schemas/schema_anatomy_templates.json | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json index 05718e0bc97..d6187575c09 100644 --- a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json +++ b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json @@ -27,6 +27,9 @@ "key": "frame", "label": "Frame" }, + { + "type": "separator" + }, { "type": "dict", "key": "work", @@ -130,7 +133,11 @@ "type": "dict-modifiable", "key": "other", "label": "Other", - "object_type": "text" + "collapsible_key": true, + "object_type": { + "type": "dict-modifiable", + "object_type": "text" + } } ] } From 997a34800bb21329b512c11c5e975aaac522bc50 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 14:18:08 +0100 Subject: 
[PATCH 048/295] changed key `other` to `others` --- pype/settings/defaults/project_anatomy/templates.json | 2 +- .../projects_schema/schemas/schema_anatomy_templates.json | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pype/settings/defaults/project_anatomy/templates.json b/pype/settings/defaults/project_anatomy/templates.json index 397f7257fde..dcb21d72e17 100644 --- a/pype/settings/defaults/project_anatomy/templates.json +++ b/pype/settings/defaults/project_anatomy/templates.json @@ -25,5 +25,5 @@ "path": "{@folder}/{@file}" }, "delivery": {}, - "other": {} + "others": {} } \ No newline at end of file diff --git a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json index d6187575c09..0b5339f9e62 100644 --- a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json +++ b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json @@ -131,8 +131,8 @@ }, { "type": "dict-modifiable", - "key": "other", - "label": "Other", + "key": "others", + "label": "Others", "collapsible_key": true, "object_type": { "type": "dict-modifiable", From e4138b82b7e6684ec722c4766a6643ed21e7e3da Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 14:18:33 +0100 Subject: [PATCH 049/295] move defaults into special dictionary with key "defaults" --- .../defaults/project_anatomy/templates.json | 10 +++-- .../schemas/schema_anatomy_templates.json | 42 +++++++++++-------- 2 files changed, 30 insertions(+), 22 deletions(-) diff --git a/pype/settings/defaults/project_anatomy/templates.json b/pype/settings/defaults/project_anatomy/templates.json index dcb21d72e17..862b7328463 100644 --- a/pype/settings/defaults/project_anatomy/templates.json +++ b/pype/settings/defaults/project_anatomy/templates.json @@ -1,8 +1,10 @@ { - "version_padding": 3, - "version": "v{version:0>{@version_padding}}", - 
"frame_padding": 4, - "frame": "{frame:0>{@frame_padding}}", + "defaults": { + "version_padding": 3, + "version": "v{version:0>{@version_padding}}", + "frame_padding": 4, + "frame": "{frame:0>{@frame_padding}}" + }, "work": { "folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/work/{task}", "file": "{project[code]}_{asset}_{task}_{@version}<_{comment}>.{ext}", diff --git a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json index 0b5339f9e62..46b48036a7f 100644 --- a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json +++ b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json @@ -8,24 +8,30 @@ "is_group": true, "children": [ { - "type": "number", - "key": "version_padding", - "label": "Version Padding" - }, - { - "type": "text", - "key": "version", - "label": "Version" - }, - { - "type": "number", - "key": "frame_padding", - "label": "Frame Padding" - }, - { - "type": "text", - "key": "frame", - "label": "Frame" + "type": "dict", + "key": "defaults", + "children": [ + { + "type": "number", + "key": "version_padding", + "label": "Version Padding" + }, + { + "type": "text", + "key": "version", + "label": "Version" + }, + { + "type": "number", + "key": "frame_padding", + "label": "Frame Padding" + }, + { + "type": "text", + "key": "frame", + "label": "Frame" + } + ] }, { "type": "separator" From f3628c13fa075aa8e5ef71a950d6c7f41d1efd34 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 14:18:51 +0100 Subject: [PATCH 050/295] anatomy can handle "defaults" key --- pype/lib/anatomy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/lib/anatomy.py b/pype/lib/anatomy.py index 67b8c01a562..7062e1abdca 100644 --- a/pype/lib/anatomy.py +++ b/pype/lib/anatomy.py @@ -727,7 +727,7 @@ def solve_template_inner_links(cls, templates): key_2: "value_2" key_4: 
"value_3/value_2" """ - default_key_values = {} + default_key_values = templates.pop("defaults", {}) for key, value in tuple(templates.items()): if isinstance(value, dict): continue From 6d29a6eda09dbffaf85925321360ff481220cd18 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 14:20:34 +0100 Subject: [PATCH 051/295] added min and max to frame and version padding --- .../projects_schema/schemas/schema_anatomy_templates.json | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json index 46b48036a7f..8410ec48f47 100644 --- a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json +++ b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json @@ -14,7 +14,9 @@ { "type": "number", "key": "version_padding", - "label": "Version Padding" + "label": "Version Padding", + "minimum": 1, + "maximum": 10 }, { "type": "text", @@ -24,7 +26,9 @@ { "type": "number", "key": "frame_padding", - "label": "Frame Padding" + "label": "Frame Padding", + "minimum": 1, + "maximum": 10 }, { "type": "text", From 5c1d9655c6e55b49948744da9aa6e348c5c95899 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 14:23:16 +0100 Subject: [PATCH 052/295] anatomy can handle others templates --- pype/lib/anatomy.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/pype/lib/anatomy.py b/pype/lib/anatomy.py index 7062e1abdca..4e7643dbbbf 100644 --- a/pype/lib/anatomy.py +++ b/pype/lib/anatomy.py @@ -740,6 +740,19 @@ def solve_template_inner_links(cls, templates): key_values.update(sub_value) keys_by_subkey[sub_key] = cls.prepare_inner_keys(key_values) + other_templates = templates.get("others") or {} + for sub_key, sub_value in other_templates.items(): + if sub_key in keys_by_subkey: + log.warning(( + "Key \"{}\" is duplicated 
in others. Skipping." + ).format(sub_key)) + continue + + key_values = {} + key_values.update(default_key_values) + key_values.update(sub_value) + keys_by_subkey[sub_key] = cls.prepare_inner_keys(key_values) + default_keys_by_subkeys = cls.prepare_inner_keys(default_key_values) for key, value in default_keys_by_subkeys.items(): From 0471fbff60cd6b03a069ac64f237d3dac673ff7e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 14:26:59 +0100 Subject: [PATCH 053/295] changed tools to list --- .../schemas/schema_anatomy_attributes.json | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json index adda6b26f59..d5d36e79a9b 100644 --- a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json +++ b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json @@ -110,16 +110,10 @@ ] }, { - "type": "enum", + "type": "list", "key": "tools", "label": "Tools", - "multiselection": true, - "enum_items": [ - {"mtoa_3.2": "mtoa_3.2"}, - {"mtoa_3.1": "mtoa_3.1"}, - {"mtoa_3.1.1": "mtoa_3.1.1"}, - {"vray_4300": "vray_4300"} - ] + "object_type": "text" } ] } From 469bcf37d7a5ce74464fad9c30580e1b3c617e68 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 14:50:10 +0100 Subject: [PATCH 054/295] moved pynput threads to separate file --- pype/modules/idle_manager/idle_logic.py | 24 +++++++++++++++++++++++ pype/modules/idle_manager/idle_manager.py | 24 ----------------------- 2 files changed, 24 insertions(+), 24 deletions(-) create mode 100644 pype/modules/idle_manager/idle_logic.py diff --git a/pype/modules/idle_manager/idle_logic.py b/pype/modules/idle_manager/idle_logic.py new file mode 100644 index 00000000000..ab3f6790e66 --- /dev/null +++ b/pype/modules/idle_manager/idle_logic.py @@ -0,0 +1,24 @@ +from pynput 
import mouse, keyboard + + +class MouseThread(mouse.Listener): + """Listens user's mouse movement.""" + + def __init__(self, callback): + super(MouseThread, self).__init__(on_move=self.on_move) + self.callback = callback + + def on_move(self, posx, posy): + self.callback() + + +class KeyboardThread(keyboard.Listener): + """Listens user's keyboard input.""" + + def __init__(self, callback): + super(KeyboardThread, self).__init__(on_press=self.on_press) + + self.callback = callback + + def on_press(self, key): + self.callback() diff --git a/pype/modules/idle_manager/idle_manager.py b/pype/modules/idle_manager/idle_manager.py index fa6d70d229e..3790d503ff4 100644 --- a/pype/modules/idle_manager/idle_manager.py +++ b/pype/modules/idle_manager/idle_manager.py @@ -4,7 +4,6 @@ from abc import ABCMeta, abstractmethod import six -from pynput import mouse, keyboard from pype.lib import PypeLogger from pype.modules import PypeModule, ITrayService @@ -162,26 +161,3 @@ def run(self): pass self.on_stop() - - -class MouseThread(mouse.Listener): - """Listens user's mouse movement.""" - - def __init__(self, callback): - super(MouseThread, self).__init__(on_move=self.on_move) - self.callback = callback - - def on_move(self, posx, posy): - self.callback() - - -class KeyboardThread(keyboard.Listener): - """Listens user's keyboard input.""" - - def __init__(self, callback): - super(KeyboardThread, self).__init__(on_press=self.on_press) - - self.callback = callback - - def on_press(self, key): - self.callback() From 6d09da37c07799c5bed7818bf8ee0668c5a535ad Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 14:50:24 +0100 Subject: [PATCH 055/295] import pynput threads only when needed --- pype/modules/idle_manager/idle_manager.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pype/modules/idle_manager/idle_manager.py b/pype/modules/idle_manager/idle_manager.py index 3790d503ff4..81e03c96df6 100644 --- a/pype/modules/idle_manager/idle_manager.py 
+++ b/pype/modules/idle_manager/idle_manager.py @@ -119,12 +119,18 @@ def on_stop(self): self.log.info("IdleManagerThread has stopped") self.module.on_thread_stop() + def _create_threads(self): + from .idle_logic import MouseThread, KeyboardThread + + thread_mouse = MouseThread(self.reset_time) + thread_keyboard = KeyboardThread(self.reset_time) + return thread_mouse, thread_keyboard + def run(self): self.log.info("IdleManagerThread has started") self.is_running = True - thread_mouse = MouseThread(self.reset_time) + thread_mouse, thread_keyboard = self._create_threads() thread_mouse.start() - thread_keyboard = KeyboardThread(self.reset_time) thread_keyboard.start() try: while self.is_running: From 345e5971145a39b2b91cac1c5165361666ba3b03 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 14:50:33 +0100 Subject: [PATCH 056/295] minor change --- pype/modules/idle_manager/idle_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/modules/idle_manager/idle_manager.py b/pype/modules/idle_manager/idle_manager.py index 81e03c96df6..25309e94436 100644 --- a/pype/modules/idle_manager/idle_manager.py +++ b/pype/modules/idle_manager/idle_manager.py @@ -98,7 +98,7 @@ def on_thread_stop(self): class IdleManagerThread(threading.Thread): def __init__(self, module, *args, **kwargs): super(IdleManagerThread, self).__init__(*args, **kwargs) - self.log = PypeLogger().get_logger(self.__class__.__name__) + self.log = PypeLogger.get_logger(self.__class__.__name__) self.module = module self.threads = [] self.is_running = False From d5a3e92f851e4ac19300dc1414b9922a18fe0707 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 14:51:30 +0100 Subject: [PATCH 057/295] renamed file `idle_manager` to `idle_module` --- pype/modules/idle_manager/__init__.py | 2 +- pype/modules/idle_manager/{idle_manager.py => idle_module.py} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename pype/modules/idle_manager/{idle_manager.py => idle_module.py} 
(100%) diff --git a/pype/modules/idle_manager/__init__.py b/pype/modules/idle_manager/__init__.py index 4bc33c87c15..651f360c505 100644 --- a/pype/modules/idle_manager/__init__.py +++ b/pype/modules/idle_manager/__init__.py @@ -1,4 +1,4 @@ -from .idle_manager import ( +from .idle_module import ( IdleManager, IIdleManager ) diff --git a/pype/modules/idle_manager/idle_manager.py b/pype/modules/idle_manager/idle_module.py similarity index 100% rename from pype/modules/idle_manager/idle_manager.py rename to pype/modules/idle_manager/idle_module.py From b9dd3edbbdb75e47bef9b66a5146e2d9e301a82f Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 17 Mar 2021 16:25:19 +0100 Subject: [PATCH 058/295] feat(nuke): menu shortcut from settings --- pype/hosts/nuke/api/menu.py | 23 +++++++++- .../defaults/project_settings/nuke.json | 9 ++++ .../projects_schema/schema_project_nuke.json | 42 +++++++++++++++++++ 3 files changed, 73 insertions(+), 1 deletion(-) diff --git a/pype/hosts/nuke/api/menu.py b/pype/hosts/nuke/api/menu.py index 8161b9024cc..9ff1dc251ad 100644 --- a/pype/hosts/nuke/api/menu.py +++ b/pype/hosts/nuke/api/menu.py @@ -2,7 +2,7 @@ from avalon.api import Session from .lib import WorkfileSettings -from pype.api import Logger, BuildWorkfile +from pype.api import Logger, BuildWorkfile, get_current_project_settings log = Logger().get_logger(__name__) @@ -66,6 +66,9 @@ def install(): ) log.debug("Adding menu item: {}".format(name)) + # adding shortcuts + add_shortcuts_from_presets() + def uninstall(): @@ -75,3 +78,21 @@ def uninstall(): for item in menu.items(): log.info("Removing menu item: {}".format(item.name())) menu.removeItem(item.name()) + + +def add_shortcuts_from_presets(): + menubar = nuke.menu("Nuke") + nuke_presets = get_current_project_settings()["nuke"] + + if nuke_presets.get("menu"): + for menu_name, menuitems in nuke_presets.get("menu").items(): + menu = menubar.findItem(menu_name) + for mitem_name, shortcut in menuitems.items(): + log.info("Adding 
Shortcut `{}` to `{}`".format( + shortcut, mitem_name + )) + try: + menuitem = menu.findItem(mitem_name) + menuitem.setShortcut(shortcut) + except AttributeError as e: + log.error(e) diff --git a/pype/settings/defaults/project_settings/nuke.json b/pype/settings/defaults/project_settings/nuke.json index 5821584932c..d209a671064 100644 --- a/pype/settings/defaults/project_settings/nuke.json +++ b/pype/settings/defaults/project_settings/nuke.json @@ -1,4 +1,13 @@ { + "menu": { + "Pype": { + "Create...": "ctrl+shift+alt+c", + "Publish...": "ctrl+alt+p", + "Load...": "ctrl+alt+l", + "Manage...": "ctrl+alt+m", + "Build Workfile": "ctrl+alt+b" + } + }, "create": { "CreateWriteRender": { "fpath_template": "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}" diff --git a/pype/settings/entities/schemas/projects_schema/schema_project_nuke.json b/pype/settings/entities/schemas/projects_schema/schema_project_nuke.json index 220d56a3062..90e068ba339 100644 --- a/pype/settings/entities/schemas/projects_schema/schema_project_nuke.json +++ b/pype/settings/entities/schemas/projects_schema/schema_project_nuke.json @@ -5,6 +5,48 @@ "label": "Nuke", "is_file": true, "children": [ + { + "type": "dict", + "collapsible": true, + "key": "menu", + "label": "Menu shortcuts", + "children": [ + { + "type": "dict", + "collapsible": false, + "key": "Pype", + "label": "Pype", + "is_group": true, + "children": [ + { + "type": "text", + "key": "Create...", + "label": "Create..." + }, + { + "type": "text", + "key": "Publish...", + "label": "Publish..." + }, + { + "type": "text", + "key": "Load...", + "label": "Load..." + }, + { + "type": "text", + "key": "Manage...", + "label": "Manage..." 
+ }, + { + "type": "text", + "key": "Build Workfile", + "label": "Build Workfile" + } + ] + } + ] + }, { "type": "dict", "collapsible": true, From 4600d27c1f0d11efc2f950d6d830dde4cb17684f Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 16:44:01 +0100 Subject: [PATCH 059/295] added filecmp module to required modules --- setup.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index b9ecc580ebf..6a86f0f97ac 100644 --- a/setup.py +++ b/setup.py @@ -43,7 +43,9 @@ "Qt", "speedcopy", "googleapiclient", - "httplib2" + "httplib2", + # Harmony implementation + "filecmp" ] includes = [] From d233231a448d67b2a76b2dac7fe997936e4dd935 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 16:57:06 +0100 Subject: [PATCH 060/295] added SchemaError --- pype/settings/entities/exceptions.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/pype/settings/entities/exceptions.py b/pype/settings/entities/exceptions.py index 951cd072437..ae1fd388a0a 100644 --- a/pype/settings/entities/exceptions.py +++ b/pype/settings/entities/exceptions.py @@ -28,7 +28,13 @@ def __init__(self, valid_types, invalid_type, path): super(InvalidValueType, self).__init__(msg) -class SchemaMissingFileInfo(Exception): + + +class SchemaError(Exception): + pass + + +class SchemaMissingFileInfo(SchemaError): def __init__(self, invalid): full_path_keys = [] for item in invalid: @@ -41,7 +47,7 @@ def __init__(self, invalid): super(SchemaMissingFileInfo, self).__init__(msg) -class SchemeGroupHierarchyBug(Exception): +class SchemeGroupHierarchyBug(SchemaError): def __init__(self, entity_path): msg = ( "Items with attribute \"is_group\" can't have another item with" @@ -50,7 +56,7 @@ def __init__(self, entity_path): super(SchemeGroupHierarchyBug, self).__init__(msg) -class SchemaDuplicatedKeys(Exception): +class SchemaDuplicatedKeys(SchemaError): def __init__(self, entity_path, key): msg = ( "Schema item contain 
duplicated key \"{}\" in" @@ -59,7 +65,7 @@ def __init__(self, entity_path, key): super(SchemaDuplicatedKeys, self).__init__(msg) -class SchemaDuplicatedEnvGroupKeys(Exception): +class SchemaDuplicatedEnvGroupKeys(SchemaError): def __init__(self, invalid): items = [] for key_path, keys in invalid.items(): @@ -74,7 +80,7 @@ def __init__(self, invalid): super(SchemaDuplicatedEnvGroupKeys, self).__init__(msg) -class SchemaTemplateMissingKeys(Exception): +class SchemaTemplateMissingKeys(SchemaError): def __init__(self, missing_keys, required_keys, template_name=None): self.missing_keys = missing_keys self.required_keys = required_keys From 512174256e08521a82295ec483dc9f67bb0ca6f6 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 16:57:23 +0100 Subject: [PATCH 061/295] added exception for modifying required key --- pype/settings/entities/exceptions.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pype/settings/entities/exceptions.py b/pype/settings/entities/exceptions.py index ae1fd388a0a..7080a9b187e 100644 --- a/pype/settings/entities/exceptions.py +++ b/pype/settings/entities/exceptions.py @@ -28,6 +28,10 @@ def __init__(self, valid_types, invalid_type, path): super(InvalidValueType, self).__init__(msg) +class RequiredKeyModified(KeyError): + def __init__(self, entity_path, key): + msg = "{} - Tried to modify required key \"{}\"." 
+ super(RequiredKeyModified, self).__init__(msg.format(entity_path, key)) class SchemaError(Exception): From 293d27abfff6f410a8c113f67af2574585bad652 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 16:57:57 +0100 Subject: [PATCH 062/295] raise an ex exception if required key is modified --- pype/settings/entities/dict_mutable_keys_entity.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pype/settings/entities/dict_mutable_keys_entity.py b/pype/settings/entities/dict_mutable_keys_entity.py index 2fd2b873110..f2058a4231b 100644 --- a/pype/settings/entities/dict_mutable_keys_entity.py +++ b/pype/settings/entities/dict_mutable_keys_entity.py @@ -7,7 +7,8 @@ from . import EndpointEntity from .exceptions import ( DefaultsNotDefined, - StudioDefaultsNotDefined + StudioDefaultsNotDefined, + RequiredKeyModified ) from pype.settings.constants import ( METADATA_KEYS, @@ -51,6 +52,8 @@ def __contains__(self, key): return key in self.children_by_key def pop(self, key, *args, **kwargs): + if key in self.required_keys: + raise RequiredKeyModified(self.path, key) result = self.children_by_key.pop(key, *args, **kwargs) self.on_change() return result @@ -93,6 +96,9 @@ def set_key_value(self, key, value): child_obj.set(value) def change_key(self, old_key, new_key): + if old_key in self.required_keys: + raise RequiredKeyModified(self.path, old_key) + if new_key == old_key: return self.children_by_key[new_key] = self.children_by_key.pop(old_key) From fd1dbac6aadc2260b2c6e102edcc51d37eb097b9 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 16:58:11 +0100 Subject: [PATCH 063/295] make sure that required key is always set --- pype/settings/entities/dict_mutable_keys_entity.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pype/settings/entities/dict_mutable_keys_entity.py b/pype/settings/entities/dict_mutable_keys_entity.py index f2058a4231b..c8acb748cd3 100644 --- a/pype/settings/entities/dict_mutable_keys_entity.py +++ 
b/pype/settings/entities/dict_mutable_keys_entity.py @@ -315,6 +315,10 @@ def set_override_state(self, state): for key in tuple(self.children_by_key.keys()): self.children_by_key.pop(key) + for required_key in self.required_keys: + if required_key not in new_value: + new_value[required_key] = NOT_SET + # Create new children children_label_by_id = {} metadata_labels = metadata.get(M_DYNAMIC_KEY_LABEL) or {} From 64fcec4d4a9eafe1370f05cfe58ca81351642e68 Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Wed, 17 Mar 2021 16:58:24 +0100 Subject: [PATCH 064/295] fix README.md and pype root determination when running from sources --- README.md | 13 +------------ start.py | 5 ++++- test_localsystem.txt | 1 - 3 files changed, 5 insertions(+), 14 deletions(-) delete mode 100644 test_localsystem.txt diff --git a/README.md b/README.md index c43b8c0d4b0..456655bfb9c 100644 --- a/README.md +++ b/README.md @@ -208,21 +208,10 @@ install it to user data directory (on Windows to `%LOCALAPPDATA%\pypeclub\pype`, ### From sources Pype can be run directly from sources by activating virtual environment: -**On Windows:** -```powershell -.\venv\Scripts\Activate.ps1 -``` -and running: -```powershell -python start.py tray -``` -**On macOS/Linux:** ```sh -source ./venv/bin/activate -python start.py tray +poetry run python start.py tray ``` - This will use current Pype version with sources. You can override this with `--use-version=x.x.x` and then Pype will try to find locally installed specified version (present in user data directory). 
diff --git a/start.py b/start.py index 875a69f6044..8d60a14403d 100644 --- a/start.py +++ b/start.py @@ -111,6 +111,7 @@ paths.append(frozen_libs) os.environ["PYTHONPATH"] = os.pathsep.join(paths) +import igniter # noqa: E402 from igniter import BootstrapRepos # noqa: E402 from igniter.tools import get_pype_path_from_db # noqa from igniter.bootstrap_repos import PypeVersion # noqa: E402 @@ -469,7 +470,9 @@ def _bootstrap_from_code(use_version): assert local_version else: pype_root = os.path.normpath( - os.path.dirname(os.path.realpath(__file__))) + os.path.dirname( + os.path.dirname( + os.path.realpath(igniter.__file__)))) # get current version of Pype local_version = bootstrap.get_local_live_version() diff --git a/test_localsystem.txt b/test_localsystem.txt deleted file mode 100644 index dde7986af89..00000000000 --- a/test_localsystem.txt +++ /dev/null @@ -1 +0,0 @@ -I have run From 3f13db2ef2dc5246e77b66fa04fa9d9dfc1bc19e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 17:14:41 +0100 Subject: [PATCH 065/295] view can show required keys --- .../settings/widgets/dict_mutable_widget.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/pype/tools/settings/settings/widgets/dict_mutable_widget.py b/pype/tools/settings/settings/widgets/dict_mutable_widget.py index b27e0e492b4..53b2d1ddd25 100644 --- a/pype/tools/settings/settings/widgets/dict_mutable_widget.py +++ b/pype/tools/settings/settings/widgets/dict_mutable_widget.py @@ -827,10 +827,25 @@ def set_entity_value(self): while self.input_fields: self.remove_row(self.input_fields[0]) - for key, child_entity in self.entity.items(): + keys_order = list(self.entity.required_keys) + last_required = None + if keys_order: + last_required = keys_order[-1] + for key in self.entity.keys(): + if key in keys_order: + continue + keys_order.append(key) + + for key in keys_order: + child_entity = self.entity[key] input_field = self.add_widget_for_child(child_entity) 
input_field.origin_key = key - input_field.set_key(key) + if key in self.entity.required_keys: + input_field.set_as_required(key) + if key == last_required: + input_field.set_as_last_required() + else: + input_field.set_key(key) if self.entity.collapsible_key: label = self.entity.get_child_label(child_entity) input_field.origin_key_label = label From 3d025d7b788feec53c8c53d0df746cb97735d766 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 17:17:33 +0100 Subject: [PATCH 066/295] added required keys to example schema --- .../entities/schemas/system_schema/example_schema.json | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pype/settings/entities/schemas/system_schema/example_schema.json b/pype/settings/entities/schemas/system_schema/example_schema.json index 6e7a47d1bfa..48a21cc0c66 100644 --- a/pype/settings/entities/schemas/system_schema/example_schema.json +++ b/pype/settings/entities/schemas/system_schema/example_schema.json @@ -141,6 +141,16 @@ "maximum": 100 } }, + { + "type": "dict-modifiable", + "key": "modifiable_dict_with_required_keys", + "label": "Modifiable dict with required keys", + "required_keys": [ + "key_1", + "key_2" + ], + "object_type": "text" + }, { "type": "list-strict", "key": "strict_list_labels_horizontal", From bf7e6fae43b77ae55af7d05139a64dde188622c1 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 17 Mar 2021 17:24:28 +0100 Subject: [PATCH 067/295] fixed default values check --- pype/settings/entities/dict_mutable_keys_entity.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pype/settings/entities/dict_mutable_keys_entity.py b/pype/settings/entities/dict_mutable_keys_entity.py index c8acb748cd3..8c9b5e03b13 100644 --- a/pype/settings/entities/dict_mutable_keys_entity.py +++ b/pype/settings/entities/dict_mutable_keys_entity.py @@ -451,7 +451,13 @@ def _prepare_value(self, value): def update_default_value(self, value): value = self._check_update_value(value, "default") - 
self.has_default_value = value is not NOT_SET + has_default_value = value is not NOT_SET + if has_default_value: + for required_key in self.required_keys: + if required_key not in value: + has_default_value = False + break + self.has_default_value = has_default_value value, metadata = self._prepare_value(value) self._default_value = value self._default_metadata = metadata From 2a1a7180a895e68a02f5a2321a67b74038567d28 Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Wed, 17 Mar 2021 18:09:13 +0100 Subject: [PATCH 068/295] better redshift support in expected files --- pype/hosts/maya/api/expected_files.py | 42 +++++++++++++++++++-------- 1 file changed, 30 insertions(+), 12 deletions(-) diff --git a/pype/hosts/maya/api/expected_files.py b/pype/hosts/maya/api/expected_files.py index 41463b53f80..9a8c8d6e0f8 100644 --- a/pype/hosts/maya/api/expected_files.py +++ b/pype/hosts/maya/api/expected_files.py @@ -58,7 +58,8 @@ ) R_AOV_TOKEN = re.compile(r".*%a.*|.*.*|.*.*", re.IGNORECASE) R_SUBSTITUTE_AOV_TOKEN = re.compile(r"%a||", re.IGNORECASE) -R_REMOVE_AOV_TOKEN = re.compile(r"_%a|_|_", re.IGNORECASE) +R_REMOVE_AOV_TOKEN = re.compile( + r"_%a|\.%a|_|\.|_|\.", re.IGNORECASE) # to remove unused renderman tokens R_CLEAN_FRAME_TOKEN = re.compile(r"\.?\.?", re.IGNORECASE) R_CLEAN_EXT_TOKEN = re.compile(r"\.?\.?", re.IGNORECASE) @@ -246,7 +247,8 @@ def _get_layer_data(self): } return scene_data - def _generate_single_file_sequence(self, layer_data): + def _generate_single_file_sequence( + self, layer_data, force_aov_name=None): expected_files = [] for cam in layer_data["cameras"]: file_prefix = layer_data["filePrefix"] @@ -256,7 +258,9 @@ def _generate_single_file_sequence(self, layer_data): (R_SUBSTITUTE_CAMERA_TOKEN, self.sanitize_camera_name(cam)), # this is required to remove unfilled aov token, for example # in Redshift - (R_REMOVE_AOV_TOKEN, ""), + (R_REMOVE_AOV_TOKEN, "") if not force_aov_name \ + else (R_SUBSTITUTE_AOV_TOKEN, force_aov_name), + (R_CLEAN_FRAME_TOKEN, 
""), (R_CLEAN_EXT_TOKEN, ""), ) @@ -709,7 +713,7 @@ def get_renderer_prefix(self): """ prefix = super(ExpectedFilesRedshift, self).get_renderer_prefix() - prefix = "{}_".format(prefix) + prefix = "{}.".format(prefix) return prefix def get_files(self): @@ -721,15 +725,7 @@ def get_files(self): """ expected_files = super(ExpectedFilesRedshift, self).get_files() - - # we need to add one sequence for plain beauty if AOVs are enabled. - # as redshift output beauty without 'beauty' in filename. - layer_data = self._get_layer_data() - if layer_data.get("enabledAOVs"): - expected_files[0][u"beauty"] = self._generate_single_file_sequence( - layer_data - ) # Redshift doesn't merge Cryptomatte AOV to final exr. We need to check # for such condition and add it to list of expected files. @@ -741,6 +737,28 @@ def get_files(self): {aov_name: self._generate_single_file_sequence(layer_data)} ) + if layer_data.get("enabledAOVs"): + # because if Beauty is added manually, it will be rendered as + # 'Beauty_other' in file name and "standard" beauty will have + # 'Beauty' in its name. When disabled, standard output will be + # without `Beauty`. 
+ if expected_files[0].get(u"Beauty"): + expected_files[0][u"Beauty_other"] = expected_files[0].pop( + u"Beauty") + new_list = [ + seq.replace(".Beauty", ".Beauty_other") + for seq in expected_files[0][u"Beauty_other"] + ] + + expected_files[0][u"Beauty_other"] = new_list + expected_files[0][u"Beauty"] = self._generate_single_file_sequence( # noqa: E501 + layer_data, force_aov_name="Beauty" + ) + else: + expected_files[0][u"Beauty"] = self._generate_single_file_sequence( # noqa: E501 + layer_data + ) + return expected_files def get_aovs(self): From f66c08c05ef5d8dddcd90d5396f033e44f9e7f83 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 09:38:28 +0100 Subject: [PATCH 069/295] modified imports in actions and events --- .../actions/action_store_thumbnails_to_avalon.py | 2 +- .../ftrack/events/action_sync_to_avalon.py | 2 +- .../ftrack/events/event_first_version_status.py | 2 +- .../ftrack/events/event_next_task_update.py | 2 +- .../events/event_push_frame_values_to_task.py | 2 +- .../ftrack/events/event_sync_to_avalon.py | 16 ++++++++++------ .../ftrack/events/event_task_to_parent_status.py | 2 +- .../events/event_task_to_version_status.py | 2 +- .../ftrack/events/event_thumbnail_updates.py | 2 +- .../ftrack/events/event_user_assigment.py | 2 +- .../events/event_version_to_task_statuses.py | 2 +- 11 files changed, 20 insertions(+), 16 deletions(-) diff --git a/pype/modules/ftrack/actions/action_store_thumbnails_to_avalon.py b/pype/modules/ftrack/actions/action_store_thumbnails_to_avalon.py index 84f857e37a1..4fbea6b8a5b 100644 --- a/pype/modules/ftrack/actions/action_store_thumbnails_to_avalon.py +++ b/pype/modules/ftrack/actions/action_store_thumbnails_to_avalon.py @@ -1,7 +1,7 @@ import os -import requests import errno import json +import requests from bson.objectid import ObjectId from pype.modules.ftrack.lib import BaseAction, statics_icon diff --git a/pype/modules/ftrack/events/action_sync_to_avalon.py 
b/pype/modules/ftrack/events/action_sync_to_avalon.py index 486b977f04d..6bec1fb259c 100644 --- a/pype/modules/ftrack/events/action_sync_to_avalon.py +++ b/pype/modules/ftrack/events/action_sync_to_avalon.py @@ -1,7 +1,7 @@ import time import traceback -from pype.modules.ftrack import ServerAction +from pype.modules.ftrack.lib import ServerAction from pype.modules.ftrack.lib.avalon_sync import SyncEntitiesFactory diff --git a/pype/modules/ftrack/events/event_first_version_status.py b/pype/modules/ftrack/events/event_first_version_status.py index cfca047c097..440b455edf4 100644 --- a/pype/modules/ftrack/events/event_first_version_status.py +++ b/pype/modules/ftrack/events/event_first_version_status.py @@ -1,4 +1,4 @@ -from pype.modules.ftrack import BaseEvent +from pype.modules.ftrack.lib import BaseEvent class FirstVersionStatus(BaseEvent): diff --git a/pype/modules/ftrack/events/event_next_task_update.py b/pype/modules/ftrack/events/event_next_task_update.py index 284cff886bd..e546f00c777 100644 --- a/pype/modules/ftrack/events/event_next_task_update.py +++ b/pype/modules/ftrack/events/event_next_task_update.py @@ -1,5 +1,5 @@ import collections -from pype.modules.ftrack import BaseEvent +from pype.modules.ftrack.lib import BaseEvent class NextTaskUpdate(BaseEvent): diff --git a/pype/modules/ftrack/events/event_push_frame_values_to_task.py b/pype/modules/ftrack/events/event_push_frame_values_to_task.py index 8e277679bd8..f41466d664d 100644 --- a/pype/modules/ftrack/events/event_push_frame_values_to_task.py +++ b/pype/modules/ftrack/events/event_push_frame_values_to_task.py @@ -2,7 +2,7 @@ import datetime import ftrack_api -from pype.modules.ftrack import BaseEvent +from pype.modules.ftrack.lib import BaseEvent class PushFrameValuesToTaskEvent(BaseEvent): diff --git a/pype/modules/ftrack/events/event_sync_to_avalon.py b/pype/modules/ftrack/events/event_sync_to_avalon.py index 527f3996e1e..7c9c4d196fe 100644 --- a/pype/modules/ftrack/events/event_sync_to_avalon.py 
+++ b/pype/modules/ftrack/events/event_sync_to_avalon.py @@ -10,16 +10,20 @@ from bson.objectid import ObjectId from pymongo import UpdateOne +import ftrack_api + from avalon import schema +from avalon.api import AvalonMongoDB -from pype.modules.ftrack.lib import avalon_sync +from pype.modules.ftrack.lib import ( + avalon_sync, + BaseEvent +) from pype.modules.ftrack.lib.avalon_sync import ( - CUST_ATTR_ID_KEY, CUST_ATTR_AUTO_SYNC, EntitySchemas + CUST_ATTR_ID_KEY, + CUST_ATTR_AUTO_SYNC, + EntitySchemas ) -import ftrack_api -from pype.modules.ftrack import BaseEvent - -from avalon.api import AvalonMongoDB class SyncToAvalonEvent(BaseEvent): diff --git a/pype/modules/ftrack/events/event_task_to_parent_status.py b/pype/modules/ftrack/events/event_task_to_parent_status.py index 9b1f61911e2..72b66754040 100644 --- a/pype/modules/ftrack/events/event_task_to_parent_status.py +++ b/pype/modules/ftrack/events/event_task_to_parent_status.py @@ -1,5 +1,5 @@ import collections -from pype.modules.ftrack import BaseEvent +from pype.modules.ftrack.lib import BaseEvent class TaskStatusToParent(BaseEvent): diff --git a/pype/modules/ftrack/events/event_task_to_version_status.py b/pype/modules/ftrack/events/event_task_to_version_status.py index d27a7f9e98f..14f456831f5 100644 --- a/pype/modules/ftrack/events/event_task_to_version_status.py +++ b/pype/modules/ftrack/events/event_task_to_version_status.py @@ -1,5 +1,5 @@ import collections -from pype.modules.ftrack import BaseEvent +from pype.modules.ftrack.lib import BaseEvent class TaskToVersionStatus(BaseEvent): diff --git a/pype/modules/ftrack/events/event_thumbnail_updates.py b/pype/modules/ftrack/events/event_thumbnail_updates.py index b71322c894c..de189463d06 100644 --- a/pype/modules/ftrack/events/event_thumbnail_updates.py +++ b/pype/modules/ftrack/events/event_thumbnail_updates.py @@ -1,5 +1,5 @@ import collections -from pype.modules.ftrack import BaseEvent +from pype.modules.ftrack.lib import BaseEvent class 
ThumbnailEvents(BaseEvent): diff --git a/pype/modules/ftrack/events/event_user_assigment.py b/pype/modules/ftrack/events/event_user_assigment.py index 59880fabe52..85cf6db12b5 100644 --- a/pype/modules/ftrack/events/event_user_assigment.py +++ b/pype/modules/ftrack/events/event_user_assigment.py @@ -2,7 +2,7 @@ import re import subprocess -from pype.modules.ftrack import BaseEvent +from pype.modules.ftrack.lib import BaseEvent from pype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY from avalon.api import AvalonMongoDB diff --git a/pype/modules/ftrack/events/event_version_to_task_statuses.py b/pype/modules/ftrack/events/event_version_to_task_statuses.py index 4a42e273361..58caf7db514 100644 --- a/pype/modules/ftrack/events/event_version_to_task_statuses.py +++ b/pype/modules/ftrack/events/event_version_to_task_statuses.py @@ -1,4 +1,4 @@ -from pype.modules.ftrack import BaseEvent +from pype.modules.ftrack.lib import BaseEvent class VersionToTaskStatus(BaseEvent): From 6862284b3c01ad3aa762aaf10979a6892bd7639a Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 09:39:42 +0100 Subject: [PATCH 070/295] FtrackServer does not care about server type --- pype/modules/ftrack/ftrack_server/ftrack_server.py | 6 ++---- pype/modules/ftrack/ftrack_server/sub_event_processor.py | 3 +-- pype/modules/ftrack/ftrack_server/sub_event_status.py | 2 +- pype/modules/ftrack/ftrack_server/sub_event_storer.py | 2 +- pype/modules/ftrack/ftrack_server/sub_user_server.py | 2 +- 5 files changed, 6 insertions(+), 9 deletions(-) diff --git a/pype/modules/ftrack/ftrack_server/ftrack_server.py b/pype/modules/ftrack/ftrack_server/ftrack_server.py index 3e0c7525961..e093636dfa1 100644 --- a/pype/modules/ftrack/ftrack_server/ftrack_server.py +++ b/pype/modules/ftrack/ftrack_server/ftrack_server.py @@ -32,14 +32,14 @@ class FtrackServer: - def __init__(self, handler_paths=None, server_type='action'): + def __init__(self, handler_paths=None): """ - 'type' is by default set to 
'action' - Runs Action server - enter 'event' for Event server EXAMPLE FOR EVENT SERVER: ... - server = FtrackServer('event') + server = FtrackServer() server.run_server() .. """ @@ -52,8 +52,6 @@ def __init__(self, handler_paths=None, server_type='action'): self.handler_paths = handler_paths or [] - self.server_type = server_type - def stop_session(self): self.stopped = True if self.session.event_hub.connected is True: diff --git a/pype/modules/ftrack/ftrack_server/sub_event_processor.py b/pype/modules/ftrack/ftrack_server/sub_event_processor.py index f48b2141e63..51d796cea6e 100644 --- a/pype/modules/ftrack/ftrack_server/sub_event_processor.py +++ b/pype/modules/ftrack/ftrack_server/sub_event_processor.py @@ -83,8 +83,7 @@ def main(args): manager = ModulesManager() ftrack_module = manager.modules_by_name["ftrack"] server = FtrackServer( - ftrack_module.server_event_handlers_paths, - "event" + ftrack_module.server_event_handlers_paths ) log.debug("Launched Ftrack Event processor") server.run_server(session) diff --git a/pype/modules/ftrack/ftrack_server/sub_event_status.py b/pype/modules/ftrack/ftrack_server/sub_event_status.py index 07b233282f4..bb72d9ac159 100644 --- a/pype/modules/ftrack/ftrack_server/sub_event_status.py +++ b/pype/modules/ftrack/ftrack_server/sub_event_status.py @@ -370,7 +370,7 @@ def main(args): ObjectFactory.session = session session.event_hub.heartbeat_callbacks.append(heartbeat) register(session) - server = FtrackServer(server_type="event") + server = FtrackServer() log.debug("Launched Ftrack Event statuser") server.run_server(session, load_files=False) diff --git a/pype/modules/ftrack/ftrack_server/sub_event_storer.py b/pype/modules/ftrack/ftrack_server/sub_event_storer.py index 2fdd3b07f76..2032c122ccb 100644 --- a/pype/modules/ftrack/ftrack_server/sub_event_storer.py +++ b/pype/modules/ftrack/ftrack_server/sub_event_storer.py @@ -195,7 +195,7 @@ def main(args): ) SessionFactory.session = session register(session) - server = 
FtrackServer(server_type="event") + server = FtrackServer() log.debug("Launched Ftrack Event storer") server.run_server(session, load_files=False) diff --git a/pype/modules/ftrack/ftrack_server/sub_user_server.py b/pype/modules/ftrack/ftrack_server/sub_user_server.py index b968714faf3..79cd90a0d7a 100644 --- a/pype/modules/ftrack/ftrack_server/sub_user_server.py +++ b/pype/modules/ftrack/ftrack_server/sub_user_server.py @@ -36,7 +36,7 @@ def main(args): ftrack_module = manager.modules_by_name["ftrack"] ftrack_module.user_event_handlers_paths server = FtrackServer( - ftrack_module.user_event_handlers_paths, "action" + ftrack_module.user_event_handlers_paths ) log.debug("Launching User Ftrack Server") server.run_server(session=session) From 20909b50cd69ceec24856a3e58e1e9db5621702a Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 09:43:37 +0100 Subject: [PATCH 071/295] modules_from_path can return crashed modules --- pype/lib/python_module_tools.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/pype/lib/python_module_tools.py b/pype/lib/python_module_tools.py index b5400c9981e..559dd04bab5 100644 --- a/pype/lib/python_module_tools.py +++ b/pype/lib/python_module_tools.py @@ -9,15 +9,20 @@ PY3 = sys.version_info[0] == 3 -def modules_from_path(folder_path): +def modules_from_path(folder_path, return_crashed=False): """Get python scripts as modules from a path. Arguments: path (str): Path to folder containing python scripts. + return_crashed (bool): Crashed module paths with exception info + will be returned too. Returns: - List of modules. + list, tuple: List of modules when `return_crashed` is False else tuple + with list of modules at first place and list of (path, exception + info) tuples at second place.
""" + crashed = [] modules = [] # Just skip and return empty list if path is not set if not folder_path: @@ -70,12 +75,15 @@ def modules_from_path(folder_path): modules.append(module) except Exception: + crashed.append((full_path, sys.exc_info())) log.warning( "Failed to load path: \"{0}\"".format(full_path), exc_info=True ) continue + if return_crashed: + return modules, crashed return modules From 329a39e4440ae7f00fa197cd2136baf8c97b61e2 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 09:44:14 +0100 Subject: [PATCH 072/295] FtrackServer is using `modules_from_path` from pype.lib --- .../ftrack/ftrack_server/ftrack_server.py | 86 ++++++++----------- 1 file changed, 38 insertions(+), 48 deletions(-) diff --git a/pype/modules/ftrack/ftrack_server/ftrack_server.py b/pype/modules/ftrack/ftrack_server/ftrack_server.py index e093636dfa1..9d9d8c4630b 100644 --- a/pype/modules/ftrack/ftrack_server/ftrack_server.py +++ b/pype/modules/ftrack/ftrack_server/ftrack_server.py @@ -1,17 +1,17 @@ import os -import sys import types -import importlib -import time import logging -import inspect +import traceback import ftrack_api -from pype.lib import PypeLogger +from pype.lib import ( + PypeLogger, + modules_from_path +) -log = PypeLogger().get_logger(__name__) +log = PypeLogger.get_logger(__name__) """ # Required - Needed for connection to Ftrack @@ -61,60 +61,50 @@ def stop_session(self): def set_files(self, paths): # Iterate all paths - register_functions_dict = [] + register_functions = [] for path in paths: - # add path to PYTHON PATH - if path not in sys.path: - sys.path.append(path) - # Get all modules with functions - for file in os.listdir(path): - # Get only .py files with action functions - try: - if '.pyc' in file or '.py' not in file: - continue - - mod = importlib.import_module(os.path.splitext(file)[0]) - importlib.reload(mod) - mod_functions = dict( - [ - (name, function) - for name, function in mod.__dict__.items() - if isinstance(function, 
types.FunctionType) - ] - ) + modules, crashed = modules_from_path(path, return_crashed=True) + for filepath, exc_info in crashed: + log.warning("Filepath load crashed {}.\n{}".format( + filepath, traceback.format_exception(*exc_info) + )) - # separate files by register function - if 'register' not in mod_functions: - msg = ('"{}" - Missing register method').format(file) - log.warning(msg) - continue - - register_functions_dict.append({ - 'name': file, - 'register': mod_functions['register'] - }) - except Exception as e: - msg = 'Loading of file "{}" failed ({})'.format( - file, str(e) + for module in modules: + register_function = None + for name, attr in module.__dict__.items(): + if ( + name == "register" + and isinstance(attr, types.FunctionType) + ): + register_function = attr + break + + filepath = os.path.abspath(module.__file__) + if not register_function: + log.warning( + "\"{}\" - Missing register method".format(filepath) ) - log.warning(msg, exc_info=e) + continue + + register_functions.append( + (filepath, register_function) + ) - if len(register_functions_dict) < 1: + if not register_functions: log.warning(( "There are no events with `register` function" " in registered paths: \"{}\"" ).format("| ".join(paths))) - for function_dict in register_functions_dict: - register = function_dict["register"] + for filepath, register_func in register_functions: try: - register(self.session) - except Exception as exc: - msg = '"{}" - register was not successful ({})'.format( - function_dict['name'], str(exc) + register_func(self.session) + except Exception: + log.warning( + "\"{}\" - register was not successful".format(filepath), + exc_info=True ) - log.warning(msg, exc_info=True) def set_handler_paths(self, paths): self.handler_paths = paths From 36eab984dd1ff65da0a472d5bc06521f95ea4a2a Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 09:45:07 +0100 Subject: [PATCH 073/295] ftrack module is python 2 compatible --- 
.../events/event_push_frame_values_to_task.py | 7 ++--- pype/modules/ftrack/ftrack_module.py | 19 +++++++----- pype/modules/ftrack/lib/avalon_sync.py | 30 +++++++++++-------- 3 files changed, 33 insertions(+), 23 deletions(-) diff --git a/pype/modules/ftrack/events/event_push_frame_values_to_task.py b/pype/modules/ftrack/events/event_push_frame_values_to_task.py index f41466d664d..338866ba5bf 100644 --- a/pype/modules/ftrack/events/event_push_frame_values_to_task.py +++ b/pype/modules/ftrack/events/event_push_frame_values_to_task.py @@ -272,10 +272,9 @@ def finalize( if new_value == old_value: continue - entity_key = collections.OrderedDict({ - "configuration_id": attr_id, - "entity_id": entity_id - }) + entity_key = collections.OrderedDict() + entity_key["configuration_id"] = attr_id + entity_key["entity_id"] = entity_id self._cached_changes.append({ "attr_key": attr_key, "entity_id": entity_id, diff --git a/pype/modules/ftrack/ftrack_module.py b/pype/modules/ftrack/ftrack_module.py index 2cbd79c32e4..fd6d1efb7cb 100644 --- a/pype/modules/ftrack/ftrack_module.py +++ b/pype/modules/ftrack/ftrack_module.py @@ -42,16 +42,21 @@ def initialize(self, settings): self.ftrack_url = ftrack_settings["ftrack_server"] current_dir = os.path.dirname(os.path.abspath(__file__)) - self.server_event_handlers_paths = [ - os.path.join(current_dir, "events"), - *ftrack_settings["ftrack_events_path"] + server_event_handlers_paths = [ + os.path.join(current_dir, "events") ] - self.user_event_handlers_paths = [ - os.path.join(current_dir, "actions"), - *ftrack_settings["ftrack_actions_path"] + server_event_handlers_paths.extend( + ftrack_settings["ftrack_events_path"] + ) + user_event_handlers_paths = [ + os.path.join(current_dir, "actions") ] - + user_event_handlers_paths.extend( + ftrack_settings["ftrack_actions_path"] + ) # Prepare attribute + self.server_event_handlers_paths = server_event_handlers_paths + self.user_event_handlers_paths = user_event_handlers_paths self.tray_module = 
None def get_global_environments(self): diff --git a/pype/modules/ftrack/lib/avalon_sync.py b/pype/modules/ftrack/lib/avalon_sync.py index e9dc1734c6b..f651c1785d4 100644 --- a/pype/modules/ftrack/lib/avalon_sync.py +++ b/pype/modules/ftrack/lib/avalon_sync.py @@ -1,10 +1,16 @@ import os import re -import queue import json import collections import copy +import six + +if six.PY3: + from queue import Queue +else: + from Queue import Queue + from avalon.api import AvalonMongoDB import avalon @@ -135,7 +141,7 @@ def from_dict_to_set(data, is_project): data.pop("data") result = {"$set": {}} - dict_queue = queue.Queue() + dict_queue = Queue() dict_queue.put((None, data)) while not dict_queue.empty(): @@ -687,7 +693,7 @@ def duplicity_regex_check(self): self.filter_by_duplicate_regex() def filter_by_duplicate_regex(self): - filter_queue = queue.Queue() + filter_queue = Queue() failed_regex_msg = "{} - Entity has invalid symbols in the name" duplicate_msg = "There are multiple entities with the name: \"{}\":" @@ -741,7 +747,7 @@ def filter_by_ignore_sync(self): ) == "_notset_": return - self.filter_queue = queue.Queue() + self.filter_queue = Queue() self.filter_queue.put((self.ft_project_id, False)) while not self.filter_queue.empty(): parent_id, remove = self.filter_queue.get() @@ -778,8 +784,8 @@ def filter_by_selection(self, event): selected_ids.append(entity["entityId"]) sync_ids = [self.ft_project_id] - parents_queue = queue.Queue() - children_queue = queue.Queue() + parents_queue = Queue() + children_queue = Queue() for id in selected_ids: # skip if already filtered with ignore sync custom attribute if id in self.filtered_ids: @@ -1046,7 +1052,7 @@ def set_hierarchical_attribute(self, hier_attrs, sync_ids): if value is not None: project_values[key] = value - hier_down_queue = queue.Queue() + hier_down_queue = Queue() hier_down_queue.put((project_values, top_id)) while not hier_down_queue.empty(): @@ -1225,7 +1231,7 @@ def prepare_avalon_entities(self, 
ft_project_name): create_ftrack_ids.append(self.ft_project_id) # make it go hierarchically - prepare_queue = queue.Queue() + prepare_queue = Queue() for child_id in self.entities_dict[self.ft_project_id]["children"]: prepare_queue.put(child_id) @@ -1348,7 +1354,7 @@ def filter_with_children(self, ftrack_id): parent_id = ent_dict["parent_id"] self.entities_dict[parent_id]["children"].remove(ftrack_id) - children_queue = queue.Queue() + children_queue = Queue() children_queue.put(ftrack_id) while not children_queue.empty(): _ftrack_id = children_queue.get() @@ -1361,7 +1367,7 @@ def prepare_changes(self): hierarchy_changing_ids = [] ignore_keys = collections.defaultdict(list) - update_queue = queue.Queue() + update_queue = Queue() for ftrack_id in self.update_ftrack_ids: update_queue.put(ftrack_id) @@ -1941,7 +1947,7 @@ def create_avalon_project(self): entity["custom_attributes"][CUST_ATTR_ID_KEY] = str(new_id) def _bubble_changeability(self, unchangeable_ids): - unchangeable_queue = queue.Queue() + unchangeable_queue = Queue() for entity_id in unchangeable_ids: unchangeable_queue.put((entity_id, False)) @@ -2067,7 +2073,7 @@ def update_entities(self): self.dbcon.bulk_write(mongo_changes_bulk) def reload_parents(self, hierarchy_changing_ids): - parents_queue = queue.Queue() + parents_queue = Queue() parents_queue.put((self.ft_project_id, [], False)) while not parents_queue.empty(): ftrack_id, parent_parents, changed = parents_queue.get() From 6e8f477f31e9cb5fc67eada80af36dac87ffb2a8 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 09:45:25 +0100 Subject: [PATCH 074/295] fix legacy server --- pype/modules/ftrack/ftrack_server/sub_legacy_server.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pype/modules/ftrack/ftrack_server/sub_legacy_server.py b/pype/modules/ftrack/ftrack_server/sub_legacy_server.py index e09bcbb699a..2e45b564b39 100644 --- a/pype/modules/ftrack/ftrack_server/sub_legacy_server.py +++ 
b/pype/modules/ftrack/ftrack_server/sub_legacy_server.py @@ -66,8 +66,7 @@ def main(args): manager = ModulesManager() ftrack_module = manager.modules_by_name["ftrack"] server = FtrackServer( - ftrack_module.server_event_handlers_paths, - "event" + ftrack_module.server_event_handlers_paths ) session = ftrack_api.Session(auto_connect_event_hub=True) From 99ff266760b10a198608ad29d3d80bf028e380f6 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 09:45:44 +0100 Subject: [PATCH 075/295] formatting changes --- pype/modules/ftrack/lib/avalon_sync.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pype/modules/ftrack/lib/avalon_sync.py b/pype/modules/ftrack/lib/avalon_sync.py index f651c1785d4..7373059b68d 100644 --- a/pype/modules/ftrack/lib/avalon_sync.py +++ b/pype/modules/ftrack/lib/avalon_sync.py @@ -22,7 +22,7 @@ import ftrack_api from pype.lib import ApplicationManager -log = Logger().get_logger(__name__) +log = Logger.get_logger(__name__) # Current schemas for avalon types @@ -42,7 +42,8 @@ def default_custom_attributes_definition(): json_file_path = os.path.join( - os.path.dirname(__file__), "custom_attributes.json" + os.path.dirname(os.path.abspath(__file__)), + "custom_attributes.json" ) with open(json_file_path, "r") as json_stream: data = json.load(json_stream) From 06f01f3680422b506b2a01390ca8a2352bb05438 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 09:46:52 +0100 Subject: [PATCH 076/295] removed event_test.py --- pype/modules/ftrack/events/event_test.py | 22 ---------------------- 1 file changed, 22 deletions(-) delete mode 100644 pype/modules/ftrack/events/event_test.py diff --git a/pype/modules/ftrack/events/event_test.py b/pype/modules/ftrack/events/event_test.py deleted file mode 100644 index c07f8b8d161..00000000000 --- a/pype/modules/ftrack/events/event_test.py +++ /dev/null @@ -1,22 +0,0 @@ -from pype.modules.ftrack import BaseEvent - - -class TestEvent(BaseEvent): - - ignore_me = True - - 
priority = 10000 - - def launch(self, session, event): - - '''just a testing event''' - - # self.log.info(event) - - return True - - -def register(session): - '''Register plugin. Called when used as an plugin.''' - - TestEvent(session).register() From 4bc0152d604e01a69f9923c25d747f2ea4f9a384 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 09:48:50 +0100 Subject: [PATCH 077/295] reduced ftrack module init file --- pype/modules/ftrack/__init__.py | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/pype/modules/ftrack/__init__.py b/pype/modules/ftrack/__init__.py index 4fb427f13a3..c1a557812cc 100644 --- a/pype/modules/ftrack/__init__.py +++ b/pype/modules/ftrack/__init__.py @@ -3,20 +3,9 @@ IFtrackEventHandlerPaths, FTRACK_MODULE_DIR ) -from . import ftrack_server -from .ftrack_server import FtrackServer, check_ftrack_url -from .lib import BaseHandler, BaseEvent, BaseAction, ServerAction __all__ = ( "FtrackModule", "IFtrackEventHandlerPaths", - "FTRACK_MODULE_DIR", - - "ftrack_server", - "FtrackServer", - "check_ftrack_url", - "BaseHandler", - "BaseEvent", - "BaseAction", - "ServerAction" + "FTRACK_MODULE_DIR" ) From e4c5c4eaa209ce48c513d54aa5cf4fe8046e7793 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 09:49:35 +0100 Subject: [PATCH 078/295] renamed folder `actions` to `event_handlers_user` --- .../{actions => event_handlers_user}/action_applications.py | 0 .../action_batch_task_creation.py | 0 .../action_clean_hierarchical_attributes.py | 0 .../{actions => event_handlers_user}/action_client_review_sort.py | 0 .../{actions => event_handlers_user}/action_component_open.py | 0 .../{actions => event_handlers_user}/action_create_cust_attrs.py | 0 .../{actions => event_handlers_user}/action_create_folders.py | 0 .../action_create_project_structure.py | 0 .../{actions => event_handlers_user}/action_delete_asset.py | 0 .../action_delete_old_versions.py | 0 .../ftrack/{actions => 
event_handlers_user}/action_delivery.py | 0 .../ftrack/{actions => event_handlers_user}/action_djvview.py | 0 .../ftrack/{actions => event_handlers_user}/action_job_killer.py | 0 .../{actions => event_handlers_user}/action_multiple_notes.py | 0 .../{actions => event_handlers_user}/action_prepare_project.py | 0 pype/modules/ftrack/{actions => event_handlers_user}/action_rv.py | 0 .../ftrack/{actions => event_handlers_user}/action_seed.py | 0 .../action_store_thumbnails_to_avalon.py | 0 .../{actions => event_handlers_user}/action_sync_to_avalon.py | 0 .../ftrack/{actions => event_handlers_user}/action_test.py | 0 .../action_thumbnail_to_childern.py | 0 .../action_thumbnail_to_parent.py | 0 .../{actions => event_handlers_user}/action_where_run_ask.py | 0 .../{actions => event_handlers_user}/action_where_run_show.py | 0 24 files changed, 0 insertions(+), 0 deletions(-) rename pype/modules/ftrack/{actions => event_handlers_user}/action_applications.py (100%) rename pype/modules/ftrack/{actions => event_handlers_user}/action_batch_task_creation.py (100%) rename pype/modules/ftrack/{actions => event_handlers_user}/action_clean_hierarchical_attributes.py (100%) rename pype/modules/ftrack/{actions => event_handlers_user}/action_client_review_sort.py (100%) rename pype/modules/ftrack/{actions => event_handlers_user}/action_component_open.py (100%) rename pype/modules/ftrack/{actions => event_handlers_user}/action_create_cust_attrs.py (100%) rename pype/modules/ftrack/{actions => event_handlers_user}/action_create_folders.py (100%) rename pype/modules/ftrack/{actions => event_handlers_user}/action_create_project_structure.py (100%) rename pype/modules/ftrack/{actions => event_handlers_user}/action_delete_asset.py (100%) rename pype/modules/ftrack/{actions => event_handlers_user}/action_delete_old_versions.py (100%) rename pype/modules/ftrack/{actions => event_handlers_user}/action_delivery.py (100%) rename pype/modules/ftrack/{actions => event_handlers_user}/action_djvview.py 
(100%) rename pype/modules/ftrack/{actions => event_handlers_user}/action_job_killer.py (100%) rename pype/modules/ftrack/{actions => event_handlers_user}/action_multiple_notes.py (100%) rename pype/modules/ftrack/{actions => event_handlers_user}/action_prepare_project.py (100%) rename pype/modules/ftrack/{actions => event_handlers_user}/action_rv.py (100%) rename pype/modules/ftrack/{actions => event_handlers_user}/action_seed.py (100%) rename pype/modules/ftrack/{actions => event_handlers_user}/action_store_thumbnails_to_avalon.py (100%) rename pype/modules/ftrack/{actions => event_handlers_user}/action_sync_to_avalon.py (100%) rename pype/modules/ftrack/{actions => event_handlers_user}/action_test.py (100%) rename pype/modules/ftrack/{actions => event_handlers_user}/action_thumbnail_to_childern.py (100%) rename pype/modules/ftrack/{actions => event_handlers_user}/action_thumbnail_to_parent.py (100%) rename pype/modules/ftrack/{actions => event_handlers_user}/action_where_run_ask.py (100%) rename pype/modules/ftrack/{actions => event_handlers_user}/action_where_run_show.py (100%) diff --git a/pype/modules/ftrack/actions/action_applications.py b/pype/modules/ftrack/event_handlers_user/action_applications.py similarity index 100% rename from pype/modules/ftrack/actions/action_applications.py rename to pype/modules/ftrack/event_handlers_user/action_applications.py diff --git a/pype/modules/ftrack/actions/action_batch_task_creation.py b/pype/modules/ftrack/event_handlers_user/action_batch_task_creation.py similarity index 100% rename from pype/modules/ftrack/actions/action_batch_task_creation.py rename to pype/modules/ftrack/event_handlers_user/action_batch_task_creation.py diff --git a/pype/modules/ftrack/actions/action_clean_hierarchical_attributes.py b/pype/modules/ftrack/event_handlers_user/action_clean_hierarchical_attributes.py similarity index 100% rename from pype/modules/ftrack/actions/action_clean_hierarchical_attributes.py rename to 
pype/modules/ftrack/event_handlers_user/action_clean_hierarchical_attributes.py diff --git a/pype/modules/ftrack/actions/action_client_review_sort.py b/pype/modules/ftrack/event_handlers_user/action_client_review_sort.py similarity index 100% rename from pype/modules/ftrack/actions/action_client_review_sort.py rename to pype/modules/ftrack/event_handlers_user/action_client_review_sort.py diff --git a/pype/modules/ftrack/actions/action_component_open.py b/pype/modules/ftrack/event_handlers_user/action_component_open.py similarity index 100% rename from pype/modules/ftrack/actions/action_component_open.py rename to pype/modules/ftrack/event_handlers_user/action_component_open.py diff --git a/pype/modules/ftrack/actions/action_create_cust_attrs.py b/pype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py similarity index 100% rename from pype/modules/ftrack/actions/action_create_cust_attrs.py rename to pype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py diff --git a/pype/modules/ftrack/actions/action_create_folders.py b/pype/modules/ftrack/event_handlers_user/action_create_folders.py similarity index 100% rename from pype/modules/ftrack/actions/action_create_folders.py rename to pype/modules/ftrack/event_handlers_user/action_create_folders.py diff --git a/pype/modules/ftrack/actions/action_create_project_structure.py b/pype/modules/ftrack/event_handlers_user/action_create_project_structure.py similarity index 100% rename from pype/modules/ftrack/actions/action_create_project_structure.py rename to pype/modules/ftrack/event_handlers_user/action_create_project_structure.py diff --git a/pype/modules/ftrack/actions/action_delete_asset.py b/pype/modules/ftrack/event_handlers_user/action_delete_asset.py similarity index 100% rename from pype/modules/ftrack/actions/action_delete_asset.py rename to pype/modules/ftrack/event_handlers_user/action_delete_asset.py diff --git a/pype/modules/ftrack/actions/action_delete_old_versions.py 
b/pype/modules/ftrack/event_handlers_user/action_delete_old_versions.py similarity index 100% rename from pype/modules/ftrack/actions/action_delete_old_versions.py rename to pype/modules/ftrack/event_handlers_user/action_delete_old_versions.py diff --git a/pype/modules/ftrack/actions/action_delivery.py b/pype/modules/ftrack/event_handlers_user/action_delivery.py similarity index 100% rename from pype/modules/ftrack/actions/action_delivery.py rename to pype/modules/ftrack/event_handlers_user/action_delivery.py diff --git a/pype/modules/ftrack/actions/action_djvview.py b/pype/modules/ftrack/event_handlers_user/action_djvview.py similarity index 100% rename from pype/modules/ftrack/actions/action_djvview.py rename to pype/modules/ftrack/event_handlers_user/action_djvview.py diff --git a/pype/modules/ftrack/actions/action_job_killer.py b/pype/modules/ftrack/event_handlers_user/action_job_killer.py similarity index 100% rename from pype/modules/ftrack/actions/action_job_killer.py rename to pype/modules/ftrack/event_handlers_user/action_job_killer.py diff --git a/pype/modules/ftrack/actions/action_multiple_notes.py b/pype/modules/ftrack/event_handlers_user/action_multiple_notes.py similarity index 100% rename from pype/modules/ftrack/actions/action_multiple_notes.py rename to pype/modules/ftrack/event_handlers_user/action_multiple_notes.py diff --git a/pype/modules/ftrack/actions/action_prepare_project.py b/pype/modules/ftrack/event_handlers_user/action_prepare_project.py similarity index 100% rename from pype/modules/ftrack/actions/action_prepare_project.py rename to pype/modules/ftrack/event_handlers_user/action_prepare_project.py diff --git a/pype/modules/ftrack/actions/action_rv.py b/pype/modules/ftrack/event_handlers_user/action_rv.py similarity index 100% rename from pype/modules/ftrack/actions/action_rv.py rename to pype/modules/ftrack/event_handlers_user/action_rv.py diff --git a/pype/modules/ftrack/actions/action_seed.py 
b/pype/modules/ftrack/event_handlers_user/action_seed.py similarity index 100% rename from pype/modules/ftrack/actions/action_seed.py rename to pype/modules/ftrack/event_handlers_user/action_seed.py diff --git a/pype/modules/ftrack/actions/action_store_thumbnails_to_avalon.py b/pype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py similarity index 100% rename from pype/modules/ftrack/actions/action_store_thumbnails_to_avalon.py rename to pype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py diff --git a/pype/modules/ftrack/actions/action_sync_to_avalon.py b/pype/modules/ftrack/event_handlers_user/action_sync_to_avalon.py similarity index 100% rename from pype/modules/ftrack/actions/action_sync_to_avalon.py rename to pype/modules/ftrack/event_handlers_user/action_sync_to_avalon.py diff --git a/pype/modules/ftrack/actions/action_test.py b/pype/modules/ftrack/event_handlers_user/action_test.py similarity index 100% rename from pype/modules/ftrack/actions/action_test.py rename to pype/modules/ftrack/event_handlers_user/action_test.py diff --git a/pype/modules/ftrack/actions/action_thumbnail_to_childern.py b/pype/modules/ftrack/event_handlers_user/action_thumbnail_to_childern.py similarity index 100% rename from pype/modules/ftrack/actions/action_thumbnail_to_childern.py rename to pype/modules/ftrack/event_handlers_user/action_thumbnail_to_childern.py diff --git a/pype/modules/ftrack/actions/action_thumbnail_to_parent.py b/pype/modules/ftrack/event_handlers_user/action_thumbnail_to_parent.py similarity index 100% rename from pype/modules/ftrack/actions/action_thumbnail_to_parent.py rename to pype/modules/ftrack/event_handlers_user/action_thumbnail_to_parent.py diff --git a/pype/modules/ftrack/actions/action_where_run_ask.py b/pype/modules/ftrack/event_handlers_user/action_where_run_ask.py similarity index 100% rename from pype/modules/ftrack/actions/action_where_run_ask.py rename to 
pype/modules/ftrack/event_handlers_user/action_where_run_ask.py diff --git a/pype/modules/ftrack/actions/action_where_run_show.py b/pype/modules/ftrack/event_handlers_user/action_where_run_show.py similarity index 100% rename from pype/modules/ftrack/actions/action_where_run_show.py rename to pype/modules/ftrack/event_handlers_user/action_where_run_show.py From ba692441804e6ec77c87f2ff43943ff82b050c7e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 09:50:01 +0100 Subject: [PATCH 079/295] renamed folder `events` to `event_handlers_server` --- .../action_clone_review_session.py | 0 .../action_push_frame_values_to_task.py | 0 .../{events => event_handlers_server}/action_sync_to_avalon.py | 0 .../event_del_avalon_id_from_new.py | 0 .../event_first_version_status.py | 0 .../{events => event_handlers_server}/event_next_task_update.py | 0 .../event_push_frame_values_to_task.py | 0 .../{events => event_handlers_server}/event_radio_buttons.py | 0 .../{events => event_handlers_server}/event_sync_to_avalon.py | 0 .../event_task_to_parent_status.py | 0 .../event_task_to_version_status.py | 0 .../{events => event_handlers_server}/event_thumbnail_updates.py | 0 .../{events => event_handlers_server}/event_user_assigment.py | 0 .../event_version_to_task_statuses.py | 0 14 files changed, 0 insertions(+), 0 deletions(-) rename pype/modules/ftrack/{events => event_handlers_server}/action_clone_review_session.py (100%) rename pype/modules/ftrack/{events => event_handlers_server}/action_push_frame_values_to_task.py (100%) rename pype/modules/ftrack/{events => event_handlers_server}/action_sync_to_avalon.py (100%) rename pype/modules/ftrack/{events => event_handlers_server}/event_del_avalon_id_from_new.py (100%) rename pype/modules/ftrack/{events => event_handlers_server}/event_first_version_status.py (100%) rename pype/modules/ftrack/{events => event_handlers_server}/event_next_task_update.py (100%) rename pype/modules/ftrack/{events => 
event_handlers_server}/event_push_frame_values_to_task.py (100%) rename pype/modules/ftrack/{events => event_handlers_server}/event_radio_buttons.py (100%) rename pype/modules/ftrack/{events => event_handlers_server}/event_sync_to_avalon.py (100%) rename pype/modules/ftrack/{events => event_handlers_server}/event_task_to_parent_status.py (100%) rename pype/modules/ftrack/{events => event_handlers_server}/event_task_to_version_status.py (100%) rename pype/modules/ftrack/{events => event_handlers_server}/event_thumbnail_updates.py (100%) rename pype/modules/ftrack/{events => event_handlers_server}/event_user_assigment.py (100%) rename pype/modules/ftrack/{events => event_handlers_server}/event_version_to_task_statuses.py (100%) diff --git a/pype/modules/ftrack/events/action_clone_review_session.py b/pype/modules/ftrack/event_handlers_server/action_clone_review_session.py similarity index 100% rename from pype/modules/ftrack/events/action_clone_review_session.py rename to pype/modules/ftrack/event_handlers_server/action_clone_review_session.py diff --git a/pype/modules/ftrack/events/action_push_frame_values_to_task.py b/pype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py similarity index 100% rename from pype/modules/ftrack/events/action_push_frame_values_to_task.py rename to pype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py diff --git a/pype/modules/ftrack/events/action_sync_to_avalon.py b/pype/modules/ftrack/event_handlers_server/action_sync_to_avalon.py similarity index 100% rename from pype/modules/ftrack/events/action_sync_to_avalon.py rename to pype/modules/ftrack/event_handlers_server/action_sync_to_avalon.py diff --git a/pype/modules/ftrack/events/event_del_avalon_id_from_new.py b/pype/modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py similarity index 100% rename from pype/modules/ftrack/events/event_del_avalon_id_from_new.py rename to 
pype/modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py diff --git a/pype/modules/ftrack/events/event_first_version_status.py b/pype/modules/ftrack/event_handlers_server/event_first_version_status.py similarity index 100% rename from pype/modules/ftrack/events/event_first_version_status.py rename to pype/modules/ftrack/event_handlers_server/event_first_version_status.py diff --git a/pype/modules/ftrack/events/event_next_task_update.py b/pype/modules/ftrack/event_handlers_server/event_next_task_update.py similarity index 100% rename from pype/modules/ftrack/events/event_next_task_update.py rename to pype/modules/ftrack/event_handlers_server/event_next_task_update.py diff --git a/pype/modules/ftrack/events/event_push_frame_values_to_task.py b/pype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py similarity index 100% rename from pype/modules/ftrack/events/event_push_frame_values_to_task.py rename to pype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py diff --git a/pype/modules/ftrack/events/event_radio_buttons.py b/pype/modules/ftrack/event_handlers_server/event_radio_buttons.py similarity index 100% rename from pype/modules/ftrack/events/event_radio_buttons.py rename to pype/modules/ftrack/event_handlers_server/event_radio_buttons.py diff --git a/pype/modules/ftrack/events/event_sync_to_avalon.py b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py similarity index 100% rename from pype/modules/ftrack/events/event_sync_to_avalon.py rename to pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py diff --git a/pype/modules/ftrack/events/event_task_to_parent_status.py b/pype/modules/ftrack/event_handlers_server/event_task_to_parent_status.py similarity index 100% rename from pype/modules/ftrack/events/event_task_to_parent_status.py rename to pype/modules/ftrack/event_handlers_server/event_task_to_parent_status.py diff --git 
a/pype/modules/ftrack/events/event_task_to_version_status.py b/pype/modules/ftrack/event_handlers_server/event_task_to_version_status.py similarity index 100% rename from pype/modules/ftrack/events/event_task_to_version_status.py rename to pype/modules/ftrack/event_handlers_server/event_task_to_version_status.py diff --git a/pype/modules/ftrack/events/event_thumbnail_updates.py b/pype/modules/ftrack/event_handlers_server/event_thumbnail_updates.py similarity index 100% rename from pype/modules/ftrack/events/event_thumbnail_updates.py rename to pype/modules/ftrack/event_handlers_server/event_thumbnail_updates.py diff --git a/pype/modules/ftrack/events/event_user_assigment.py b/pype/modules/ftrack/event_handlers_server/event_user_assigment.py similarity index 100% rename from pype/modules/ftrack/events/event_user_assigment.py rename to pype/modules/ftrack/event_handlers_server/event_user_assigment.py diff --git a/pype/modules/ftrack/events/event_version_to_task_statuses.py b/pype/modules/ftrack/event_handlers_server/event_version_to_task_statuses.py similarity index 100% rename from pype/modules/ftrack/events/event_version_to_task_statuses.py rename to pype/modules/ftrack/event_handlers_server/event_version_to_task_statuses.py From 0b5acc75e9a74bed4aa477c22bbca5e193f4aef6 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 09:50:17 +0100 Subject: [PATCH 080/295] changed paths of default event handlers in module --- pype/modules/ftrack/ftrack_module.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/modules/ftrack/ftrack_module.py b/pype/modules/ftrack/ftrack_module.py index fd6d1efb7cb..a257ede8454 100644 --- a/pype/modules/ftrack/ftrack_module.py +++ b/pype/modules/ftrack/ftrack_module.py @@ -43,13 +43,13 @@ def initialize(self, settings): current_dir = os.path.dirname(os.path.abspath(__file__)) server_event_handlers_paths = [ - os.path.join(current_dir, "events") + os.path.join(current_dir, "event_handlers_server") ] 
server_event_handlers_paths.extend( ftrack_settings["ftrack_events_path"] ) user_event_handlers_paths = [ - os.path.join(current_dir, "actions") + os.path.join(current_dir, "event_handlers_user") ] user_event_handlers_paths.extend( ftrack_settings["ftrack_actions_path"] From f2ecff26888ce0906f3a3224a1b4db6e05cd36be Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 10:16:50 +0100 Subject: [PATCH 081/295] modules_from_path always return all information with filepaths --- pype/lib/applications.py | 8 ++++---- pype/lib/python_module_tools.py | 8 +++----- pype/modules/ftrack/ftrack_server/ftrack_server.py | 5 ++--- 3 files changed, 9 insertions(+), 12 deletions(-) diff --git a/pype/lib/applications.py b/pype/lib/applications.py index d20b01c3d20..f7d029e2ea4 100644 --- a/pype/lib/applications.py +++ b/pype/lib/applications.py @@ -618,13 +618,13 @@ def discover_launch_hooks(self, force=False): ) continue - modules = modules_from_path(path) - for _module in modules: + modules, _crashed = modules_from_path(path) + for _filepath, module in modules: all_classes["pre"].extend( - classes_from_module(PreLaunchHook, _module) + classes_from_module(PreLaunchHook, module) ) all_classes["post"].extend( - classes_from_module(PostLaunchHook, _module) + classes_from_module(PostLaunchHook, module) ) for launch_type, classes in all_classes.items(): diff --git a/pype/lib/python_module_tools.py b/pype/lib/python_module_tools.py index 559dd04bab5..44a10078891 100644 --- a/pype/lib/python_module_tools.py +++ b/pype/lib/python_module_tools.py @@ -9,7 +9,7 @@ PY3 = sys.version_info[0] == 3 -def modules_from_path(folder_path, return_crashed=False): +def modules_from_path(folder_path): """Get python scripts as modules from a path. 
Arguments: @@ -72,7 +72,7 @@ def modules_from_path(folder_path, return_crashed=False): module.__file__ = full_path - modules.append(module) + modules.append((full_path, module)) except Exception: crashed.append((full_path, sys.exc_info())) @@ -82,9 +82,7 @@ def modules_from_path(folder_path, return_crashed=False): ) continue - if return_crashed: - return modules, crashed - return modules + return modules, crashed def recursive_bases_from_class(klass): diff --git a/pype/modules/ftrack/ftrack_server/ftrack_server.py b/pype/modules/ftrack/ftrack_server/ftrack_server.py index 9d9d8c4630b..1612a2f4742 100644 --- a/pype/modules/ftrack/ftrack_server/ftrack_server.py +++ b/pype/modules/ftrack/ftrack_server/ftrack_server.py @@ -64,13 +64,13 @@ def set_files(self, paths): register_functions = [] for path in paths: # Get all modules with functions - modules, crashed = modules_from_path(path, return_crashed=True) + modules, crashed = modules_from_path(path) for filepath, exc_info in crashed: log.warning("Filepath load crashed {}.\n{}".format( filepath, traceback.format_exception(*exc_info) )) - for module in modules: + for filepath, module in modules: register_function = None for name, attr in module.__dict__.items(): if ( @@ -80,7 +80,6 @@ def set_files(self, paths): register_function = attr break - filepath = os.path.abspath(module.__file__) if not register_function: log.warning( "\"{}\" - Missing register method".format(filepath) From 355872938b693f328ccb41f51676aa312a416b4f Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 10:18:16 +0100 Subject: [PATCH 082/295] default mongo url is accessible with method --- pype/lib/mongo.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pype/lib/mongo.py b/pype/lib/mongo.py index 04798d88ff7..3ee43bb9347 100644 --- a/pype/lib/mongo.py +++ b/pype/lib/mongo.py @@ -137,10 +137,14 @@ class PypeMongoConnection: mongo_clients = {} log = logging.getLogger("PypeMongoConnection") + @staticmethod + def 
get_default_mongo_url(): + return os.environ["PYPE_MONGO"] + @classmethod def get_mongo_client(cls, mongo_url=None): if mongo_url is None: - mongo_url = os.environ["PYPE_MONGO"] + mongo_url = cls.get_default_mongo_url() connection = cls.mongo_clients.get(mongo_url) if connection: From 017a2df3959ee2bed2ef57c40f656e7429168836 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 10:35:49 +0100 Subject: [PATCH 083/295] modified mongo validation in ftrack --- .../ftrack/ftrack_server/event_server_cli.py | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/pype/modules/ftrack/ftrack_server/event_server_cli.py b/pype/modules/ftrack/ftrack_server/event_server_cli.py index 27b25bd8cfe..5e885e47f3a 100644 --- a/pype/modules/ftrack/ftrack_server/event_server_cli.py +++ b/pype/modules/ftrack/ftrack_server/event_server_cli.py @@ -14,7 +14,10 @@ import ftrack_api import pymongo -from pype.lib import get_pype_execute_args +from pype.lib import ( + get_pype_execute_args, + PypeMongoConnection +) from pype.modules.ftrack.lib import ( credentials, get_ftrack_url_from_settings @@ -35,17 +38,18 @@ def __init__(self, message=None): super().__init__(message) -def check_mongo_url(host, port, log_error=False): +def check_mongo_url(mongo_uri, log_error=False): """Checks if mongo server is responding""" try: - client = pymongo.MongoClient(host=host, port=port) + client = pymongo.MongoClient(mongo_uri) # Force connection on a request as the connect=True parameter of # MongoClient seems to be useless here client.server_info() + client.close() except pymongo.errors.ServerSelectionTimeoutError as err: if log_error: - print("Can't connect to MongoDB at {}:{} because: {}".format( - host, port, err + print("Can't connect to MongoDB at {} because: {}".format( + mongo_uri, err )) return False @@ -175,11 +179,8 @@ def main_loop(ftrack_url): """ os.environ["FTRACK_EVENT_SUB_ID"] = str(uuid.uuid1()) - # Get mongo hostname and port for testing mongo 
connection - mongo_uri, mongo_port, database_name, collection_name = ( - get_ftrack_event_mongo_info() - ) + mongo_uri = PypeMongoConnection.get_default_mongo_url() # Current file file_path = os.path.dirname(os.path.realpath(__file__)) @@ -257,7 +258,7 @@ def on_exit(processor_thread, storer_thread, statuser_thread): ftrack_accessible = check_ftrack_url(ftrack_url) if not mongo_accessible: - mongo_accessible = check_mongo_url(mongo_uri, mongo_port) + mongo_accessible = check_mongo_url(mongo_uri) # Run threads only if Ftrack is accessible if not ftrack_accessible or not mongo_accessible: From 0f5d7df35dc527abb41e067d3d5ec47a13086a21 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 10:37:13 +0100 Subject: [PATCH 084/295] replaced CustomDbConnector with PypeMongoConnection --- .../event_del_avalon_id_from_new.py | 4 +- .../ftrack_server/custom_db_connector.py | 253 ------------------ pype/modules/ftrack/ftrack_server/lib.py | 29 +- .../ftrack/ftrack_server/sub_event_storer.py | 19 +- pype/modules/ftrack/lib/settings.py | 23 +- 5 files changed, 26 insertions(+), 302 deletions(-) delete mode 100644 pype/modules/ftrack/ftrack_server/custom_db_connector.py diff --git a/pype/modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py b/pype/modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py index 21e581e76a0..b30d21e05a9 100644 --- a/pype/modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py +++ b/pype/modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py @@ -1,6 +1,8 @@ from pype.modules.ftrack.lib import BaseEvent from pype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY -from pype.modules.ftrack.events.event_sync_to_avalon import SyncToAvalonEvent +from pype.modules.ftrack.event_handlers_server.event_sync_to_avalon import ( + SyncToAvalonEvent +) class DelAvalonIdFromNew(BaseEvent): diff --git a/pype/modules/ftrack/ftrack_server/custom_db_connector.py 
b/pype/modules/ftrack/ftrack_server/custom_db_connector.py deleted file mode 100644 index f435086e8a7..00000000000 --- a/pype/modules/ftrack/ftrack_server/custom_db_connector.py +++ /dev/null @@ -1,253 +0,0 @@ -""" -Wrapper around interactions with the database - -Copy of io module in avalon-core. - - In this case not working as singleton with api.Session! -""" - -import time -import logging -import functools -import atexit -import os - -# Third-party dependencies -import pymongo -from pype.api import decompose_url - - -class NotActiveCollection(Exception): - def __init__(self, *args, **kwargs): - msg = "Active collection is not set. (This is bug)" - if not (args or kwargs): - args = [msg] - super().__init__(*args, **kwargs) - - -def auto_reconnect(func): - """Handling auto reconnect in 3 retry times""" - @functools.wraps(func) - def decorated(*args, **kwargs): - object = args[0] - for retry in range(3): - try: - return func(*args, **kwargs) - except pymongo.errors.AutoReconnect: - object.log.error("Reconnecting..") - time.sleep(0.1) - else: - raise - return decorated - - -def check_active_collection(func): - """Check if CustomDbConnector has active collection.""" - @functools.wraps(func) - def decorated(obj, *args, **kwargs): - if not obj.active_collection: - raise NotActiveCollection() - return func(obj, *args, **kwargs) - return decorated - - -class CustomDbConnector: - log = logging.getLogger(__name__) - - def __init__( - self, uri, database_name, port=None, collection_name=None - ): - self.timeout = int(os.environ["AVALON_TIMEOUT"]) - self._mongo_client = None - self._sentry_client = None - self._sentry_logging_handler = None - self._database = None - self._is_installed = False - - self._uri = uri - components = decompose_url(uri) - if port is None: - port = components.get("port") - - if database_name is None: - raise ValueError( - "Database is not defined for connection. 
{}".format(uri) - ) - - self._port = port - self._database_name = database_name - - self.active_collection = collection_name - - def __getitem__(self, key): - # gives direct access to collection withou setting `active_collection` - return self._database[key] - - def __getattribute__(self, attr): - # not all methods of PyMongo database are implemented with this it is - # possible to use them too - try: - return super(CustomDbConnector, self).__getattribute__(attr) - except AttributeError: - if self.active_collection is None: - raise NotActiveCollection() - return self._database[self.active_collection].__getattribute__( - attr - ) - - def install(self): - """Establish a persistent connection to the database""" - if self._is_installed: - return - atexit.register(self.uninstall) - logging.basicConfig() - - kwargs = { - "host": self._uri, - "serverSelectionTimeoutMS": self.timeout - } - if self._port is not None: - kwargs["port"] = self._port - - self._mongo_client = pymongo.MongoClient(**kwargs) - if self._port is None: - self._port = self._mongo_client.PORT - - for retry in range(3): - try: - t1 = time.time() - self._mongo_client.server_info() - except Exception: - self.log.error("Retrying..") - time.sleep(1) - else: - break - - else: - raise IOError( - "ERROR: Couldn't connect to %s in " - "less than %.3f ms" % (self._uri, self.timeout) - ) - - self.log.info("Connected to %s, delay %.3f s" % ( - self._uri, time.time() - t1 - )) - - self._database = self._mongo_client[self._database_name] - self._is_installed = True - - def uninstall(self): - """Close any connection to the database""" - - try: - self._mongo_client.close() - except AttributeError: - pass - - self._mongo_client = None - self._database = None - self._is_installed = False - atexit.unregister(self.uninstall) - - def collection_exists(self, collection_name): - return collection_name in self.collections() - - def create_collection(self, name, **options): - if self.collection_exists(name): - return - - return 
self._database.create_collection(name, **options) - - @auto_reconnect - def collections(self): - for col_name in self._database.collection_names(): - if col_name not in ("system.indexes",): - yield col_name - - @check_active_collection - @auto_reconnect - def insert_one(self, item, **options): - assert isinstance(item, dict), "item must be of type " - return self._database[self.active_collection].insert_one( - item, **options - ) - - @check_active_collection - @auto_reconnect - def insert_many(self, items, ordered=True, **options): - # check if all items are valid - assert isinstance(items, list), "`items` must be of type " - for item in items: - assert isinstance(item, dict), "`item` must be of type " - - options["ordered"] = ordered - return self._database[self.active_collection].insert_many( - items, **options - ) - - @check_active_collection - @auto_reconnect - def find(self, filter, projection=None, sort=None, **options): - options["sort"] = sort - return self._database[self.active_collection].find( - filter, projection, **options - ) - - @check_active_collection - @auto_reconnect - def find_one(self, filter, projection=None, sort=None, **options): - assert isinstance(filter, dict), "filter must be " - options["sort"] = sort - return self._database[self.active_collection].find_one( - filter, - projection, - **options - ) - - @check_active_collection - @auto_reconnect - def replace_one(self, filter, replacement, **options): - return self._database[self.active_collection].replace_one( - filter, replacement, **options - ) - - @check_active_collection - @auto_reconnect - def update_one(self, filter, update, **options): - return self._database[self.active_collection].update_one( - filter, update, **options - ) - - @check_active_collection - @auto_reconnect - def update_many(self, filter, update, **options): - return self._database[self.active_collection].update_many( - filter, update, **options - ) - - @check_active_collection - @auto_reconnect - def distinct(self, 
**options): - return self._database[self.active_collection].distinct(**options) - - @check_active_collection - @auto_reconnect - def drop_collection(self, name_or_collection, **options): - return self._database[self.active_collection].drop( - name_or_collection, **options - ) - - @check_active_collection - @auto_reconnect - def delete_one(self, filter, collation=None, **options): - options["collation"] = collation - return self._database[self.active_collection].delete_one( - filter, **options - ) - - @check_active_collection - @auto_reconnect - def delete_many(self, filter, collation=None, **options): - options["collation"] = collation - return self._database[self.active_collection].delete_many( - filter, **options - ) diff --git a/pype/modules/ftrack/ftrack_server/lib.py b/pype/modules/ftrack/ftrack_server/lib.py index 08c77d89a28..3b016d38fd3 100644 --- a/pype/modules/ftrack/ftrack_server/lib.py +++ b/pype/modules/ftrack/ftrack_server/lib.py @@ -21,7 +21,7 @@ from pype.modules.ftrack.lib import get_ftrack_event_mongo_info -from .custom_db_connector import CustomDbConnector +from pype.lib import PypeMongoConnection from pype.api import Logger TOPIC_STATUS_SERVER = "pype.event.server.status" @@ -133,27 +133,22 @@ class ProcessEventHub(SocketBaseEventHub): pypelog = Logger().get_logger("Session Processor") def __init__(self, *args, **kwargs): - self.uri, self.port, self.database, self.collection_name = ( - get_ftrack_event_mongo_info() - ) - self.dbcon = CustomDbConnector( - self.uri, - self.database, - self.port, - self.collection_name - ) + self.mongo_url = None + self.dbcon = None + super(ProcessEventHub, self).__init__(*args, **kwargs) def prepare_dbcon(self): try: - self.dbcon.install() - self.dbcon._database.list_collection_names() + database_name, collection_name = get_ftrack_event_mongo_info() + mongo_client = PypeMongoConnection.get_mongo_client() + self.dbcon = mongo_client[database_name][collection_name] + self.mongo_client = mongo_client + except 
pymongo.errors.AutoReconnect: - self.pypelog.error( - "Mongo server \"{}\" is not responding, exiting.".format( - os.environ["AVALON_MONGO"] - ) - ) + self.pypelog.error(( + "Mongo server \"{}\" is not responding, exiting." + ).format(PypeMongoConnection.get_default_mongo_url())) sys.exit(0) except pymongo.errors.OperationFailure: diff --git a/pype/modules/ftrack/ftrack_server/sub_event_storer.py b/pype/modules/ftrack/ftrack_server/sub_event_storer.py index 2032c122ccb..d70ef8bd426 100644 --- a/pype/modules/ftrack/ftrack_server/sub_event_storer.py +++ b/pype/modules/ftrack/ftrack_server/sub_event_storer.py @@ -14,12 +14,10 @@ TOPIC_STATUS_SERVER_RESULT ) from pype.modules.ftrack.lib import get_ftrack_event_mongo_info -from pype.modules.ftrack.ftrack_server.custom_db_connector import ( - CustomDbConnector -) +from pype.lib import PypeMongoConnection from pype.api import Logger -log = Logger().get_logger("Event storer") +log = Logger.get_logger("Event storer") subprocess_started = datetime.datetime.now() @@ -27,20 +25,21 @@ class SessionFactory: session = None -uri, port, database, collection_name = get_ftrack_event_mongo_info() -dbcon = CustomDbConnector(uri, database, port, collection_name) +database_name, collection_name = get_ftrack_event_mongo_info() +dbcon = None # ignore_topics = ["ftrack.meta.connected"] ignore_topics = [] def install_db(): + global dbcon try: - dbcon.install() - dbcon._database.list_collection_names() + mongo_client = PypeMongoConnection.get_mongo_client() + dbcon = mongo_client[database_name][collection_name] except pymongo.errors.AutoReconnect: log.error("Mongo server \"{}\" is not responding, exiting.".format( - os.environ["AVALON_MONGO"] + PypeMongoConnection.get_default_mongo_url() )) sys.exit(0) @@ -204,7 +203,7 @@ def main(args): "Error with Mongo access, probably permissions." "Check if exist database with name \"{}\"" " and collection \"{}\" inside." 
- ).format(database, collection_name)) + ).format(database_name, collection_name)) sock.sendall(b"MongoError") finally: diff --git a/pype/modules/ftrack/lib/settings.py b/pype/modules/ftrack/lib/settings.py index 1167d5443ef..4afac9c29fb 100644 --- a/pype/modules/ftrack/lib/settings.py +++ b/pype/modules/ftrack/lib/settings.py @@ -1,13 +1,4 @@ -import os -from pype.api import ( - Logger, - get_system_settings, - get_default_components, - decompose_url, - compose_url -) - -log = Logger().get_logger(__name__) +from pype.api import get_system_settings def get_ftrack_settings(): @@ -22,14 +13,4 @@ def get_ftrack_event_mongo_info(): ftrack_settings = get_ftrack_settings() database_name = ftrack_settings["mongo_database_name"] collection_name = ftrack_settings["mongo_collection_name"] - - # TODO add possibility to set in settings and use PYPE_MONGO_URL if not set - mongo_url = os.environ.get("FTRACK_EVENTS_MONGO_URL") - if mongo_url is not None: - components = decompose_url(mongo_url) - else: - components = get_default_components() - - uri = compose_url(**components) - - return uri, components["port"], database_name, collection_name + return database_name, collection_name From e248c5f585f83a97ccef6fb8c564556faaf91557 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 11:50:52 +0100 Subject: [PATCH 085/295] moved enum entity to specific file --- pype/settings/entities/__init__.py | 8 ++- pype/settings/entities/enum_entity.py | 70 ++++++++++++++++++++++++ pype/settings/entities/input_entities.py | 68 ----------------------- 3 files changed, 76 insertions(+), 70 deletions(-) create mode 100644 pype/settings/entities/enum_entity.py diff --git a/pype/settings/entities/__init__.py b/pype/settings/entities/__init__.py index f67286832cd..2c5d22328c2 100644 --- a/pype/settings/entities/__init__.py +++ b/pype/settings/entities/__init__.py @@ -90,12 +90,15 @@ NumberEntity, BoolEntity, - EnumEntity, TextEntity, PathInput, RawJsonEntity ) +from .enum_entity import ( + 
EnumEntity, +) + from .list_entity import ListEntity from .dict_immutable_keys_entity import DictImmutableKeysEntity from .dict_mutable_keys_entity import DictMutableKeysEntity @@ -136,11 +139,12 @@ "NumberEntity", "BoolEntity", - "EnumEntity", "TextEntity", "PathInput", "RawJsonEntity", + "EnumEntity", + "ListEntity", "DictImmutableKeysEntity", diff --git a/pype/settings/entities/enum_entity.py b/pype/settings/entities/enum_entity.py new file mode 100644 index 00000000000..cb19369de07 --- /dev/null +++ b/pype/settings/entities/enum_entity.py @@ -0,0 +1,70 @@ +from .input_entities import InputEntity +from .lib import NOT_SET + + +class EnumEntity(InputEntity): + schema_types = ["enum"] + + def _item_initalization(self): + self.multiselection = self.schema_data.get("multiselection", False) + self.enum_items = self.schema_data["enum_items"] + if not self.enum_items: + raise ValueError("Attribute `enum_items` is not defined.") + + valid_keys = set() + for item in self.enum_items: + valid_keys.add(tuple(item.keys())[0]) + + self.valid_keys = valid_keys + + if self.multiselection: + self.valid_value_types = (list, ) + self.value_on_not_set = [] + else: + valid_value_types = set() + for key in valid_keys: + if self.value_on_not_set is NOT_SET: + self.value_on_not_set = key + valid_value_types.add(type(key)) + + self.valid_value_types = tuple(valid_value_types) + + # GUI attribute + self.placeholder = self.schema_data.get("placeholder") + + def schema_validations(self): + enum_keys = set() + for item in self.enum_items: + key = tuple(item.keys())[0] + if key in enum_keys: + raise ValueError( + "{}: Key \"{}\" is more than once in enum items.".format( + self.path, key + ) + ) + enum_keys.add(key) + + super(EnumEntity, self).schema_validations() + + def set(self, value): + if self.multiselection: + if not isinstance(value, list): + if isinstance(value, (set, tuple)): + value = list(value) + else: + value = [value] + check_values = value + else: + check_values = [value] + + 
self._validate_value_type(value) + + for item in check_values: + if item not in self.valid_keys: + raise ValueError( + "Invalid value \"{}\". Expected: {}".format( + item, self.valid_keys + ) + ) + self._current_value = value + self._on_value_change() diff --git a/pype/settings/entities/input_entities.py b/pype/settings/entities/input_entities.py index 0c104e3ce76..c26cb249a67 100644 --- a/pype/settings/entities/input_entities.py +++ b/pype/settings/entities/input_entities.py @@ -350,74 +350,6 @@ def _item_initalization(self): self.value_on_not_set = True -class EnumEntity(InputEntity): - schema_types = ["enum"] - - def _item_initalization(self): - self.multiselection = self.schema_data.get("multiselection", False) - self.enum_items = self.schema_data["enum_items"] - if not self.enum_items: - raise ValueError("Attribute `enum_items` is not defined.") - - valid_keys = set() - for item in self.enum_items: - valid_keys.add(tuple(item.keys())[0]) - - self.valid_keys = valid_keys - - if self.multiselection: - self.valid_value_types = (list, ) - self.value_on_not_set = [] - else: - valid_value_types = set() - for key in valid_keys: - if self.value_on_not_set is NOT_SET: - self.value_on_not_set = key - valid_value_types.add(type(key)) - - self.valid_value_types = tuple(valid_value_types) - - # GUI attribute - self.placeholder = self.schema_data.get("placeholder") - - def schema_validations(self): - enum_keys = set() - for item in self.enum_items: - key = tuple(item.keys())[0] - if key in enum_keys: - raise ValueError( - "{}: Key \"{}\" is more than once in enum items.".format( - self.path, key - ) - ) - enum_keys.add(key) - - super(EnumEntity, self).schema_validations() - - def set(self, value): - if self.multiselection: - if not isinstance(value, list): - if isinstance(value, (set, tuple)): - value = list(value) - else: - value = [value] - check_values = value - else: - check_values = [value] - - self._validate_value_type(value) - - for item in check_values: - if item 
not in self.valid_keys: - raise ValueError( - "Invalid value \"{}\". Expected: {}".format( - item, self.valid_keys - ) - ) - self._current_value = value - self._on_value_change() - - class TextEntity(InputEntity): schema_types = ["text"] From c0076058a00ca8e7b9c62c51ed25b0bbae03f022 Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Thu, 18 Mar 2021 11:51:39 +0100 Subject: [PATCH 086/295] add support for setting Deadline group and Limit Groups --- .../plugins/publish/submit_maya_deadline.py | 13 +++++++++++-- .../defaults/project_settings/deadline.json | 4 +++- .../projects_schema/schema_project_deadline.json | 11 +++++++++++ 3 files changed, 25 insertions(+), 3 deletions(-) diff --git a/pype/modules/deadline/plugins/publish/submit_maya_deadline.py b/pype/modules/deadline/plugins/publish/submit_maya_deadline.py index 55705d1bbbb..4d5aec9d50e 100644 --- a/pype/modules/deadline/plugins/publish/submit_maya_deadline.py +++ b/pype/modules/deadline/plugins/publish/submit_maya_deadline.py @@ -254,6 +254,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): use_published = True tile_assembler_plugin = "PypeTileAssembler" asset_dependencies = False + limit_groups = [] + group = "none" def process(self, instance): """Plugin entry point.""" @@ -402,8 +404,15 @@ def process(self, instance): # Arbitrary username, for visualisation in Monitor self.payload_skeleton["JobInfo"]["UserName"] = deadline_user # Set job priority - self.payload_skeleton["JobInfo"]["Priority"] = self._instance.data.get( - "priority", 50) + self.payload_skeleton["JobInfo"]["Priority"] = \ + self._instance.data.get("priority", 50) + + if self.group != "none": + self.payload_skeleton["JobInfo"]["Group"] = self.group + + if self.limit: + self.payload_skeleton["JobInfo"]["LimitGroups"] = \ + ",".join(self.limit) # Optional, enable double-click to preview rendered # frames from Deadline Monitor self.payload_skeleton["JobInfo"]["OutputDirectory0"] = \ diff --git 
a/pype/settings/defaults/project_settings/deadline.json b/pype/settings/defaults/project_settings/deadline.json index 9e5665bee9d..892fb5d29fe 100644 --- a/pype/settings/defaults/project_settings/deadline.json +++ b/pype/settings/defaults/project_settings/deadline.json @@ -5,7 +5,9 @@ "optional": false, "tile_assembler_plugin": "oiio", "use_published": true, - "asset_dependencies": true + "asset_dependencies": true, + "group": "none", + "limit": [] }, "NukeSubmitDeadline": { "enabled": true, diff --git a/pype/settings/entities/schemas/projects_schema/schema_project_deadline.json b/pype/settings/entities/schemas/projects_schema/schema_project_deadline.json index c103f9467c7..97b28059593 100644 --- a/pype/settings/entities/schemas/projects_schema/schema_project_deadline.json +++ b/pype/settings/entities/schemas/projects_schema/schema_project_deadline.json @@ -52,6 +52,17 @@ "type": "boolean", "key": "asset_dependencies", "label": "Use Asset dependencies" + }, + { + "type": "text", + "key": "group", + "label": "Group Name" + }, + { + "type": "list", + "key": "limit", + "label": "Limit Groups", + "object_type": "text" } ] }, From 8a8f0b7e8d09682b9726a4c04e889471c46fce49 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 11:52:26 +0100 Subject: [PATCH 087/295] root entities have `get_entity_from_path` but not all are implemented --- pype/settings/entities/base_entity.py | 8 +++++ pype/settings/entities/root_entities.py | 44 +++++++++++++++++++++++++ 2 files changed, 52 insertions(+) diff --git a/pype/settings/entities/base_entity.py b/pype/settings/entities/base_entity.py index 9003a66d764..3a4bb23a90f 100644 --- a/pype/settings/entities/base_entity.py +++ b/pype/settings/entities/base_entity.py @@ -203,6 +203,11 @@ def get_child_path(self, child_entity): """Return path for a direct child entity.""" pass + @abstractmethod + def get_entity_from_path(self, path): + """Return system settings entity.""" + pass + def schema_validations(self): """Validate schema 
of entity and it's hierachy. @@ -790,6 +795,9 @@ def create_schema_object(self, *args, **kwargs): """Reference method for creation of entities defined in RootEntity.""" return self.root_item.create_schema_object(*args, **kwargs) + def get_entity_from_path(self, path): + return self.root_item.get_entity_from_path(path) + @abstractmethod def update_default_value(self, parent_values): """Fill default values on startup or on refresh. diff --git a/pype/settings/entities/root_entities.py b/pype/settings/entities/root_entities.py index 6e804cb2864..b4dc6678269 100644 --- a/pype/settings/entities/root_entities.py +++ b/pype/settings/entities/root_entities.py @@ -151,6 +151,12 @@ def schema_validations(self): ).format(self.__class__.__name__)) child_entity.schema_validations() + def get_entity_from_path(self, path): + """Return system settings entity.""" + raise NotImplementedError(( + "Method `get_entity_from_path` not available for \"{}\"" + ).format(self.__class__.__name__)) + def create_schema_object(self, schema_data, *args, **kwargs): """Create entity by entered schema data. 
@@ -564,6 +570,8 @@ def __init__( ): self._project_name = project_name + self._system_settings_entity = None + if schema_data is None: # Load system schemas schema_data = get_project_settings_schema() @@ -584,6 +592,40 @@ def project_name(self): def project_name(self, project_name): self.change_project(project_name) + @property + def system_settings_entity(self): + output = self._system_settings_entity + if output is None: + output = SystemSettings() + self._system_settings_entity = output + + if self.override_state is OverrideState.DEFAULTS: + if output.override_state is not OverrideState.DEFAULTS: + output.set_defaults_state() + elif self.override_state > OverrideState.DEFAULTS: + if output.override_state <= OverrideState.DEFAULTS: + try: + output.set_studio_state() + except Exception: + output.set_defaults_state() + return output + + def get_entity_from_path(self, path): + path_parts = path.split("/") + first_part = path_parts[0] + # TODO replace with constants + if first_part == "system_settings": + output = self.system_settings_entity + path_parts.pop(0) + else: + output = self + if first_part == "project_settings": + path_parts.pop(0) + + for path_part in path_parts: + output = output[path_part] + return output + def change_project(self, project_name): if project_name == self._project_name: return @@ -648,6 +690,8 @@ def reset(self, new_state=None): if new_state is OverrideState.NOT_DEFINED: new_state = OverrideState.DEFAULTS + self._system_settings_entity = None + self._reset_values() self.set_override_state(new_state) From 0c34efc291c684a33ceeee9d30b115944ece2416 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 11:53:13 +0100 Subject: [PATCH 088/295] implemented apps and tools enums using system settings --- pype/settings/entities/__init__.py | 4 ++ pype/settings/entities/enum_entity.py | 90 +++++++++++++++++++++++++++ 2 files changed, 94 insertions(+) diff --git a/pype/settings/entities/__init__.py b/pype/settings/entities/__init__.py index 
2c5d22328c2..b48f763c731 100644 --- a/pype/settings/entities/__init__.py +++ b/pype/settings/entities/__init__.py @@ -97,6 +97,8 @@ from .enum_entity import ( EnumEntity, + AppsEnumEntity, + ToolsEnumEntity ) from .list_entity import ListEntity @@ -144,6 +146,8 @@ "RawJsonEntity", "EnumEntity", + "AppsEnumEntity", + "ToolsEnumEntity", "ListEntity", diff --git a/pype/settings/entities/enum_entity.py b/pype/settings/entities/enum_entity.py index cb19369de07..4d6d268c702 100644 --- a/pype/settings/entities/enum_entity.py +++ b/pype/settings/entities/enum_entity.py @@ -68,3 +68,93 @@ def set(self, value): ) self._current_value = value self._on_value_change() + + +class AppsEnumEntity(EnumEntity): + schema_types = ["apps-enum"] + + def _item_initalization(self): + self.multiselection = True + self.value_on_not_set = [] + self.enum_items = [] + self.valid_keys = set() + self.valid_value_types = (list, ) + self.placeholder = None + + def _get_enum_values(self): + system_settings_entity = self.get_entity_from_path("system_settings") + + valid_keys = set() + enum_items = [] + for app_group in system_settings_entity["applications"].values(): + enabled_entity = app_group.get("enabled") + if enabled_entity and not enabled_entity.value: + continue + + host_name_entity = app_group.get("host_name") + if not host_name_entity or not host_name_entity.value: + continue + + group_label = app_group["label"].value + + for variant_name, variant_entity in app_group["variants"].items(): + enabled_entity = variant_entity.get("enabled") + if enabled_entity and not enabled_entity.value: + continue + + _group_label = variant_entity["label"].value + if not _group_label: + _group_label = group_label + variant_label = variant_entity["variant_label"].value + + full_label = "{} {}".format(_group_label, variant_label) + enum_items.append({variant_name: full_label}) + valid_keys.add(variant_name) + return enum_items, valid_keys + + def set_override_state(self, *args, **kwargs): + 
super(AppsEnumEntity, self).set_override_state(*args, **kwargs) + + self.enum_items, self.valid_keys = self._get_enum_values() + new_value = [] + for key in self._current_value: + if key in self.valid_keys: + new_value.append(key) + self._current_value = new_value + + +class ToolsEnumEntity(EnumEntity): + schema_types = ["tools-enum"] + + def _item_initalization(self): + self.multiselection = True + self.value_on_not_set = [] + self.enum_items = [] + self.valid_keys = set() + self.valid_value_types = (list, ) + self.placeholder = None + + def _get_enum_values(self): + system_settings_entity = self.get_entity_from_path("system_settings") + + valid_keys = set() + enum_items = [] + for tool_group in system_settings_entity["tools"].values(): + enabled_entity = tool_group.get("enabled") + if enabled_entity and not enabled_entity.value: + continue + + for variant_name in tool_group["variants"].keys(): + enum_items.append({variant_name: variant_name}) + valid_keys.add(variant_name) + return enum_items, valid_keys + + def set_override_state(self, *args, **kwargs): + super(ToolsEnumEntity, self).set_override_state(*args, **kwargs) + + self.enum_items, self.valid_keys = self._get_enum_values() + new_value = [] + for key in self._current_value: + if key in self.valid_keys: + new_value.append(key) + self._current_value = new_value From 51054b8c97520217b427eb3cd550ef63aa10ba86 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 11:53:32 +0100 Subject: [PATCH 089/295] apps and tools entities are used in schema --- .../schemas/schema_anatomy_attributes.json | 52 ++----------------- 1 file changed, 4 insertions(+), 48 deletions(-) diff --git a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json index d5d36e79a9b..76c597e88d1 100644 --- a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json +++ 
b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json @@ -61,58 +61,14 @@ "minimum": 0 }, { - "type": "enum", + "type": "apps-enum", "key": "applications", - "label": "Applications", - "multiselection": true, - "enum_items": [ - {"harmony_20": "Harmony 20"}, - {"photoshop_2021": "Photoshop 2021"}, - {"photoshop_2020": "Photoshop 2020"}, - {"unreal_4.24": "Unreal Editor 4.24"}, - {"nuke_11.3": "Nuke 11.3"}, - {"nuke_11.2": "Nuke 11.2"}, - {"fusion_9": "Fusion 9"}, - {"celation_Publish": "CelAction 2D Pulblish"}, - {"nuke_12.0": "Nuke 12.0"}, - {"nuke_12.2": "Nuke 12.2"}, - {"maya_2018": "Maya 2018"}, - {"mayabatch_2018": "MayaBatch 2018"}, - {"mayabatch_2019": "MayaBatch 2019"}, - {"aftereffects_2020": "AfterEffects 2020"}, - {"aftereffects_2021": "AfterEffects 2021"}, - {"blender_2.83": "Blender 2.83"}, - {"tvpaint_Animation 11 (64bits)": "TVPaint 11 (64bits)"}, - {"celation_Local": "CelAction 2D Local"}, - {"tvpaint_Animation 11 (32bits)": "TVPaint 11 (32bits)"}, - {"harmony_17": "Harmony 17"}, - {"resolve_16": "Resolve 16"}, - {"fusion_16": "Fusion 16"}, - {"maya_2019": "Maya 2019"}, - {"djvview_1.1": "DJV View 1.1"}, - {"nukestudio_12.0": "Nuke Studio 12.0"}, - {"nukestudio_12.2": "Nuke Studio 12.2"}, - {"hiero_12.2": "Hiero 12.2"}, - {"hiero_12.0": "Hiero 12.0"}, - {"nukestudio_11.3": "Nuke Studio 11.3"}, - {"nukestudio_11.2": "Nuke Studio 11.2"}, - {"houdini_18": "Houdini 18"}, - {"mayabatch_2020": "MayaBatch 2020"}, - {"hiero_11.2": "Hiero 11.2"}, - {"hiero_11.3": "Hiero 11.3"}, - {"houdini_17": "Houdini 17"}, - {"blender_2.90": "Blender 2.90"}, - {"nukex_12.0": "Nuke X 12.0"}, - {"maya_2020": "Maya 2020"}, - {"nukex_12.2": "Nuke X 12.2"}, - {"nukex_11.2": "Nuke X 11.2"}, - {"nukex_11.3": "Nuke X 11.3"} - ] + "label": "Applications" }, { - "type": "list", + "type": "tools-enum", "key": "tools", - "label": "Tools", + "label": "Tools" "object_type": "text" } ] From 325551be7e79ed742c50c3276beb9420d0e0a5ee Mon Sep 17 00:00:00 
2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 11:54:50 +0100 Subject: [PATCH 090/295] removed unused key from tools schema --- .../projects_schema/schemas/schema_anatomy_attributes.json | 1 - 1 file changed, 1 deletion(-) diff --git a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json index 76c597e88d1..f75319c7e1c 100644 --- a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json +++ b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json @@ -69,7 +69,6 @@ "type": "tools-enum", "key": "tools", "label": "Tools" - "object_type": "text" } ] } From a8ab4d288b1b153c5cc7201d33a7686370ba161d Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 14:25:48 +0100 Subject: [PATCH 091/295] moved scripts to scripts folder --- .../ftrack/ftrack_server/event_server_cli.py | 13 +++++++------ .../sub_event_processor.py | 0 .../{ftrack_server => scripts}/sub_event_status.py | 0 .../{ftrack_server => scripts}/sub_event_storer.py | 0 .../{ftrack_server => scripts}/sub_legacy_server.py | 0 .../{ftrack_server => scripts}/sub_user_server.py | 0 pype/modules/ftrack/tray/ftrack_tray.py | 3 ++- 7 files changed, 9 insertions(+), 7 deletions(-) rename pype/modules/ftrack/{ftrack_server => scripts}/sub_event_processor.py (100%) rename pype/modules/ftrack/{ftrack_server => scripts}/sub_event_status.py (100%) rename pype/modules/ftrack/{ftrack_server => scripts}/sub_event_storer.py (100%) rename pype/modules/ftrack/{ftrack_server => scripts}/sub_legacy_server.py (100%) rename pype/modules/ftrack/{ftrack_server => scripts}/sub_user_server.py (100%) diff --git a/pype/modules/ftrack/ftrack_server/event_server_cli.py b/pype/modules/ftrack/ftrack_server/event_server_cli.py index 5e885e47f3a..c70a12aefbe 100644 --- a/pype/modules/ftrack/ftrack_server/event_server_cli.py +++ 
b/pype/modules/ftrack/ftrack_server/event_server_cli.py @@ -18,6 +18,7 @@ get_pype_execute_args, PypeMongoConnection ) +from pype.modules.ftrack import FTRACK_MODULE_DIR from pype.modules.ftrack.lib import ( credentials, get_ftrack_url_from_settings @@ -98,14 +99,14 @@ def validate_credentials(url, user, api): def legacy_server(ftrack_url): # Current file - file_path = os.path.dirname(os.path.realpath(__file__)) + scripts_dir = os.path.join(FTRACK_MODULE_DIR, "scripts") min_fail_seconds = 5 max_fail_count = 3 wait_time_after_max_fail = 10 subproc = None - subproc_path = "{}/sub_legacy_server.py".format(file_path) + subproc_path = "{}/sub_legacy_server.py".format(scripts_dir) subproc_last_failed = datetime.datetime.now() subproc_failed_count = 0 @@ -183,7 +184,7 @@ def main_loop(ftrack_url): mongo_uri = PypeMongoConnection.get_default_mongo_url() # Current file - file_path = os.path.dirname(os.path.realpath(__file__)) + scripts_dir = os.path.join(FTRACK_MODULE_DIR, "scripts") min_fail_seconds = 5 max_fail_count = 3 @@ -192,21 +193,21 @@ def main_loop(ftrack_url): # Threads data storer_name = "StorerThread" storer_port = 10001 - storer_path = "{}/sub_event_storer.py".format(file_path) + storer_path = "{}/sub_event_storer.py".format(scripts_dir) storer_thread = None storer_last_failed = datetime.datetime.now() storer_failed_count = 0 processor_name = "ProcessorThread" processor_port = 10011 - processor_path = "{}/sub_event_processor.py".format(file_path) + processor_path = "{}/sub_event_processor.py".format(scripts_dir) processor_thread = None processor_last_failed = datetime.datetime.now() processor_failed_count = 0 statuser_name = "StorerThread" statuser_port = 10021 - statuser_path = "{}/sub_event_status.py".format(file_path) + statuser_path = "{}/sub_event_status.py".format(scripts_dir) statuser_thread = None statuser_last_failed = datetime.datetime.now() statuser_failed_count = 0 diff --git a/pype/modules/ftrack/ftrack_server/sub_event_processor.py 
b/pype/modules/ftrack/scripts/sub_event_processor.py similarity index 100% rename from pype/modules/ftrack/ftrack_server/sub_event_processor.py rename to pype/modules/ftrack/scripts/sub_event_processor.py diff --git a/pype/modules/ftrack/ftrack_server/sub_event_status.py b/pype/modules/ftrack/scripts/sub_event_status.py similarity index 100% rename from pype/modules/ftrack/ftrack_server/sub_event_status.py rename to pype/modules/ftrack/scripts/sub_event_status.py diff --git a/pype/modules/ftrack/ftrack_server/sub_event_storer.py b/pype/modules/ftrack/scripts/sub_event_storer.py similarity index 100% rename from pype/modules/ftrack/ftrack_server/sub_event_storer.py rename to pype/modules/ftrack/scripts/sub_event_storer.py diff --git a/pype/modules/ftrack/ftrack_server/sub_legacy_server.py b/pype/modules/ftrack/scripts/sub_legacy_server.py similarity index 100% rename from pype/modules/ftrack/ftrack_server/sub_legacy_server.py rename to pype/modules/ftrack/scripts/sub_legacy_server.py diff --git a/pype/modules/ftrack/ftrack_server/sub_user_server.py b/pype/modules/ftrack/scripts/sub_user_server.py similarity index 100% rename from pype/modules/ftrack/ftrack_server/sub_user_server.py rename to pype/modules/ftrack/scripts/sub_user_server.py diff --git a/pype/modules/ftrack/tray/ftrack_tray.py b/pype/modules/ftrack/tray/ftrack_tray.py index 3f6432e5410..1009d93afe6 100644 --- a/pype/modules/ftrack/tray/ftrack_tray.py +++ b/pype/modules/ftrack/tray/ftrack_tray.py @@ -8,6 +8,7 @@ from ..ftrack_server.lib import check_ftrack_url from ..ftrack_server import socket_thread from ..lib import credentials +from ..ftrack_module import FTRACK_MODULE_DIR from . 
import login_dialog from pype.api import Logger, resources @@ -132,7 +133,7 @@ def set_action_server(self): thread_name = "ActionServerThread" thread_port = 10021 subprocess_path = ( - "{}/ftrack_server/sub_user_server.py".format(parent_file_path) + "{}/scripts/sub_user_server.py".format(FTRACK_MODULE_DIR) ) if self.thread_socket_server is not None: self.thread_socket_server.stop() From a48fa5f9cf905dd8363b4db5f6f66e3728702027 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 14:37:31 +0100 Subject: [PATCH 092/295] dynamic CustomSession --- pype/modules/ftrack/ftrack_server/lib.py | 50 ++++++++++++++---------- 1 file changed, 30 insertions(+), 20 deletions(-) diff --git a/pype/modules/ftrack/ftrack_server/lib.py b/pype/modules/ftrack/ftrack_server/lib.py index 3b016d38fd3..0b92f6486aa 100644 --- a/pype/modules/ftrack/ftrack_server/lib.py +++ b/pype/modules/ftrack/ftrack_server/lib.py @@ -18,7 +18,10 @@ import ftrack_api._centralized_storage_scenario import ftrack_api.event from ftrack_api.logging import LazyLogMessage as L - +try: + from weakref import WeakMethod +except ImportError: + from ftrack_api._weakref import WeakMethod from pype.modules.ftrack.lib import get_ftrack_event_mongo_info from pype.lib import PypeMongoConnection @@ -243,14 +246,16 @@ def _handle_packet(self, code, packet_identifier, path, data): return super()._handle_packet(code, packet_identifier, path, data) -class SocketSession(ftrack_api.session.Session): +class CustomEventHubSession(ftrack_api.session.Session): '''An isolated session for interaction with an ftrack server.''' def __init__( self, server_url=None, api_key=None, api_user=None, auto_populate=True, plugin_paths=None, cache=None, cache_key_maker=None, - auto_connect_event_hub=None, schema_cache_path=None, - plugin_arguments=None, sock=None, Eventhub=None + auto_connect_event_hub=False, schema_cache_path=None, + plugin_arguments=None, **kwargs ): + self.kwargs = kwargs + super(ftrack_api.session.Session, 
self).__init__() self.logger = logging.getLogger( __name__ + '.' + self.__class__.__name__ @@ -336,17 +341,10 @@ def __init__( self.check_server_compatibility() # Construct event hub and load plugins. - if Eventhub is None: - Eventhub = ftrack_api.event.hub.EventHub - self._event_hub = Eventhub( - self._server_url, - self._api_user, - self._api_key, - sock=sock - ) + self._event_hub = self._create_event_hub() self._auto_connect_event_hub_thread = None - if auto_connect_event_hub in (None, True): + if auto_connect_event_hub: # Connect to event hub in background thread so as not to block main # session usage waiting for event hub connection. self._auto_connect_event_hub_thread = threading.Thread( @@ -355,14 +353,8 @@ def __init__( self._auto_connect_event_hub_thread.daemon = True self._auto_connect_event_hub_thread.start() - # To help with migration from auto_connect_event_hub default changing - # from True to False. - self._event_hub._deprecation_warning_auto_connect = ( - auto_connect_event_hub is None - ) - # Register to auto-close session on exit. 
- atexit.register(self.close) + atexit.register(WeakMethod(self.close)) self._plugin_paths = plugin_paths if self._plugin_paths is None: @@ -399,3 +391,21 @@ def __init__( ), synchronous=True ) + + def _create_event_hub(self): + return ftrack_api.event.hub.EventHub( + self._server_url, + self._api_user, + self._api_key + ) + + +class SocketSession(CustomEventHubSession): + def _create_event_hub(self): + self.sock = self.kwargs["sock"] + return self.kwargs["Eventhub"]( + self._server_url, + self._api_user, + self._api_key, + sock=self.sock + ) From fb453a3a9f6846c40695ecd565d7b6138d1b0d87 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 15:34:16 +0100 Subject: [PATCH 093/295] collect ftrack api collect entities for all instances --- .../plugins/publish/collect_ftrack_api.py | 131 ++++++++++++++++-- 1 file changed, 121 insertions(+), 10 deletions(-) diff --git a/pype/modules/ftrack/plugins/publish/collect_ftrack_api.py b/pype/modules/ftrack/plugins/publish/collect_ftrack_api.py index 1683ec4bb76..28815ca0109 100644 --- a/pype/modules/ftrack/plugins/publish/collect_ftrack_api.py +++ b/pype/modules/ftrack/plugins/publish/collect_ftrack_api.py @@ -1,16 +1,16 @@ import os import logging import pyblish.api +import avalon.api class CollectFtrackApi(pyblish.api.ContextPlugin): """ Collects an ftrack session and the current task id. 
""" - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder + 0.4999 label = "Collect Ftrack Api" def process(self, context): - ftrack_log = logging.getLogger('ftrack_api') ftrack_log.setLevel(logging.WARNING) ftrack_log = logging.getLogger('ftrack_api_old') @@ -22,28 +22,27 @@ def process(self, context): session = ftrack_api.Session(auto_connect_event_hub=True) self.log.debug("Ftrack user: \"{0}\"".format(session.api_user)) - context.data["ftrackSession"] = session # Collect task - project_name = os.environ.get('AVALON_PROJECT', '') - asset_name = os.environ.get('AVALON_ASSET', '') - task_name = os.environ.get('AVALON_TASK', None) + project_name = avalon.api.Session["AVALON_PROJECT"] + asset_name = avalon.api.Session["AVALON_ASSET"] + task_name = avalon.api.Session["AVALON_TASK"] # Find project entity project_query = 'Project where full_name is "{0}"'.format(project_name) self.log.debug("Project query: < {0} >".format(project_query)) - project_entity = list(session.query(project_query).all()) - if len(project_entity) == 0: + project_entities = list(session.query(project_query).all()) + if len(project_entities) == 0: raise AssertionError( "Project \"{0}\" not found in Ftrack.".format(project_name) ) # QUESTION Is possible to happen? - elif len(project_entity) > 1: + elif len(project_entities) > 1: raise AssertionError(( "Found more than one project with name \"{0}\" in Ftrack." 
).format(project_name)) - project_entity = project_entity[0] + project_entity = project_entities[0] self.log.debug("Project found: {0}".format(project_entity)) # Find asset entity @@ -93,7 +92,119 @@ def process(self, context): task_entity = None self.log.warning("Task name is not set.") + context.data["ftrackSession"] = session context.data["ftrackPythonModule"] = ftrack_api context.data["ftrackProject"] = project_entity context.data["ftrackEntity"] = asset_entity context.data["ftrackTask"] = task_entity + + self.per_instance_process(context, asset_name, task_name) + + def per_instance_process( + self, context, context_asset_name, context_task_name + ): + instance_by_asset_and_task = {} + for instance in context: + self.log.debug( + "Checking entities of instance \"{}\"".format(str(instance)) + ) + instance_asset_name = instance.data.get("asset") + instance_task_name = instance.data.get("task") + + if not instance_asset_name and not instance_task_name: + self.log.debug("Instance does not have set context keys.") + continue + + elif instance_asset_name and instance_task_name: + if ( + instance_asset_name == context_asset_name + and instance_task_name == context_task_name + ): + self.log.debug(( + "Instance's context is same as in publish context." + " Asset: {} | Task: {}" + ).format(context_asset_name, context_task_name)) + continue + asset_name = instance_asset_name + task_name = instance_task_name + + elif instance_task_name: + if instance_task_name == context_task_name: + self.log.debug(( + "Instance's context task is same as in publish" + " context. Task: {}" + ).format(context_task_name)) + continue + + asset_name = context_asset_name + task_name = instance_task_name + + elif instance_asset_name: + if instance_asset_name == context_asset_name: + self.log.debug(( + "Instance's context asset is same as in publish" + " context. 
Asset: {}" + ).format(context_asset_name)) + continue + + # Do not use context's task name + task_name = instance_task_name + asset_name = instance_asset_name + + if asset_name not in instance_by_asset_and_task: + instance_by_asset_and_task[asset_name] = {} + + if task_name not in instance_by_asset_and_task[asset_name]: + instance_by_asset_and_task[asset_name][task_name] = [] + instance_by_asset_and_task[asset_name][task_name].append(instance) + + if not instance_by_asset_and_task: + return + + session = context.data["ftrackSession"] + project_entity = context.data["ftrackProject"] + asset_names = set() + for asset_name in instance_by_asset_and_task.keys(): + asset_names.add(asset_name) + + joined_asset_names = ",".join([ + "\"{}\"".format(name) + for name in asset_names + ]) + entities = session.query(( + "TypedContext where project_id is \"{}\" and name in ({})" + ).format(project_entity["id"], joined_asset_names)).all() + + entities_by_name = { + entity["name"]: entity + for entity in entities + } + + for asset_name, by_task_data in instance_by_asset_and_task.items(): + entity = entities_by_name.get(asset_name) + task_entity_by_name = {} + if not entity: + self.log.warning(( + "Didn't find entity with name \"{}\" in Project \"{}\"" + ).format(asset_name, project_entity["full_name"])) + else: + task_entities = session.query(( + "select id, name from Task where parent_id is \"{}\"" + ).format(entity["id"])).all() + for task_entity in task_entities: + task_name_low = task_entity["name"].lower() + task_entity_by_name[task_name_low] = task_entity + + for task_name, instances in by_task_data.items(): + task_entity = None + if task_name and entity: + task_entity = task_entity_by_name.get(task_name.lower()) + + for instance in instances: + instance.data["ftrackEntity"] = entity + instance.data["ftrackTask"] = task_entity + + self.log.debug(( + "Instance {} has own ftrack entities" + " as has different context. 
TypedContext: {} Task: {}" + ).format(str(instance), str(entity), str(task_entity))) From c34eee3eebd8e640dacf927166a0609668546245 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 15:36:28 +0100 Subject: [PATCH 094/295] integrate ftrack api use right entities from instance --- .../plugins/publish/integrate_ftrack_api.py | 44 ++++++++++++------- 1 file changed, 28 insertions(+), 16 deletions(-) diff --git a/pype/modules/ftrack/plugins/publish/integrate_ftrack_api.py b/pype/modules/ftrack/plugins/publish/integrate_ftrack_api.py index 2c8e06a0997..6c25b9191e4 100644 --- a/pype/modules/ftrack/plugins/publish/integrate_ftrack_api.py +++ b/pype/modules/ftrack/plugins/publish/integrate_ftrack_api.py @@ -102,25 +102,37 @@ def _set_task_status(self, instance, task_entity, session): def process(self, instance): session = instance.context.data["ftrackSession"] - if instance.data.get("ftrackTask"): - task = instance.data["ftrackTask"] - name = task - parent = task["parent"] - elif instance.data.get("ftrackEntity"): - task = None - name = instance.data.get("ftrackEntity")['name'] + context = instance.context + + name = None + # If instance has set "ftrackEntity" or "ftrackTask" then use them from + # instance. Even if they are set to None. If they are set to None it + # has a reason. 
(like has different context) + if "ftrackEntity" in instance.data or "ftrackTask" in instance.data: + task = instance.data.get("ftrackTask") parent = instance.data.get("ftrackEntity") - elif instance.context.data.get("ftrackTask"): - task = instance.context.data["ftrackTask"] - name = task + + elif "ftrackEntity" in context.data or "ftrackTask" in context.data: + task = context.data.get("ftrackTask") + parent = context.data.get("ftrackEntity") + + if task: parent = task["parent"] - elif instance.context.data.get("ftrackEntity"): - task = None - name = instance.context.data.get("ftrackEntity")['name'] - parent = instance.context.data.get("ftrackEntity") + name = task + elif parent: + name = parent["name"] + + if not name: + self.log.info(( + "Skipping ftrack integration. Instance \"{}\" does not" + " have specified ftrack entities." + ).format(str(instance))) + return - info_msg = "Created new {entity_type} with data: {data}" - info_msg += ", metadata: {metadata}." + info_msg = ( + "Created new {entity_type} with data: {data}" + ", metadata: {metadata}." + ) used_asset_versions = [] From afe4d48615e6ebce1d182a210c6a22d56f5382ed Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 15:40:02 +0100 Subject: [PATCH 095/295] formatting changes --- .../plugins/publish/collect_batch_instances.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py b/pype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py index 94574ad19cf..545efcb3035 100644 --- a/pype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py +++ b/pype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py @@ -4,9 +4,7 @@ class CollectBatchInstances(pyblish.api.InstancePlugin): - """ - Collect all available instances from psd batch. 
- """ + """Collect all available instances for batch publish.""" label = "Collect Batch Instances" order = pyblish.api.CollectorOrder + 0.489 @@ -65,7 +63,7 @@ def process(self, instance): # add subset data from preset new_instance.data.update(subset_data) - new_instance.data["label"] = f"{instance_name}" + new_instance.data["label"] = instance_name new_instance.data["subset"] = subset_name new_instance.data["task"] = task_name From 1aef1c78b76c0d5dd5cb895f1ae05f29bfbbfb3b Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 15:43:25 +0100 Subject: [PATCH 096/295] added review and thumbnail for bulk mov --- .../plugins/publish/collect_context.py | 36 ++++++++++++++----- 1 file changed, 28 insertions(+), 8 deletions(-) diff --git a/pype/hosts/standalonepublisher/plugins/publish/collect_context.py b/pype/hosts/standalonepublisher/plugins/publish/collect_context.py index f7f3f00ebe5..43ab13cd791 100644 --- a/pype/hosts/standalonepublisher/plugins/publish/collect_context.py +++ b/pype/hosts/standalonepublisher/plugins/publish/collect_context.py @@ -14,12 +14,12 @@ """ import os -import pyblish.api -from avalon import io import json import copy -import clique from pprint import pformat +import clique +import pyblish.api +from avalon import io class CollectContextDataSAPublish(pyblish.api.ContextPlugin): @@ -50,7 +50,7 @@ def process(self, context): self.add_files_to_ignore_cleanup(in_data, context) # exception for editorial if in_data["family"] == "render_mov_batch": - in_data_list = self.prepare_mov_batch_instances(context, in_data) + in_data_list = self.prepare_mov_batch_instances(in_data) elif in_data["family"] in ["editorial", "background_batch"]: in_data_list = self.multiple_instances(context, in_data) @@ -132,7 +132,7 @@ def multiple_instances(self, context, in_data): return in_data_list - def prepare_mov_batch_instances(self, context, in_data): + def prepare_mov_batch_instances(self, in_data): """Copy of `multiple_instances` method. 
Method was copied because `batch_extensions` is used in @@ -142,8 +142,10 @@ def prepare_mov_batch_instances(self, context, in_data): this family specific filtering. Also "frameStart" and "frameEnd" keys are removed from instance which is needed for this processing. + Instance data will also care about families. + TODO: - - Merge logic with `multiple_instances` method. + - Merge possible logic with `multiple_instances` method. """ self.log.info("Preparing data for mov batch processing.") in_data_list = [] @@ -154,6 +156,11 @@ def prepare_mov_batch_instances(self, context, in_data): str(repre["files"]) )) ext = repre["ext"][1:] + + # Rename representation name + repre_name = repre["name"] + if repre_name.startswith(ext + "_"): + repre["name"] = ext # Skip files that are not available for mov batch publishing # TODO add dynamic expected extensions by family from `in_data` # - with this modification it would be possible to use only @@ -177,6 +184,11 @@ def prepare_mov_batch_instances(self, context, in_data): new_repre = copy.deepcopy(repre) new_repre["files"] = filename new_repre["name"] = ext + new_repre["thumbnail"] = True + + if "tags" not in new_repre: + new_repre["tags"] = [] + new_repre["tags"].append("review") # Prepare new subset name (temporary name) # - subset name will be changed in batch specific plugins @@ -189,6 +201,9 @@ def prepare_mov_batch_instances(self, context, in_data): in_data_copy = copy.deepcopy(in_data) in_data_copy["representations"] = [new_repre] in_data_copy["subset"] = new_subset_name + if "families" not in in_data_copy: + in_data_copy["families"] = [] + in_data_copy["families"].append("review") in_data_list.append(in_data_copy) @@ -196,6 +211,12 @@ def prepare_mov_batch_instances(self, context, in_data): def create_instance(self, context, in_data): subset = in_data["subset"] + # If instance data already contain families then use it + instance_families = in_data.get("families") or [] + # Make sure default families are in instance + for 
default_family in self.default_families or []: + if default_family not in instance_families: + instance_families.append(default_family) instance = context.create_instance(subset) instance.data.update( @@ -212,7 +233,7 @@ def create_instance(self, context, in_data): "frameEnd": in_data.get("representations", [None])[0].get( "frameEnd", None ), - "families": self.default_families or [], + "families": instance_families } ) self.log.info("collected instance: {}".format(pformat(instance.data))) @@ -239,7 +260,6 @@ def create_instance(self, context, in_data): if component["preview"]: instance.data["families"].append("review") - instance.data["repreProfiles"] = ["h264"] component["tags"] = ["review"] self.log.debug("Adding review family") From 34ad43a8699e2ea9d7f04b93f257a4936c7ab7e1 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 17:47:37 +0100 Subject: [PATCH 097/295] temporary fix of short names from settings --- pype/modules/ftrack/lib/avalon_sync.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pype/modules/ftrack/lib/avalon_sync.py b/pype/modules/ftrack/lib/avalon_sync.py index e9dc1734c6b..b88c0104e1e 100644 --- a/pype/modules/ftrack/lib/avalon_sync.py +++ b/pype/modules/ftrack/lib/avalon_sync.py @@ -1103,9 +1103,7 @@ def prepare_ftrack_ent_data(self): project_name = self.entities_dict[self.ft_project_id]["name"] project_anatomy_data = get_anatomy_settings(project_name) - task_type_mapping = ( - project_anatomy_data["attributes"]["task_short_names"] - ) + task_type_mapping = project_anatomy_data["tasks"] not_set_ids = [] for id, entity_dict in self.entities_dict.items(): @@ -1145,7 +1143,8 @@ def prepare_ftrack_ent_data(self): tasks = {} for task_type in task_types: task_type_name = task_type["name"] - short_name = task_type_mapping.get(task_type_name) + task_type_def = task_type_mapping.get(task_type_name) or {} + short_name = task_type_def.get("short_name") tasks[task_type_name] = { "short_name": short_name or 
task_type_name } From 30481e952991e6860796205b6fe81ab339ffa88b Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Thu, 18 Mar 2021 17:54:05 +0100 Subject: [PATCH 098/295] fix typos --- pype/modules/deadline/plugins/publish/submit_maya_deadline.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/modules/deadline/plugins/publish/submit_maya_deadline.py b/pype/modules/deadline/plugins/publish/submit_maya_deadline.py index 4d5aec9d50e..b17dd6ba8d3 100644 --- a/pype/modules/deadline/plugins/publish/submit_maya_deadline.py +++ b/pype/modules/deadline/plugins/publish/submit_maya_deadline.py @@ -410,9 +410,9 @@ def process(self, instance): if self.group != "none": self.payload_skeleton["JobInfo"]["Group"] = self.group - if self.limit: + if self.limit_groups: self.payload_skeleton["JobInfo"]["LimitGroups"] = \ - ",".join(self.limit) + ",".join(self.limit_groups) # Optional, enable double-click to preview rendered # frames from Deadline Monitor self.payload_skeleton["JobInfo"]["OutputDirectory0"] = \ From 6337354ca95df02ff6817de06ede0aff35bda19d Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 18:11:34 +0100 Subject: [PATCH 099/295] always register lists of callbacks as time may override them --- pype/modules/timers_manager/timers_manager.py | 12 ++++++------ pype/modules/timers_manager/widget_user_idle.py | 11 ++++++----- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/pype/modules/timers_manager/timers_manager.py b/pype/modules/timers_manager/timers_manager.py index 68890640b32..b83f51f0bae 100644 --- a/pype/modules/timers_manager/timers_manager.py +++ b/pype/modules/timers_manager/timers_manager.py @@ -1,4 +1,5 @@ import os +import collections from abc import ABCMeta, abstractmethod import six from .. 
import PypeModule, ITrayService, IIdleManager, IWebServerRoutes @@ -159,26 +160,25 @@ def connect_with_modules(self, enabled_modules): def callbacks_by_idle_time(self): """Implementation of IIdleManager interface.""" # Time when message is shown - callbacks = { - self.time_show_message: lambda: self.time_callback(0) - } + callbacks = collections.defaultdict(list) + callbacks[self.time_show_message].append(lambda: self.time_callback(0)) # Times when idle is between show widget and stop timers show_to_stop_range = range( self.time_show_message - 1, self.time_stop_timer ) for num in show_to_stop_range: - callbacks[num] = lambda: self.time_callback(1) + callbacks[num].append(lambda: self.time_callback(1)) # Times when widget is already shown and user restart idle shown_and_moved_range = range( self.time_stop_timer - self.time_show_message ) for num in shown_and_moved_range: - callbacks[num] = lambda: self.time_callback(1) + callbacks[num].append(lambda: self.time_callback(1)) # Time when timers are stopped - callbacks[self.time_stop_timer] = lambda: self.time_callback(2) + callbacks[self.time_stop_timer].append(lambda: self.time_callback(2)) return callbacks diff --git a/pype/modules/timers_manager/widget_user_idle.py b/pype/modules/timers_manager/widget_user_idle.py index 5e47cdaddfe..cbdb7fd30a4 100644 --- a/pype/modules/timers_manager/widget_user_idle.py +++ b/pype/modules/timers_manager/widget_user_idle.py @@ -163,8 +163,9 @@ class SignalHandler(QtCore.QObject): signal_change_label = QtCore.Signal() signal_stop_timers = QtCore.Signal() - def __init__(self, cls): - super().__init__() - self.signal_show_message.connect(cls.show_message) - self.signal_change_label.connect(cls.change_label) - self.signal_stop_timers.connect(cls.stop_timers) + def __init__(self, module): + super(SignalHandler, self).__init__() + self.module = module + self.signal_show_message.connect(module.show_message) + self.signal_change_label.connect(module.change_label) + 
self.signal_stop_timers.connect(module.stop_timers) From 13d3b42617df5765fb9aeb56319f2553ebe2c900 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 18:39:01 +0100 Subject: [PATCH 100/295] store only application name to config --- pype/modules/ftrack/lib/avalon_sync.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/pype/modules/ftrack/lib/avalon_sync.py b/pype/modules/ftrack/lib/avalon_sync.py index d639e814a5c..e4631aad292 100644 --- a/pype/modules/ftrack/lib/avalon_sync.py +++ b/pype/modules/ftrack/lib/avalon_sync.py @@ -194,12 +194,8 @@ def get_project_apps(in_app_list): missing_app_msg = "Missing definition of application" application_manager = ApplicationManager() for app_name in in_app_list: - app = application_manager.applications.get(app_name) - if app: - apps.append({ - "name": app_name, - "label": app.full_label - }) + if application_manager.applications.get(app_name): + apps.append({"name": app_name}) else: warnings[missing_app_msg].append(app_name) return apps, warnings From 834aa9c4053d0e59a6fbb3d9de738bce3215e436 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 18:39:29 +0100 Subject: [PATCH 101/295] skip applications lookup if passed value is empty --- pype/modules/ftrack/lib/avalon_sync.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pype/modules/ftrack/lib/avalon_sync.py b/pype/modules/ftrack/lib/avalon_sync.py index e4631aad292..be1e150cb71 100644 --- a/pype/modules/ftrack/lib/avalon_sync.py +++ b/pype/modules/ftrack/lib/avalon_sync.py @@ -191,6 +191,9 @@ def get_project_apps(in_app_list): apps = [] warnings = collections.defaultdict(list) + if not in_app_list: + return apps, warnings + missing_app_msg = "Missing definition of application" application_manager = ApplicationManager() for app_name in in_app_list: From f4e11d69e3fb176aed2729a915a09b99ee6ac8a9 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 18:39:38 +0100 Subject: [PATCH 102/295] pop applicaitons from data 
--- pype/modules/ftrack/lib/avalon_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/modules/ftrack/lib/avalon_sync.py b/pype/modules/ftrack/lib/avalon_sync.py index be1e150cb71..5ce62631986 100644 --- a/pype/modules/ftrack/lib/avalon_sync.py +++ b/pype/modules/ftrack/lib/avalon_sync.py @@ -1140,7 +1140,7 @@ def prepare_ftrack_ent_data(self): proj_schema = entity["project_schema"] task_types = proj_schema["_task_type_schema"]["types"] proj_apps, warnings = get_project_apps( - (data.get("applications") or []) + data.pop("applications", []) ) for msg, items in warnings.items(): if not msg or not items: From 166f623cd483bddd4baf6719e8b30a33f2947372 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 18:39:50 +0100 Subject: [PATCH 103/295] don't care about old parent name on rename --- pype/modules/ftrack/lib/avalon_sync.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/pype/modules/ftrack/lib/avalon_sync.py b/pype/modules/ftrack/lib/avalon_sync.py index 5ce62631986..bab69ab8e4b 100644 --- a/pype/modules/ftrack/lib/avalon_sync.py +++ b/pype/modules/ftrack/lib/avalon_sync.py @@ -1427,8 +1427,13 @@ def prepare_changes(self): old_parent_name = self.entities_dict[ self.ft_project_id]["name"] else: - old_parent_name = self.avalon_ents_by_id[ - ftrack_parent_mongo_id]["name"] + old_parent_name = "N/A" + if ftrack_parent_mongo_id in self.avalon_ents_by_id: + old_parent_name = ( + self.avalon_ents_by_id + [ftrack_parent_mongo_id] + ["name"] + ) self.updates[avalon_id]["data"] = { "visualParent": new_parent_id From b166b8133a809966ae387c42766ec5dfc8abdd7b Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 18:40:06 +0100 Subject: [PATCH 104/295] task types are not overriden with new but only added new ones --- pype/modules/ftrack/lib/avalon_sync.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/pype/modules/ftrack/lib/avalon_sync.py 
b/pype/modules/ftrack/lib/avalon_sync.py index bab69ab8e4b..7a8800ffebb 100644 --- a/pype/modules/ftrack/lib/avalon_sync.py +++ b/pype/modules/ftrack/lib/avalon_sync.py @@ -2143,11 +2143,22 @@ def prepare_project_changes(self): final_doc_data = self.entities_dict[self.ft_project_id]["final_entity"] final_doc_tasks = final_doc_data["config"].pop("tasks") current_doc_tasks = self.avalon_project.get("config", {}).get("tasks") - # Update project's tasks if tasks are empty or are not same - if not final_doc_tasks: + # Update project's task types + if not current_doc_tasks: update_tasks = True else: - update_tasks = final_doc_tasks != current_doc_tasks + # Check if task types are same + update_tasks = False + for task_type in final_doc_tasks: + if task_type not in current_doc_tasks: + update_tasks = True + break + + # Update new task types + # - but keep data about existing types and only add new one + if update_tasks: + for task_type, type_data in current_doc_tasks.items(): + final_doc_tasks[task_type] = type_data changes = self.compare_dict(final_doc_data, self.avalon_project) From 19ab249c6ddd1d6f1e88e6cee3bb345e30dd5850 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 19:03:28 +0100 Subject: [PATCH 105/295] applications are not stored to data in sync to avalon event --- .../event_sync_to_avalon.py | 40 ++++++++++--------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py index 7c9c4d196fe..a9e1f4282d4 100644 --- a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py @@ -1822,6 +1822,27 @@ def process_updated(self): ent_cust_attrs = [] for key, values in ent_info["changes"].items(): + if entType == "show" and key == "applications": + # Store apps to project't config + apps_str = ent_info["changes"]["applications"]["new"] + 
cust_attr_apps = [ + app_name.strip() + for app_name in apps_str.split(", ") if app_name + ] + + proj_apps, warnings = ( + avalon_sync.get_project_apps(cust_attr_apps) + ) + if "config" not in self.updates[mongo_id]: + self.updates[mongo_id]["config"] = {} + self.updates[mongo_id]["config"]["apps"] = proj_apps + + for msg, items in warnings.items(): + if not msg or not items: + continue + self.report_items["warning"][msg] = items + continue + if key in hier_attrs_keys: self.hier_cust_attrs_changes[key].append(ftrack_id) continue @@ -1839,25 +1860,6 @@ def process_updated(self): ) ) - if entType != "show" or key != "applications": - continue - - # Store apps to project't config - apps_str = ent_info["changes"]["applications"]["new"] - cust_attr_apps = [app for app in apps_str.split(", ") if app] - - proj_apps, warnings = ( - avalon_sync.get_project_apps(cust_attr_apps) - ) - if "config" not in self.updates[mongo_id]: - self.updates[mongo_id]["config"] = {} - self.updates[mongo_id]["config"]["apps"] = proj_apps - - for msg, items in warnings.items(): - if not msg or not items: - continue - self.report_items["warning"][msg] = items - def process_hier_cleanup(self): if ( not self.moved_in_avalon and From 63cc02e750165b03e9f191d42f5bc81ca7095f3a Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 18 Mar 2021 19:11:36 +0100 Subject: [PATCH 106/295] fixed collapsible_key dicitonary --- pype/tools/settings/settings/widgets/dict_mutable_widget.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pype/tools/settings/settings/widgets/dict_mutable_widget.py b/pype/tools/settings/settings/widgets/dict_mutable_widget.py index 53b2d1ddd25..0cb051082ef 100644 --- a/pype/tools/settings/settings/widgets/dict_mutable_widget.py +++ b/pype/tools/settings/settings/widgets/dict_mutable_widget.py @@ -358,7 +358,8 @@ def set_as_required(self, key): self.add_btn.setEnabled(False) def set_as_last_required(self): - self.add_btn.setEnabled(True) + if not self.collapsible_key: 
+ self.add_btn.setEnabled(True) def _on_focus_lose(self): if ( From ebd6c08e76518184aadf70f454d0d2467a0b3be7 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 10:59:40 +0100 Subject: [PATCH 107/295] mongo handler cache anatomy keys --- pype/settings/handlers.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/pype/settings/handlers.py b/pype/settings/handlers.py index 89f9645be79..004c2fe4c27 100644 --- a/pype/settings/handlers.py +++ b/pype/settings/handlers.py @@ -342,8 +342,25 @@ class MongoSettingsHandler(SettingsHandler): def __init__(self): # Get mongo connection from pype.lib import PypeMongoConnection + from avalon.api import AvalonMongoDB + from .entities import ProjectSettings + settings_collection = PypeMongoConnection.get_mongo_client() + # Prepare anatomy keys and attribute keys + # NOTE this is cached on first import + # - keys may change only on schema change which should not happen + # during production + project_settings_root = ProjectSettings( + reset=False, change_state=False + ) + anatomy_entity = project_settings_root["project_anatomy"] + anatomy_keys = set(anatomy_entity.keys()) + anatomy_keys.remove("attributes") + attribute_keys = set(anatomy_entity["attributes"].keys()) + + self.anatomy_keys = anatomy_keys + self.attribute_keys = attribute_keys # TODO prepare version of pype # - pype version should define how are settings saved and loaded @@ -357,6 +374,7 @@ def __init__(self): self.collection_name = collection_name self.collection = settings_collection[database_name][collection_name] + self.avalon_db = AvalonMongoDB() self.system_settings_cache = CacheValues() self.project_settings_cache = collections.defaultdict(CacheValues) From 1d55c434b742e0012edf58fef477bad15768bf37 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 11:26:43 +0100 Subject: [PATCH 108/295] removed anatomy templates/roots entities --- pype/settings/entities/__init__.py | 10 +- pype/settings/entities/anatomy_entities.py | 
94 ------------------- .../schemas/projects_schema/schema_main.json | 2 +- .../schemas/schema_anatomy_templates.json | 2 +- 4 files changed, 4 insertions(+), 104 deletions(-) diff --git a/pype/settings/entities/__init__.py b/pype/settings/entities/__init__.py index b48f763c731..20e00de4a50 100644 --- a/pype/settings/entities/__init__.py +++ b/pype/settings/entities/__init__.py @@ -105,11 +105,7 @@ from .dict_immutable_keys_entity import DictImmutableKeysEntity from .dict_mutable_keys_entity import DictMutableKeysEntity -from .anatomy_entities import ( - AnatomyEntity, - AnatomyRootsEntity, - AnatomyTemplatesEntity -) +from .anatomy_entities import AnatomyEntity __all__ = ( @@ -155,7 +151,5 @@ "DictMutableKeysEntity", - "AnatomyEntity", - "AnatomyRootsEntity", - "AnatomyTemplatesEntity" + "AnatomyEntity" ) diff --git a/pype/settings/entities/anatomy_entities.py b/pype/settings/entities/anatomy_entities.py index 1b98bda4dd4..4e16b8a8409 100644 --- a/pype/settings/entities/anatomy_entities.py +++ b/pype/settings/entities/anatomy_entities.py @@ -1,102 +1,8 @@ from .dict_immutable_keys_entity import DictImmutableKeysEntity -from .dict_mutable_keys_entity import DictMutableKeysEntity class AnatomyEntity(DictImmutableKeysEntity): schema_types = ["anatomy"] - def _item_initalization(self): - self._roots_entity = None - self._templates_entity = None - super(AnatomyEntity, self)._item_initalization() - @property - def roots_entity(self): - if self._roots_entity is None: - _roots_entity = None - for child_entity in self.non_gui_children.values(): - if isinstance(child_entity, AnatomyRootsEntity): - _roots_entity = child_entity - break - - if _roots_entity is None: - raise KeyError( - "AnatomyEntity does not contain AnatomyRootsEntity" - ) - - self._roots_entity = _roots_entity - return self._roots_entity - - @property - def templates_entity(self): - if self._templates_entity is None: - _templates_entity = None - for child_entity in self.non_gui_children.values(): - if 
isinstance(child_entity, AnatomyTemplatesEntity): - _templates_entity = child_entity - break - - if _templates_entity is None: - raise KeyError( - "AnatomyEntity does not contain AnatomyRootsEntity" - ) - - self._templates_entity = _templates_entity - return self._templates_entity - - -class AnatomyRootsEntity(DictMutableKeysEntity): - schema_types = ["anatomy_roots"] - - def schema_validations(self): - if not isinstance(self.parent, AnatomyEntity): - raise TypeError("Parent of {} is not AnatomyEntity object".format( - self.__class__.__name__ - )) - super(AnatomyRootsEntity, self).schema_validations() - - @property - def has_studio_override(self): - output = super(AnatomyRootsEntity, self).has_studio_override - if not output: - output = self.parent.templates_entity._child_has_studio_override - return output - - @property - def has_project_override(self): - output = super(AnatomyRootsEntity, self).has_project_override - if not output: - output = self.parent.templates_entity._child_has_project_override - return output - - -class AnatomyTemplatesEntity(DictImmutableKeysEntity): - schema_types = ["anatomy_templates"] - - def schema_validations(self): - if not isinstance(self.parent, AnatomyEntity): - raise TypeError("Parent of {} is not AnatomyEntity object".format( - self.__class__.__name__ - )) - super(AnatomyTemplatesEntity, self).schema_validations() - - @property - def has_studio_override(self): - output = super(AnatomyTemplatesEntity, self).has_studio_override - if not output: - output = ( - self.parent.roots_entity._has_studio_override - or self.parent.roots_entity._child_has_studio_override - ) - return output - - @property - def has_project_override(self): - output = super(AnatomyTemplatesEntity, self).has_project_override - if not output: - output = ( - self.parent.roots_entity._has_project_override - or self.parent.roots_entity._child_has_project_override - ) - return output diff --git a/pype/settings/entities/schemas/projects_schema/schema_main.json 
b/pype/settings/entities/schemas/projects_schema/schema_main.json index 2ac6678d722..565500edd20 100644 --- a/pype/settings/entities/schemas/projects_schema/schema_main.json +++ b/pype/settings/entities/schemas/projects_schema/schema_main.json @@ -10,7 +10,7 @@ { "key": "roots", "label": "Roots", - "type": "anatomy_roots", + "type": "dict-modifiable", "is_file": true, "is_group": true, "expandable": false, diff --git a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json index 8410ec48f47..918d3edba6d 100644 --- a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json +++ b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json @@ -1,5 +1,5 @@ { - "type": "anatomy_templates", + "type": "dict", "key": "templates", "label": "Templates", "collapsible": true, From 95d8f7fb31c25edb4f2cc6679c7eccc74b20599e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 11:27:16 +0100 Subject: [PATCH 109/295] AnatomyEntity makes sure that all children all overriden on project overrides --- pype/settings/entities/anatomy_entities.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/pype/settings/entities/anatomy_entities.py b/pype/settings/entities/anatomy_entities.py index 4e16b8a8409..d048ffabba7 100644 --- a/pype/settings/entities/anatomy_entities.py +++ b/pype/settings/entities/anatomy_entities.py @@ -1,8 +1,25 @@ from .dict_immutable_keys_entity import DictImmutableKeysEntity +from .lib import OverrideState class AnatomyEntity(DictImmutableKeysEntity): schema_types = ["anatomy"] + def _update_current_metadata(self): + if self._override_state is OverrideState.PROJECT: + return {} + return super(AnatomyEntity, self)._update_current_metadata() + def set_override_state(self, *args, **kwargs): + super(AnatomyEntity, self).set_override_state(*args, **kwargs) + if 
self._override_state is OverrideState.PROJECT: + for child_obj in self.non_gui_children.values(): + if not child_obj.has_project_override: + self.add_to_project_override() + break + def on_child_change(self, child_obj): + if self._override_state is OverrideState.PROJECT: + if not child_obj.has_project_override: + child_obj.add_to_project_override() + return super(AnatomyEntity, self).on_child_change(child_obj) From af1c18db1fee70f9bbc776926b6f47dd5ab8b87f Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 12:03:53 +0100 Subject: [PATCH 110/295] added methods for conversion to valid value types --- pype/settings/entities/base_entity.py | 30 +++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/pype/settings/entities/base_entity.py b/pype/settings/entities/base_entity.py index 3a4bb23a90f..6a0b11cc6c2 100644 --- a/pype/settings/entities/base_entity.py +++ b/pype/settings/entities/base_entity.py @@ -324,6 +324,36 @@ def _validate_value_type(self, value): raise InvalidValueType(self.valid_value_types, type(value), self.path) + def _convert_to_valid_type(self, value): + """Private method of entity to convert value. + + NOTE: Method is not abstract as more entities won't have implemented + logic inside. + + Must return NOT_SET if can't convert the value. + """ + return NOT_SET + + def convert_to_valid_type(self, value): + """Check value type with possibility of conversion to valid. + + If entered value has right type than is returned as it is. otherwise + is used privete method of entity to try convert. + + Raises: + InvalidValueType: If value's type is not valid by entity's + definition and can't be converted by entity logic. 
+ """ + # + if self.is_value_valid_type(value): + return value + + new_value = self._convert_to_valid_type(value) + if new_value is not NOT_SET and self.is_value_valid_type(new_value): + return new_value + + raise InvalidValueType(self.valid_value_types, type(value), self.path) + # TODO convert to private method def _check_update_value(self, value, value_source): """Validation of value on update methods. From 68d20b910e190d2d803b7de1feeb3332465351e8 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 12:04:30 +0100 Subject: [PATCH 111/295] loaded values from default/studio/project are tried to convert --- pype/settings/entities/base_entity.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/pype/settings/entities/base_entity.py b/pype/settings/entities/base_entity.py index 6a0b11cc6c2..33abee227a7 100644 --- a/pype/settings/entities/base_entity.py +++ b/pype/settings/entities/base_entity.py @@ -375,9 +375,13 @@ def _check_update_value(self, value, value_source): if value is NOT_SET: return value - # Validate value type and return value itself if is valid. - if self.is_value_valid_type(value): - return value + try: + new_value = self.convert_to_valid_type(value) + except InvalidValueType: + new_value = NOT_SET + + if new_value is not NOT_SET: + return new_value # Warning log about invalid value type. 
self.log.warning( From eec4119177f4ab4a0028d7fb8b83988632bfcecf Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 12:09:45 +0100 Subject: [PATCH 112/295] implemented some convertion methods --- pype/settings/entities/enum_entity.py | 6 ++++++ pype/settings/entities/input_entities.py | 15 ++++++++++++++- pype/settings/entities/list_entity.py | 5 +++++ 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/pype/settings/entities/enum_entity.py b/pype/settings/entities/enum_entity.py index 4d6d268c702..437f37c4698 100644 --- a/pype/settings/entities/enum_entity.py +++ b/pype/settings/entities/enum_entity.py @@ -46,6 +46,12 @@ def schema_validations(self): super(EnumEntity, self).schema_validations() + def _convert_to_valid_type(self, value): + if self.multiselection: + if isinstance(value, (set, tuple)): + return list(value) + return NOT_SET + def set(self, value): if self.multiselection: if not isinstance(value, list): diff --git a/pype/settings/entities/input_entities.py b/pype/settings/entities/input_entities.py index c26cb249a67..1f57578094c 100644 --- a/pype/settings/entities/input_entities.py +++ b/pype/settings/entities/input_entities.py @@ -330,7 +330,7 @@ def _item_initalization(self): self.decimal = self.schema_data.get("decimal", 0) if self.decimal: - valid_value_types = (int, float) + valid_value_types = (float, ) else: valid_value_types = (int, ) self.valid_value_types = valid_value_types @@ -340,6 +340,19 @@ def set(self, value): # TODO check number for floats, integers and point self._validate_value_type(value) super(NumberEntity, self).set(value) + def _convert_to_valid_type(self, value): + if self.decimal: + if isinstance(value, int): + return float(value) + else: + if isinstance(value, float): + new_value = int(value) + if new_value != value: + self.log.info("Converted float {} to int {}".format( + value, new_value + )) + return new_value + return NOT_SET class BoolEntity(InputEntity): diff --git 
a/pype/settings/entities/list_entity.py b/pype/settings/entities/list_entity.py index 752347489a6..ab112236f85 100644 --- a/pype/settings/entities/list_entity.py +++ b/pype/settings/entities/list_entity.py @@ -126,6 +126,11 @@ def swap_indexes(self, index_1, index_2): ) self.on_change() + def _convert_to_valid_type(self, value): + if isinstance(value, (set, tuple)): + return list(value) + return NOT_SET + def _item_initalization(self): self.valid_value_types = (list, ) self.children = [] From 561ad4b8c69c3ed7a4cc16810490e41335626c98 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 12:11:49 +0100 Subject: [PATCH 113/295] setter methods use conversion method --- .../entities/dict_immutable_keys_entity.py | 4 ++-- .../entities/dict_mutable_keys_entity.py | 4 ++-- pype/settings/entities/enum_entity.py | 14 ++++---------- pype/settings/entities/input_entities.py | 17 ++++++----------- pype/settings/entities/item_entities.py | 4 ++-- pype/settings/entities/list_entity.py | 4 ++-- pype/settings/entities/root_entities.py | 4 ++-- 7 files changed, 20 insertions(+), 31 deletions(-) diff --git a/pype/settings/entities/dict_immutable_keys_entity.py b/pype/settings/entities/dict_immutable_keys_entity.py index 208f9763146..d7973205831 100644 --- a/pype/settings/entities/dict_immutable_keys_entity.py +++ b/pype/settings/entities/dict_immutable_keys_entity.py @@ -70,8 +70,8 @@ def items(self): def set(self, value): """Set value.""" - self._validate_value_type(value) - for _key, _value in value.items(): + new_value = self.convert_to_valid_type(value) + for _key, _value in new_value.items(): self.non_gui_children[_key].set(_value) def schema_validations(self): diff --git a/pype/settings/entities/dict_mutable_keys_entity.py b/pype/settings/entities/dict_mutable_keys_entity.py index 2fd2b873110..f930d3738b2 100644 --- a/pype/settings/entities/dict_mutable_keys_entity.py +++ b/pype/settings/entities/dict_mutable_keys_entity.py @@ -72,11 +72,11 @@ def clear(self): 
self.pop(key) def set(self, value): - self._validate_value_type(value) + new_value = self.convert_to_valid_type(value) prev_keys = set(self.keys()) - for _key, _value in value.items(): + for _key, _value in new_value.items(): self.set_key_value(_key, _value) if _key in prev_keys: prev_keys.remove(_key) diff --git a/pype/settings/entities/enum_entity.py b/pype/settings/entities/enum_entity.py index 437f37c4698..f2831c78dc4 100644 --- a/pype/settings/entities/enum_entity.py +++ b/pype/settings/entities/enum_entity.py @@ -53,17 +53,11 @@ def _convert_to_valid_type(self, value): return NOT_SET def set(self, value): + new_value = self.convert_to_valid_type(value) if self.multiselection: - if not isinstance(value, list): - if isinstance(value, (set, tuple)): - value = list(value) - else: - value = [value] - check_values = value + check_values = new_value else: - check_values = [value] - - self._validate_value_type(value) + check_values = [new_value] for item in check_values: if item not in self.valid_keys: @@ -72,7 +66,7 @@ def set(self, value): item, self.valid_keys ) ) - self._current_value = value + self._current_value = new_value self._on_value_change() diff --git a/pype/settings/entities/input_entities.py b/pype/settings/entities/input_entities.py index 1f57578094c..1e781ae9512 100644 --- a/pype/settings/entities/input_entities.py +++ b/pype/settings/entities/input_entities.py @@ -121,8 +121,7 @@ def _settings_value(self): def set(self, value): """Change value.""" - self._validate_value_type(value) - self._current_value = value + self._current_value = self.convert_to_valid_type(value) self._on_value_change() def _on_value_change(self): @@ -336,10 +335,6 @@ def _item_initalization(self): self.valid_value_types = valid_value_types self.value_on_not_set = 0 - def set(self, value): - # TODO check number for floats, integers and point - self._validate_value_type(value) - super(NumberEntity, self).set(value) def _convert_to_valid_type(self, value): if self.decimal: if 
isinstance(value, int): @@ -401,13 +396,13 @@ def _item_initalization(self): self.project_override_metadata = {} def set(self, value): - self._validate_value_type(value) + new_value = self.convert_to_valid_type(value) - if isinstance(value, dict): + if isinstance(new_value, dict): for key in METADATA_KEYS: - if key in value: - value.pop(key) - self._current_value = value + if key in new_value: + new_value.pop(key) + self._current_value = new_value self._on_value_change() @property diff --git a/pype/settings/entities/item_entities.py b/pype/settings/entities/item_entities.py index 11e43e4fa6e..da36bbbc2ae 100644 --- a/pype/settings/entities/item_entities.py +++ b/pype/settings/entities/item_entities.py @@ -239,8 +239,8 @@ def value(self): return output def set(self, value): - self._validate_value_type(value) - for idx, item in enumerate(value): + new_value = self.convert_to_valid_type(value) + for idx, item in enumerate(new_value): self.children[idx].set(item) def settings_value(self): diff --git a/pype/settings/entities/list_entity.py b/pype/settings/entities/list_entity.py index ab112236f85..814086fe0fc 100644 --- a/pype/settings/entities/list_entity.py +++ b/pype/settings/entities/list_entity.py @@ -180,9 +180,9 @@ def get_child_path(self, child_obj): return "/".join([self.path, str(result_idx)]) def set(self, value): - self._validate_value_type(value) + new_value = self.convert_to_valid_type(value) self.clear() - for item in value: + for item in new_value: self.append(item) def on_child_change(self, _child_entity): diff --git a/pype/settings/entities/root_entities.py b/pype/settings/entities/root_entities.py index b4dc6678269..e9000015b28 100644 --- a/pype/settings/entities/root_entities.py +++ b/pype/settings/entities/root_entities.py @@ -82,8 +82,8 @@ def get(self, key, default=None): def set(self, value): """Set value.""" - self._validate_value_type(value) - for _key, _value in value.items(): + new_value = self.convert_to_valid_type(value) + for _key, _value 
in new_value.items(): self.non_gui_children[_key].set(_value) def keys(self): From d0fec1108998dee6f06be080fc03cd6a2d0eccb2 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 12:27:07 +0100 Subject: [PATCH 114/295] added method to convert project document to anatomy data --- pype/settings/handlers.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/pype/settings/handlers.py b/pype/settings/handlers.py index 004c2fe4c27..fb724fffcf8 100644 --- a/pype/settings/handlers.py +++ b/pype/settings/handlers.py @@ -505,6 +505,38 @@ def get_project_settings_overrides(self, project_name): return {} return self._get_project_settings_overrides(project_name) + def project_doc_to_anatomy_data(self, project_doc): + """Convert project document to anatomy data. + + Probably should fill missing keys and values. + """ + attributes = {} + project_doc_data = project_doc.get("data") or {} + for key in self.attribute_keys: + value = project_doc_data.get(key) + if value is not None: + attributes[key] = value + + project_doc_config = project_doc.get("config") or {} + app_names = set() + if "apps" in project_doc_config: + for app_item in project_doc_config.pop("apps"): + if not app_item: + continue + app_name = app_item.get("name") + if app_name: + app_names.add(app_name) + + attributes["applications"] = list(app_names) + + output = {"attributes": attributes} + for key in self.anatomy_keys: + value = project_doc_config.get(key) + if value is not None: + output[key] = value + + return output + def _get_project_anatomy_overrides(self, project_name): if self.project_anatomy_cache[project_name].is_outdated: document_filter = { From 150adabff002dba2db33b4b37fd93095adce823f Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 13:01:52 +0100 Subject: [PATCH 115/295] implemented way how to load and save anatomy data to project document --- pype/settings/handlers.py | 109 ++++++++++++++++++++++++++++++++++---- 1 file changed, 98 insertions(+), 
11 deletions(-) diff --git a/pype/settings/handlers.py b/pype/settings/handlers.py index fb724fffcf8..48e6ca395c2 100644 --- a/pype/settings/handlers.py +++ b/pype/settings/handlers.py @@ -438,8 +438,90 @@ def save_project_anatomy(self, project_name, anatomy_data): data_cache = self.project_anatomy_cache[project_name] data_cache.update_data(anatomy_data) - self._save_project_data( - project_name, PROJECT_ANATOMY_KEY, data_cache + if project_name is not None: + self._save_project_anatomy_data(project_name, data_cache) + + else: + self._save_project_data( + project_name, PROJECT_ANATOMY_KEY, data_cache + ) + + @classmethod + def prepare_mongo_update_dict(cls, in_data): + data = {} + for key, value in in_data.items(): + if not isinstance(value, dict): + data[key] = value + continue + + new_value = cls.prepare_mongo_update_dict(value) + for _key, _value in new_value.items(): + new_key = ".".join((key, _key)) + data[new_key] = _value + + return data + + def _save_project_anatomy_data(self, project_name, data_cache): + # Create copy of data as they will be modified during save + new_data = data_cache.data_copy() + + # Prepare avalon project document + collection = self.avalon_db.database[project_name] + project_doc = collection.find_one({ + "type": "project" + }) + if not project_doc: + raise ValueError(( + "Project document of project \"{}\" does not exists." + " Create project first." 
+ ).format(project_name)) + + # Update dictionary of changes that will be changed in mongo + update_dict = {} + + # Project's data + update_dict_data = {} + project_doc_data = project_doc.get("data") or {} + attributes = new_data.pop("attributes") + _applications = attributes.pop("applications", None) or [] + for key, value in attributes.items(): + if ( + key in project_doc_data + and project_doc_data[key] == value + ): + continue + update_dict_data[key] = value + + if update_dict_data: + update_dict["data"] = update_dict_data + + update_dict_config = {} + + applications = [] + for application in _applications: + if not application: + continue + if isinstance(application, six.string_types): + applications.append({application: application}) + + new_data["apps"] = applications + + for key, value in new_data.items(): + project_doc_value = project_doc.get(key) + if key in project_doc and project_doc_value == value: + continue + update_dict_config[key] = value + + if update_dict_config: + update_dict["config"] = update_dict_config + + if not update_dict: + return + + _update_dict = self.prepare_mongo_update_dict(update_dict) + collection.update_one( + {"type": "project"}, + {"$set": _update_dict} ) def _save_project_data(self, project_name, doc_type, data_cache): @@ -539,17 +621,22 @@ def project_doc_to_anatomy_data(self, project_doc): def _get_project_anatomy_overrides(self, project_name): if self.project_anatomy_cache[project_name].is_outdated: - document_filter = { - "type": PROJECT_ANATOMY_KEY, - } if project_name is None: - document_filter["is_default"] = True + document_filter = { + "type": PROJECT_ANATOMY_KEY, + "is_default": True + } + document = self.collection.find_one(document_filter) + self.project_anatomy_cache[project_name].update_from_document( + document + ) else: - document_filter["project_name"] = project_name - document = self.collection.find_one(document_filter) - self.project_anatomy_cache[project_name].update_from_document( - document - ) + 
collection = self.avalon_db.database[project_name] + project_doc = collection.find_one({"type": "project"}) + self.project_anatomy_cache[project_name].update_data( + self.project_doc_to_anatomy_data(project_doc) + ) + return self.project_anatomy_cache[project_name].data_copy() def get_studio_project_anatomy_overrides(self): From 544869cd314b26f6b74c071e5ef871097f405dc1 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 13:27:30 +0100 Subject: [PATCH 116/295] it is possible to define keys to query for custom attributes --- pype/modules/ftrack/lib/avalon_sync.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/pype/modules/ftrack/lib/avalon_sync.py b/pype/modules/ftrack/lib/avalon_sync.py index 7a8800ffebb..f7feec7475e 100644 --- a/pype/modules/ftrack/lib/avalon_sync.py +++ b/pype/modules/ftrack/lib/avalon_sync.py @@ -83,15 +83,23 @@ def check_regex(name, entity_type, in_schema=None, schema_patterns=None): return False -def get_pype_attr(session, split_hierarchical=True): +def get_pype_attr(session, split_hierarchical=True, query_keys=None): custom_attributes = [] hier_custom_attributes = [] + if not query_keys: + query_keys = [ + "id", + "entity_type", + "object_type_id", + "is_hierarchical", + "default" + ] # TODO remove deprecated "avalon" group from query cust_attrs_query = ( - "select id, entity_type, object_type_id, is_hierarchical, default" + "select {}" " from CustomAttributeConfiguration" - " where group.name in (\"avalon\", \"pype\")" - ) + " where group.name in (\"avalon\", \"{}\")" + ).format(join_query_keys(query_keys), CUST_ATTR_GROUP) all_avalon_attr = session.query(cust_attrs_query).all() for cust_attr in all_avalon_attr: if split_hierarchical and cust_attr["is_hierarchical"]: From 21ba1896a218aa8cb7d6c4ae60a9a81f6eebf77c Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 13:27:39 +0100 Subject: [PATCH 117/295] implemented join query keys --- pype/modules/ftrack/lib/avalon_sync.py | 4 ++++ 1 
file changed, 4 insertions(+) diff --git a/pype/modules/ftrack/lib/avalon_sync.py b/pype/modules/ftrack/lib/avalon_sync.py index f7feec7475e..f0306c20736 100644 --- a/pype/modules/ftrack/lib/avalon_sync.py +++ b/pype/modules/ftrack/lib/avalon_sync.py @@ -83,6 +83,10 @@ def check_regex(name, entity_type, in_schema=None, schema_patterns=None): return False +def join_query_keys(keys): + return ",".join(["\"{}\"".format(key) for key in keys]) + + def get_pype_attr(session, split_hierarchical=True, query_keys=None): custom_attributes = [] hier_custom_attributes = [] From 4408137bdef5ab7ebd218a8921f0e47238dfdeb4 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 14:53:42 +0100 Subject: [PATCH 118/295] fix key query --- pype/modules/ftrack/lib/avalon_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/modules/ftrack/lib/avalon_sync.py b/pype/modules/ftrack/lib/avalon_sync.py index f0306c20736..addc8a4a9cd 100644 --- a/pype/modules/ftrack/lib/avalon_sync.py +++ b/pype/modules/ftrack/lib/avalon_sync.py @@ -103,7 +103,7 @@ def get_pype_attr(session, split_hierarchical=True, query_keys=None): "select {}" " from CustomAttributeConfiguration" " where group.name in (\"avalon\", \"{}\")" - ).format(join_query_keys(query_keys), CUST_ATTR_GROUP) + ).format(", ".join(query_keys), CUST_ATTR_GROUP) all_avalon_attr = session.query(cust_attrs_query).all() for cust_attr in all_avalon_attr: if split_hierarchical and cust_attr["is_hierarchical"]: From 85a03bd2d7f41dddbeeb6e367d6760a65dd455f1 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 14:54:22 +0100 Subject: [PATCH 119/295] implemented method that can convert changes string to value --- .../event_sync_to_avalon.py | 37 +++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py index a9e1f4282d4..443e428c01d 100644 --- 
a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py @@ -1,6 +1,7 @@ import os import collections import copy +import json import queue import time import datetime @@ -10,6 +11,7 @@ from bson.objectid import ObjectId from pymongo import UpdateOne +import arrow import ftrack_api from avalon import schema @@ -1860,6 +1862,41 @@ def process_updated(self): ) ) + def convert_value_by_cust_attr_conf(self, value, cust_attr_conf): + type_id = cust_attr_conf["type_id"] + cust_attr_type_name = self.cust_attr_types_by_id[type_id]["name"] + ignored = ( + "expression", "notificationtype", "dynamic enumerator" + ) + if cust_attr_type_name in ignored: + return None + + if cust_attr_type_name == "text": + return value + + if cust_attr_type_name == "boolean": + if value == "1": + return True + if value == "0": + return False + return bool(value) + + if cust_attr_type_name == "date": + return arrow.get(value) + + cust_attr_config = json.loads(cust_attr_conf["config"]) + + if cust_attr_type_name == "number": + if cust_attr_config["isdecimal"]: + return float(value) + return int(value) + + if cust_attr_type_name == "enumerator": + if not cust_attr_config["multiSelect"]: + return value + return value.split(", ") + return value + def process_hier_cleanup(self): if ( not self.moved_in_avalon and From d40165ef946993d836837dd5e4bea81ac1a0650a Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 14:55:02 +0100 Subject: [PATCH 120/295] removed duplicated property avalon_custom_attributes --- .../event_handlers_server/event_sync_to_avalon.py | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py index 443e428c01d..497edf1ae41 100644 --- a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ 
b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py @@ -229,15 +229,6 @@ def changeability_by_mongo_id(self): return self._changeability_by_mongo_id - @property - def avalon_custom_attributes(self): - """Return info about changeability of entity and it's parents.""" - if self._avalon_custom_attributes is None: - self._avalon_custom_attributes = avalon_sync.get_pype_attr( - self.process_session - ) - return self._avalon_custom_attributes - def remove_cached_by_key(self, key, values): if self._avalon_ents is None: return @@ -393,7 +384,6 @@ def reset_variables(self): self._avalon_archived_by_id = None self._avalon_archived_by_name = None - self._avalon_custom_attributes = None self._ent_types_by_name = None self.ftrack_ents_by_id = {} @@ -1238,7 +1228,7 @@ def create_entity_in_avalon(self, ftrack_ent, parent_avalon): def get_cust_attr_values(self, entity, keys=None): output = {} - custom_attrs, hier_attrs = self.avalon_custom_attributes + custom_attrs, hier_attrs = self.avalon_cust_attrs not_processed_keys = True if keys: not_processed_keys = [k for k in keys] From 40865b992af3352358574ff92f5dc56b814b9193 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 14:55:14 +0100 Subject: [PATCH 121/295] define query keys of custom attributes --- .../event_handlers_server/event_sync_to_avalon.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py index 497edf1ae41..8f53d617810 100644 --- a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py @@ -33,6 +33,15 @@ class SyncToAvalonEvent(BaseEvent): ignore_ent_types = ["Milestone"] ignore_keys = ["statusid", "thumbid"] + cust_attr_query_keys = [ + "id", + "key", + "entity_type", + "object_type_id", + "is_hierarchical", + "config", + "default" + ] project_query = 
( "select full_name, name, custom_attributes" ", project_schema._task_type_schema.types.name" @@ -117,7 +126,7 @@ def cur_project(self): def avalon_cust_attrs(self): if self._avalon_cust_attrs is None: self._avalon_cust_attrs = avalon_sync.get_pype_attr( - self.process_session + self.process_session, query_keys=self.cust_attr_query_keys ) return self._avalon_cust_attrs From 1c7010b0cbf87d1a45cdbdce9c3de3389e297ed9 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 14:55:43 +0100 Subject: [PATCH 122/295] implemented property cust_attr_types_by_id --- .../event_handlers_server/event_sync_to_avalon.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py index 8f53d617810..2b6f2e33df8 100644 --- a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py @@ -130,6 +130,18 @@ def avalon_cust_attrs(self): ) return self._avalon_cust_attrs + @property + def cust_attr_types_by_id(self): + if self._cust_attr_types_by_id is None: + cust_attr_types = self.process_session.query( + "select id, name from CustomAttributeType" + ).all() + self._cust_attr_types_by_id = { + cust_attr_type["id"]: cust_attr_type + for cust_attr_type in cust_attr_types + } + return self._cust_attr_types_by_id + @property def avalon_entities(self): if self._avalon_ents is None: @@ -382,6 +394,7 @@ def reset_variables(self): self._cur_project = None self._avalon_cust_attrs = None + self._cust_attr_types_by_id = None self._avalon_ents = None self._avalon_ents_by_id = None From 0231c73cf773d2903cec1f273af7436e76858429 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 14:55:50 +0100 Subject: [PATCH 123/295] fixed ordered dict --- .../ftrack/event_handlers_server/event_sync_to_avalon.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git 
a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py index 2b6f2e33df8..cf5b9d4e267 100644 --- a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py @@ -1562,10 +1562,9 @@ def process_added(self): ).format(entity_type, ent_info["entityType"])) continue - _entity_key = collections.OrderedDict({ - "configuration_id": mongo_id_configuration_id, - "entity_id": ftrack_id - }) + _entity_key = collections.OrderedDict() + _entity_key["configuration_id"] = mongo_id_configuration_id + _entity_key["entity_id"] = ftrack_id self.process_session.recorded_operations.push( ftrack_api.operation.UpdateEntityOperation( From a6b8300c32a369eb7278660fb3d31d9b3e00a2db Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 14:56:10 +0100 Subject: [PATCH 124/295] event value is converted to real value --- .../event_sync_to_avalon.py | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py index cf5b9d4e267..fec2d672cf6 100644 --- a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py @@ -1803,6 +1803,10 @@ def process_updated(self): return cust_attrs, hier_attrs = self.avalon_cust_attrs + hier_attrs_by_key = { + attr["key"]: attr + for attr in hier_attrs + } cust_attrs_by_obj_id = collections.defaultdict(dict) for cust_attr in cust_attrs: key = cust_attr["key"] @@ -1818,8 +1822,6 @@ def process_updated(self): obj_id = cust_attr["object_type_id"] cust_attrs_by_obj_id[obj_id][key] = cust_attr - hier_attrs_keys = [attr["key"] for attr in hier_attrs] - for ftrack_id, ent_info in ent_infos.items(): mongo_id = ftrack_mongo_mapping[ftrack_id] entType = ent_info["entityType"] 
@@ -1832,19 +1834,25 @@ def process_updated(self): # Ftrack's entity_type does not have defined custom attributes if ent_cust_attrs is None: - ent_cust_attrs = [] + ent_cust_attrs = {} for key, values in ent_info["changes"].items(): + if key in hier_attrs_by_key: + self.hier_cust_attrs_changes[key].append(ftrack_id) + continue + + if key not in ent_cust_attrs: + continue + + value = values["new"] + new_value = self.convert_value_by_cust_attr_conf( + value, ent_cust_attrs[key] + ) + if entType == "show" and key == "applications": # Store apps to project't config - apps_str = ent_info["changes"]["applications"]["new"] - cust_attr_apps = [ - app_name.strip() - for app_name in apps_str.split(", ") if app_name - ] - proj_apps, warnings = ( - avalon_sync.get_project_apps(cust_attr_apps) + avalon_sync.get_project_apps(new_value) ) if "config" not in self.updates[mongo_id]: self.updates[mongo_id]["config"] = {} @@ -1856,20 +1864,12 @@ def process_updated(self): self.report_items["warning"][msg] = items continue - if key in hier_attrs_keys: - self.hier_cust_attrs_changes[key].append(ftrack_id) - continue - - if key not in ent_cust_attrs: - continue - if "data" not in self.updates[mongo_id]: self.updates[mongo_id]["data"] = {} - value = values["new"] - self.updates[mongo_id]["data"][key] = value + self.updates[mongo_id]["data"][key] = new_value self.log.debug( "Setting data value of \"{}\" to \"{}\" <{}>".format( - key, value, ent_path + key, new_value, ent_path ) ) From 2e569e70af673458bd33bc75f9e7986117238b49 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 14:56:21 +0100 Subject: [PATCH 125/295] use join function --- .../ftrack/event_handlers_server/event_sync_to_avalon.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py index fec2d672cf6..c1c0ac00320 100644 --- 
a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py @@ -2137,16 +2137,12 @@ def process_hier_cleanup(self): parent_queue.put(parent_ent) # Prepare values to query - entity_ids_joined = ", ".join([ - "\"{}\"".format(id) for id in cust_attrs_ftrack_ids - ]) configuration_ids = set() for key in hier_cust_attrs_keys: configuration_ids.add(hier_attr_id_by_key[key]) - attributes_joined = ", ".join([ - "\"{}\"".format(conf_id) for conf_id in configuration_ids - ]) + entity_ids_joined = self.join_query_keys(cust_attrs_ftrack_ids) + attributes_joined = self.join_query_keys(configuration_ids) queries = [{ "action": "query", From e98fe26aa8fe0223c309d4361ea56d884527fc30 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 14:56:27 +0100 Subject: [PATCH 126/295] formatting changes --- .../ftrack/event_handlers_server/event_sync_to_avalon.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py index c1c0ac00320..d71a94aabf9 100644 --- a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py @@ -2018,7 +2018,7 @@ def process_hier_cleanup(self): self.update_entities() return - cust_attrs, hier_attrs = self.avalon_cust_attrs + _, hier_attrs = self.avalon_cust_attrs # Hierarchical custom attributes preparation *** hier_attr_key_by_id = { From 4c28395143b6df3fed4e3379cd47109825010c8c Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 15:00:41 +0100 Subject: [PATCH 127/295] renamed idle_logic to idle_threads --- pype/modules/idle_manager/{idle_logic.py => idle_threads.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename pype/modules/idle_manager/{idle_logic.py => idle_threads.py} (100%) diff --git a/pype/modules/idle_manager/idle_logic.py 
b/pype/modules/idle_manager/idle_threads.py similarity index 100% rename from pype/modules/idle_manager/idle_logic.py rename to pype/modules/idle_manager/idle_threads.py From 6908ae0e4e14720d736f8ce51e8eb20b33a239eb Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 15:01:00 +0100 Subject: [PATCH 128/295] moved idle thread to threads file --- pype/modules/idle_manager/idle_module.py | 84 ++--------------------- pype/modules/idle_manager/idle_threads.py | 73 ++++++++++++++++++++ 2 files changed, 79 insertions(+), 78 deletions(-) diff --git a/pype/modules/idle_manager/idle_module.py b/pype/modules/idle_manager/idle_module.py index 25309e94436..979e1b92ea8 100644 --- a/pype/modules/idle_manager/idle_module.py +++ b/pype/modules/idle_manager/idle_module.py @@ -1,11 +1,8 @@ -import time import collections -import threading from abc import ABCMeta, abstractmethod import six -from pype.lib import PypeLogger from pype.modules import PypeModule, ITrayService @@ -79,11 +76,16 @@ def idle_time(self): if self.idle_thread and self.idle_thread.is_running: return self.idle_thread.idle_time + def _create_thread(self): + from .idle_threads import IdleManagerThread + + return IdleManagerThread(self) + def start_thread(self): if self.idle_thread: self.idle_thread.stop() self.idle_thread.join() - self.idle_thread = IdleManagerThread(self) + self.idle_thread = self._create_thread() self.idle_thread.start() def stop_thread(self): @@ -93,77 +95,3 @@ def stop_thread(self): def on_thread_stop(self): self.set_service_failed_icon() - - -class IdleManagerThread(threading.Thread): - def __init__(self, module, *args, **kwargs): - super(IdleManagerThread, self).__init__(*args, **kwargs) - self.log = PypeLogger.get_logger(self.__class__.__name__) - self.module = module - self.threads = [] - self.is_running = False - self.idle_time = 0 - - def stop(self): - self.is_running = False - - def reset_time(self): - self.idle_time = 0 - - @property - def time_callbacks(self): - return 
self.module.time_callbacks - - def on_stop(self): - self.is_running = False - self.log.info("IdleManagerThread has stopped") - self.module.on_thread_stop() - - def _create_threads(self): - from .idle_logic import MouseThread, KeyboardThread - - thread_mouse = MouseThread(self.reset_time) - thread_keyboard = KeyboardThread(self.reset_time) - return thread_mouse, thread_keyboard - - def run(self): - self.log.info("IdleManagerThread has started") - self.is_running = True - thread_mouse, thread_keyboard = self._create_threads() - thread_mouse.start() - thread_keyboard.start() - try: - while self.is_running: - if self.idle_time in self.time_callbacks: - for callback in self.time_callbacks[self.idle_time]: - thread = threading.Thread(target=callback) - thread.start() - self.threads.append(thread) - - for thread in tuple(self.threads): - if not thread.isAlive(): - thread.join() - self.threads.remove(thread) - - self.idle_time += 1 - time.sleep(1) - - except Exception: - self.log.warning( - 'Idle Manager service has failed', exc_info=True - ) - - # Threads don't have their attrs when Qt application already finished - try: - thread_mouse.stop() - thread_mouse.join() - except AttributeError: - pass - - try: - thread_keyboard.stop() - thread_keyboard.join() - except AttributeError: - pass - - self.on_stop() diff --git a/pype/modules/idle_manager/idle_threads.py b/pype/modules/idle_manager/idle_threads.py index ab3f6790e66..7cedf986e6e 100644 --- a/pype/modules/idle_manager/idle_threads.py +++ b/pype/modules/idle_manager/idle_threads.py @@ -1,5 +1,10 @@ +import time +import threading + from pynput import mouse, keyboard +from pype.lib import PypeLogger + class MouseThread(mouse.Listener): """Listens user's mouse movement.""" @@ -22,3 +27,71 @@ def __init__(self, callback): def on_press(self, key): self.callback() + + +class IdleManagerThread(threading.Thread): + def __init__(self, module, *args, **kwargs): + super(IdleManagerThread, self).__init__(*args, **kwargs) + self.log = 
PypeLogger.get_logger(self.__class__.__name__) + self.module = module + self.threads = [] + self.is_running = False + self.idle_time = 0 + + def stop(self): + self.is_running = False + + def reset_time(self): + self.idle_time = 0 + + @property + def time_callbacks(self): + return self.module.time_callbacks + + def on_stop(self): + self.is_running = False + self.log.info("IdleManagerThread has stopped") + self.module.on_thread_stop() + + def run(self): + self.log.info("IdleManagerThread has started") + self.is_running = True + thread_mouse = MouseThread(self.reset_time) + thread_keyboard = KeyboardThread(self.reset_time) + thread_mouse.start() + thread_keyboard.start() + try: + while self.is_running: + if self.idle_time in self.time_callbacks: + for callback in self.time_callbacks[self.idle_time]: + thread = threading.Thread(target=callback) + thread.start() + self.threads.append(thread) + + for thread in tuple(self.threads): + if not thread.isAlive(): + thread.join() + self.threads.remove(thread) + + self.idle_time += 1 + time.sleep(1) + + except Exception: + self.log.warning( + 'Idle Manager service has failed', exc_info=True + ) + + # Threads don't have their attrs when Qt application already finished + try: + thread_mouse.stop() + thread_mouse.join() + except AttributeError: + pass + + try: + thread_keyboard.stop() + thread_keyboard.join() + except AttributeError: + pass + + self.on_stop() From c502cc2d1c58384074ce50a8f2599e210ad2203a Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 15:15:13 +0100 Subject: [PATCH 129/295] event sync convert hierarchical values to proper type --- .../event_sync_to_avalon.py | 44 ++++++++++++++++++- 1 file changed, 42 insertions(+), 2 deletions(-) diff --git a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py index d71a94aabf9..ba609459682 100644 --- a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ 
b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py @@ -2159,10 +2159,43 @@ def process_hier_cleanup(self): ftrack_project_id = self.cur_project["id"] + attr_types_by_id = self.cust_attr_types_by_id + convert_types_by_id = {} for attr in hier_attrs: key = attr["key"] if key not in hier_cust_attrs_keys: continue + + type_id = attr["type_id"] + attr_id = attr["id"] + cust_attr_type_name = attr_types_by_id[type_id]["name"] + convert_type = None + if cust_attr_type_name == "text": + convert_type = str + + elif cust_attr_type_name == "boolean": + convert_type = bool + + elif cust_attr_type_name in ( + "date", "expression", "notificationtype", "dynamic enumerator" + ): + pass + + else: + cust_attr_config = json.loads(attr["config"]) + if cust_attr_type_name == "number": + if cust_attr_config["isdecimal"]: + convert_type = float + else: + convert_type = int + + elif cust_attr_type_name == "enumerator": + if cust_attr_config["multiSelect"]: + convert_type = list + else: + convert_type = str + + convert_types_by_id[attr_id] = convert_type entities_dict[ftrack_project_id]["hier_attrs"][key] = ( attr["default"] ) @@ -2173,8 +2206,15 @@ def process_hier_cleanup(self): if value["value"] is None: continue entity_id = value["entity_id"] - key = hier_attr_key_by_id[value["configuration_id"]] - entities_dict[entity_id]["hier_attrs"][key] = value["value"] + configuration_id = value["configuration_id"] + + convert_type = convert_types_by_id[configuration_id] + key = hier_attr_key_by_id[configuration_id] + + the_value = value["value"] + if convert_type: + the_value = convert_type(the_value) + entities_dict[entity_id]["hier_attrs"][key] = the_value # Get dictionary with not None hierarchical values to pull to childs project_values = {} From 47a7fbe18130359a028946a22e461afb6838dba7 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 15:26:41 +0100 Subject: [PATCH 130/295] implemented function to retrieve python type for passed custom attribute --- 
pype/modules/ftrack/lib/avalon_sync.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/pype/modules/ftrack/lib/avalon_sync.py b/pype/modules/ftrack/lib/avalon_sync.py index addc8a4a9cd..9aa76539b6a 100644 --- a/pype/modules/ftrack/lib/avalon_sync.py +++ b/pype/modules/ftrack/lib/avalon_sync.py @@ -119,6 +119,31 @@ def get_pype_attr(session, split_hierarchical=True, query_keys=None): return custom_attributes +def get_python_type_for_custom_attribute(cust_attr, cust_attr_type_name=None): + if cust_attr_type_name is None: + cust_attr_type_name = cust_attr["type"]["name"] + + if cust_attr_type_name == "text": + return str + + if cust_attr_type_name == "boolean": + return bool + + if cust_attr_type_name in ("number", "enumerator"): + cust_attr_config = json.loads(cust_attr["config"]) + if cust_attr_type_name == "number": + if cust_attr_config["isdecimal"]: + return float + return int + + if cust_attr_type_name == "enumerator": + if cust_attr_config["multiSelect"]: + return list + return str + # "date", "expression", "notificationtype", "dynamic enumerator" + return None + + def from_dict_to_set(data, is_project): """ Converts 'data' into $set part of MongoDB update command. 
From a91c0368e2de1ac0d8aed746ed544bf301269cef Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 15:27:01 +0100 Subject: [PATCH 131/295] event sync to avalon is using get_python_type_for_custom_attribute --- .../event_sync_to_avalon.py | 28 ++----------------- 1 file changed, 3 insertions(+), 25 deletions(-) diff --git a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py index ba609459682..c03abbd52f7 100644 --- a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py @@ -2169,31 +2169,9 @@ def process_hier_cleanup(self): type_id = attr["type_id"] attr_id = attr["id"] cust_attr_type_name = attr_types_by_id[type_id]["name"] - convert_type = None - if cust_attr_type_name == "text": - convert_type = str - - elif cust_attr_type_name == "boolean": - convert_type = bool - - elif cust_attr_type_name in ( - "date", "expression", "notificationtype", "dynamic enumerator" - ): - pass - - else: - cust_attr_config = json.loads(attr["config"]) - if cust_attr_type_name == "number": - if cust_attr_config["isdecimal"]: - convert_type = float - else: - convert_type = int - - elif cust_attr_type_name == "enumerator": - if cust_attr_config["multiSelect"]: - convert_type = list - else: - convert_type = str + convert_type = avalon_sync.get_python_type_for_custom_attribute( + attr, cust_attr_type_name + ) convert_types_by_id[attr_id] = convert_type entities_dict[ftrack_project_id]["hier_attrs"][key] = ( From f0f4a1c65bb6b00df5952c8eb07dcaba34cdde70 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 15:38:55 +0100 Subject: [PATCH 132/295] define query keys --- pype/modules/ftrack/lib/avalon_sync.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/pype/modules/ftrack/lib/avalon_sync.py b/pype/modules/ftrack/lib/avalon_sync.py index 9aa76539b6a..d75a6babdd7 100644 --- 
a/pype/modules/ftrack/lib/avalon_sync.py +++ b/pype/modules/ftrack/lib/avalon_sync.py @@ -321,6 +321,16 @@ def get_hierarchical_attributes(session, entity, attr_names, attr_defaults={}): class SyncEntitiesFactory: dbcon = AvalonMongoDB() + cust_attr_query_keys = [ + "id", + "key", + "entity_type", + "object_type_id", + "is_hierarchical", + "config", + "default" + ] + project_query = ( "select full_name, name, custom_attributes" ", project_schema._task_type_schema.types.name" @@ -866,7 +876,9 @@ def filter_by_selection(self, event): def set_cutom_attributes(self): self.log.debug("* Preparing custom attributes") # Get custom attributes and values - custom_attrs, hier_attrs = get_pype_attr(self.session) + custom_attrs, hier_attrs = get_pype_attr( + self.session, query_keys=self.cust_attr_query_keys + ) ent_types = self.session.query("select id, name from ObjectType").all() ent_types_by_name = { ent_type["name"]: ent_type["id"] for ent_type in ent_types From c8bc66ae9ed6a220ed5c3657bce6698c190cdf65 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 15:41:21 +0100 Subject: [PATCH 133/295] sync to avalon action is also using proper value type --- pype/modules/ftrack/lib/avalon_sync.py | 63 ++++++++++++++++++++++---- 1 file changed, 54 insertions(+), 9 deletions(-) diff --git a/pype/modules/ftrack/lib/avalon_sync.py b/pype/modules/ftrack/lib/avalon_sync.py index d75a6babdd7..2db124235e0 100644 --- a/pype/modules/ftrack/lib/avalon_sync.py +++ b/pype/modules/ftrack/lib/avalon_sync.py @@ -883,6 +883,14 @@ def set_cutom_attributes(self): ent_types_by_name = { ent_type["name"]: ent_type["id"] for ent_type in ent_types } + # Custom attribute types + cust_attr_types = self.session.query( + "select id, name from CustomAttributeType" + ).all() + cust_attr_type_name_by_id = { + cust_attr_type["id"]: cust_attr_type["name"] + for cust_attr_type in cust_attr_types + } # store default values per entity type attrs_per_entity_type = collections.defaultdict(dict) @@ -892,9 
+900,20 @@ def set_cutom_attributes(self): avalon_attrs_ca_id = collections.defaultdict(dict) attribute_key_by_id = {} + convert_types_by_attr_id = {} for cust_attr in custom_attrs: key = cust_attr["key"] - attribute_key_by_id[cust_attr["id"]] = key + attr_id = cust_attr["id"] + type_id = cust_attr["type_id"] + + attribute_key_by_id[attr_id] = key + cust_attr_type_name = cust_attr_type_name_by_id[type_id] + + convert_type = get_python_type_for_custom_attribute( + cust_attr, cust_attr_type_name + ) + convert_types_by_attr_id[attr_id] = convert_type + ca_ent_type = cust_attr["entity_type"] if key.startswith("avalon_"): if ca_ent_type == "show": @@ -988,24 +1007,44 @@ def set_cutom_attributes(self): for item in values["data"]: entity_id = item["entity_id"] - key = attribute_key_by_id[item["configuration_id"]] + attr_id = item["configuration_id"] + key = attribute_key_by_id[attr_id] store_key = "custom_attributes" if key.startswith("avalon_"): store_key = "avalon_attrs" - self.entities_dict[entity_id][store_key][key] = item["value"] + + convert_type = convert_types_by_attr_id[attr_id] + value = item["value"] + if convert_type: + value = convert_type(value) + self.entities_dict[entity_id][store_key][key] = value # process hierarchical attributes - self.set_hierarchical_attribute(hier_attrs, sync_ids) + self.set_hierarchical_attribute( + hier_attrs, sync_ids, cust_attr_type_name_by_id + ) - def set_hierarchical_attribute(self, hier_attrs, sync_ids): + def set_hierarchical_attribute( + self, hier_attrs, sync_ids, cust_attr_type_name_by_id + ): # collect all hierarchical attribute keys # and prepare default values to project attributes_by_key = {} attribute_key_by_id = {} + convert_types_by_attr_id = {} for attr in hier_attrs: key = attr["key"] - attribute_key_by_id[attr["id"]] = key + attr_id = attr["id"] + type_id = attr["type_id"] + attribute_key_by_id[attr_id] = key attributes_by_key[key] = attr + + cust_attr_type_name = cust_attr_type_name_by_id[type_id] + 
convert_type = get_python_type_for_custom_attribute( + attr, cust_attr_type_name + ) + convert_types_by_attr_id[attr_id] = convert_type + self.hier_cust_attr_ids_by_key[key] = attr["id"] store_key = "hier_attrs" @@ -1040,7 +1079,7 @@ def set_hierarchical_attribute(self, hier_attrs, sync_ids): else: prepare_dict[key] = None - for id, entity_dict in self.entities_dict.items(): + for entity_dict in self.entities_dict.values(): # Skip project because has stored defaults at the moment if entity_dict["entity_type"] == "project": continue @@ -1078,8 +1117,14 @@ def set_hierarchical_attribute(self, hier_attrs, sync_ids): or (isinstance(value, (tuple, list)) and not value) ): continue + + attr_id = item["configuration_id"] + convert_type = convert_types_by_attr_id[attr_id] + if convert_type: + value = convert_type(value) + entity_id = item["entity_id"] - key = attribute_key_by_id[item["configuration_id"]] + key = attribute_key_by_id[attr_id] if key.startswith("avalon_"): store_key = "avalon_attrs" avalon_hier.append(key) @@ -2436,7 +2481,7 @@ def create_ftrack_ent_from_avalon_ent(self, av_entity, parent_id): if new_entity_id not in p_chilren: self.entities_dict[parent_id]["children"].append(new_entity_id) - cust_attr, hier_attrs = get_pype_attr(self.session) + cust_attr, _ = get_pype_attr(self.session) for _attr in cust_attr: key = _attr["key"] if key not in av_entity["data"]: From c8f1e14bb8453dbb845b1169bb44dcf2c441374b Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 15:43:26 +0100 Subject: [PATCH 134/295] added some docstrings --- pype/modules/ftrack/lib/avalon_sync.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pype/modules/ftrack/lib/avalon_sync.py b/pype/modules/ftrack/lib/avalon_sync.py index 2db124235e0..ff305fe17b8 100644 --- a/pype/modules/ftrack/lib/avalon_sync.py +++ b/pype/modules/ftrack/lib/avalon_sync.py @@ -120,6 +120,14 @@ def get_pype_attr(session, split_hierarchical=True, query_keys=None): def 
get_python_type_for_custom_attribute(cust_attr, cust_attr_type_name=None): + """Python type that should value of custom attribute have. + + This function is mainly for number type which is always float from ftrack. + + Returns: + type: Python type which call be called on object to convert the object + to the type or None if can't figure out. + """ if cust_attr_type_name is None: cust_attr_type_name = cust_attr["type"]["name"] From df12e9a112fa6c17d180238ec9d7525f100d4c21 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 16:13:47 +0100 Subject: [PATCH 135/295] fixed and made faster new entity creation --- .../event_sync_to_avalon.py | 59 +++------ pype/modules/ftrack/lib/avalon_sync.py | 125 +++++++++--------- 2 files changed, 81 insertions(+), 103 deletions(-) diff --git a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py index c03abbd52f7..4b0b96f166d 100644 --- a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py @@ -1248,48 +1248,18 @@ def create_entity_in_avalon(self, ftrack_ent, parent_avalon): return final_entity - def get_cust_attr_values(self, entity, keys=None): + def get_cust_attr_values(self, entity): output = {} custom_attrs, hier_attrs = self.avalon_cust_attrs - not_processed_keys = True - if keys: - not_processed_keys = [k for k in keys] + # Notmal custom attributes - processed_keys = [] for attr in custom_attrs: - if not not_processed_keys: - break key = attr["key"] - if key in processed_keys: - continue - - if key not in entity["custom_attributes"]: - continue - - if keys: - if key not in keys: - continue - else: - not_processed_keys.remove(key) - - output[key] = entity["custom_attributes"][key] - processed_keys.append(key) - - if not not_processed_keys: - return output - - # Hierarchical cust attrs - hier_keys = [] - defaults = {} - for attr in hier_attrs: - key = 
attr["key"] - if keys and key not in keys: - continue - hier_keys.append(key) - defaults[key] = attr["default"] + if key in entity["custom_attributes"]: + output[key] = entity["custom_attributes"][key] - hier_values = avalon_sync.get_hierarchical_attributes( - self.process_session, entity, hier_keys, defaults + hier_values = avalon_sync.get_hierarchical_attributes_values( + self.process_session, entity, hier_attrs ) for key, val in hier_values.items(): if key == CUST_ATTR_ID_KEY: @@ -2147,7 +2117,8 @@ def process_hier_cleanup(self): queries = [{ "action": "query", "expression": ( - "select value, entity_id from CustomAttributeValue " + "select value, entity_id, configuration_id" + " from CustomAttributeValue " "where entity_id in ({}) and configuration_id in ({})" ).format(entity_ids_joined, attributes_joined) }] @@ -2180,19 +2151,19 @@ def process_hier_cleanup(self): # PREPARE DATA BEFORE THIS avalon_hier = [] - for value in values["data"]: - if value["value"] is None: + for item in values["data"]: + value = item["value"] + if value is None: continue - entity_id = value["entity_id"] - configuration_id = value["configuration_id"] + entity_id = item["entity_id"] + configuration_id = item["configuration_id"] convert_type = convert_types_by_id[configuration_id] key = hier_attr_key_by_id[configuration_id] - the_value = value["value"] if convert_type: - the_value = convert_type(the_value) - entities_dict[entity_id]["hier_attrs"][key] = the_value + value = convert_type(value) + entities_dict[entity_id]["hier_attrs"][key] = value # Get dictionary with not None hierarchical values to pull to childs project_values = {} diff --git a/pype/modules/ftrack/lib/avalon_sync.py b/pype/modules/ftrack/lib/avalon_sync.py index ff305fe17b8..a6151bfba96 100644 --- a/pype/modules/ftrack/lib/avalon_sync.py +++ b/pype/modules/ftrack/lib/avalon_sync.py @@ -249,79 +249,84 @@ def get_project_apps(in_app_list): return apps, warnings -def get_hierarchical_attributes(session, entity, attr_names, 
attr_defaults={}): - entity_ids = [] - if entity.entity_type.lower() == "project": - entity_ids.append(entity["id"]) - else: - typed_context = session.query(( - "select ancestors.id, project from TypedContext where id is \"{}\"" - ).format(entity["id"])).one() - entity_ids.append(typed_context["id"]) - entity_ids.extend( - [ent["id"] for ent in reversed(typed_context["ancestors"])] +def get_hierarchical_attributes_values( + session, entity, hier_attrs, cust_attr_types=None +): + if not cust_attr_types: + cust_attr_types = session.query( + "select id, name from CustomAttributeType" + ).all() + + cust_attr_name_by_id = { + cust_attr_type["id"]: cust_attr_type["name"] + for cust_attr_type in cust_attr_types + } + # Hierarchical cust attrs + attr_key_by_id = {} + convert_types_by_attr_id = {} + defaults = {} + for attr in hier_attrs: + attr_id = attr["id"] + key = attr["key"] + type_id = attr["type_id"] + + attr_key_by_id[attr_id] = key + defaults[key] = attr["default"] + + cust_attr_type_name = cust_attr_name_by_id[type_id] + convert_type = get_python_type_for_custom_attribute( + attr, cust_attr_type_name ) - entity_ids.append(typed_context["project"]["id"]) + convert_types_by_attr_id[attr_id] = convert_type - missing_defaults = [] - for attr_name in attr_names: - if attr_name not in attr_defaults: - missing_defaults.append(attr_name) + entity_ids = [item["id"] for item in entity["link"]] + + join_ent_ids = join_query_keys(entity_ids) + join_attribute_ids = join_query_keys(attr_key_by_id.keys()) - join_ent_ids = ", ".join( - ["\"{}\"".format(entity_id) for entity_id in entity_ids] - ) - join_attribute_names = ", ".join( - ["\"{}\"".format(key) for key in attr_names] - ) queries = [] queries.append({ "action": "query", "expression": ( - "select value, entity_id from CustomAttributeValue " - "where entity_id in ({}) and configuration.key in ({})" - ).format(join_ent_ids, join_attribute_names) + "select value, configuration_id, entity_id" + " from CustomAttributeValue" + 
" where entity_id in ({}) and configuration_id in ({})" + ).format(join_ent_ids, join_attribute_ids) }) - if not missing_defaults: - if hasattr(session, "call"): - [values] = session.call(queries) - else: - [values] = session._call(queries) + if hasattr(session, "call"): + [values] = session.call(queries) else: - join_missing_names = ", ".join( - ["\"{}\"".format(key) for key in missing_defaults] - ) - queries.append({ - "action": "query", - "expression": ( - "select default from CustomAttributeConfiguration " - "where key in ({})" - ).format(join_missing_names) - }) - - [values, default_values] = session.call(queries) - for default_value in default_values: - key = default_value["data"][0]["key"] - attr_defaults[key] = default_value["data"][0]["default"] + [values] = session._call(queries) hier_values = {} - for key, val in attr_defaults.items(): + for key, val in defaults.items(): hier_values[key] = val if not values["data"]: return hier_values - _hier_values = collections.defaultdict(list) - for value in values["data"]: - key = value["configuration"]["key"] - _hier_values[key].append(value) + values_by_entity_id = collections.defaultdict(dict) + for item in values["data"]: + value = item["value"] + if value is None: + continue - for key, values in _hier_values.items(): - value = sorted( - values, key=lambda value: entity_ids.index(value["entity_id"]) - )[0] - hier_values[key] = value["value"] + attr_id = item["configuration_id"] + + convert_type = convert_types_by_attr_id[attr_id] + if convert_type: + value = convert_type(value) + + key = attr_key_by_id[attr_id] + entity_id = item["entity_id"] + values_by_entity_id[entity_id][key] = value + + for entity_id in entity_ids: + for key in attr_key_by_id.values(): + value = values_by_entity_id[entity_id].get(key) + if value is not None: + hier_values[key] = value return hier_values @@ -999,8 +1004,9 @@ def set_cutom_attributes(self): ]) cust_attr_query = ( - "select value, entity_id from ContextCustomAttributeValue " - 
"where entity_id in ({}) and configuration_id in ({})" + "select value, configuration_id, entity_id" + " from ContextCustomAttributeValue" + " where entity_id in ({}) and configuration_id in ({})" ) call_expr = [{ "action": "query", @@ -1106,8 +1112,9 @@ def set_hierarchical_attribute( call_expr = [{ "action": "query", "expression": ( - "select value, entity_id from ContextCustomAttributeValue " - "where entity_id in ({}) and configuration_id in ({})" + "select value, entity_id, configuration_id" + " from ContextCustomAttributeValue" + " where entity_id in ({}) and configuration_id in ({})" ).format(entity_ids_joined, attributes_joined) }] if hasattr(self.session, "call"): From 236ad6561783f861d25b56062fc95c4c02995634 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 16:16:39 +0100 Subject: [PATCH 136/295] use precached values --- .../event_handlers_server/event_sync_to_avalon.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py index 4b0b96f166d..be3a15b0499 100644 --- a/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ b/pype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py @@ -1259,13 +1259,17 @@ def get_cust_attr_values(self, entity): output[key] = entity["custom_attributes"][key] hier_values = avalon_sync.get_hierarchical_attributes_values( - self.process_session, entity, hier_attrs + self.process_session, + entity, + hier_attrs, + self.cust_attr_types_by_id ) for key, val in hier_values.items(): - if key == CUST_ATTR_ID_KEY: - continue output[key] = val + # Make sure mongo id is not set + output.pop(CUST_ATTR_ID_KEY, None) + return output def process_renamed(self): From 202ba646c55c2116cb350dd68ad49d945797754c Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 18:42:24 +0100 Subject: [PATCH 137/295] implemented settings changes interface --- 
pype/modules/__init__.py | 1 + pype/modules/settings_action.py | 31 +++++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/pype/modules/__init__.py b/pype/modules/__init__.py index 7b945922eae..4b120647e10 100644 --- a/pype/modules/__init__.py +++ b/pype/modules/__init__.py @@ -11,6 +11,7 @@ ) from .settings_action import ( SettingsAction, + ISettingsChangeListener, LocalSettingsAction ) from .webserver import ( diff --git a/pype/modules/settings_action.py b/pype/modules/settings_action.py index aab10e9ebfe..2fc59b11c28 100644 --- a/pype/modules/settings_action.py +++ b/pype/modules/settings_action.py @@ -1,6 +1,37 @@ +from abc import ABCMeta, abstractmethod + +import six + from . import PypeModule, ITrayAction +@six.add_metaclass(ABCMeta) +class ISettingsChangeListener: + """Module has plugin paths to return. + + Expected result is dictionary with keys "publish", "create", "load" or + "actions" and values as list or string. + { + "publish": ["path/to/publish_plugins"] + } + """ + @abstractmethod + def on_system_settings_save(self, old_value, new_value, changes): + pass + + @abstractmethod + def on_project_settings_save( + self, old_value, new_value, changes, project_name + ): + pass + + @abstractmethod + def on_project_anatomy_save( + self, old_value, new_value, changes, project_name + ): + pass + + class SettingsAction(PypeModule, ITrayAction): """Action to show Setttings tool.""" name = "settings" From af0736d068e045ed9669953aa33474ea8a9a10bf Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 18:43:19 +0100 Subject: [PATCH 138/295] Ftrack module listens to settings changes --- pype/modules/ftrack/ftrack_module.py | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/pype/modules/ftrack/ftrack_module.py b/pype/modules/ftrack/ftrack_module.py index a257ede8454..30dcba16b8c 100644 --- a/pype/modules/ftrack/ftrack_module.py +++ b/pype/modules/ftrack/ftrack_module.py @@ -1,4 +1,5 @@ import os 
+import collections from abc import ABCMeta, abstractmethod import six import pype @@ -8,7 +9,8 @@ IPluginPaths, ITimersManager, IUserModule, - ILaunchHookPaths + ILaunchHookPaths, + ISettingsChangeListener ) FTRACK_MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) @@ -31,7 +33,8 @@ class FtrackModule( IPluginPaths, ITimersManager, IUserModule, - ILaunchHookPaths + ILaunchHookPaths, + ISettingsChangeListener ): name = "ftrack" @@ -115,6 +118,22 @@ def on_pype_user_change(self, username): if self.tray_module: self.tray_module.changed_user() + def on_system_settings_save(self, *_args, **_kwargs): + """Implementation of ISettingsChangeListener interface.""" + # Ignore + return + + def on_project_settings_save(self, *_args, **_kwargs): + """Implementation of ISettingsChangeListener interface.""" + # Ignore + return + + def on_project_anatomy_save( + self, old_value, new_value, changes, project_name + ): + """Implementation of ISettingsChangeListener interface.""" + return + def tray_init(self): from .tray import FtrackTrayWrapper self.tray_module = FtrackTrayWrapper(self) From 49141bccc641fb00ece49074e8a5ba8d87954759 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 18:43:35 +0100 Subject: [PATCH 139/295] implemented function to calculate changes --- pype/settings/lib.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/pype/settings/lib.py b/pype/settings/lib.py index c4238f3ffe5..81561b876e4 100644 --- a/pype/settings/lib.py +++ b/pype/settings/lib.py @@ -70,6 +70,25 @@ def create_local_settings_handler(): return MongoLocalSettingsHandler() +def calculate_changes(old_value, new_value): + changes = {} + for key, value in new_value.items(): + if key not in old_value: + changes[key] = value + continue + + _value = old_value[key] + if isinstance(value, dict) and isinstance(_value, dict): + _changes = calculate_changes(_value, value) + if _changes: + changes[key] = _changes + continue + + if _value != value: + changes[key] = value 
+ return changes + + @require_handler def save_studio_settings(data): return _SETTINGS_HANDLER.save_studio_settings(data) From ce35976cc4541c0dc39c3a17e6ab1b3f4d52aa67 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 18:43:50 +0100 Subject: [PATCH 140/295] notify pype modules about settings changes --- pype/settings/lib.py | 66 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/pype/settings/lib.py b/pype/settings/lib.py index 81561b876e4..55d16b3e6b4 100644 --- a/pype/settings/lib.py +++ b/pype/settings/lib.py @@ -91,16 +91,82 @@ def calculate_changes(old_value, new_value): @require_handler def save_studio_settings(data): + # Notify Pype modules + from pype.modules import ModulesManager, ISettingsChangeListener + + old_data = get_system_settings() + default_values = get_default_settings()[SYSTEM_SETTINGS_KEY] + new_data = apply_overrides(default_values, copy.deepcopy(data)) + clear_metadata_from_settings(new_data) + + changes = calculate_changes(old_data, new_data) + modules_manager = ModulesManager() + for module in modules_manager.get_enabled_modules(): + if isinstance(module, ISettingsChangeListener): + module.on_system_settings_save(old_data, new_data, changes) + return _SETTINGS_HANDLER.save_studio_settings(data) @require_handler def save_project_settings(project_name, overrides): + # Notify Pype modules + from pype.modules import ModulesManager, ISettingsChangeListener + + default_values = get_default_settings()[PROJECT_SETTINGS_KEY] + if project_name: + old_data = get_project_settings(project_name) + + studio_overrides = get_studio_project_settings_overrides() + studio_values = apply_overrides(default_values, studio_overrides) + clear_metadata_from_settings(studio_values) + new_data = apply_overrides(studio_values, copy.deepcopy(overrides)) + + else: + old_data = get_default_project_settings() + new_data = apply_overrides(default_values, copy.deepcopy(overrides)) + + 
clear_metadata_from_settings(new_data) + + changes = calculate_changes(old_data, new_data) + modules_manager = ModulesManager() + for module in modules_manager.get_enabled_modules(): + if isinstance(module, ISettingsChangeListener): + module.on_project_settings_save( + old_data, new_data, project_name, changes + ) + return _SETTINGS_HANDLER.save_project_settings(project_name, overrides) @require_handler def save_project_anatomy(project_name, anatomy_data): + # Notify Pype modules + from pype.modules import ModulesManager, ISettingsChangeListener + + default_values = get_default_settings()[PROJECT_ANATOMY_KEY] + if project_name: + old_data = get_anatomy_settings(project_name) + + studio_overrides = get_studio_project_settings_overrides() + studio_values = apply_overrides(default_values, studio_overrides) + clear_metadata_from_settings(studio_values) + new_data = apply_overrides(studio_values, copy.deepcopy(anatomy_data)) + + else: + old_data = get_default_anatomy_settings() + new_data = apply_overrides(default_values, copy.deepcopy(anatomy_data)) + + clear_metadata_from_settings(new_data) + + changes = calculate_changes(old_data, new_data) + modules_manager = ModulesManager() + for module in modules_manager.get_enabled_modules(): + if isinstance(module, ISettingsChangeListener): + module.on_project_anatomy_save( + old_data, new_data, changes, project_name + ) + return _SETTINGS_HANDLER.save_project_anatomy(project_name, anatomy_data) From 7f72d1554867477972a0998ff1eac0535bb551ac Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 18:44:35 +0100 Subject: [PATCH 141/295] changed key `tools` to `tools_env` --- .../projects_schema/schemas/schema_anatomy_attributes.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json index f75319c7e1c..7391108a024 100644 --- 
a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json +++ b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json @@ -67,7 +67,7 @@ }, { "type": "tools-enum", - "key": "tools", + "key": "tools_env", "label": "Tools" } ] From 88369d863c3d38d5043907c5ec40d0f077b62b8a Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 18:45:00 +0100 Subject: [PATCH 142/295] changed defaults settings --- pype/settings/defaults/project_anatomy/attributes.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pype/settings/defaults/project_anatomy/attributes.json b/pype/settings/defaults/project_anatomy/attributes.json index cc5516fd1f3..1d16be42c5e 100644 --- a/pype/settings/defaults/project_anatomy/attributes.json +++ b/pype/settings/defaults/project_anatomy/attributes.json @@ -1,5 +1,5 @@ { - "fps": 25, + "fps": 25.0, "frameStart": 1001, "frameEnd": 1001, "clipIn": 1, @@ -8,7 +8,7 @@ "handleEnd": 0, "resolutionWidth": 1920, "resolutionHeight": 1080, - "pixelAspect": 1, + "pixelAspect": 1.0, "applications": [], - "tools": [] + "tools_env": [] } \ No newline at end of file From bfc48ca039cc3af5685aa4d02d87037913804c09 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 18:45:20 +0100 Subject: [PATCH 143/295] ftrack module has create_ftrack_setting method --- pype/modules/ftrack/ftrack_module.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/pype/modules/ftrack/ftrack_module.py b/pype/modules/ftrack/ftrack_module.py index 30dcba16b8c..f2f772b6d2a 100644 --- a/pype/modules/ftrack/ftrack_module.py +++ b/pype/modules/ftrack/ftrack_module.py @@ -134,6 +134,20 @@ def on_project_anatomy_save( """Implementation of ISettingsChangeListener interface.""" return + def create_ftrack_session(self, **session_kwargs): + import ftrack_api + + if "server_url" not in session_kwargs: + session_kwargs["server_url"] = self.ftrack_url + + if "api_key" not in session_kwargs or 
"api_user" not in session_kwargs: + from .lib import credentials + cred = credentials.get_credentials() + session_kwargs["api_user"] = cred.get("username") + session_kwargs["api_key"] = cred.get("api_key") + + return ftrack_api.Session(**session_kwargs) + def tray_init(self): from .tray import FtrackTrayWrapper self.tray_module = FtrackTrayWrapper(self) From dbe8bd85e9482f78da32c3ccf9599ffd2d57dbd6 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 18:45:47 +0100 Subject: [PATCH 144/295] ftrack module updates changes from settings attributes --- pype/modules/ftrack/ftrack_module.py | 52 +++++++++++++++++++++++++++- 1 file changed, 51 insertions(+), 1 deletion(-) diff --git a/pype/modules/ftrack/ftrack_module.py b/pype/modules/ftrack/ftrack_module.py index f2f772b6d2a..7b9d42f6dfe 100644 --- a/pype/modules/ftrack/ftrack_module.py +++ b/pype/modules/ftrack/ftrack_module.py @@ -132,7 +132,57 @@ def on_project_anatomy_save( self, old_value, new_value, changes, project_name ): """Implementation of ISettingsChangeListener interface.""" - return + if not project_name: + return + + attributes_changes = changes.get("attributes") + if not attributes_changes: + return + + import ftrack_api + from pype.modules.ftrack.lib import avalon_sync + + session = self.create_ftrack_session() + project_entity = session.query( + "Project where full_name is \"{}\"".format(project_name) + ).first() + + if not project_entity: + self.log.warning(( + "Ftrack project with names \"{}\" was not found." + " Skipping settings attributes change callback." 
+ )) + return + + project_id = project_entity["id"] + + cust_attr, hier_attr = avalon_sync.get_pype_attr(session) + cust_attr_by_key = {attr["key"]: attr for attr in cust_attr} + hier_attrs_by_key = {attr["key"]: attr for attr in hier_attr} + for key, value in attributes_changes.items(): + configuration = hier_attrs_by_key.get(key) + if not configuration: + configuration = cust_attr_by_key.get(key) + if not configuration: + continue + + # TODO add value validations + # - value type and list items + entity_key = collections.OrderedDict() + entity_key["configuration_id"] = configuration["id"] + entity_key["entity_id"] = project_id + + session.recorded_operations.push( + ftrack_api.operation.UpdateEntityOperation( + "ContextCustomAttributeValue", + entity_key, + "value", + ftrack_api.symbol.NOT_SET, + value + + ) + ) + session.commit() def create_ftrack_session(self, **session_kwargs): import ftrack_api From 48981b67a36b6152226f5b58b2d37a9aff030726 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 19 Mar 2021 19:14:14 +0100 Subject: [PATCH 145/295] fix anatomy overrides --- pype/settings/lib.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/pype/settings/lib.py b/pype/settings/lib.py index 55d16b3e6b4..89e662ee184 100644 --- a/pype/settings/lib.py +++ b/pype/settings/lib.py @@ -638,11 +638,7 @@ def get_default_anatomy_settings(clear_metadata=True): # TODO uncomment and remove hotfix result when overrides of anatomy # are stored correctly. 
- # result = apply_overrides(default_values, studio_values) - result = copy.deepcopy(default_values) - if studio_values: - for key, value in studio_values.items(): - result[key] = value + result = apply_overrides(default_values, studio_values) if clear_metadata: clear_metadata_from_settings(result) local_settings = get_local_settings() @@ -662,8 +658,10 @@ def get_anatomy_settings(project_name, site_name=None, exclude_locals=False): project_overrides = get_project_anatomy_overrides( project_name ) - - result = apply_overrides(studio_overrides, project_overrides) + result = copy.deepcopy(studio_overrides) + if project_overrides: + for key, value in project_overrides.items(): + result[key] = value clear_metadata_from_settings(result) From 4fc12b8c089c31eab6e49998291a163081a42939 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Mon, 22 Mar 2021 09:33:10 +0100 Subject: [PATCH 146/295] convert #1067 to 3.0 --- pype/hosts/maya/plugins/publish/extract_playblast.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pype/hosts/maya/plugins/publish/extract_playblast.py b/pype/hosts/maya/plugins/publish/extract_playblast.py index 99411e7f532..8402e412856 100644 --- a/pype/hosts/maya/plugins/publish/extract_playblast.py +++ b/pype/hosts/maya/plugins/publish/extract_playblast.py @@ -118,7 +118,8 @@ def process(self, instance): tags.append("delete") # Add camera node name to representation data - camera_node_name = pm.ls(camera)[0].getTransform().getName() + camera_node_name = pm.ls(camera)[0].getTransform().name() + representation = { 'name': 'png', From 89ee780d79d12cebdc1f70480f8d562cd2cfee9d Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 22 Mar 2021 09:54:52 +0100 Subject: [PATCH 147/295] raw json cares about value types --- pype/settings/entities/input_entities.py | 21 +++++++++++++++++++-- pype/settings/entities/schemas/README.md | 10 +++++++--- 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/pype/settings/entities/input_entities.py 
b/pype/settings/entities/input_entities.py index c26cb249a67..40d1595f5a7 100644 --- a/pype/settings/entities/input_entities.py +++ b/pype/settings/entities/input_entities.py @@ -380,13 +380,30 @@ class RawJsonEntity(InputEntity): def _item_initalization(self): # Schema must define if valid value is dict or list - self.valid_value_types = (list, dict) - self.value_on_not_set = {} + is_list = self.schema_data.get("is_list", False) + if is_list: + valid_value_types = (list, ) + value_on_not_set = [] + else: + valid_value_types = (dict, ) + value_on_not_set = {} + + self._is_list = is_list + self.valid_value_types = valid_value_types + self.value_on_not_set = value_on_not_set self.default_metadata = {} self.studio_override_metadata = {} self.project_override_metadata = {} + @property + def is_list(self): + return self._is_list + + @property + def is_dict(self): + return not self._is_list + def set(self, value): self._validate_value_type(value) diff --git a/pype/settings/entities/schemas/README.md b/pype/settings/entities/schemas/README.md index 80125d4b1bb..e92ba8918f5 100644 --- a/pype/settings/entities/schemas/README.md +++ b/pype/settings/entities/schemas/README.md @@ -235,13 +235,17 @@ ### raw-json - a little bit enhanced text input for raw json - has validations of json format - - empty value is invalid value, always must be at least `{}` of `[]` - + - empty value is invalid value, always must be json serializable + - valid value types are list `[]` and dictionary `{}` +- schema also defines valid value type + - by default it is dictionary + - to be able use list it is required to define `is_list` to `true` ``` { "type": "raw-json", "key": "profiles", - "label": "Extract Review profiles" + "label": "Extract Review profiles", + "is_list": true } ``` From 0c23cf0c19c9c9408b4f02800f6a33de336b7e2e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 22 Mar 2021 09:55:27 +0100 Subject: [PATCH 148/295] fixed list items in schemas --- 
.../schemas/projects_schema/schemas/schema_maya_publish.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json b/pype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json index bb0e162c045..623658b7a2a 100644 --- a/pype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json +++ b/pype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json @@ -274,7 +274,8 @@ { "type": "raw-json", "key": "bake_attributes", - "label": "Bake Attributes" + "label": "Bake Attributes", + "is_list": true } ] }, From 9f04cd627cf41b587fd23a628900cbb5bc96eb29 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 22 Mar 2021 09:55:46 +0100 Subject: [PATCH 149/295] raw json widget also validate value types --- .../tools/settings/settings/widgets/item_widgets.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/pype/tools/settings/settings/widgets/item_widgets.py b/pype/tools/settings/settings/widgets/item_widgets.py index 7cfcd844883..ef4b98e1d0c 100644 --- a/pype/tools/settings/settings/widgets/item_widgets.py +++ b/pype/tools/settings/settings/widgets/item_widgets.py @@ -366,7 +366,7 @@ def _on_value_change(self): class RawJsonInput(QtWidgets.QPlainTextEdit): tab_length = 4 - def __init__(self, *args, **kwargs): + def __init__(self, valid_type, *args, **kwargs): super(RawJsonInput, self).__init__(*args, **kwargs) self.setObjectName("RawJsonInput") self.setTabStopDistance( @@ -374,6 +374,7 @@ def __init__(self, *args, **kwargs): self.font() ).horizontalAdvance(" ") * self.tab_length ) + self.valid_type = valid_type def sizeHint(self): document = self.document() @@ -403,8 +404,8 @@ def json_value(self): def has_invalid_value(self): try: - self.json_value() - return False + value = self.json_value() + return not isinstance(value, self.valid_type) except Exception: return True @@ -415,7 +416,11 @@ def 
resizeEvent(self, event): class RawJsonWidget(InputWidget): def _add_inputs_to_layout(self): - self.input_field = RawJsonInput(self.content_widget) + if self.entity.is_list: + valid_type = list + else: + valid_type = dict + self.input_field = RawJsonInput(valid_type, self.content_widget) self.input_field.setSizePolicy( QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.MinimumExpanding From f4a3f43088623ad7809d61181a72a5226fdae4cb Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 22 Mar 2021 10:21:28 +0100 Subject: [PATCH 150/295] application arguments are separated --- .../host_settings/template_host_variant.json | 32 +++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) diff --git a/pype/settings/entities/schemas/system_schema/host_settings/template_host_variant.json b/pype/settings/entities/schemas/system_schema/host_settings/template_host_variant.json index 244b9c1f56a..53652fd1927 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/template_host_variant.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/template_host_variant.json @@ -43,8 +43,36 @@ "key": "executables", "label": "Executables", "multiplatform": "{multiplatform}", - "multipath": "{multipath_executables}", - "with_arguments": true + "multipath": "{multipath_executables}" + }, + { + "type":"separator" + }, + { + "type": "dict", + "key": "arguments", + "label": "Arguments", + "use_label_wrap": false, + "children": [ + { + "key": "windows", + "label": "Windows", + "type": "list", + "object_type": "text" + }, + { + "key": "linux", + "label": "Linux", + "type": "list", + "object_type": "text" + }, + { + "key": "darwin", + "label": "MacOS", + "type": "list", + "object_type": "text" + } + ] }, { "key": "environment", From ea75b495a0b8256099bee3b070659cdcf1c40d38 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 22 Mar 2021 10:51:38 +0100 Subject: [PATCH 151/295] removed with_arguments key from path schema --- 
pype/settings/entities/input_entities.py | 9 ++----- pype/settings/entities/item_entities.py | 15 +++-------- .../settings/settings/widgets/item_widgets.py | 27 +++---------------- 3 files changed, 9 insertions(+), 42 deletions(-) diff --git a/pype/settings/entities/input_entities.py b/pype/settings/entities/input_entities.py index c26cb249a67..1afeb9f311b 100644 --- a/pype/settings/entities/input_entities.py +++ b/pype/settings/entities/input_entities.py @@ -366,13 +366,8 @@ class PathInput(InputEntity): schema_types = ["path-input"] def _item_initalization(self): - self.with_arguments = self.schema_data.get("with_arguments", False) - if self.with_arguments: - self.valid_value_types = (list, ) - self.value_on_not_set = ["", ""] - else: - self.valid_value_types = (STRING_TYPE, ) - self.value_on_not_set = "" + self.valid_value_types = (STRING_TYPE, ) + self.value_on_not_set = "" class RawJsonEntity(InputEntity): diff --git a/pype/settings/entities/item_entities.py b/pype/settings/entities/item_entities.py index 11e43e4fa6e..42374c350c9 100644 --- a/pype/settings/entities/item_entities.py +++ b/pype/settings/entities/item_entities.py @@ -53,15 +53,13 @@ def _item_initalization(self): self.multiplatform = self.schema_data.get("multiplatform", False) self.multipath = self.schema_data.get("multipath", False) - self.with_arguments = self.schema_data.get("with_arguments", False) # Create child object if not self.multiplatform and not self.multipath: valid_value_types = (STRING_TYPE, ) item_schema = { "type": "path-input", - "key": self.key, - "with_arguments": self.with_arguments + "key": self.key } elif not self.multiplatform: @@ -69,10 +67,7 @@ def _item_initalization(self): item_schema = { "type": "list", "key": self.key, - "object_type": { - "type": "path-input", - "with_arguments": self.with_arguments - } + "object_type": "path-input" } else: @@ -91,13 +86,9 @@ def _item_initalization(self): } if self.multipath: child_item["type"] = "list" - child_item["object_type"] 
= { - "type": "path-input", - "with_arguments": self.with_arguments - } + child_item["object_type"] = "path-input" else: child_item["type"] = "path-input" - child_item["with_arguments"] = self.with_arguments item_schema["children"].append(child_item) diff --git a/pype/tools/settings/settings/widgets/item_widgets.py b/pype/tools/settings/settings/widgets/item_widgets.py index 7cfcd844883..d7d6b8ab349 100644 --- a/pype/tools/settings/settings/widgets/item_widgets.py +++ b/pype/tools/settings/settings/widgets/item_widgets.py @@ -623,40 +623,21 @@ def get_invalid(self): class PathInputWidget(InputWidget): def _add_inputs_to_layout(self): self.input_field = QtWidgets.QLineEdit(self.content_widget) - self.args_input_field = None - if self.entity.with_arguments: - self.input_field.setPlaceholderText("Executable path") - self.args_input_field = QtWidgets.QLineEdit(self) - self.args_input_field.setPlaceholderText("Arguments") + self.input_field.setPlaceholderText("Executable path") self.setFocusProxy(self.input_field) - self.content_layout.addWidget(self.input_field, 8) + self.content_layout.addWidget(self.input_field) self.input_field.textChanged.connect(self._on_value_change) - if self.args_input_field: - self.content_layout.addWidget(self.args_input_field, 2) - self.args_input_field.textChanged.connect(self._on_value_change) - def _on_entity_change(self): if self.entity.value != self.input_value(): self.set_entity_value() def set_entity_value(self): - value = self.entity.value - args = "" - if isinstance(value, list): - value, args = value - self.input_field.setText(value) - if self.args_input_field: - self.args_input_field.setText(args) + self.input_field.setText(self.entity.value) def input_value(self): - path_value = self.input_field.text() - if self.entity.with_arguments: - value = [path_value, self.args_input_field.text()] - else: - value = path_value - return value + return self.input_field.text() def _on_value_change(self): if self.ignore_input_changes: From 
8b86b0634c555fddc2f7fd02c9626471b2ec52fa Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 22 Mar 2021 10:52:25 +0100 Subject: [PATCH 152/295] fixed SystemSettings initialization in project settings root --- pype/settings/entities/root_entities.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/settings/entities/root_entities.py b/pype/settings/entities/root_entities.py index b4dc6678269..d1f86666c7b 100644 --- a/pype/settings/entities/root_entities.py +++ b/pype/settings/entities/root_entities.py @@ -596,7 +596,7 @@ def project_name(self, project_name): def system_settings_entity(self): output = self._system_settings_entity if output is None: - output = SystemSettings() + output = SystemSettings(set_studio_state=False) self._system_settings_entity = output if self.override_state is OverrideState.DEFAULTS: From d46f09615aa7871d26b1699ac07bdeb869c8d399 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 22 Mar 2021 10:52:38 +0100 Subject: [PATCH 153/295] changed order of platforms --- .../host_settings/template_host_variant.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pype/settings/entities/schemas/system_schema/host_settings/template_host_variant.json b/pype/settings/entities/schemas/system_schema/host_settings/template_host_variant.json index 53652fd1927..ba009cf0948 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/template_host_variant.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/template_host_variant.json @@ -61,14 +61,14 @@ "object_type": "text" }, { - "key": "linux", - "label": "Linux", + "key": "darwin", + "label": "MacOS", "type": "list", "object_type": "text" }, { - "key": "darwin", - "label": "MacOS", + "key": "linux", + "label": "Linux", "type": "list", "object_type": "text" } From bf5dce7ff7205f000b8c38b3b9ab652d1fe71996 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 22 Mar 2021 10:52:48 +0100 Subject: [PATCH 154/295] resaved executables and 
arguments --- .../system_settings/applications.json | 555 +++++++++++------- 1 file changed, 354 insertions(+), 201 deletions(-) diff --git a/pype/settings/defaults/system_settings/applications.json b/pype/settings/defaults/system_settings/applications.json index b78d23f6ff0..4a13cf78f6f 100644 --- a/pype/settings/defaults/system_settings/applications.json +++ b/pype/settings/defaults/system_settings/applications.json @@ -37,19 +37,18 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Autodesk\\Maya2020\\bin\\maya.exe", - "" - ] + "C:\\Program Files\\Autodesk\\Maya2020\\bin\\maya.exe" ], "darwin": [], "linux": [ - [ - "/usr/autodesk/maya2020/bin/maya", - "" - ] + "/usr/autodesk/maya2020/bin/maya" ] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "MAYA_VERSION": "2020", "__environment_keys__": { @@ -66,19 +65,18 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Autodesk\\Maya2019\\bin\\maya.exe", - "" - ] + "C:\\Program Files\\Autodesk\\Maya2019\\bin\\maya.exe" ], "darwin": [], "linux": [ - [ - "/usr/autodesk/maya2019/bin/maya", - "" - ] + "/usr/autodesk/maya2019/bin/maya" ] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "MAYA_VERSION": "2019", "__environment_keys__": { @@ -95,19 +93,18 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Autodesk\\Maya2017\\bin\\maya.exe", - "" - ] + "C:\\Program Files\\Autodesk\\Maya2018\\bin\\maya.exe" ], "darwin": [], "linux": [ - [ - "/usr/autodesk/maya2018/bin/maya", - "" - ] + "/usr/autodesk/maya2018/bin/maya" ] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "MAYA_VERSION": "2018", "__environment_keys__": { @@ -159,14 +156,16 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Autodesk\\Maya2020\\bin\\mayabatch.exe", - "" - ] + "C:\\Program Files\\Autodesk\\Maya2020\\bin\\mayabatch.exe" ], "darwin": [], "linux": [] }, + 
"arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "MAYA_VERSION": "2020", "__environment_keys__": { @@ -183,14 +182,16 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Autodesk\\Maya2019\\bin\\mayabatch.exe", - "" - ] + "C:\\Program Files\\Autodesk\\Maya2019\\bin\\mayabatch.exe" ], "darwin": [], "linux": [] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "MAYA_VERSION": "2019", "__environment_keys__": { @@ -207,14 +208,16 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Autodesk\\Maya2018\\bin\\mayabatch.exe", - "" - ] + "C:\\Program Files\\Autodesk\\Maya2018\\bin\\mayabatch.exe" ], "darwin": [], "linux": [] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "MAYA_VERSION": "2018", "__environment_keys__": { @@ -257,19 +260,18 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Nuke12.2v3\\Nuke12.2.exe", - "" - ] + "C:\\Program Files\\Nuke12.2v3\\Nuke12.2.exe" ], "darwin": [], "linux": [ - [ - "/usr/local/Nuke12.2v3Nuke12.2", - "" - ] + "/usr/local/Nuke12.2v3Nuke12.2" ] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "__environment_keys__": { "nuke_12.2": [] @@ -283,19 +285,18 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Nuke12.0v1\\Nuke12.0.exe", - "" - ] + "C:\\Program Files\\Nuke12.0v1\\Nuke12.0.exe" ], "darwin": [], "linux": [ - [ - "/usr/local/Nuke12.0v1/Nuke12.0", - "" - ] + "/usr/local/Nuke12.0v1/Nuke12.0" ] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "__environment_keys__": { "nuke_12.0": [] @@ -309,19 +310,18 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Nuke11.3v1\\Nuke11.3.exe", - "" - ] + "C:\\Program Files\\Nuke11.3v1\\Nuke11.3.exe" ], "darwin": [], "linux": [ - [ - "/usr/local/Nuke11.3v5/Nuke11.3", - "" - ] + "/usr/local/Nuke11.3v5/Nuke11.3" ] 
}, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "__environment_keys__": { "nuke_11.3": [] @@ -335,14 +335,16 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Nuke11.2v2\\Nuke11.2.exe", - "" - ] + "C:\\Program Files\\Nuke11.2v2\\Nuke11.2.exe" ], "darwin": [], "linux": [] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "__environment_keys__": { "nuke_11.2": [] @@ -382,17 +384,22 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Nuke12.2v3\\Nuke12.2.exe", - "--nukex" - ] + "C:\\Program Files\\Nuke12.2v3\\Nuke12.2.exe" ], "darwin": [], "linux": [ - [ - "/usr/local/Nuke12.2v3Nuke12.2", - "--nukex" - ] + "/usr/local/Nuke12.2v3Nuke12.2" + ] + }, + "arguments": { + "windows": [ + "--nukex" + ], + "darwin": [ + "--nukex" + ], + "linux": [ + "--nukex" ] }, "environment": { @@ -408,17 +415,22 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Nuke12.0v1\\Nuke12.0.exe", - "--nukex" - ] + "C:\\Program Files\\Nuke12.0v1\\Nuke12.0.exe" ], "darwin": [], "linux": [ - [ - "/usr/local/Nuke12.0v1/Nuke12.0", - "--nukex" - ] + "/usr/local/Nuke12.0v1/Nuke12.0" + ] + }, + "arguments": { + "windows": [ + "--nukex" + ], + "darwin": [ + "--nukex" + ], + "linux": [ + "--nukex" ] }, "environment": { @@ -434,17 +446,22 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Nuke11.3v1\\Nuke11.3.exe", - "--nukex" - ] + "C:\\Program Files\\Nuke11.3v1\\Nuke11.3.exe" ], "darwin": [], "linux": [ - [ - "/usr/local/Nuke11.3v5/Nuke11.3", - "--nukex" - ] + "/usr/local/Nuke11.3v5/Nuke11.3" + ] + }, + "arguments": { + "windows": [ + "--nukex" + ], + "darwin": [ + "--nukex" + ], + "linux": [ + "--nukex" ] }, "environment": { @@ -460,14 +477,22 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Nuke11.2v2\\Nuke11.2.exe", - "--nukex" - ] + "C:\\Program Files\\Nuke11.2v2\\Nuke11.2.exe" ], "darwin": [], "linux": [] }, + "arguments": 
{ + "windows": [ + "--nukex" + ], + "darwin": [ + "--nukex" + ], + "linux": [ + "--nukex" + ] + }, "environment": { "__environment_keys__": { "nukex_11.2": [] @@ -509,17 +534,22 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Nuke12.2v3\\Nuke12.2.exe", - "--studio" - ] + "C:\\Program Files\\Nuke12.2v3\\Nuke12.2.exe" ], "darwin": [], "linux": [ - [ - "/usr/local/Nuke12.2v3Nuke12.2", - "--studio" - ] + "/usr/local/Nuke12.2v3Nuke12.2" + ] + }, + "arguments": { + "windows": [ + "--studio" + ], + "darwin": [ + "--studio" + ], + "linux": [ + "--studio" ] }, "environment": { @@ -535,17 +565,22 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Nuke12.0v1\\Nuke12.0.exe", - "--studio" - ] + "C:\\Program Files\\Nuke12.0v1\\Nuke12.0.exe" ], "darwin": [], "linux": [ - [ - "/usr/local/Nuke12.0v1/Nuke12.0", - "--studio" - ] + "/usr/local/Nuke12.0v1/Nuke12.0" + ] + }, + "arguments": { + "windows": [ + "--studio" + ], + "darwin": [ + "--studio" + ], + "linux": [ + "--studio" ] }, "environment": { @@ -561,17 +596,22 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Nuke11.3v1\\Nuke11.3.exe", - "--studio" - ] + "C:\\Program Files\\Nuke11.3v1\\Nuke11.3.exe" ], "darwin": [], "linux": [ - [ - "/usr/local/Nuke11.3v5/Nuke11.3", - "--studio" - ] + "/usr/local/Nuke11.3v5/Nuke11.3" + ] + }, + "arguments": { + "windows": [ + "--studio" + ], + "darwin": [ + "--studio" + ], + "linux": [ + "--studio" ] }, "environment": { @@ -590,6 +630,17 @@ "darwin": [], "linux": [] }, + "arguments": { + "windows": [ + "--studio" + ], + "darwin": [ + "--studio" + ], + "linux": [ + "--studio" + ] + }, "environment": { "__environment_keys__": { "nukestudio_11.2": [] @@ -631,17 +682,22 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Nuke12.2v3\\Nuke12.2.exe", - "--hiero" - ] + "C:\\Program Files\\Nuke12.2v3\\Nuke12.2.exe" ], "darwin": [], "linux": [ - [ - "/usr/local/Nuke12.2v3Nuke12.2", - "--hiero" - ] + 
"/usr/local/Nuke12.2v3Nuke12.2" + ] + }, + "arguments": { + "windows": [ + "--hiero" + ], + "darwin": [ + "--hiero" + ], + "linux": [ + "--hiero" ] }, "environment": { @@ -657,17 +713,22 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Nuke12.0v1\\Nuke12.0.exe", - "--hiero" - ] + "C:\\Program Files\\Nuke12.0v1\\Nuke12.0.exe" ], "darwin": [], "linux": [ - [ - "/usr/local/Nuke12.0v1/Nuke12.0", - "--hiero" - ] + "/usr/local/Nuke12.0v1/Nuke12.0" + ] + }, + "arguments": { + "windows": [ + "--hiero" + ], + "darwin": [ + "--hiero" + ], + "linux": [ + "--hiero" ] }, "environment": { @@ -683,17 +744,22 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Nuke11.3v1\\Nuke11.3.exe", - "--hiero" - ] + "C:\\Program Files\\Nuke11.3v1\\Nuke11.3.exe" ], "darwin": [], "linux": [ - [ - "/usr/local/Nuke11.3v5/Nuke11.3", - "--hiero" - ] + "/usr/local/Nuke11.3v5/Nuke11.3" + ] + }, + "arguments": { + "windows": [ + "--hiero" + ], + "darwin": [ + "--hiero" + ], + "linux": [ + "--hiero" ] }, "environment": { @@ -709,14 +775,22 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Nuke11.2v2\\Nuke11.2.exe", - "--hiero" - ] + "C:\\Program Files\\Nuke11.2v2\\Nuke11.2.exe" ], "darwin": [], "linux": [] }, + "arguments": { + "windows": [ + "--hiero" + ], + "darwin": [ + "--hiero" + ], + "linux": [ + "--hiero" + ] + }, "environment": { "__environment_keys__": { "hiero_11.2": [] @@ -775,6 +849,11 @@ "darwin": [], "linux": [] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "__environment_keys__": { "fusion_16": [] @@ -788,14 +867,16 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Blackmagic Design\\Fusion 9\\Fusion.exe", - "" - ] + "C:\\Program Files\\Blackmagic Design\\Fusion 9\\Fusion.exe" ], "darwin": [], "linux": [] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "__environment_keys__": { "fusion_9": [] @@ -869,14 +950,16 @@ "icon": 
"", "executables": { "windows": [ - [ - "C:/Program Files/Blackmagic Design/DaVinci Resolve/Resolve.exe", - "" - ] + "C:/Program Files/Blackmagic Design/DaVinci Resolve/Resolve.exe" ], "darwin": [], "linux": [] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "__environment_keys__": { "resolve_16": [] @@ -919,6 +1002,11 @@ "darwin": [], "linux": [] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "__environment_keys__": { "houdini_18": [] @@ -935,6 +1023,11 @@ "darwin": [], "linux": [] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "__environment_keys__": { "houdini_17": [] @@ -971,14 +1064,22 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Blender Foundation\\Blender 2.90\\blender.exe", - "--python-use-system-env" - ] + "C:\\Program Files\\Blender Foundation\\Blender 2.90\\blender.exe" ], "darwin": [], "linux": [] }, + "arguments": { + "windows": [ + "--python-use-system-env" + ], + "darwin": [ + "--python-use-system-env" + ], + "linux": [ + "--python-use-system-env" + ] + }, "environment": { "__environment_keys__": { "blender_2.90": [] @@ -992,14 +1093,22 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Blender Foundation\\Blender 2.83\\blender.exe", - "--python-use-system-env" - ] + "C:\\Program Files\\Blender Foundation\\Blender 2.83\\blender.exe" ], "darwin": [], "linux": [] }, + "arguments": { + "windows": [ + "--python-use-system-env" + ], + "darwin": [ + "--python-use-system-env" + ], + "linux": [ + "--python-use-system-env" + ] + }, "environment": { "__environment_keys__": { "blender_2.83": [] @@ -1034,6 +1143,11 @@ "darwin": [], "linux": [] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "__environment_keys__": { "harmony_20": [] @@ -1048,13 +1162,15 @@ "executables": { "windows": [], "darwin": [ - [ - "/Applications/Toon Boom Harmony 17 
Premium/Harmony Premium.app/Contents/MacOS/Harmony Premium", - "" - ] + "/Applications/Toon Boom Harmony 17 Premium/Harmony Premium.app/Contents/MacOS/Harmony Premium" ], "linux": [] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "__environment_keys__": { "harmony_17": [] @@ -1084,18 +1200,16 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\TVPaint Developpement\\TVPaint Animation 11 (64bits)\\TVPaint Animation 11 (64bits).exe", - "" - ], - [ - "C:\\Program Files\\TVPaint Developpement\\TVPaint Animation 11 Pro (64bits) (DEMO)\\TVPaint Animation 11 Pro (64bits) (DEMO).exe", - "" - ] + "C:\\Program Files\\TVPaint Developpement\\TVPaint Animation 11 (64bits)\\TVPaint Animation 11 (64bits).exe" ], "darwin": [], "linux": [] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "__environment_keys__": { "tvpaint_Animation 11 (64bits)": [] @@ -1109,14 +1223,16 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files (x86)\\TVPaint Developpement\\TVPaint Animation 11 (32bits)\\TVPaint Animation 11 (32bits).exe", - "" - ] + "C:\\Program Files (x86)\\TVPaint Developpement\\TVPaint Animation 11 (32bits)\\TVPaint Animation 11 (32bits).exe" ], "darwin": [], "linux": [] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "__environment_keys__": { "tvpaint_Animation 11 (32bits)": [] @@ -1152,14 +1268,16 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Adobe\\Adobe Photoshop 2020\\Photoshop.exe", - "" - ] + "C:\\Program Files\\Adobe\\Adobe Photoshop 2020\\Photoshop.exe" ], "darwin": [], "linux": [] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "__environment_keys__": { "photoshop_2020": [] @@ -1173,14 +1291,16 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Adobe\\Adobe Photoshop 2021\\Photoshop.exe", - "" - ] + "C:\\Program Files\\Adobe\\Adobe 
Photoshop 2021\\Photoshop.exe" ], "darwin": [], "linux": [] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "__environment_keys__": { "photoshop_2021": [] @@ -1216,14 +1336,16 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Adobe\\Adobe After Effects 2020\\Support Files\\AfterFX.exe", - "" - ] + "" ], "darwin": [], "linux": [] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "__environment_keys__": { "aftereffects_2020": [] @@ -1237,14 +1359,16 @@ "icon": "", "executables": { "windows": [ - [ - "C:\\Program Files\\Adobe\\Adobe After Effects 2021\\Support Files\\AfterFX.exe", - "" - ] + "C:\\Program Files\\Adobe\\Adobe After Effects 2021\\Support Files\\AfterFX.exe" ], "darwin": [], "linux": [] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "__environment_keys__": { "aftereffects_2021": [] @@ -1272,10 +1396,12 @@ "label": "", "variant_label": "Local", "icon": "{}/app_icons/celaction_local.png", - "executables": [ - "", - "" - ], + "executables": "", + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "__environment_keys__": { "celation_Local": [] @@ -1287,10 +1413,12 @@ "label": "", "variant_label": "Pulblish", "icon": "", - "executables": [ - "", - "" - ], + "executables": "", + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "__environment_keys__": { "celation_Publish": [] @@ -1327,6 +1455,11 @@ "darwin": [], "linux": [] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "__environment_keys__": { "unreal_4.24": [] @@ -1353,6 +1486,11 @@ "darwin": [], "linux": [] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "__environment_keys__": { "python_Python 3.7": [] @@ -1369,6 +1507,11 @@ "darwin": [], "linux": [] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + 
}, "environment": { "__environment_keys__": { "python_Python 2.7": [] @@ -1385,6 +1528,11 @@ "darwin": [], "linux": [] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "__environment_keys__": { "terminal_Terminal": [] @@ -1414,6 +1562,11 @@ "darwin": [], "linux": [] }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, "environment": { "__environment_keys__": { "djvview_1.1": [] @@ -1422,4 +1575,4 @@ } } } -} \ No newline at end of file +} From a844b966ac817c61672f3e63a27ce9b286d1fe5d Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 22 Mar 2021 11:15:15 +0100 Subject: [PATCH 155/295] changed how arguments are prepared for launch context --- pype/lib/applications.py | 35 ++++++++++++++--------------------- 1 file changed, 14 insertions(+), 21 deletions(-) diff --git a/pype/lib/applications.py b/pype/lib/applications.py index 0b1f45f2f11..abaecf1e9c3 100644 --- a/pype/lib/applications.py +++ b/pype/lib/applications.py @@ -210,32 +210,16 @@ def __bool__(self): class ApplicationExecutable: def __init__(self, executable): - default_launch_args = [] - executable_path = None - if isinstance(executable, str): - executable_path = executable - - elif isinstance(executable, list): - for arg in executable: - if arg: - if executable_path is None: - executable_path = arg - else: - default_launch_args.append(arg) - - self.executable_path = executable_path - self.default_launch_args = default_launch_args - - def __iter__(self): - yield self._realpath() - for arg in self.default_launch_args: - yield arg + self.executable_path = executable def __str__(self): return self.executable_path + def __repr__(self): + return "<{}> {}".format(self.__class__.__name__, self.executable_path) + def as_args(self): - return list(self) + return [self.executable_path] def _realpath(self): """Check if path is valid executable path.""" @@ -293,11 +277,19 @@ def __init__(self, app_group, app_name, host_name, app_data, manager): elif 
isinstance(_executables, dict): _executables = _executables.get(platform.system().lower()) or [] + _arguments = app_data["arguments"] + if not _arguments: + _arguments = [] + + elif isinstance(_arguments, dict): + _arguments = _arguments.get(platform.system().lower()) or [] + executables = [] for executable in _executables: executables.append(ApplicationExecutable(executable)) self.executables = executables + self.arguments = _arguments @property def full_label(self): @@ -503,6 +495,7 @@ def __init__(self, application, executable, **data): # subprocess.Popen launch arguments (first argument in constructor) self.launch_args = executable.as_args() + self.launch_args.extend(application.arguments) # Handle launch environemtns env = self.data.pop("env", None) From 15677f56fca5b085ac766485571a65e8d607eebf Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 22 Mar 2021 11:15:34 +0100 Subject: [PATCH 156/295] fixed local settings --- pype/tools/settings/local_settings/apps_widget.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/tools/settings/local_settings/apps_widget.py b/pype/tools/settings/local_settings/apps_widget.py index d63cd6a834a..bc27a3c1c45 100644 --- a/pype/tools/settings/local_settings/apps_widget.py +++ b/pype/tools/settings/local_settings/apps_widget.py @@ -56,7 +56,7 @@ def __init__(self, group_label, variant_entity, parent): for item in studio_executables: path_widget = QtWidgets.QLineEdit(content_widget) - path_widget.setText(item.value[0]) + path_widget.setText(item.value) path_widget.setEnabled(False) content_layout.addWidget(path_widget) From e22fb7b89c7919cfc70f94b6a9adf47c27053913 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 22 Mar 2021 11:16:21 +0100 Subject: [PATCH 157/295] fixed app definition --- pype/settings/handlers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/settings/handlers.py b/pype/settings/handlers.py index 48e6ca395c2..936784f04e4 100644 --- a/pype/settings/handlers.py +++ 
b/pype/settings/handlers.py @@ -502,7 +502,7 @@ def _save_project_anatomy_data(self, project_name, data_cache): if not application: continue if isinstance(application, six.string_types): - applications.append({application: application}) + applications.append({"name": application}) new_data["apps"] = applications From d3515e6015dbc2c7305edc55926f140b6f573d34 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 22 Mar 2021 11:25:22 +0100 Subject: [PATCH 158/295] hound fix --- pype/settings/entities/item_entities.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/settings/entities/item_entities.py b/pype/settings/entities/item_entities.py index 42374c350c9..ebaacb193eb 100644 --- a/pype/settings/entities/item_entities.py +++ b/pype/settings/entities/item_entities.py @@ -67,7 +67,7 @@ def _item_initalization(self): item_schema = { "type": "list", "key": self.key, - "object_type": "path-input" + "object_type": "path-input" } else: From dd51ccdd6831f6eb76e4602c4947e86ddc308eb0 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 22 Mar 2021 12:00:29 +0100 Subject: [PATCH 159/295] short name is by default empty string --- pype/modules/ftrack/lib/avalon_sync.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/pype/modules/ftrack/lib/avalon_sync.py b/pype/modules/ftrack/lib/avalon_sync.py index a6151bfba96..ccde69eef46 100644 --- a/pype/modules/ftrack/lib/avalon_sync.py +++ b/pype/modules/ftrack/lib/avalon_sync.py @@ -1215,11 +1215,6 @@ def remove_from_archived(self, mongo_id): ) def prepare_ftrack_ent_data(self): - project_name = self.entities_dict[self.ft_project_id]["name"] - project_anatomy_data = get_anatomy_settings(project_name) - - task_type_mapping = project_anatomy_data["tasks"] - not_set_ids = [] for id, entity_dict in self.entities_dict.items(): entity = entity_dict["entity"] @@ -1258,10 +1253,11 @@ def prepare_ftrack_ent_data(self): tasks = {} for task_type in task_types: task_type_name = task_type["name"] - 
task_type_def = task_type_mapping.get(task_type_name) or {} - short_name = task_type_def.get("short_name") + # Set short name to empty string + # QUESTION Maybe better would be to lower and remove spaces + # from task type name. tasks[task_type_name] = { - "short_name": short_name or task_type_name + "short_name": "" } self.entities_dict[id]["final_entity"]["config"] = { "tasks": tasks, From 0ae39b54b3c37336dffc743dfc30d16c0184f82b Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Mon, 22 Mar 2021 12:18:41 +0100 Subject: [PATCH 160/295] add nukex to python2 vendor prehook --- pype/hooks/pre_python2_vendor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/hooks/pre_python2_vendor.py b/pype/hooks/pre_python2_vendor.py index 070e671db0b..6f34e441328 100644 --- a/pype/hooks/pre_python2_vendor.py +++ b/pype/hooks/pre_python2_vendor.py @@ -6,7 +6,7 @@ class PrePython2Vendor(PreLaunchHook): """Prepend python 2 dependencies for py2 hosts.""" # WARNING This hook will probably be deprecated in Pype 3 - kept for test order = 10 - app_groups = ["hiero", "nuke"] + app_groups = ["hiero", "nuke", "nukex"] def execute(self): # Prepare vendor dir path From c24823b90548b70b909197c3d906af9df2a62dee Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 22 Mar 2021 12:33:56 +0100 Subject: [PATCH 161/295] sync to avalon creates anatomy data if are not set --- pype/modules/ftrack/lib/avalon_sync.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/pype/modules/ftrack/lib/avalon_sync.py b/pype/modules/ftrack/lib/avalon_sync.py index ccde69eef46..970a2702906 100644 --- a/pype/modules/ftrack/lib/avalon_sync.py +++ b/pype/modules/ftrack/lib/avalon_sync.py @@ -14,7 +14,11 @@ from avalon.api import AvalonMongoDB import avalon -from pype.api import Logger, Anatomy, get_anatomy_settings +from pype.api import ( + Logger, + Anatomy, + get_anatomy_settings +) from bson.objectid import ObjectId from bson.errors import InvalidId @@ -1237,6 
+1241,7 @@ def prepare_ftrack_ent_data(self): data[key] = val if id == self.ft_project_id: + project_name = entity["full_name"] data["code"] = entity["name"] self.entities_dict[id]["final_entity"]["data"] = data self.entities_dict[id]["final_entity"]["type"] = "project" @@ -1259,10 +1264,23 @@ def prepare_ftrack_ent_data(self): tasks[task_type_name] = { "short_name": "" } - self.entities_dict[id]["final_entity"]["config"] = { + + current_project_anatomy_data = get_anatomy_settings( + project_name, exclude_locals=True + ) + + project_config = { "tasks": tasks, "apps": proj_apps } + for key, value in current_project_anatomy_data.items(): + if key in project_config or key == "attributes": + continue + project_config[key] = value + + self.entities_dict[id]["final_entity"]["config"] = ( + project_config + ) continue ent_path_items = [ent["name"] for ent in entity["link"]] From a5ad0e831ab6d8243d9fd9c740eeccff7b6b8a22 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 22 Mar 2021 12:45:13 +0100 Subject: [PATCH 162/295] added some docstrings --- pype/settings/lib.py | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/pype/settings/lib.py b/pype/settings/lib.py index 89e662ee184..9dc23e6ddf3 100644 --- a/pype/settings/lib.py +++ b/pype/settings/lib.py @@ -91,6 +91,19 @@ def calculate_changes(old_value, new_value): @require_handler def save_studio_settings(data): + """Save studio overrides of system settings. + + Triggers callbacks on modules that want to know about system settings + changes. + + Callbacks are triggered on all modules. They must check if their enabled + value has changed. + + For saving of data cares registered Settings handler. + + Args: + data(dict): Overrides data with metadata defying studio overrides. 
+ """ # Notify Pype modules from pype.modules import ModulesManager, ISettingsChangeListener @@ -110,6 +123,18 @@ def save_studio_settings(data): @require_handler def save_project_settings(project_name, overrides): + """Save studio overrides of project settings. + + Old value, new value and changes are passed to enabled modules that want to + know about settings changes. + + For saving of data cares registered Settings handler. + + Args: + project_name (str): Project name for which overrides are passed. + Default project's value is None. + overrides(dict): Overrides data with metadata defying studio overrides. + """ # Notify Pype modules from pype.modules import ModulesManager, ISettingsChangeListener @@ -141,6 +166,18 @@ def save_project_settings(project_name, overrides): @require_handler def save_project_anatomy(project_name, anatomy_data): + """Save studio overrides of project anatomy. + + Old value, new value and changes are passed to enabled modules that want to + know about settings changes. + + For saving of data cares registered Settings handler. + + Args: + project_name (str): Project name for which overrides are passed. + Default project's value is None. + overrides(dict): Overrides data with metadata defying studio overrides. + """ # Notify Pype modules from pype.modules import ModulesManager, ISettingsChangeListener From 77627a167cfc8180e40ff04bf31746aeea33ae8d Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 22 Mar 2021 12:53:51 +0100 Subject: [PATCH 163/295] ModulesManager can accept different settings data --- pype/modules/base.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/pype/modules/base.py b/pype/modules/base.py index 03a59658412..47c7de9d500 100644 --- a/pype/modules/base.py +++ b/pype/modules/base.py @@ -285,12 +285,20 @@ def set_service_idle_icon(self): class ModulesManager: + """Manager of Pype modules helps to load and prepare them to work. 
+ + Args: + modules_settings(dict): To be able create module manager with specified + data. For settings changes callbacks and testing purposes. + """ # Helper attributes for report _report_total_key = "Total" - def __init__(self): + def __init__(self, _system_settings=None): self.log = logging.getLogger(self.__class__.__name__) + self._system_settings = _system_settings + self.modules = [] self.modules_by_id = {} self.modules_by_name = {} @@ -304,7 +312,11 @@ def initialize_modules(self): """Import and initialize modules.""" self.log.debug("*** Pype modules initialization.") # Prepare settings for modules - modules_settings = get_system_settings()["modules"] + if self._system_settings is None: + system_settings = get_system_settings() + else: + system_settings = self._system_settings + modules_settings = system_settings["modules"] report = {} time_start = time.time() From 9abb71962e0b9dd8ced8eb151b261b2b9965ca0f Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 22 Mar 2021 12:54:04 +0100 Subject: [PATCH 164/295] pass new data to create modules manager --- pype/settings/lib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/settings/lib.py b/pype/settings/lib.py index 9dc23e6ddf3..1aa8c11eda2 100644 --- a/pype/settings/lib.py +++ b/pype/settings/lib.py @@ -113,7 +113,7 @@ def save_studio_settings(data): clear_metadata_from_settings(new_data) changes = calculate_changes(old_data, new_data) - modules_manager = ModulesManager() + modules_manager = ModulesManager(_system_settings=new_data) for module in modules_manager.get_enabled_modules(): if isinstance(module, ISettingsChangeListener): module.on_system_settings_save(old_data, new_data, changes) From 7d8d7f913c9c5fa5f3be185c356ae3732f4ddcb0 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 22 Mar 2021 12:58:55 +0100 Subject: [PATCH 165/295] fix system settings attribute --- pype/modules/base.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pype/modules/base.py 
b/pype/modules/base.py index 47c7de9d500..fe68c147058 100644 --- a/pype/modules/base.py +++ b/pype/modules/base.py @@ -312,10 +312,9 @@ def initialize_modules(self): """Import and initialize modules.""" self.log.debug("*** Pype modules initialization.") # Prepare settings for modules - if self._system_settings is None: + system_settings = getattr(self, "_system_settings", None) + if system_settings is None: system_settings = get_system_settings() - else: - system_settings = self._system_settings modules_settings = system_settings["modules"] report = {} From bfea62c9391fdf71c7051b6c377162f01d03a193 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 22 Mar 2021 13:19:06 +0100 Subject: [PATCH 166/295] fix site settings --- pype/settings/lib.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pype/settings/lib.py b/pype/settings/lib.py index 1aa8c11eda2..bf38b051bcc 100644 --- a/pype/settings/lib.py +++ b/pype/settings/lib.py @@ -538,7 +538,10 @@ def apply_local_settings_on_anatomy_settings( # Get active site from settings if site_name is None: - project_settings = get_project_settings(project_name) + if project_name: + project_settings = get_project_settings(project_name) + else: + project_settings = get_default_project_settings() site_name = ( project_settings["global"]["sync_server"]["config"]["active_site"] ) From 27c6613718981f47db8816a79913574e77faae94 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 22 Mar 2021 13:19:32 +0100 Subject: [PATCH 167/295] added ability to exclude locals on studio settings getters --- pype/settings/lib.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/pype/settings/lib.py b/pype/settings/lib.py index bf38b051bcc..9cb0bc9ecbc 100644 --- a/pype/settings/lib.py +++ b/pype/settings/lib.py @@ -148,7 +148,7 @@ def save_project_settings(project_name, overrides): new_data = apply_overrides(studio_values, copy.deepcopy(overrides)) else: - old_data = 
get_default_project_settings() + old_data = get_default_project_settings(exclude_locals=True) new_data = apply_overrides(default_values, copy.deepcopy(overrides)) clear_metadata_from_settings(new_data) @@ -191,7 +191,7 @@ def save_project_anatomy(project_name, anatomy_data): new_data = apply_overrides(studio_values, copy.deepcopy(anatomy_data)) else: - old_data = get_default_anatomy_settings() + old_data = get_default_anatomy_settings(exclude_locals=True) new_data = apply_overrides(default_values, copy.deepcopy(anatomy_data)) clear_metadata_from_settings(new_data) @@ -659,19 +659,22 @@ def get_system_settings(clear_metadata=True): return result -def get_default_project_settings(clear_metadata=True): +def get_default_project_settings(clear_metadata=True, exclude_locals=False): """Project settings with applied studio's default project overrides.""" default_values = get_default_settings()[PROJECT_SETTINGS_KEY] studio_values = get_studio_project_settings_overrides() result = apply_overrides(default_values, studio_values) if clear_metadata: clear_metadata_from_settings(result) - local_settings = get_local_settings() - apply_local_settings_on_project_settings(result, local_settings, None) + if not exclude_locals: + local_settings = get_local_settings() + apply_local_settings_on_project_settings( + result, local_settings, None + ) return result -def get_default_anatomy_settings(clear_metadata=True): +def get_default_anatomy_settings(clear_metadata=True, exclude_locals=False): """Project anatomy data with applied studio's default project overrides.""" default_values = get_default_settings()[PROJECT_ANATOMY_KEY] studio_values = get_studio_project_anatomy_overrides() @@ -681,8 +684,11 @@ def get_default_anatomy_settings(clear_metadata=True): result = apply_overrides(default_values, studio_values) if clear_metadata: clear_metadata_from_settings(result) - local_settings = get_local_settings() - apply_local_settings_on_anatomy_settings(result, local_settings, None) + if not 
exclude_locals: + local_settings = get_local_settings() + apply_local_settings_on_anatomy_settings( + result, local_settings, None + ) return result From 7c88137dea845b65f53f26612c3709245327c4f3 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 22 Mar 2021 13:24:33 +0100 Subject: [PATCH 168/295] fixed updates --- pype/settings/handlers.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/pype/settings/handlers.py b/pype/settings/handlers.py index 936784f04e4..f2b857dd8c7 100644 --- a/pype/settings/handlers.py +++ b/pype/settings/handlers.py @@ -492,9 +492,6 @@ def _save_project_anatomy_data(self, project_name, data_cache): continue update_dict_data[key] = value - if update_dict_data: - update_dict["data"] = update_dict_data - update_dict_config = {} applications = [] @@ -512,16 +509,23 @@ def _save_project_anatomy_data(self, project_name, data_cache): continue update_dict_config[key] = value - if update_dict_config: - update_dict["config"] = update_dict_config - - if not update_dict: + if not update_dict_data and not update_dict_config: return - _update_dict = self.prepare_mongo_update_dict(update_dict) + data_changes = self.prepare_mongo_update_dict(update_dict_data) + + set_dict = {} + for key, value in data_changes.items(): + new_key = "data.{}".format(key) + set_dict[new_key] = value + + for key, value in update_dict_config.items(): + new_key = "config.{}".format(key) + set_dict[new_key] = value + collection.update_one( {"type": "project"}, - {"$set": _update_dict} + {"$set": set_dict} ) def _save_project_data(self, project_name, doc_type, data_cache): From 94a729e9937b0244e0916b03ce8c9f3f274ddb96 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 22 Mar 2021 13:31:21 +0100 Subject: [PATCH 169/295] hound fix --- pype/settings/handlers.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/pype/settings/handlers.py b/pype/settings/handlers.py index f2b857dd8c7..78bfd6cc3fa 100644 --- 
a/pype/settings/handlers.py +++ b/pype/settings/handlers.py @@ -476,9 +476,6 @@ def _save_project_anatomy_data(self, project_name, data_cache): " Create project first." ).format(project_name)) - # Update dictionary of changes that will be changed in mongo - update_dict = {} - # Project's data update_dict_data = {} project_doc_data = project_doc.get("data") or {} @@ -514,18 +511,19 @@ def _save_project_anatomy_data(self, project_name, data_cache): data_changes = self.prepare_mongo_update_dict(update_dict_data) - set_dict = {} + # Update dictionary of changes that will be changed in mongo + update_dict = {} for key, value in data_changes.items(): new_key = "data.{}".format(key) - set_dict[new_key] = value + update_dict[new_key] = value for key, value in update_dict_config.items(): new_key = "config.{}".format(key) - set_dict[new_key] = value + update_dict[new_key] = value collection.update_one( {"type": "project"}, - {"$set": set_dict} + {"$set": update_dict} ) def _save_project_data(self, project_name, doc_type, data_cache): From 57447f2bfcb3a06d8a93e12527d641b4662cf256 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 23 Mar 2021 17:13:00 +0100 Subject: [PATCH 170/295] numbers can be converted to string for text input and enum --- pype/settings/entities/enum_entity.py | 2 ++ pype/settings/entities/input_entities.py | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/pype/settings/entities/enum_entity.py b/pype/settings/entities/enum_entity.py index f2831c78dc4..66bfb67df96 100644 --- a/pype/settings/entities/enum_entity.py +++ b/pype/settings/entities/enum_entity.py @@ -50,6 +50,8 @@ def _convert_to_valid_type(self, value): if self.multiselection: if isinstance(value, (set, tuple)): return list(value) + elif isinstance(value, (int, float)): + return str(value) return NOT_SET def set(self, value): diff --git a/pype/settings/entities/input_entities.py b/pype/settings/entities/input_entities.py index 2ed397dad9b..641da93733a 100644 --- 
a/pype/settings/entities/input_entities.py +++ b/pype/settings/entities/input_entities.py @@ -369,6 +369,12 @@ def _item_initalization(self): self.multiline = self.schema_data.get("multiline", False) self.placeholder_text = self.schema_data.get("placeholder") + def _convert_to_valid_type(self, value): + # Allow numbers converted to string + if isinstance(value, (int, float)): + return str(value) + return NOT_SET + class PathInput(InputEntity): schema_types = ["path-input"] From 5bdfc29bb614c63d58a2d8f299473087af92358d Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 23 Mar 2021 17:13:29 +0100 Subject: [PATCH 171/295] value type of enum must always be string --- pype/settings/entities/enum_entity.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/pype/settings/entities/enum_entity.py b/pype/settings/entities/enum_entity.py index 66bfb67df96..94a995ee086 100644 --- a/pype/settings/entities/enum_entity.py +++ b/pype/settings/entities/enum_entity.py @@ -1,5 +1,8 @@ from .input_entities import InputEntity -from .lib import NOT_SET +from .lib import ( + NOT_SET, + STRING_TYPE +) class EnumEntity(InputEntity): @@ -21,13 +24,12 @@ def _item_initalization(self): self.valid_value_types = (list, ) self.value_on_not_set = [] else: - valid_value_types = set() for key in valid_keys: if self.value_on_not_set is NOT_SET: self.value_on_not_set = key - valid_value_types.add(type(key)) + break - self.valid_value_types = tuple(valid_value_types) + self.valid_value_types = (STRING_TYPE, ) # GUI attribute self.placeholder = self.schema_data.get("placeholder") @@ -42,6 +44,12 @@ def schema_validations(self): self.path, key ) ) + if not isinstance(key, STRING_TYPE): + raise ValueError( + "{}: Key \"{}\" has invalid type {}, expected {}.".format( + self.path, key, type(key), STRING_TYPE + ) + ) enum_keys.add(key) super(EnumEntity, self).schema_validations() From 4c89adebeb026a58a79f24f7bec5687d49e454e2 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: 
Tue, 23 Mar 2021 17:13:36 +0100 Subject: [PATCH 172/295] added path to exception --- pype/settings/entities/enum_entity.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/settings/entities/enum_entity.py b/pype/settings/entities/enum_entity.py index 94a995ee086..e5d2097443e 100644 --- a/pype/settings/entities/enum_entity.py +++ b/pype/settings/entities/enum_entity.py @@ -72,8 +72,8 @@ def set(self, value): for item in check_values: if item not in self.valid_keys: raise ValueError( - "Invalid value \"{}\". Expected: {}".format( - item, self.valid_keys + "{} Invalid value \"{}\". Expected: {}".format( + self.path, item, self.valid_keys ) ) self._current_value = new_value From 42752c46487649ab0bf0d163192b5a6fba7bfb76 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 23 Mar 2021 17:47:25 +0100 Subject: [PATCH 173/295] using specific exceptions for schema errors --- pype/settings/entities/base_entity.py | 47 ++++++++++--------- .../entities/dict_immutable_keys_entity.py | 22 +++++---- .../entities/dict_mutable_keys_entity.py | 10 ++-- pype/settings/entities/enum_entity.py | 26 ++++++---- pype/settings/entities/exceptions.py | 34 +++++++++----- pype/settings/entities/input_entities.py | 14 +++--- pype/settings/entities/item_entities.py | 7 +-- pype/settings/entities/list_entity.py | 9 ++-- pype/settings/entities/root_entities.py | 6 ++- 9 files changed, 100 insertions(+), 75 deletions(-) diff --git a/pype/settings/entities/base_entity.py b/pype/settings/entities/base_entity.py index 33abee227a7..c01fca6a4e9 100644 --- a/pype/settings/entities/base_entity.py +++ b/pype/settings/entities/base_entity.py @@ -10,7 +10,8 @@ from .exceptions import ( InvalidValueType, - SchemeGroupHierarchyBug + SchemeGroupHierarchyBug, + EntitySchemaError ) from pype.lib import PypeLogger @@ -215,49 +216,48 @@ def schema_validations(self): """ # Entity must have defined valid value types. 
if self.valid_value_types is NOT_SET: - raise ValueError("Attribute `valid_value_types` is not filled.") + raise EntitySchemaError( + self, "Attribute `valid_value_types` is not filled." + ) # Check if entity has defined key when is required. if self.require_key and not self.key: - error_msg = "{}: Missing \"key\" in schema data. {}".format( - self.path, str(self.schema_data).replace("'", '"') + error_msg = "Missing \"key\" in schema data. {}".format( + str(self.schema_data).replace("'", '"') ) - raise KeyError(error_msg) + raise EntitySchemaError(self, error_msg) # Group entity must have defined label. (UI specific) # QUESTION this should not be required? if not self.label and self.is_group: - raise ValueError( - "{}: Item is set as `is_group` but has empty `label`.".format( - self.path - ) + raise EntitySchemaError( + self, "Item is set as `is_group` but has empty `label`." ) # Group item can be only once in on hierarchy branch. if self.is_group and self.group_item: - raise SchemeGroupHierarchyBug(self.path) + raise SchemeGroupHierarchyBug(self) # Validate that env group entities will be stored into file. # - env group entities must store metadata which is not possible if # metadata would be outside of file if not self.file_item and self.is_env_group: - raise ValueError(( - "{}: Environment item is not inside file" + reason = ( + "Environment item is not inside file" " item so can't store metadata for defaults." - ).format(self.path)) + ) + raise EntitySchemaError(self, reason) # Dynamic items must not have defined labels. (UI specific) if self.label and self.is_dynamic_item: - raise ValueError(( - "{}: Item has set label but is used as dynamic item." - ).format(self.path)) + raise EntitySchemaError( + self, "Item has set label but is used as dynamic item." 
+ ) # Dynamic items or items in dynamic item must not have set `is_group` if self.is_group and (self.is_dynamic_item or self.is_in_dynamic_item): - raise ValueError( - "{} Dynamic entity has set `is_group` to true.".format( - self.path - ) + raise EntitySchemaError( + self, "Dynamic entity has set `is_group` to true." ) @abstractmethod @@ -818,10 +818,11 @@ def save(self): def schema_validations(self): if not self.label and self.use_label_wrap: - raise ValueError(( - "{} Entity has set `use_label_wrap` to true but" + reason = ( + "Entity has set `use_label_wrap` to true but" " does not have set `label`." - ).format(self.path)) + ) + raise EntitySchemaError(self, reason) super(ItemEntity, self).schema_validations() diff --git a/pype/settings/entities/dict_immutable_keys_entity.py b/pype/settings/entities/dict_immutable_keys_entity.py index d7973205831..f0621381511 100644 --- a/pype/settings/entities/dict_immutable_keys_entity.py +++ b/pype/settings/entities/dict_immutable_keys_entity.py @@ -15,7 +15,10 @@ BoolEntity, GUIEntity ) -from .exceptions import SchemaDuplicatedKeys +from .exceptions import ( + SchemaDuplicatedKeys, + EntitySchemaError +) class DictImmutableKeysEntity(ItemEntity): @@ -83,20 +86,21 @@ def schema_validations(self): elif child_entity.key not in children_keys: children_keys.add(child_entity.key) else: - raise SchemaDuplicatedKeys(self.path, child_entity.key) + raise SchemaDuplicatedKeys(self, child_entity.key) if self.checkbox_key: checkbox_child = self.non_gui_children.get(self.checkbox_key) if not checkbox_child: - raise ValueError( - "{}: Checkbox children \"{}\" was not found.".format( - self.path, self.checkbox_key - ) + reason = "Checkbox children \"{}\" was not found.".format( + self.checkbox_key ) + raise EntitySchemaError(self, reason) + if not isinstance(checkbox_child, BoolEntity): - raise TypeError(( - "{}: Checkbox children \"{}\" is not `boolean` type." 
- ).format(self.path, self.checkbox_key)) + reason = "Checkbox children \"{}\" is not `boolean` type.".format( + self.checkbox_key + ) + raise EntitySchemaError(self, reason) super(DictImmutableKeysEntity, self).schema_validations() # Trigger schema validation on children entities diff --git a/pype/settings/entities/dict_mutable_keys_entity.py b/pype/settings/entities/dict_mutable_keys_entity.py index c314242bf96..7005d346c1b 100644 --- a/pype/settings/entities/dict_mutable_keys_entity.py +++ b/pype/settings/entities/dict_mutable_keys_entity.py @@ -8,7 +8,8 @@ from .exceptions import ( DefaultsNotDefined, StudioDefaultsNotDefined, - RequiredKeyModified + RequiredKeyModified, + EntitySchemaError ) from pype.settings.constants import ( METADATA_KEYS, @@ -213,10 +214,11 @@ def schema_validations(self): # TODO Ability to store labels should be defined with different key if self.collapsible_key and not self.file_item: - raise ValueError(( - "{}: Modifiable dictionary with collapsible keys is not under" + reason = ( + "Modifiable dictionary with collapsible keys is not under" " file item so can't store metadata." - ).format(self.path)) + ) + raise EntitySchemaError(self, reason) for child_obj in self.children_by_key.values(): child_obj.schema_validations() diff --git a/pype/settings/entities/enum_entity.py b/pype/settings/entities/enum_entity.py index e5d2097443e..b7b7d1cf4d0 100644 --- a/pype/settings/entities/enum_entity.py +++ b/pype/settings/entities/enum_entity.py @@ -1,4 +1,5 @@ from .input_entities import InputEntity +from .exceptions import EntitySchemaError from .lib import ( NOT_SET, STRING_TYPE @@ -35,24 +36,29 @@ def _item_initalization(self): self.placeholder = self.schema_data.get("placeholder") def schema_validations(self): + if not isinstance(self.enum_items, list): + raise EntitySchemaError( + self, "Enum item must have defined `enum_items` as list." 
+ ) + enum_keys = set() for item in self.enum_items: key = tuple(item.keys())[0] if key in enum_keys: - raise ValueError( - "{}: Key \"{}\" is more than once in enum items.".format( - self.path, key - ) + reason = "Key \"{}\" is more than once in enum items.".format( + key ) + raise EntitySchemaError(self, reason) + + enum_keys.add(key) + if not isinstance(key, STRING_TYPE): - raise ValueError( - "{}: Key \"{}\" has invalid type {}, expected {}.".format( - self.path, key, type(key), STRING_TYPE - ) + reason = "Key \"{}\" has invalid type {}, expected {}.".format( + key, type(key), STRING_TYPE ) - enum_keys.add(key) + raise EntitySchemaError(self, reason) - super(EnumEntity, self).schema_validations() + super(BaseEnumEntity, self).schema_validations() def _convert_to_valid_type(self, value): if self.multiselection: diff --git a/pype/settings/entities/exceptions.py b/pype/settings/entities/exceptions.py index 7080a9b187e..2c3b262ff17 100644 --- a/pype/settings/entities/exceptions.py +++ b/pype/settings/entities/exceptions.py @@ -38,6 +38,23 @@ class SchemaError(Exception): pass +class EntitySchemaError(SchemaError): + def __init__(self, entity, reason): + self.entity = entity + self.reason = reason + msg = "{} {} - {}".format(entity.__class__, entity.path, reason) + super(EntitySchemaError, self).__init__(msg) + + +class SchemeGroupHierarchyBug(EntitySchemaError): + def __init__(self, entity): + reason = ( + "Items with attribute \"is_group\" can't have another item with" + " \"is_group\" attribute as child." + ) + super(SchemeGroupHierarchyBug, self).__init__(entity, reason) + + class SchemaMissingFileInfo(SchemaError): def __init__(self, invalid): full_path_keys = [] @@ -51,22 +68,13 @@ def __init__(self, invalid): super(SchemaMissingFileInfo, self).__init__(msg) -class SchemeGroupHierarchyBug(SchemaError): - def __init__(self, entity_path): - msg = ( - "Items with attribute \"is_group\" can't have another item with" - " \"is_group\" attribute as child. 
Error happened in entity: {}" - ).format(entity_path) - super(SchemeGroupHierarchyBug, self).__init__(msg) - - class SchemaDuplicatedKeys(SchemaError): - def __init__(self, entity_path, key): + def __init__(self, entity, key): msg = ( "Schema item contain duplicated key \"{}\" in" - " one hierarchy level. {}" - ).format(key, entity_path) - super(SchemaDuplicatedKeys, self).__init__(msg) + " one hierarchy level." + ).format(key) + super(SchemaDuplicatedKeys, self).__init__(entity, msg) class SchemaDuplicatedEnvGroupKeys(SchemaError): diff --git a/pype/settings/entities/input_entities.py b/pype/settings/entities/input_entities.py index 641da93733a..a1beaef9f4d 100644 --- a/pype/settings/entities/input_entities.py +++ b/pype/settings/entities/input_entities.py @@ -9,7 +9,8 @@ ) from .exceptions import ( DefaultsNotDefined, - StudioDefaultsNotDefined + StudioDefaultsNotDefined, + EntitySchemaError ) from pype.settings.constants import ( @@ -39,11 +40,10 @@ def schema_validations(self): """Validation of entity schema and schema hierarchy.""" # Default value when even defaults are not filled must be set if self.value_on_not_set is NOT_SET: - raise ValueError( - "Attribute `value_on_not_set` is not filled. {}".format( - self.__class__.__name__ - ) + reason = "Attribute `value_on_not_set` is not filled. {}".format( + self.__class__.__name__ ) + raise EntitySchemaError(self, reason) super(EndpointEntity, self).schema_validations() @@ -105,9 +105,7 @@ def get_child_path(self, child_obj): def schema_validations(self): # Input entity must have file parent. 
if not self.file_item: - raise ValueError( - "{}: Missing parent file entity.".format(self.path) - ) + raise EntitySchemaError(self, "Missing parent file entity.") super(InputEntity, self).schema_validations() diff --git a/pype/settings/entities/item_entities.py b/pype/settings/entities/item_entities.py index 2edd3dad772..56e7d1c7b23 100644 --- a/pype/settings/entities/item_entities.py +++ b/pype/settings/entities/item_entities.py @@ -5,7 +5,8 @@ ) from .exceptions import ( DefaultsNotDefined, - StudioDefaultsNotDefined + StudioDefaultsNotDefined, + EntitySchemaError ) from .base_entity import ItemEntity @@ -204,8 +205,8 @@ def _item_initalization(self): def schema_validations(self): # List entity must have file parent. if not self.file_item and not self.is_file: - raise ValueError( - "{}: Missing file entity in hierarchy.".format(self.path) + raise EntitySchemaError( + self, "Missing file entity in hierarchy." ) super(ListStrictEntity, self).schema_validations() diff --git a/pype/settings/entities/list_entity.py b/pype/settings/entities/list_entity.py index 814086fe0fc..c6155b78f8d 100644 --- a/pype/settings/entities/list_entity.py +++ b/pype/settings/entities/list_entity.py @@ -9,7 +9,8 @@ ) from .exceptions import ( DefaultsNotDefined, - StudioDefaultsNotDefined + StudioDefaultsNotDefined, + EntitySchemaError ) @@ -153,16 +154,18 @@ def schema_validations(self): super(ListEntity, self).schema_validations() if self.is_dynamic_item and self.use_label_wrap: - raise ValueError( + reason = ( "`ListWidget` can't have set `use_label_wrap` to True and" " be used as widget at the same time." ) + raise EntitySchemaError(self, reason) if self.use_label_wrap and not self.label: - raise ValueError( + reason = ( "`ListWidget` can't have set `use_label_wrap` to True and" " not have set \"label\" key at the same time." 
) + raise EntitySchemaError(self, reason) for child_obj in self.children: child_obj.schema_validations() diff --git a/pype/settings/entities/root_entities.py b/pype/settings/entities/root_entities.py index 74ccb8d17c9..89660971f15 100644 --- a/pype/settings/entities/root_entities.py +++ b/pype/settings/entities/root_entities.py @@ -13,6 +13,7 @@ get_studio_settings_schema, get_project_settings_schema ) +from .exceptions import EntitySchemaError from pype.settings.constants import ( SYSTEM_SETTINGS_KEY, PROJECT_SETTINGS_KEY, @@ -145,10 +146,11 @@ def _item_initalization(self): def schema_validations(self): for child_entity in self.children: if child_entity.is_group: - raise ValueError(( + reason = ( "Root entity \"{}\" has child with `is_group`" " attribute set to True but root can't save overrides." - ).format(self.__class__.__name__)) + ).format(self.__class__.__name__) + raise EntitySchemaError(self, reason) child_entity.schema_validations() def get_entity_from_path(self, path): From 5c7ea9d9e42b2d1ba8d016cfeadc6b2d2ba608ed Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 23 Mar 2021 17:48:26 +0100 Subject: [PATCH 174/295] enum entity was abstracted to be able do more specific schema validations --- pype/settings/entities/__init__.py | 2 + pype/settings/entities/enum_entity.py | 75 +++++++++++-------- pype/settings/entities/root_entities.py | 3 +- .../settings/settings/widgets/categories.py | 4 +- 4 files changed, 51 insertions(+), 33 deletions(-) diff --git a/pype/settings/entities/__init__.py b/pype/settings/entities/__init__.py index 20e00de4a50..e0910077df3 100644 --- a/pype/settings/entities/__init__.py +++ b/pype/settings/entities/__init__.py @@ -96,6 +96,7 @@ ) from .enum_entity import ( + BaseEnumEntity, EnumEntity, AppsEnumEntity, ToolsEnumEntity @@ -141,6 +142,7 @@ "PathInput", "RawJsonEntity", + "BaseEnumEntity", "EnumEntity", "AppsEnumEntity", "ToolsEnumEntity", diff --git a/pype/settings/entities/enum_entity.py 
b/pype/settings/entities/enum_entity.py index b7b7d1cf4d0..2dcb1a89359 100644 --- a/pype/settings/entities/enum_entity.py +++ b/pype/settings/entities/enum_entity.py @@ -6,34 +6,14 @@ ) -class EnumEntity(InputEntity): - schema_types = ["enum"] - +class BaseEnumEntity(InputEntity): def _item_initalization(self): - self.multiselection = self.schema_data.get("multiselection", False) - self.enum_items = self.schema_data["enum_items"] - if not self.enum_items: - raise ValueError("Attribute `enum_items` is not defined.") - - valid_keys = set() - for item in self.enum_items: - valid_keys.add(tuple(item.keys())[0]) - - self.valid_keys = valid_keys - - if self.multiselection: - self.valid_value_types = (list, ) - self.value_on_not_set = [] - else: - for key in valid_keys: - if self.value_on_not_set is NOT_SET: - self.value_on_not_set = key - break - - self.valid_value_types = (STRING_TYPE, ) - - # GUI attribute - self.placeholder = self.schema_data.get("placeholder") + self.multiselection = True + self.value_on_not_set = None + self.enum_items = None + self.valid_keys = None + self.valid_value_types = None + self.placeholder = None def schema_validations(self): if not isinstance(self.enum_items, list): @@ -78,7 +58,7 @@ def set(self, value): for item in check_values: if item not in self.valid_keys: raise ValueError( - "{} Invalid value \"{}\". Expected: {}".format( + "{} Invalid value \"{}\". 
Expected one of: {}".format( self.path, item, self.valid_keys ) ) @@ -86,7 +66,42 @@ def set(self, value): self._on_value_change() -class AppsEnumEntity(EnumEntity): +class EnumEntity(BaseEnumEntity): + schema_types = ["enum"] + + def _item_initalization(self): + self.multiselection = self.schema_data.get("multiselection", False) + self.enum_items = self.schema_data.get("enum_items") + + valid_keys = set() + for item in self.enum_items or []: + valid_keys.add(tuple(item.keys())[0]) + + self.valid_keys = valid_keys + + if self.multiselection: + self.valid_value_types = (list, ) + self.value_on_not_set = [] + else: + for key in valid_keys: + if self.value_on_not_set is NOT_SET: + self.value_on_not_set = key + break + + self.valid_value_types = (STRING_TYPE, ) + + # GUI attribute + self.placeholder = self.schema_data.get("placeholder") + + def schema_validations(self): + if not self.enum_items and "enum_items" not in self.schema_data: + raise EntitySchemaError( + self, "Enum item must have defined `enum_items`" + ) + super().schema_validations() + + +class AppsEnumEntity(BaseEnumEntity): schema_types = ["apps-enum"] def _item_initalization(self): @@ -139,7 +154,7 @@ def set_override_state(self, *args, **kwargs): self._current_value = new_value -class ToolsEnumEntity(EnumEntity): +class ToolsEnumEntity(BaseEnumEntity): schema_types = ["tools-enum"] def _item_initalization(self): diff --git a/pype/settings/entities/root_entities.py b/pype/settings/entities/root_entities.py index 89660971f15..82885e84794 100644 --- a/pype/settings/entities/root_entities.py +++ b/pype/settings/entities/root_entities.py @@ -175,7 +175,8 @@ def create_schema_object(self, schema_data, *args, **kwargs): entities.BaseItemEntity, entities.ItemEntity, entities.EndpointEntity, - entities.InputEntity + entities.InputEntity, + entities.BaseEnumEntity ) self._loaded_types = {} diff --git a/pype/tools/settings/settings/widgets/categories.py b/pype/tools/settings/settings/widgets/categories.py index 
07f7291e91a..4cab86c30b4 100644 --- a/pype/tools/settings/settings/widgets/categories.py +++ b/pype/tools/settings/settings/widgets/categories.py @@ -15,7 +15,7 @@ NumberEntity, BoolEntity, - EnumEntity, + BaseEnumEntity, TextEntity, PathInput, RawJsonEntity, @@ -112,7 +112,7 @@ def create_ui_for_entity(category_widget, entity, entity_widget): elif isinstance(entity, RawJsonEntity): return RawJsonWidget(*args) - elif isinstance(entity, EnumEntity): + elif isinstance(entity, BaseEnumEntity): return EnumeratorWidget(*args) elif isinstance(entity, PathEntity): From 16e11a29593b9a47c221acb21a7fb4902cd1fbca Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 23 Mar 2021 19:36:28 +0100 Subject: [PATCH 175/295] hound fix --- pype/settings/entities/dict_immutable_keys_entity.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pype/settings/entities/dict_immutable_keys_entity.py b/pype/settings/entities/dict_immutable_keys_entity.py index f0621381511..92a36b7dca9 100644 --- a/pype/settings/entities/dict_immutable_keys_entity.py +++ b/pype/settings/entities/dict_immutable_keys_entity.py @@ -97,9 +97,9 @@ def schema_validations(self): raise EntitySchemaError(self, reason) if not isinstance(checkbox_child, BoolEntity): - reason = "Checkbox children \"{}\" is not `boolean` type.".format( - self.checkbox_key - ) + reason = ( + "Checkbox children \"{}\" is not `boolean` type." 
+ ).format(self.checkbox_key) raise EntitySchemaError(self, reason) super(DictImmutableKeysEntity, self).schema_validations() From 771a8e29acc80806c514c5277692238042d2a870 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Wed, 24 Mar 2021 10:17:08 +0100 Subject: [PATCH 176/295] rename master version to hero --- pype/lib/avalon_context.py | 2 +- pype/modules/sync_server/tray/app.py | 2 +- .../publish/integrate_master_version.py | 146 +++++++++--------- .../defaults/project_anatomy/templates.json | 6 +- .../defaults/project_settings/global.json | 2 +- .../schemas/schema_anatomy_templates.json | 4 +- .../schemas/schema_global_publish.json | 4 +- ...version-1.0.json => hero_version-1.0.json} | 12 +- test_localsystem.txt | 1 + 9 files changed, 90 insertions(+), 89 deletions(-) rename schema/{master_version-1.0.json => hero_version-1.0.json} (76%) create mode 100644 test_localsystem.txt diff --git a/pype/lib/avalon_context.py b/pype/lib/avalon_context.py index dc0ce9a8733..d4daf221422 100644 --- a/pype/lib/avalon_context.py +++ b/pype/lib/avalon_context.py @@ -40,7 +40,7 @@ def is_latest(representation): """ version = avalon.io.find_one({"_id": representation['parent']}) - if version["type"] == "master_version": + if version["type"] == "hero_version": return True # Get highest version under the parent diff --git a/pype/modules/sync_server/tray/app.py b/pype/modules/sync_server/tray/app.py index b28ca0f66e8..783f2def1c8 100644 --- a/pype/modules/sync_server/tray/app.py +++ b/pype/modules/sync_server/tray/app.py @@ -784,7 +784,7 @@ def _add_page_records(self, local_site, remote_site, representations): if context.get("version"): version = "v{:0>3d}".format(context.get("version")) else: - version = "master" + version = "hero" item = self.SyncRepresentation( repre.get("_id"), diff --git a/pype/plugins/publish/integrate_master_version.py b/pype/plugins/publish/integrate_master_version.py index 7d72bb26d43..7709f089fed 100644 --- 
a/pype/plugins/publish/integrate_master_version.py +++ b/pype/plugins/publish/integrate_master_version.py @@ -10,8 +10,8 @@ from avalon.vendor import filelink -class IntegrateMasterVersion(pyblish.api.InstancePlugin): - label = "Integrate Master Version" +class IntegrateHeroVersion(pyblish.api.InstancePlugin): + label = "Integrate Hero Version" # Must happen after IntegrateNew order = pyblish.api.IntegratorOrder + 0.1 @@ -39,7 +39,7 @@ class IntegrateMasterVersion(pyblish.api.InstancePlugin): def process(self, instance): self.log.debug( - "--- Integration of Master version for subset `{}` begins.".format( + "--- Integration of Hero version for subset `{}` begins.".format( instance.data.get("subset", str(instance)) ) ) @@ -52,25 +52,25 @@ def process(self, instance): project_name = api.Session["AVALON_PROJECT"] - # TODO raise error if master not set? + # TODO raise error if Hero not set? anatomy = instance.context.data["anatomy"] - if "master" not in anatomy.templates: - self.log.warning("!!! Anatomy does not have set `master` key!") + if "hero" not in anatomy.templates: + self.log.warning("!!! Anatomy does not have set `hero` key!") return - if "path" not in anatomy.templates["master"]: + if "path" not in anatomy.templates["hero"]: self.log.warning(( - "!!! There is not set `path` template in `master` anatomy" + "!!! There is not set `path` template in `hero` anatomy" " for project \"{}\"." ).format(project_name)) return - master_template = anatomy.templates["master"]["path"] - self.log.debug("`Master` template check was successful. `{}`".format( - master_template + hero_template = anatomy.templates["hero"]["path"] + self.log.debug("`hero` template check was successful. 
`{}`".format( + hero_template )) - master_publish_dir = self.get_publish_dir(instance) + hero_publish_dir = self.get_publish_dir(instance) src_version_entity = instance.data.get("versionEntity") filtered_repre_ids = [] @@ -105,7 +105,7 @@ def process(self, instance): if not src_version_entity: self.log.warning(( "!!! Can't find origin version in database." - " Skipping Master version publish." + " Skipping hero version publish." )) return @@ -131,7 +131,7 @@ def process(self, instance): all_repre_file_paths.append(file_path) # TODO this is not best practice of getting resources for publish - # WARNING due to this we must remove all files from master publish dir + # WARNING due to this we must remove all files from hero publish dir instance_publish_dir = os.path.normpath( instance.data["publishDir"] ) @@ -145,13 +145,13 @@ def process(self, instance): continue dst_filepath = file_path.replace( - instance_publish_dir, master_publish_dir + instance_publish_dir, hero_publish_dir ) other_file_paths_mapping.append((file_path, dst_filepath)) # Current version old_version, old_repres = ( - self.current_master_ents(src_version_entity) + self.current_hero_ents(src_version_entity) ) old_repres_by_name = { @@ -163,30 +163,30 @@ def process(self, instance): else: new_version_id = io.ObjectId() - new_master_version = { + new_hero_version = { "_id": new_version_id, "version_id": src_version_entity["_id"], "parent": src_version_entity["parent"], - "type": "master_version", - "schema": "pype:master_version-1.0" + "type": "hero_version", + "schema": "pype:hero_version-1.0" } - schema.validate(new_master_version) + schema.validate(new_hero_version) # Don't make changes in database until everything is O.K. 
bulk_writes = [] if old_version: - self.log.debug("Replacing old master version.") + self.log.debug("Replacing old hero version.") bulk_writes.append( ReplaceOne( - {"_id": new_master_version["_id"]}, - new_master_version + {"_id": new_hero_version["_id"]}, + new_hero_version ) ) else: - self.log.debug("Creating first master version.") + self.log.debug("Creating first hero version.") bulk_writes.append( - InsertOne(new_master_version) + InsertOne(new_hero_version) ) # Separate old representations into `to replace` and `to delete` @@ -213,21 +213,21 @@ def process(self, instance): repre_name_low = repre["name"].lower() archived_repres_by_name[repre_name_low] = repre - backup_master_publish_dir = None - if os.path.exists(master_publish_dir): - backup_master_publish_dir = master_publish_dir + ".BACKUP" + backup_hero_publish_dir = None + if os.path.exists(hero_publish_dir): + backup_hero_publish_dir = hero_publish_dir + ".BACKUP" max_idx = 10 idx = 0 - _backup_master_publish_dir = backup_master_publish_dir - while os.path.exists(_backup_master_publish_dir): + _backup_hero_publish_dir = backup_hero_publish_dir + while os.path.exists(_backup_hero_publish_dir): self.log.debug(( "Backup folder already exists." 
" Trying to remove \"{}\"" - ).format(_backup_master_publish_dir)) + ).format(_backup_hero_publish_dir)) try: - shutil.rmtree(_backup_master_publish_dir) - backup_master_publish_dir = _backup_master_publish_dir + shutil.rmtree(_backup_hero_publish_dir) + backup_hero_publish_dir = _backup_hero_publish_dir break except Exception: self.log.info(( @@ -235,11 +235,11 @@ def process(self, instance): " Trying to add index to folder name" )) - _backup_master_publish_dir = ( - backup_master_publish_dir + str(idx) + _backup_hero_publish_dir = ( + backup_hero_publish_dir + str(idx) ) - if not os.path.exists(_backup_master_publish_dir): - backup_master_publish_dir = _backup_master_publish_dir + if not os.path.exists(_backup_hero_publish_dir): + backup_hero_publish_dir = _backup_hero_publish_dir break if idx > max_idx: @@ -251,14 +251,14 @@ def process(self, instance): idx += 1 self.log.debug("Backup folder path is \"{}\"".format( - backup_master_publish_dir + backup_hero_publish_dir )) try: - os.rename(master_publish_dir, backup_master_publish_dir) + os.rename(hero_publish_dir, backup_hero_publish_dir) except PermissionError: raise AssertionError(( - "Could not create master version because it is not" - " possible to replace current master files." + "Could not create hero version because it is not" + " possible to replace current hero files." 
)) try: src_to_dst_file_paths = [] @@ -275,11 +275,11 @@ def process(self, instance): # Get filled path to repre context anatomy_filled = anatomy.format(anatomy_data) - template_filled = anatomy_filled["master"]["path"] + template_filled = anatomy_filled["hero"]["path"] repre_data = { "path": str(template_filled), - "template": master_template + "template": hero_template } repre_context = template_filled.used_values for key in self.db_representation_context_keys: @@ -293,7 +293,7 @@ def process(self, instance): # Prepare new repre repre = copy.deepcopy(repre_info["representation"]) - repre["parent"] = new_master_version["_id"] + repre["parent"] = new_hero_version["_id"] repre["context"] = repre_context repre["data"] = repre_data repre.pop("_id", None) @@ -319,7 +319,7 @@ def process(self, instance): frame_splitter = "_-_FRAME_SPLIT_-_" anatomy_data["frame"] = frame_splitter _anatomy_filled = anatomy.format(anatomy_data) - _template_filled = _anatomy_filled["master"]["path"] + _template_filled = _anatomy_filled["hero"]["path"] head, tail = _template_filled.split(frame_splitter) padding = int( anatomy.templates["render"].get( @@ -338,7 +338,7 @@ def process(self, instance): (src_file, dst_file) ) - # replace original file name with master name in repre doc + # replace original file name with hero name in repre doc for index in range(len(repre.get("files"))): file = repre.get("files")[index] file_name = os.path.basename(file.get('path')) @@ -431,27 +431,27 @@ def process(self, instance): bulk_writes ) - # Remove backuped previous master + # Remove backuped previous hero if ( - backup_master_publish_dir is not None and - os.path.exists(backup_master_publish_dir) + backup_hero_publish_dir is not None and + os.path.exists(backup_hero_publish_dir) ): - shutil.rmtree(backup_master_publish_dir) + shutil.rmtree(backup_hero_publish_dir) except Exception: if ( - backup_master_publish_dir is not None and - os.path.exists(backup_master_publish_dir) + backup_hero_publish_dir is 
not None and + os.path.exists(backup_hero_publish_dir) ): - os.rename(backup_master_publish_dir, master_publish_dir) + os.rename(backup_hero_publish_dir, hero_publish_dir) self.log.error(( - "!!! Creating of Master version failed." - " Previous master version maybe lost some data!" + "!!! Creating of hero version failed." + " Previous hero version maybe lost some data!" )) raise self.log.debug(( - "--- Master version integration for subset `{}`" + "--- hero version integration for subset `{}`" " seems to be successful." ).format( instance.data.get("subset", str(instance)) @@ -469,9 +469,9 @@ def get_publish_dir(self, instance): anatomy = instance.context.data["anatomy"] template_data = copy.deepcopy(instance.data["anatomyData"]) - if "folder" in anatomy.templates["master"]: + if "folder" in anatomy.templates["hero"]: anatomy_filled = anatomy.format(template_data) - publish_folder = anatomy_filled["master"]["folder"] + publish_folder = anatomy_filled["hero"]["folder"] else: # This is for cases of Deprecated anatomy without `folder` # TODO remove when all clients have solved this issue @@ -488,13 +488,13 @@ def get_publish_dir(self, instance): " key underneath `publish` (in global of for project `{}`)." 
).format(project_name)) - file_path = anatomy_filled["master"]["path"] + file_path = anatomy_filled["hero"]["path"] # Directory publish_folder = os.path.dirname(file_path) publish_folder = os.path.normpath(publish_folder) - self.log.debug("Master publish dir: \"{}\"".format(publish_folder)) + self.log.debug("hero publish dir: \"{}\"".format(publish_folder)) return publish_folder @@ -535,33 +535,33 @@ def version_from_representations(self, repres): if version: return version - def current_master_ents(self, version): - master_version = io.find_one({ + def current_hero_ents(self, version): + hero_version = io.find_one({ "parent": version["parent"], - "type": "master_version" + "type": "hero_version" }) - if not master_version: + if not hero_version: return (None, []) - master_repres = list(io.find({ - "parent": master_version["_id"], + hero_repres = list(io.find({ + "parent": hero_version["_id"], "type": "representation" })) - return (master_version, master_repres) + return (hero_version, hero_repres) def _update_path(self, anatomy, path, src_file, dst_file): """ - Replaces source path with new master path + Replaces source path with new hero path 'path' contains original path with version, must be replaced with - 'master' path (with 'master' label and without version) + 'hero' path (with 'hero' label and without version) Args: anatomy (Anatomy) - to get rootless style of path path (string) - path from DB src_file (string) - original file path - dst_file (string) - master file path + dst_file (string) - hero file path """ _, rootless = anatomy.find_root_template_from_path( dst_file @@ -573,13 +573,13 @@ def _update_path(self, anatomy, path, src_file, dst_file): def _update_hash(self, hash, src_file_name, dst_file): """ - Updates hash value with proper master name + Updates hash value with proper hero name """ src_file_name = self._get_name_without_ext( src_file_name) - master_file_name = self._get_name_without_ext( + hero_file_name = self._get_name_without_ext( 
dst_file) - return hash.replace(src_file_name, master_file_name) + return hash.replace(src_file_name, hero_file_name) def _get_name_without_ext(self, value): file_name = os.path.basename(value) diff --git a/pype/settings/defaults/project_anatomy/templates.json b/pype/settings/defaults/project_anatomy/templates.json index 862b7328463..2b16f59d017 100644 --- a/pype/settings/defaults/project_anatomy/templates.json +++ b/pype/settings/defaults/project_anatomy/templates.json @@ -21,9 +21,9 @@ "path": "{@folder}/{@file}", "thumbnail": "{thumbnail_root}/{project[name]}/{_id}_{thumbnail_type}.{ext}" }, - "master": { - "folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{subset}/master", - "file": "{project[code]}_{asset}_{subset}_master<_{output}><.{frame}>.{ext}", + "hero": { + "folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{subset}/hero", + "file": "{project[code]}_{asset}_{subset}_hero<_{output}><.{frame}>.{ext}", "path": "{@folder}/{@file}" }, "delivery": {}, diff --git a/pype/settings/defaults/project_settings/global.json b/pype/settings/defaults/project_settings/global.json index ada4a6e17cc..0bd9c2a3a63 100644 --- a/pype/settings/defaults/project_settings/global.json +++ b/pype/settings/defaults/project_settings/global.json @@ -1,6 +1,6 @@ { "publish": { - "IntegrateMasterVersion": { + "IntegrateHeroVersion": { "enabled": true }, "ExtractJpegEXR": { diff --git a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json index 918d3edba6d..a8534e7e294 100644 --- a/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json +++ b/pype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_templates.json @@ -113,8 +113,8 @@ }, { "type": "dict", - "key": "master", - "label": "Master", + "key": "hero", + "label": "Hero", "children": [ { "type": "text", diff --git 
a/pype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json b/pype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json index 4045870a9a7..32e8c7d1e9d 100644 --- a/pype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json +++ b/pype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json @@ -8,8 +8,8 @@ "type": "dict", "collapsible": true, "checkbox_key": "enabled", - "key": "IntegrateMasterVersion", - "label": "IntegrateMasterVersion", + "key": "IntegrateHeroVersion", + "label": "IntegrateHeroVersion", "is_group": true, "children": [ { diff --git a/schema/master_version-1.0.json b/schema/hero_version-1.0.json similarity index 76% rename from schema/master_version-1.0.json rename to schema/hero_version-1.0.json index 9dff570b3ae..83304ef4d5d 100644 --- a/schema/master_version-1.0.json +++ b/schema/hero_version-1.0.json @@ -1,8 +1,8 @@ { "$schema": "http://json-schema.org/draft-04/schema#", - "title": "pype:master_version-1.0", - "description": "Master version of asset", + "title": "pype:hero_version-1.0", + "description": "Hero version of asset", "type": "object", @@ -27,14 +27,14 @@ "schema": { "description": "The schema associated with this document", "type": "string", - "enum": ["avalon-core:master_version-1.0", "pype:master_version-1.0"], - "example": "pype:master_version-1.0" + "enum": ["avalon-core:hero_version-1.0", "pype:hero_version-1.0"], + "example": "pype:hero_version-1.0" }, "type": { "description": "The type of document", "type": "string", - "enum": ["master_version"], - "example": "master_version" + "enum": ["hero_version"], + "example": "hero_version" }, "parent": { "description": "Unique identifier to parent document", diff --git a/test_localsystem.txt b/test_localsystem.txt new file mode 100644 index 00000000000..dde7986af89 --- /dev/null +++ b/test_localsystem.txt @@ -0,0 +1 @@ +I have run From 33f319ddf87ba05b00447eeb5bc025db3690cde3 Mon Sep 17 00:00:00 
2001 From: iLLiCiTiT Date: Wed, 24 Mar 2021 12:38:13 +0100 Subject: [PATCH 177/295] opacity has defined min/max and decimals --- .../schemas/schema_global_publish.json | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/pype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json b/pype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json index 4045870a9a7..92abe7064e8 100644 --- a/pype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json +++ b/pype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json @@ -208,17 +208,24 @@ { "type": "number", "key": "font_size", - "label": "Font size" + "label": "Font size", + "minimum": 0 }, { "type": "number", "key": "opacity", - "label": "Font opacity" + "label": "Font opacity", + "decimal": 2, + "maximum": 1, + "minimum": 0 }, { "type": "number", "key": "bg_opacity", - "label": "Background opacity" + "label": "Background opacity", + "decimal": 2, + "maximum": 1, + "minimum": 0 }, { "type": "number", From cf68e071f7486402550f9653cb9191faf024d7b4 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 24 Mar 2021 12:38:22 +0100 Subject: [PATCH 178/295] resaved values --- pype/settings/defaults/project_settings/global.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/settings/defaults/project_settings/global.json b/pype/settings/defaults/project_settings/global.json index ada4a6e17cc..7aab1082fe8 100644 --- a/pype/settings/defaults/project_settings/global.json +++ b/pype/settings/defaults/project_settings/global.json @@ -51,8 +51,8 @@ "enabled": true, "options": { "font_size": 42, - "opacity": 1, - "bg_opacity": 0, + "opacity": 1.0, + "bg_opacity": 0.5, "x_offset": 5, "y_offset": 5, "bg_padding": 5 From 75d96074f3c93fbbe0ca0c6d9401e45d5794e30f Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 24 Mar 2021 12:39:01 +0100 Subject: [PATCH 179/295] number entity entity can convert 
stringified number to to number type --- pype/settings/entities/input_entities.py | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/pype/settings/entities/input_entities.py b/pype/settings/entities/input_entities.py index a1beaef9f4d..921171cfffb 100644 --- a/pype/settings/entities/input_entities.py +++ b/pype/settings/entities/input_entities.py @@ -1,3 +1,4 @@ +import re import copy from abc import abstractmethod @@ -320,6 +321,8 @@ def _remove_from_project_override(self, on_change_trigger): class NumberEntity(InputEntity): schema_types = ["number"] + float_number_regex = re.compile(r"^\d+\.\d+$") + int_number_regex = re.compile(r"^\d+$") def _item_initalization(self): self.minimum = self.schema_data.get("minimum", -99999) @@ -334,15 +337,32 @@ def _item_initalization(self): self.value_on_not_set = 0 def _convert_to_valid_type(self, value): + if isinstance(value, str): + new_value = None + if self.float_number_regex.match(value): + new_value = float(value) + elif self.int_number_regex.match(value): + new_value = int(value) + + if new_value is not None: + self.log.info("{} - Converted str {} to {} {}".format( + self.path, value, type(new_value).__name__, new_value + )) + value = new_value + if self.decimal: + if isinstance(value, float): + return value if isinstance(value, int): return float(value) else: + if isinstance(value, int): + return value if isinstance(value, float): new_value = int(value) if new_value != value: - self.log.info("Converted float {} to int {}".format( - value, new_value + self.log.info("{} - Converted float {} to int {}".format( + self.path, value, new_value )) return new_value return NOT_SET From 5a07975714acc234a04d19f5c7bf07ede6ef2389 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 24 Mar 2021 12:40:35 +0100 Subject: [PATCH 180/295] moved separator out of options entity top be visible even if options are collapsed --- .../projects_schema/schemas/schema_global_publish.json | 7 +++---- 1 file 
changed, 3 insertions(+), 4 deletions(-) diff --git a/pype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json b/pype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json index 92abe7064e8..e0ef5ea2e1c 100644 --- a/pype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json +++ b/pype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json @@ -241,13 +241,12 @@ "type": "number", "key": "bg_padding", "label": "Padding aroung text" - }, - { - "type": "splitter" } ] }, - + { + "type": "separator" + }, { "type": "list", "key": "profiles", From 60e563bf66fd983e0ee122070aab3b26cc1e3c8b Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 24 Mar 2021 13:00:38 +0100 Subject: [PATCH 181/295] specify more packages of pywin32 module --- setup.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 6a86f0f97ac..e3be48d6e09 100644 --- a/setup.py +++ b/setup.py @@ -62,7 +62,12 @@ ] if sys.platform == "win32": - install_requires.append("win32ctypes") + install_requires.extend([ + # `pywin32` packages + "win32ctypes", + "win32comext", + "pythoncom" + ]) build_options = dict( packages=install_requires, From 222c6104105cd153243e5a4499e082f2a815c472 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 24 Mar 2021 15:26:08 +0100 Subject: [PATCH 182/295] fix super call for python 2 compatibility --- pype/settings/entities/enum_entity.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/settings/entities/enum_entity.py b/pype/settings/entities/enum_entity.py index 2dcb1a89359..f06ec97f4b0 100644 --- a/pype/settings/entities/enum_entity.py +++ b/pype/settings/entities/enum_entity.py @@ -98,7 +98,7 @@ def schema_validations(self): raise EntitySchemaError( self, "Enum item must have defined `enum_items`" ) - super().schema_validations() + super(EnumEntity, self).schema_validations() class AppsEnumEntity(BaseEnumEntity): From 
9bfaa841a9bf8587b3cf68eba2c4e063ec146186 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 24 Mar 2021 15:44:38 +0100 Subject: [PATCH 183/295] anatomy_keys and attribute_keys are created on demand --- pype/settings/handlers.py | 46 ++++++++++++++++++++++++++------------- 1 file changed, 31 insertions(+), 15 deletions(-) diff --git a/pype/settings/handlers.py b/pype/settings/handlers.py index 78bfd6cc3fa..1dae2526f51 100644 --- a/pype/settings/handlers.py +++ b/pype/settings/handlers.py @@ -343,24 +343,11 @@ def __init__(self): # Get mongo connection from pype.lib import PypeMongoConnection from avalon.api import AvalonMongoDB - from .entities import ProjectSettings settings_collection = PypeMongoConnection.get_mongo_client() - # Prepare anatomy keys and attribute keys - # NOTE this is cached on first import - # - keys may change only on schema change which should not happen - # during production - project_settings_root = ProjectSettings( - reset=False, change_state=False - ) - anatomy_entity = project_settings_root["project_anatomy"] - anatomy_keys = set(anatomy_entity.keys()) - anatomy_keys.remove("attributes") - attribute_keys = set(anatomy_entity["attributes"].keys()) - - self.anatomy_keys = anatomy_keys - self.attribute_keys = attribute_keys + self._anatomy_keys = None + self._attribute_keys = None # TODO prepare version of pype # - pype version should define how are settings saved and loaded @@ -380,6 +367,35 @@ def __init__(self): self.project_settings_cache = collections.defaultdict(CacheValues) self.project_anatomy_cache = collections.defaultdict(CacheValues) + def _prepare_project_settings_keys(self): + from .entities import ProjectSettings + # Prepare anatomy keys and attribute keys + # NOTE this is cached on first import + # - keys may change only on schema change which should not happen + # during production + project_settings_root = ProjectSettings( + reset=False, change_state=False + ) + anatomy_entity = project_settings_root["project_anatomy"] + 
anatomy_keys = set(anatomy_entity.keys()) + anatomy_keys.remove("attributes") + attribute_keys = set(anatomy_entity["attributes"].keys()) + + self._anatomy_keys = anatomy_keys + self._attribute_keys = attribute_keys + + @property + def anatomy_keys(self): + if self._anatomy_keys: + self._prepare_project_settings_keys() + return self._anatomy_keys + + @property + def attribute_keys(self): + if self._attribute_keys: + self._prepare_project_settings_keys() + return self._attribute_keys + def save_studio_settings(self, data): """Save studio overrides of system settings. From 0197eaa5d730a2e360d355d58c7ffe4fdb7a1670 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 24 Mar 2021 16:03:25 +0100 Subject: [PATCH 184/295] settings ui can catch exceptions --- pype/settings/entities/__init__.py | 1 + .../settings/settings/widgets/categories.py | 57 ++++++++++++++++--- 2 files changed, 51 insertions(+), 7 deletions(-) diff --git a/pype/settings/entities/__init__.py b/pype/settings/entities/__init__.py index e0910077df3..1cb4be62e70 100644 --- a/pype/settings/entities/__init__.py +++ b/pype/settings/entities/__init__.py @@ -54,6 +54,7 @@ """ from .exceptions import ( + SchemaError, DefaultsNotDefined, StudioDefaultsNotDefined, InvalidValueType, diff --git a/pype/tools/settings/settings/widgets/categories.py b/pype/tools/settings/settings/widgets/categories.py index 4cab86c30b4..d84bf3dc15b 100644 --- a/pype/tools/settings/settings/widgets/categories.py +++ b/pype/tools/settings/settings/widgets/categories.py @@ -1,4 +1,6 @@ import os +import sys +import traceback from enum import Enum from Qt import QtWidgets, QtCore, QtGui @@ -21,7 +23,8 @@ RawJsonEntity, DefaultsNotDefined, - StudioDefaultsNotDefined + StudioDefaultsNotDefined, + SchemaError ) from pype.settings.lib import get_system_settings @@ -199,6 +202,7 @@ def create_ui(self): save_btn.clicked.connect(self._save) + self.save_btn = save_btn self.scroll_widget = scroll_widget self.content_layout = content_layout 
self.content_widget = content_widget @@ -280,19 +284,49 @@ def reset(self): self.content_layout.removeWidget(widget) widget.deleteLater() - self._create_root_entity() + dialog = None + try: + self._create_root_entity() - self.add_children_gui() + self.add_children_gui() - self.ignore_input_changes.set_ignore(True) + self.ignore_input_changes.set_ignore(True) - for input_field in self.input_fields: - input_field.set_entity_value() + for input_field in self.input_fields: + input_field.set_entity_value() + + self.ignore_input_changes.set_ignore(False) - self.ignore_input_changes.set_ignore(False) + except SchemaError as exc: + dialog = QtWidgets.QMessageBox(self) + dialog.setWindowTitle("Schema error") + msg = "Implementation bug!\n\nError: {}".format(str(exc)) + dialog.setText(msg) + dialog.setIcon(QtWidgets.QMessageBox.Warning) + + except Exception as exc: + formatted_traceback = traceback.format_exception(*sys.exc_info()) + dialog = QtWidgets.QMessageBox(self) + msg = "Unexpected error happened!\n\nError: {}".format(str(exc)) + dialog.setText(msg) + dialog.setDetailedText(formatted_traceback) + dialog.setIcon(QtWidgets.QMessageBox.Critical) self.set_state(CategoryState.Idle) + if dialog: + dialog.exec_() + self._on_reset_crash() + else: + self._on_reset_success() + + def _on_reset_crash(self): + self.save_btn.setEnabled(False) + + def _on_reset_success(self): + if not self.save_btn.isEnabled(): + self.save_btn.setEnabled(True) + def add_children_gui(self): for child_obj in self.entity.children: item = self.create_ui_for_entity(self, child_obj, self) @@ -404,6 +438,15 @@ def on_saved(self, saved_tab_widget): if self is saved_tab_widget: return + def _on_reset_crash(self): + self.project_list_widget.setEnabled(False) + super(ProjectWidget, self)._on_reset_crash() + + def _on_reset_success(self): + if not self.project_list_widget.isEnabled(): + self.project_list_widget.setEnabled(True) + super(ProjectWidget, self)._on_reset_success() + def _create_root_entity(self): 
self.entity = ProjectSettings(change_state=False) self.entity.on_change_callbacks.append(self._on_entity_change) From 26809bad907b3e76714fbc2fdb77ebe651ce630f Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 24 Mar 2021 16:22:19 +0100 Subject: [PATCH 185/295] fixed dialog arguments --- .../settings/settings/widgets/categories.py | 24 ++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/pype/tools/settings/settings/widgets/categories.py b/pype/tools/settings/settings/widgets/categories.py index d84bf3dc15b..263012fa52d 100644 --- a/pype/tools/settings/settings/widgets/categories.py +++ b/pype/tools/settings/settings/widgets/categories.py @@ -309,9 +309,25 @@ def reset(self): dialog = QtWidgets.QMessageBox(self) msg = "Unexpected error happened!\n\nError: {}".format(str(exc)) dialog.setText(msg) - dialog.setDetailedText(formatted_traceback) + dialog.setDetailedText("\n".join(formatted_traceback)) dialog.setIcon(QtWidgets.QMessageBox.Critical) + line_widths = set() + metricts = dialog.fontMetrics() + for line in formatted_traceback: + line_widths.add(metricts.width(line)) + max_width = max(line_widths) + + spacer = QtWidgets.QSpacerItem( + max_width, 0, + QtWidgets.QSizePolicy.Minimum, + QtWidgets.QSizePolicy.Expanding + ) + layout = dialog.layout() + layout.addItem( + spacer, layout.rowCount(), 0, 1, layout.columnCount() + ) + self.set_state(CategoryState.Idle) if dialog: @@ -345,8 +361,10 @@ def items_are_valid(self): msg_box = QtWidgets.QMessageBox( QtWidgets.QMessageBox.Warning, "Invalid input", - "There is invalid value in one of inputs." - " Please lead red color and fix them.", + ( + "There is invalid value in one of inputs." + " Please lead red color and fix them." 
+ ), parent=self ) msg_box.setStandardButtons(QtWidgets.QMessageBox.Ok) From 92591d45aa2e9e7770f4669b939ce0c54e5f9eb1 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 24 Mar 2021 16:22:58 +0100 Subject: [PATCH 186/295] plugin template also have 'active' key in --- .../projects_schema/schemas/template_publish_plugin.json | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pype/settings/entities/schemas/projects_schema/schemas/template_publish_plugin.json b/pype/settings/entities/schemas/projects_schema/schemas/template_publish_plugin.json index 88151f7534b..5151e4550d6 100644 --- a/pype/settings/entities/schemas/projects_schema/schemas/template_publish_plugin.json +++ b/pype/settings/entities/schemas/projects_schema/schemas/template_publish_plugin.json @@ -21,10 +21,15 @@ "key": "optional", "label": "Optional" }, + { + "type": "boolean", + "key": "active", + "label": "Active" + }, { "type": "label", "label": "{docstring}" } ] } -] \ No newline at end of file +] From 722909b2db5c593a87a283f51146dfe17c5bb671 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 24 Mar 2021 16:23:23 +0100 Subject: [PATCH 187/295] resaved defaults of optional plugins with active key --- .../defaults/project_settings/maya.json | 66 ++++++++++++------- .../defaults/project_settings/nuke.json | 12 ++-- 2 files changed, 52 insertions(+), 26 deletions(-) diff --git a/pype/settings/defaults/project_settings/maya.json b/pype/settings/defaults/project_settings/maya.json index 8f991c76734..de8db5aa814 100644 --- a/pype/settings/defaults/project_settings/maya.json +++ b/pype/settings/defaults/project_settings/maya.json @@ -169,91 +169,113 @@ }, "ValidateColorSets": { "enabled": false, - "optional": true + "optional": true, + "active": true }, "ValidateMeshHasOverlappingUVs": { "enabled": false, - "optional": true + "optional": true, + "active": true }, "ValidateMeshArnoldAttributes": { "enabled": false, - "optional": true + "optional": true, + "active": true }, 
"ValidateMeshShaderConnections": { "enabled": true, - "optional": true + "optional": true, + "active": true }, "ValidateMeshSingleUVSet": { "enabled": false, - "optional": true + "optional": true, + "active": true }, "ValidateMeshHasUVs": { "enabled": true, - "optional": true + "optional": true, + "active": true }, "ValidateMeshLaminaFaces": { "enabled": false, - "optional": true + "optional": true, + "active": true }, "ValidateMeshNonManifold": { "enabled": false, - "optional": true + "optional": true, + "active": true }, "ValidateMeshNormalsUnlocked": { "enabled": false, - "optional": true + "optional": true, + "active": true }, "ValidateMeshUVSetMap1": { "enabled": false, - "optional": true + "optional": true, + "active": true }, "ValidateMeshVerticesHaveEdges": { "enabled": true, - "optional": true + "optional": true, + "active": true }, "ValidateNoAnimation": { "enabled": false, - "optional": true + "optional": true, + "active": true }, "ValidateNoNamespace": { "enabled": true, - "optional": true + "optional": true, + "active": true }, "ValidateNoNullTransforms": { "enabled": true, - "optional": true + "optional": true, + "active": true }, "ValidateNoUnknownNodes": { "enabled": true, - "optional": true + "optional": true, + "active": true }, "ValidateNodeNoGhosting": { "enabled": false, - "optional": true + "optional": true, + "active": true }, "ValidateShapeDefaultNames": { "enabled": false, - "optional": true + "optional": true, + "active": true }, "ValidateShapeRenderStats": { "enabled": false, - "optional": true + "optional": true, + "active": true }, "ValidateTransformZero": { "enabled": false, - "optional": true + "optional": true, + "active": true }, "ValidateCameraAttributes": { "enabled": false, - "optional": true + "optional": true, + "active": true }, "ValidateAssemblyName": { "enabled": true, - "optional": true + "optional": true, + "active": true }, "ValidateAssRelativePaths": { "enabled": true, - "optional": true + "optional": true, + "active": 
true }, "ExtractPlayblast": { "capture_preset": { diff --git a/pype/settings/defaults/project_settings/nuke.json b/pype/settings/defaults/project_settings/nuke.json index d209a671064..f808f9caa59 100644 --- a/pype/settings/defaults/project_settings/nuke.json +++ b/pype/settings/defaults/project_settings/nuke.json @@ -30,19 +30,23 @@ }, "ValidateOutputResolution": { "enabled": true, - "optional": true + "optional": true, + "active": true }, "ValidateGizmo": { "enabled": true, - "optional": true + "optional": true, + "active": true }, "ValidateScript": { "enabled": true, - "optional": true + "optional": true, + "active": true }, "ValidateNukeWriteBoundingBox": { "enabled": true, - "optional": true + "optional": true, + "active": true }, "ExtractThumbnail": { "enabled": true, From 593de5e995700a22fe36d9ff9b1efdd8147ae189 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 24 Mar 2021 17:46:12 +0100 Subject: [PATCH 188/295] fix check of attribute values --- pype/settings/handlers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/settings/handlers.py b/pype/settings/handlers.py index 1dae2526f51..6e93f2f4050 100644 --- a/pype/settings/handlers.py +++ b/pype/settings/handlers.py @@ -386,13 +386,13 @@ def _prepare_project_settings_keys(self): @property def anatomy_keys(self): - if self._anatomy_keys: + if self._anatomy_keys is None: self._prepare_project_settings_keys() return self._anatomy_keys @property def attribute_keys(self): - if self._attribute_keys: + if self._attribute_keys is None: self._prepare_project_settings_keys() return self._attribute_keys From 694cc12caa2320d869a4c839889af5263ccccc7a Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Wed, 24 Mar 2021 18:20:25 +0100 Subject: [PATCH 189/295] Delete test_localsystem.txt --- test_localsystem.txt | 1 - 1 file changed, 1 deletion(-) delete mode 100644 test_localsystem.txt diff --git a/test_localsystem.txt b/test_localsystem.txt deleted file mode 100644 index dde7986af89..00000000000 
--- a/test_localsystem.txt +++ /dev/null @@ -1 +0,0 @@ -I have run From 93c771eaf0ec1f90eea72a14469b9e6a6e995a33 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 10:52:57 +0100 Subject: [PATCH 190/295] variant template expect 3 keys app_name app_variant and app_variant_label --- .../system_schema/host_settings/template_host_variant.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pype/settings/entities/schemas/system_schema/host_settings/template_host_variant.json b/pype/settings/entities/schemas/system_schema/host_settings/template_host_variant.json index ba009cf0948..c809891b307 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/template_host_variant.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/template_host_variant.json @@ -7,8 +7,8 @@ }, { "type": "dict", - "key": "{host_name}_{host_version}", - "label": "{host_version}", + "key": "{app_name}_{app_variant}", + "label": "{app_variant_label}", "collapsible": true, "checkbox_key": "enabled", "children": [ @@ -78,7 +78,7 @@ "key": "environment", "label": "Environment", "type": "raw-json", - "env_group_key": "{host_name}_{host_version}" + "env_group_key": "{app_name}_{app_variant}" } ] } From 34b227612196f44e18f8536a2239e3eed4e891b0 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 10:55:58 +0100 Subject: [PATCH 191/295] all aplications are passing new keys to variants --- .../host_settings/schema_aftereffects.json | 10 ++++---- .../host_settings/schema_blender.json | 10 ++++---- .../host_settings/schema_celaction.json | 10 ++++---- .../host_settings/schema_djv.json | 5 ++-- .../host_settings/schema_fusion.json | 10 ++++---- .../host_settings/schema_harmony.json | 10 ++++---- .../host_settings/schema_houdini.json | 10 ++++---- .../host_settings/schema_maya.json | 15 +++++++----- .../host_settings/schema_mayabatch.json | 15 +++++++----- .../host_settings/schema_photoshop.json | 10 ++++---- 
.../host_settings/schema_resolve.json | 5 ++-- .../host_settings/schema_shell.json | 15 +++++++----- .../host_settings/schema_tvpaint.json | 10 ++++---- .../host_settings/schema_unreal.json | 5 ++-- .../host_settings/template_nuke.json | 24 +++++++++---------- 15 files changed, 96 insertions(+), 68 deletions(-) diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_aftereffects.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_aftereffects.json index 4304c654450..6e1ba352fc6 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_aftereffects.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_aftereffects.json @@ -29,12 +29,14 @@ "name": "template_host_variant", "template_data": [ { - "host_version": "2020", - "host_name": "aftereffects" + "app_variant_label": "2020", + "app_variant": "2020", + "app_name": "aftereffects" }, { - "host_version": "2021", - "host_name": "aftereffects" + "app_variant_label": "2021", + "app_variant": "2021", + "app_name": "aftereffects" } ] } diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_blender.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_blender.json index 5d8cb45da80..725a0685b6d 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_blender.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_blender.json @@ -29,12 +29,14 @@ "name": "template_host_variant", "template_data": [ { - "host_version": "2.90", - "host_name": "blender" + "app_variant_label": "2.83", + "app_variant": "2_83", + "app_name": "blender" }, { - "host_version": "2.83", - "host_name": "blender" + "app_variant_label": "2.90", + "app_variant": "2_90", + "app_name": "blender" } ] } diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_celaction.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_celaction.json index 
ab3f0f3f15c..6fa596808d5 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_celaction.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_celaction.json @@ -29,14 +29,16 @@ "name": "template_host_variant", "template_data": [ { - "host_version": "Local", - "host_name": "celation", + "app_variant_label": "Local", + "app_variant": "Local", + "app_name": "celation", "multiplatform": false, "multipath_executables": false }, { - "host_version": "Publish", - "host_name": "celation", + "app_variant_label": "Publish", + "app_variant": "Publish", + "app_name": "celation", "multiplatform": false, "multipath_executables": false } diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_djv.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_djv.json index 02c90a92ad9..8bbdb7ea9b2 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_djv.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_djv.json @@ -28,8 +28,9 @@ "type": "schema_template", "name": "template_host_variant", "template_data": { - "host_version": "1.1", - "host_name": "djvview" + "app_variant_label": "1.1", + "app_variant": "1_1", + "app_name": "djvview" } } ] diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_fusion.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_fusion.json index 1c1b7653d92..d693c39ffef 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_fusion.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_fusion.json @@ -29,12 +29,14 @@ "name": "template_host_variant", "template_data": [ { - "host_version": "16", - "host_name": "fusion" + "app_variant_label": "16", + "app_variant": "16", + "app_name": "fusion" }, { - "host_version": "9", - "host_name": "fusion" + "app_variant_label": "9", + "app_variant": "9", + "app_name": "fusion" } ] } diff --git 
a/pype/settings/entities/schemas/system_schema/host_settings/schema_harmony.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_harmony.json index b0abf35bfaa..8ad07c95ba3 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_harmony.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_harmony.json @@ -29,12 +29,14 @@ "name": "template_host_variant", "template_data": [ { - "host_version": "20", - "host_name": "harmony" + "app_variant_label": "20", + "app_variant": "20", + "app_name": "harmony" }, { - "host_version": "17", - "host_name": "harmony" + "app_variant_label": "17", + "app_variant": "17", + "app_name": "harmony" } ] } diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_houdini.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_houdini.json index cc0cd54cf20..399261528bc 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_houdini.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_houdini.json @@ -29,12 +29,14 @@ "name": "template_host_variant", "template_data": [ { - "host_version": "18", - "host_name": "houdini" + "app_variant_label": "18", + "app_variant": "18", + "app_name": "houdini" }, { - "host_version": "17", - "host_name": "houdini" + "app_variant_label": "17", + "app_variant": "17", + "app_name": "houdini" } ] } diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_maya.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_maya.json index 84782cb3d80..d8396b16cb2 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_maya.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_maya.json @@ -29,16 +29,19 @@ "name": "template_host_variant", "template_data": [ { - "host_version": "2020", - "host_name": "maya" + "app_variant_label": "2020", + "app_variant": "2020", + "app_name": "maya" }, { - 
"host_version": "2019", - "host_name": "maya" + "app_variant_label": "2019", + "app_variant": "2019", + "app_name": "maya" }, { - "host_version": "2018", - "host_name": "maya" + "app_variant_label": "2018", + "app_variant": "2018", + "app_name": "maya" } ] } diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_mayabatch.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_mayabatch.json index dbd850dcd6a..af7cc3d301d 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_mayabatch.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_mayabatch.json @@ -29,16 +29,19 @@ "name": "template_host_variant", "template_data": [ { - "host_version": "2020", - "host_name": "mayabatch" + "app_variant_label": "2020", + "app_variant": "2020", + "app_name": "mayabatch" }, { - "host_version": "2019", - "host_name": "mayabatch" + "app_variant_label": "2019", + "app_variant": "2019", + "app_name": "mayabatch" }, { - "host_version": "2018", - "host_name": "mayabatch" + "app_variant_label": "2018", + "app_variant": "2018", + "app_name": "mayabatch" } ] } diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_photoshop.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_photoshop.json index 136eb168889..a8e3574aa33 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_photoshop.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_photoshop.json @@ -29,12 +29,14 @@ "name": "template_host_variant", "template_data": [ { - "host_version": "2020", - "host_name": "photoshop" + "app_variant_label": "2020", + "app_variant": "2020", + "app_name": "photoshop" }, { - "host_version": "2021", - "host_name": "photoshop" + "app_variant_label": "2021", + "app_variant": "2021", + "app_name": "photoshop" } ] } diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_resolve.json 
b/pype/settings/entities/schemas/system_schema/host_settings/schema_resolve.json index 2d11e1def46..052a9354100 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_resolve.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_resolve.json @@ -29,8 +29,9 @@ "name": "template_host_variant", "template_data": [ { - "host_version": "16", - "host_name": "resolve" + "app_variant_label": "16", + "app_variant": "16", + "app_name": "resolve" } ] } diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_shell.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_shell.json index 4fdbd65c24e..f72450aa080 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_shell.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_shell.json @@ -25,16 +25,19 @@ "name": "template_host_variant", "template_data": [ { - "host_version": "Python 3.7", - "host_name": "python" + "app_variant": "python_3_7", + "app_variant_label": "Python 3.7", + "app_name": "python" }, { - "host_version": "Python 2.7", - "host_name": "python" + "app_variant": "python_2_7", + "app_variant_label": "Python 2.7", + "app_name": "python" }, { - "host_version": "Terminal", - "host_name": "terminal" + "app_variant": "terminal", + "app_variant_label": "Terminal", + "app_name": "terminal" } ] } diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_tvpaint.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_tvpaint.json index 1c88d12cabb..a569ec0503f 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_tvpaint.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_tvpaint.json @@ -29,12 +29,14 @@ "name": "template_host_variant", "template_data": [ { - "host_version": "Animation 11 (64bits)", - "host_name": "tvpaint" + "app_variant_label": "Animation 11 (64bits)", + "app_variant": 
"animation_11_64bit", + "app_name": "tvpaint" }, { - "host_version": "Animation 11 (32bits)", - "host_name": "tvpaint" + "app_variant_label": "Animation 11 (32bits)", + "app_variant": "animation_11_32bit", + "app_name": "tvpaint" } ] } diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_unreal.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_unreal.json index d7065ad3ff9..b23a21b0fd1 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_unreal.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_unreal.json @@ -29,8 +29,9 @@ "name": "template_host_variant", "template_data": [ { - "host_version": "4.24", - "host_name": "unreal" + "app_variant": "4_24", + "app_variant_label": "4.24", + "app_name": "unreal" } ] } diff --git a/pype/settings/entities/schemas/system_schema/host_settings/template_nuke.json b/pype/settings/entities/schemas/system_schema/host_settings/template_nuke.json index 01c3be726a5..d32f87949d1 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/template_nuke.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/template_nuke.json @@ -30,24 +30,24 @@ "name": "template_host_variant", "template_data": [ { - "host_version": "12.2", - "host_name": "{nuke_type}", - "multipath_executables": true + "app_variant": "12_2", + "app_variant_label": "12.2", + "app_name": "{nuke_type}" }, { - "host_version": "12.0", - "host_name": "{nuke_type}", - "multipath_executables": true + "app_variant": "12_0", + "app_variant_label": "12.0", + "app_name": "{nuke_type}" }, { - "host_version": "11.3", - "host_name": "{nuke_type}", - "multipath_executables": true + "app_variant": "11_3", + "app_variant_label": "11.3", + "app_name": "{nuke_type}" }, { - "host_version": "11.2", - "host_name": "{nuke_type}", - "multipath_executables": true + "app_variant": "11_2", + "app_variant_label": "11.2", + "app_name": "{nuke_type}" } ] } From 
6b2ababb62f71a59532aea29f756562775ac97d8 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 10:56:13 +0100 Subject: [PATCH 192/295] resaved keys in defaults --- .../system_settings/applications.json | 110 +++++++++--------- 1 file changed, 55 insertions(+), 55 deletions(-) diff --git a/pype/settings/defaults/system_settings/applications.json b/pype/settings/defaults/system_settings/applications.json index 4a13cf78f6f..436ef62342c 100644 --- a/pype/settings/defaults/system_settings/applications.json +++ b/pype/settings/defaults/system_settings/applications.json @@ -253,7 +253,7 @@ } }, "variants": { - "nuke_12.2": { + "nuke_12_2": { "enabled": true, "label": "", "variant_label": "12.2", @@ -274,11 +274,11 @@ }, "environment": { "__environment_keys__": { - "nuke_12.2": [] + "nuke_12_2": [] } } }, - "nuke_12.0": { + "nuke_12_0": { "enabled": true, "label": "", "variant_label": "12.0", @@ -299,11 +299,11 @@ }, "environment": { "__environment_keys__": { - "nuke_12.0": [] + "nuke_12_0": [] } } }, - "nuke_11.3": { + "nuke_11_3": { "enabled": true, "label": "", "variant_label": "11.3", @@ -324,11 +324,11 @@ }, "environment": { "__environment_keys__": { - "nuke_11.3": [] + "nuke_11_3": [] } } }, - "nuke_11.2": { + "nuke_11_2": { "enabled": true, "label": "", "variant_label": "11.2", @@ -347,7 +347,7 @@ }, "environment": { "__environment_keys__": { - "nuke_11.2": [] + "nuke_11_2": [] } } } @@ -377,7 +377,7 @@ } }, "variants": { - "nukex_12.2": { + "nukex_12_2": { "enabled": true, "label": "", "variant_label": "12.2", @@ -404,11 +404,11 @@ }, "environment": { "__environment_keys__": { - "nukex_12.2": [] + "nukex_12_2": [] } } }, - "nukex_12.0": { + "nukex_12_0": { "enabled": true, "label": "", "variant_label": "12.0", @@ -435,11 +435,11 @@ }, "environment": { "__environment_keys__": { - "nukex_12.0": [] + "nukex_12_0": [] } } }, - "nukex_11.3": { + "nukex_11_3": { "enabled": true, "label": "", "variant_label": "11.3", @@ -466,11 +466,11 @@ }, "environment": { 
"__environment_keys__": { - "nukex_11.3": [] + "nukex_11_3": [] } } }, - "nukex_11.2": { + "nukex_11_2": { "enabled": true, "label": "", "variant_label": "11.2", @@ -495,7 +495,7 @@ }, "environment": { "__environment_keys__": { - "nukex_11.2": [] + "nukex_11_2": [] } } } @@ -527,7 +527,7 @@ } }, "variants": { - "nukestudio_12.2": { + "nukestudio_12_2": { "enabled": true, "label": "", "variant_label": "12.2", @@ -554,11 +554,11 @@ }, "environment": { "__environment_keys__": { - "nukestudio_12.2": [] + "nukestudio_12_2": [] } } }, - "nukestudio_12.0": { + "nukestudio_12_0": { "enabled": true, "label": "", "variant_label": "12.0", @@ -585,11 +585,11 @@ }, "environment": { "__environment_keys__": { - "nukestudio_12.0": [] + "nukestudio_12_0": [] } } }, - "nukestudio_11.3": { + "nukestudio_11_3": { "enabled": true, "label": "", "variant_label": "11.3", @@ -616,11 +616,11 @@ }, "environment": { "__environment_keys__": { - "nukestudio_11.3": [] + "nukestudio_11_3": [] } } }, - "nukestudio_11.2": { + "nukestudio_11_2": { "enabled": true, "label": "", "variant_label": "11.2", @@ -643,7 +643,7 @@ }, "environment": { "__environment_keys__": { - "nukestudio_11.2": [] + "nukestudio_11_2": [] } } } @@ -675,7 +675,7 @@ } }, "variants": { - "hiero_12.2": { + "hiero_12_2": { "enabled": true, "label": "", "variant_label": "12.2", @@ -702,11 +702,11 @@ }, "environment": { "__environment_keys__": { - "hiero_12.2": [] + "hiero_12_2": [] } } }, - "hiero_12.0": { + "hiero_12_0": { "enabled": true, "label": "", "variant_label": "12.0", @@ -733,11 +733,11 @@ }, "environment": { "__environment_keys__": { - "hiero_12.0": [] + "hiero_12_0": [] } } }, - "hiero_11.3": { + "hiero_11_3": { "enabled": true, "label": "", "variant_label": "11.3", @@ -764,11 +764,11 @@ }, "environment": { "__environment_keys__": { - "hiero_11.3": [] + "hiero_11_3": [] } } }, - "hiero_11.2": { + "hiero_11_2": { "enabled": true, "label": "", "variant_label": "11.2", @@ -793,7 +793,7 @@ }, "environment": { 
"__environment_keys__": { - "hiero_11.2": [] + "hiero_11_2": [] } } } @@ -1057,14 +1057,14 @@ } }, "variants": { - "blender_2.90": { + "blender_2_83": { "enabled": true, "label": "", - "variant_label": "2.90", + "variant_label": "2.83", "icon": "", "executables": { "windows": [ - "C:\\Program Files\\Blender Foundation\\Blender 2.90\\blender.exe" + "C:\\Program Files\\Blender Foundation\\Blender 2.83\\blender.exe" ], "darwin": [], "linux": [] @@ -1082,18 +1082,18 @@ }, "environment": { "__environment_keys__": { - "blender_2.90": [] + "blender_2_83": [] } } }, - "blender_2.83": { + "blender_2_90": { "enabled": true, "label": "", - "variant_label": "2.83", + "variant_label": "2.90", "icon": "", "executables": { "windows": [ - "C:\\Program Files\\Blender Foundation\\Blender 2.83\\blender.exe" + "C:\\Program Files\\Blender Foundation\\Blender 2.90\\blender.exe" ], "darwin": [], "linux": [] @@ -1111,7 +1111,7 @@ }, "environment": { "__environment_keys__": { - "blender_2.83": [] + "blender_2_90": [] } } } @@ -1193,7 +1193,7 @@ } }, "variants": { - "tvpaint_Animation 11 (64bits)": { + "tvpaint_animation_11_64bit": { "enabled": true, "label": "", "variant_label": "11 (64bits)", @@ -1212,11 +1212,11 @@ }, "environment": { "__environment_keys__": { - "tvpaint_Animation 11 (64bits)": [] + "tvpaint_animation_11_64bit": [] } } }, - "tvpaint_Animation 11 (32bits)": { + "tvpaint_animation_11_32bit": { "enabled": true, "label": "", "variant_label": "11 (32bits)", @@ -1235,7 +1235,7 @@ }, "environment": { "__environment_keys__": { - "tvpaint_Animation 11 (32bits)": [] + "tvpaint_animation_11_32bit": [] } } } @@ -1445,7 +1445,7 @@ } }, "variants": { - "unreal_4.24": { + "unreal_4_24": { "enabled": true, "label": "", "variant_label": "4.24", @@ -1462,7 +1462,7 @@ }, "environment": { "__environment_keys__": { - "unreal_4.24": [] + "unreal_4_24": [] } } } @@ -1476,7 +1476,7 @@ } }, "variants": { - "python_Python 3.7": { + "python_python_3_7": { "enabled": true, "label": "Python", 
"variant_label": "3.7", @@ -1493,11 +1493,11 @@ }, "environment": { "__environment_keys__": { - "python_Python 3.7": [] + "python_python_3_7": [] } } }, - "python_Python 2.7": { + "python_python_2_7": { "enabled": true, "label": "Python", "variant_label": "2.7", @@ -1514,11 +1514,11 @@ }, "environment": { "__environment_keys__": { - "python_Python 2.7": [] + "python_python_2_7": [] } } }, - "terminal_Terminal": { + "terminal_terminal": { "enabled": true, "label": "Terminal", "variant_label": "", @@ -1535,7 +1535,7 @@ }, "environment": { "__environment_keys__": { - "terminal_Terminal": [] + "terminal_terminal": [] } } } @@ -1552,7 +1552,7 @@ } }, "variants": { - "djvview_1.1": { + "djvview_1_1": { "enabled": true, "label": "", "variant_label": "1.1", @@ -1569,10 +1569,10 @@ }, "environment": { "__environment_keys__": { - "djvview_1.1": [] + "djvview_1_1": [] } } } } } -} +} \ No newline at end of file From 80abe4b9aaf74f8cb85c20a4f31502944504d324 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 11:08:17 +0100 Subject: [PATCH 193/295] added active key t oremaining optional plugins --- .../schema_project_deadline.json | 20 +++++++++++++++++++ .../schemas/schema_maya_publish.json | 5 +++++ 2 files changed, 25 insertions(+) diff --git a/pype/settings/entities/schemas/projects_schema/schema_project_deadline.json b/pype/settings/entities/schemas/projects_schema/schema_project_deadline.json index 97b28059593..2070e4c8f5d 100644 --- a/pype/settings/entities/schemas/projects_schema/schema_project_deadline.json +++ b/pype/settings/entities/schemas/projects_schema/schema_project_deadline.json @@ -29,6 +29,11 @@ "key": "optional", "label": "Optional" }, + { + "type": "boolean", + "key": "active", + "label": "Active" + }, { "type": "enum", "key": "tile_assembler_plugin", @@ -83,6 +88,11 @@ "key": "optional", "label": "Optional" }, + { + "type": "boolean", + "key": "active", + "label": "Active" + }, { "type": "boolean", "key": "use_published", @@ -137,6 +147,11 @@ 
"key": "optional", "label": "Optional" }, + { + "type": "boolean", + "key": "active", + "label": "Active" + }, { "type": "boolean", "key": "use_published", @@ -191,6 +206,11 @@ "key": "optional", "label": "Optional" }, + { + "type": "boolean", + "key": "active", + "label": "Active" + }, { "type": "boolean", "key": "use_published", diff --git a/pype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json b/pype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json index 623658b7a2a..9d2e39edde7 100644 --- a/pype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json +++ b/pype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json @@ -271,6 +271,11 @@ "key": "optional", "label": "Optional" }, + { + "type": "boolean", + "key": "active", + "label": "Active" + }, { "type": "raw-json", "key": "bake_attributes", From 28f85892f8822f03f7550d02f70872b09cc698cb Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 11:08:25 +0100 Subject: [PATCH 194/295] saved default values --- pype/settings/defaults/project_settings/deadline.json | 4 ++++ pype/settings/defaults/project_settings/maya.json | 1 + 2 files changed, 5 insertions(+) diff --git a/pype/settings/defaults/project_settings/deadline.json b/pype/settings/defaults/project_settings/deadline.json index 892fb5d29fe..6d36f38423f 100644 --- a/pype/settings/defaults/project_settings/deadline.json +++ b/pype/settings/defaults/project_settings/deadline.json @@ -3,6 +3,7 @@ "MayaSubmitDeadline": { "enabled": true, "optional": false, + "active": true, "tile_assembler_plugin": "oiio", "use_published": true, "asset_dependencies": true, @@ -12,6 +13,7 @@ "NukeSubmitDeadline": { "enabled": true, "optional": false, + "active": true, "use_published": true, "priority": 50, "chunk_size": 10, @@ -23,6 +25,7 @@ "HarmonySubmitDeadline": { "enabled": true, "optional": false, + "active": true, "use_published": true, "priority": 50, "chunk_size": 10000, @@ 
-34,6 +37,7 @@ "AfterEffectsSubmitDeadline": { "enabled": true, "optional": false, + "active": true, "use_published": true, "priority": 50, "chunk_size": 10000, diff --git a/pype/settings/defaults/project_settings/maya.json b/pype/settings/defaults/project_settings/maya.json index de8db5aa814..6945bb0581b 100644 --- a/pype/settings/defaults/project_settings/maya.json +++ b/pype/settings/defaults/project_settings/maya.json @@ -379,6 +379,7 @@ "ExtractCameraAlembic": { "enabled": true, "optional": true, + "active": true, "bake_attributes": [] }, "MayaSubmitDeadline": { From d55d133b7987aee1963fafb47df65e8d38b16e46 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 13:06:42 +0100 Subject: [PATCH 195/295] replaced dots in default tools --- pype/settings/defaults/system_settings/tools.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pype/settings/defaults/system_settings/tools.json b/pype/settings/defaults/system_settings/tools.json index af5772705c1..fce847fb555 100644 --- a/pype/settings/defaults/system_settings/tools.json +++ b/pype/settings/defaults/system_settings/tools.json @@ -36,18 +36,18 @@ } }, "variants": { - "mtoa_3.2": { + "mtoa_3_2": { "MTOA_VERSION": "3.2", "__environment_keys__": { - "mtoa_3.2": [ + "mtoa_3_2": [ "MTOA_VERSION" ] } }, - "mtoa_3.1": { + "mtoa_3_1": { "MTOA_VERSION": "3.1", "__environment_keys__": { - "mtoa_3.1": [ + "mtoa_3_1": [ "MTOA_VERSION" ] } From 1ba1cf9acace5200f6d95fd4be37380cb8177121 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 13:27:30 +0100 Subject: [PATCH 196/295] nuke shortcuts have valid keys --- pype/settings/defaults/project_settings/nuke.json | 10 +++++----- .../schemas/projects_schema/schema_project_nuke.json | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/pype/settings/defaults/project_settings/nuke.json b/pype/settings/defaults/project_settings/nuke.json index f808f9caa59..d727a6ba1e4 100644 --- 
a/pype/settings/defaults/project_settings/nuke.json +++ b/pype/settings/defaults/project_settings/nuke.json @@ -1,11 +1,11 @@ { "menu": { "Pype": { - "Create...": "ctrl+shift+alt+c", - "Publish...": "ctrl+alt+p", - "Load...": "ctrl+alt+l", - "Manage...": "ctrl+alt+m", - "Build Workfile": "ctrl+alt+b" + "create": "ctrl+shift+alt+c", + "publish": "ctrl+alt+p", + "load": "ctrl+alt+l", + "manage": "ctrl+alt+m", + "build_workfile": "ctrl+alt+b" } }, "create": { diff --git a/pype/settings/entities/schemas/projects_schema/schema_project_nuke.json b/pype/settings/entities/schemas/projects_schema/schema_project_nuke.json index 90e068ba339..3fe01cad099 100644 --- a/pype/settings/entities/schemas/projects_schema/schema_project_nuke.json +++ b/pype/settings/entities/schemas/projects_schema/schema_project_nuke.json @@ -20,27 +20,27 @@ "children": [ { "type": "text", - "key": "Create...", + "key": "create", "label": "Create..." }, { "type": "text", - "key": "Publish...", + "key": "publish", "label": "Publish..." }, { "type": "text", - "key": "Load...", + "key": "load", "label": "Load..." }, { "type": "text", - "key": "Manage...", + "key": "manage", "label": "Manage..." 
}, { "type": "text", - "key": "Build Workfile", + "key": "build_workfile", "label": "Build Workfile" } ] From 220454aa542a9006e60f48be4e8f9ddf2bea7d53 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 13:27:54 +0100 Subject: [PATCH 197/295] added key -> label mapping to nuke shortcut implementation --- pype/hosts/nuke/api/menu.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/pype/hosts/nuke/api/menu.py b/pype/hosts/nuke/api/menu.py index 9ff1dc251ad..3f97cc228a5 100644 --- a/pype/hosts/nuke/api/menu.py +++ b/pype/hosts/nuke/api/menu.py @@ -85,6 +85,13 @@ def add_shortcuts_from_presets(): nuke_presets = get_current_project_settings()["nuke"] if nuke_presets.get("menu"): + menu_label_mapping = { + "manage": "Manage...", + "create": "Create...", + "load": "Load...", + "build_workfile": "Build Workfile", + "publish": "Publish..." + } for menu_name, menuitems in nuke_presets.get("menu").items(): menu = menubar.findItem(menu_name) for mitem_name, shortcut in menuitems.items(): @@ -92,7 +99,8 @@ def add_shortcuts_from_presets(): shortcut, mitem_name )) try: - menuitem = menu.findItem(mitem_name) + item_label = menu_label_mapping[mitem_name] + menuitem = menu.findItem(item_label) menuitem.setShortcut(shortcut) except AttributeError as e: log.error(e) From 96a80438e08115f6a8cc7ca5e57c5b7f97912a11 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 13:28:28 +0100 Subject: [PATCH 198/295] defined allowed key symbols and regex in constants --- pype/settings/constants.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/pype/settings/constants.py b/pype/settings/constants.py index ce19ad3f931..f6077e826ef 100644 --- a/pype/settings/constants.py +++ b/pype/settings/constants.py @@ -1,3 +1,6 @@ +import re + + # Metadata keys for work with studio and project overrides M_OVERRIDEN_KEY = "__overriden_keys__" # Metadata key for storing information about environments @@ -19,6 +22,10 @@ 
DEFAULT_PROJECT_KEY = "__default_project__" +KEY_ALLOWED_SYMBOLS = "a-zA-Z0-9-_ " +KEY_REGEX = re.compile(r"^[{}]+$".format(KEY_ALLOWED_SYMBOLS)) + + __all__ = ( "M_OVERRIDEN_KEY", "M_ENVIRONMENT_KEY", @@ -29,5 +36,10 @@ "SYSTEM_SETTINGS_KEY", "PROJECT_SETTINGS_KEY", "PROJECT_ANATOMY_KEY", - "LOCAL_SETTING_KEY" + "LOCAL_SETTING_KEY", + + "DEFAULT_PROJECT_KEY", + + "KEY_ALLOWED_SYMBOLS", + "KEY_REGEX" ) From 7f39cf5dd492e0c6c5ea583343ee1c4a7cf65f09 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 13:28:48 +0100 Subject: [PATCH 199/295] implemented exception InvalidKeySymbols --- pype/settings/entities/exceptions.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/pype/settings/entities/exceptions.py b/pype/settings/entities/exceptions.py index 2c3b262ff17..f86d08ab5fd 100644 --- a/pype/settings/entities/exceptions.py +++ b/pype/settings/entities/exceptions.py @@ -1,3 +1,6 @@ +from pype.settings.constants import KEY_ALLOWED_SYMBOLS + + class DefaultsNotDefined(Exception): def __init__(self, obj): msg = "Default values for object are not set. {}".format(obj.path) @@ -34,6 +37,14 @@ def __init__(self, entity_path, key): super(RequiredKeyModified, self).__init__(msg.format(entity_path, key)) +class InvalidKeySymbols(KeyError): + def __init__(self, entity_path, key): + msg = "{} - Invalid key \"{}\". 
Allowed symbols are {}" + super(InvalidKeySymbols, self).__init__( + msg.format(entity_path, key, KEY_ALLOWED_SYMBOLS) + ) + + class SchemaError(Exception): pass From 01d066025380e7eeb112fa31fa4e799af0cfe4a5 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 13:29:12 +0100 Subject: [PATCH 200/295] immutable dict is validating keys with allowed symbols --- pype/settings/entities/dict_immutable_keys_entity.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pype/settings/entities/dict_immutable_keys_entity.py b/pype/settings/entities/dict_immutable_keys_entity.py index 92a36b7dca9..270e635736e 100644 --- a/pype/settings/entities/dict_immutable_keys_entity.py +++ b/pype/settings/entities/dict_immutable_keys_entity.py @@ -7,7 +7,8 @@ ) from pype.settings.constants import ( METADATA_KEYS, - M_OVERRIDEN_KEY + M_OVERRIDEN_KEY, + KEY_REGEX ) from . import ( BaseItemEntity, @@ -17,7 +18,8 @@ ) from .exceptions import ( SchemaDuplicatedKeys, - EntitySchemaError + EntitySchemaError, + InvalidKeySymbols ) @@ -88,6 +90,10 @@ def schema_validations(self): else: raise SchemaDuplicatedKeys(self, child_entity.key) + for key in self.keys(): + if not KEY_REGEX.match(key): + raise InvalidKeySymbols(self.path, key) + if self.checkbox_key: checkbox_child = self.non_gui_children.get(self.checkbox_key) if not checkbox_child: From 48a300cfc312bc228172b0ab7f5a55cd54f202ca Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 13:29:56 +0100 Subject: [PATCH 201/295] mutable dictionary validate keys on change and tries to fix them on load --- pype/settings/entities/__init__.py | 2 ++ .../entities/dict_mutable_keys_entity.py | 25 ++++++++++++++++++- 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/pype/settings/entities/__init__.py b/pype/settings/entities/__init__.py index 1cb4be62e70..2ff911e7d47 100644 --- a/pype/settings/entities/__init__.py +++ b/pype/settings/entities/__init__.py @@ -58,6 +58,7 @@ DefaultsNotDefined, 
StudioDefaultsNotDefined, InvalidValueType, + InvalidKeySymbols, SchemaMissingFileInfo, SchemeGroupHierarchyBug, SchemaDuplicatedKeys, @@ -114,6 +115,7 @@ "DefaultsNotDefined", "StudioDefaultsNotDefined", "InvalidValueType", + "InvalidKeySymbols", "SchemaMissingFileInfo", "SchemeGroupHierarchyBug", "SchemaDuplicatedKeys", diff --git a/pype/settings/entities/dict_mutable_keys_entity.py b/pype/settings/entities/dict_mutable_keys_entity.py index 7005d346c1b..12a18ad6125 100644 --- a/pype/settings/entities/dict_mutable_keys_entity.py +++ b/pype/settings/entities/dict_mutable_keys_entity.py @@ -1,3 +1,4 @@ +import re import copy from .lib import ( @@ -7,6 +8,7 @@ from . import EndpointEntity from .exceptions import ( DefaultsNotDefined, + InvalidKeySymbols, StudioDefaultsNotDefined, RequiredKeyModified, EntitySchemaError @@ -14,7 +16,9 @@ from pype.settings.constants import ( METADATA_KEYS, M_DYNAMIC_KEY_LABEL, - M_ENVIRONMENT_KEY + M_ENVIRONMENT_KEY, + KEY_REGEX, + KEY_ALLOWED_SYMBOLS ) @@ -92,6 +96,9 @@ def set_key_value(self, key, value): # TODO Check for value type if is Settings entity? 
child_obj = self.children_by_key.get(key) if not child_obj: + if not KEY_REGEX.match(key): + raise InvalidKeySymbols(self.path, key) + child_obj = self.add_key(key) child_obj.set(value) @@ -102,6 +109,10 @@ def change_key(self, old_key, new_key): if new_key == old_key: return + + if not KEY_REGEX.match(new_key): + raise InvalidKeySymbols(self.path, new_key) + self.children_by_key[new_key] = self.children_by_key.pop(old_key) self._on_key_label_change() @@ -116,6 +127,9 @@ def _add_key(self, key): if key in self.children_by_key: self.pop(key) + if not KEY_REGEX.match(key): + raise InvalidKeySymbols(self.path, key) + if self.value_is_env_group: item_schema = copy.deepcopy(self.item_schema) item_schema["env_group_key"] = key @@ -325,6 +339,15 @@ def set_override_state(self, state): children_label_by_id = {} metadata_labels = metadata.get(M_DYNAMIC_KEY_LABEL) or {} for _key, _value in new_value.items(): + if not KEY_REGEX.match(_key): + # Replace invalid characters with underscore + # - this is safety to not break already existing settings + _key = re.sub( + r"[^{}]+".format(KEY_ALLOWED_SYMBOLS), + "_", + _key + ) + child_entity = self._add_key(_key) child_entity.update_default_value(_value) if using_project_overrides: From f5e9096fdcfd142f447de6ea4563efe4d2054fa2 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 13:38:42 +0100 Subject: [PATCH 202/295] added key validation to roots entity --- pype/settings/entities/root_entities.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/pype/settings/entities/root_entities.py b/pype/settings/entities/root_entities.py index 82885e84794..2d336896974 100644 --- a/pype/settings/entities/root_entities.py +++ b/pype/settings/entities/root_entities.py @@ -13,11 +13,15 @@ get_studio_settings_schema, get_project_settings_schema ) -from .exceptions import EntitySchemaError +from .exceptions import ( + EntitySchemaError, + InvalidKeySymbols +) from pype.settings.constants import ( 
SYSTEM_SETTINGS_KEY, PROJECT_SETTINGS_KEY, - PROJECT_ANATOMY_KEY + PROJECT_ANATOMY_KEY, + KEY_REGEX ) from pype.settings.lib import ( @@ -153,6 +157,10 @@ def schema_validations(self): raise EntitySchemaError(self, reason) child_entity.schema_validations() + for key in self.non_gui_children.keys(): + if not KEY_REGEX.match(key): + raise InvalidKeySymbols(self.path, key) + def get_entity_from_path(self, path): """Return system settings entity.""" raise NotImplementedError(( From 2567fee07284019d3445295ad2ae5e34b358343a Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 13:52:15 +0100 Subject: [PATCH 203/295] added key validations to modifiable dict widget --- .../settings/widgets/dict_mutable_widget.py | 21 +++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/pype/tools/settings/settings/widgets/dict_mutable_widget.py b/pype/tools/settings/settings/widgets/dict_mutable_widget.py index 0cb051082ef..c55af7a7744 100644 --- a/pype/tools/settings/settings/widgets/dict_mutable_widget.py +++ b/pype/tools/settings/settings/widgets/dict_mutable_widget.py @@ -12,6 +12,7 @@ BTN_FIXED_SIZE, CHILD_OFFSET ) +from pype.settings.constants import KEY_REGEX def create_add_btn(parent): @@ -37,6 +38,7 @@ def __init__(self, entity_widget, parent): self.collapsible_key = entity_widget.entity.collapsible_key self.is_duplicated = False + self.key_is_valid = False if self.collapsible_key: self.create_collapsible_ui() @@ -86,6 +88,9 @@ def _on_enter_press(self): if self.is_duplicated: return + if not self.key_is_valid: + return + key = self.key_input.text() if key: label = self.key_label_input.text() @@ -95,9 +100,10 @@ def _on_enter_press(self): def _on_key_change(self): key = self.key_input.text() + self.key_is_valid = KEY_REGEX.match(key) self.is_duplicated = self.entity_widget.is_key_duplicated(key) key_input_state = "" - if self.is_duplicated: + if self.is_duplicated or not self.key_is_valid: key_input_state = "invalid" elif key != "": 
key_input_state = "modified" @@ -157,6 +163,7 @@ def __init__(self, collapsible_key, entity, entity_widget): self.ignore_input_changes = entity_widget.ignore_input_changes self.is_key_duplicated = False + self.key_is_valid = False self.is_required = False self.origin_key = None @@ -382,6 +389,11 @@ def _on_key_label_change(self): def _on_key_change(self): key = self.key_value() + self.key_is_valid = KEY_REGEX.match(key) + if not self.key_is_valid: + self.update_style() + return + is_key_duplicated = self.entity_widget.validate_key_duplication( self.temp_key, key, self ) @@ -458,6 +470,7 @@ def is_invalid(self): self.is_key_duplicated or self.key_value() == "" or self.child_invalid + or not self.key_is_valid ) @property @@ -473,7 +486,11 @@ def get_invalid(self): def update_style(self): key_input_state = "" - if self.is_key_duplicated or self.key_value() == "": + if ( + self.is_key_duplicated + or self.key_value() == "" + or not self.key_is_valid + ): key_input_state = "invalid" elif self.is_key_modified(): key_input_state = "modified" From 994238a63812031655deb51020dc319016b3398f Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 14:11:03 +0100 Subject: [PATCH 204/295] better key validation --- .../settings/settings/widgets/dict_mutable_widget.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/pype/tools/settings/settings/widgets/dict_mutable_widget.py b/pype/tools/settings/settings/widgets/dict_mutable_widget.py index c55af7a7744..e704dd40ee2 100644 --- a/pype/tools/settings/settings/widgets/dict_mutable_widget.py +++ b/pype/tools/settings/settings/widgets/dict_mutable_widget.py @@ -341,7 +341,7 @@ def set_is_key_duplicated(self, is_key_duplicated): else: self._on_focus_lose() - if not self.is_key_duplicated: + if not self.is_key_duplicated and self.key_is_valid: self.entity_widget.change_key(self.key_value(), self) def set_key_label(self, key, label): @@ -390,15 +390,11 @@ def _on_key_label_change(self): def 
_on_key_change(self): key = self.key_value() self.key_is_valid = KEY_REGEX.match(key) - if not self.key_is_valid: - self.update_style() - return - is_key_duplicated = self.entity_widget.validate_key_duplication( self.temp_key, key, self ) self.temp_key = key - if is_key_duplicated: + if is_key_duplicated or not self.key_is_valid: return if key: From 9a0070e5a123def51d3b0a079587b737a5cd1a71 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 14:20:29 +0100 Subject: [PATCH 205/295] fixed modifiable dict collapsible key widget --- pype/tools/settings/settings/widgets/dict_mutable_widget.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pype/tools/settings/settings/widgets/dict_mutable_widget.py b/pype/tools/settings/settings/widgets/dict_mutable_widget.py index e704dd40ee2..e44ffdf35a4 100644 --- a/pype/tools/settings/settings/widgets/dict_mutable_widget.py +++ b/pype/tools/settings/settings/widgets/dict_mutable_widget.py @@ -765,15 +765,17 @@ def validate_key_duplication(self, old_key, new_key, widget): old_key_items.append(input_field) if duplicated_items: - widget.set_is_key_duplicated(True) for input_field in duplicated_items: input_field.set_is_key_duplicated(True) + widget.set_is_key_duplicated(True) else: widget.set_is_key_duplicated(False) if len(old_key_items) == 1: for input_field in old_key_items: input_field.set_is_key_duplicated(False) + input_field.set_key(old_key) + input_field.update_key_label() self.trigger_hierarchical_style_update() return bool(duplicated_items) From 73df4150a7df87c63fb6b26a0e094f3779988cdb Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 14:33:52 +0100 Subject: [PATCH 206/295] extract review use None as value of width and height --- pype/plugins/publish/extract_review.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pype/plugins/publish/extract_review.py b/pype/plugins/publish/extract_review.py index 5414ae52892..f6530219a69 100644 --- 
a/pype/plugins/publish/extract_review.py +++ b/pype/plugins/publish/extract_review.py @@ -757,8 +757,9 @@ def rescaling_filters(self, temp_data, output_def, new_repre): self.log.debug("input_height: `{}`".format(input_height)) # NOTE Setting only one of `width` or `heigth` is not allowed - output_width = output_def.get("width") - output_height = output_def.get("height") + # - settings value can't have None but has value of 0 + output_width = output_def.get("width") or None + output_height = output_def.get("height") or None # Use instance resolution if output definition has not set it. if output_width is None or output_height is None: output_width = temp_data["resolution_width"] From af300ff0af11b794d0a0a17a03f7bd138b653be8 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 14:35:48 +0100 Subject: [PATCH 207/295] added width and height items to extract review output definitions --- .../schemas/schema_global_publish.json | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/pype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json b/pype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json index f97bfb11b3f..b4d18762974 100644 --- a/pype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json +++ b/pype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json @@ -177,6 +177,29 @@ "object_type": "text" } ] + }, + { + "type": "separator" + }, + { + "type": "label", + "label": "Width and Height must be both set to higher value than 0 else source resolution is used." 
+ }, + { + "key": "width", + "label": "Output width", + "type": "number", + "default": 0, + "minimum": 0, + "maximum": 100000 + }, + { + "key": "height", + "label": "Output height", + "type": "number", + "default": 0, + "minimum": 0, + "maximum": 100000 } ] } From 6be4414a13c60282032fe9a672d6fd790f13c8ee Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 14:36:19 +0100 Subject: [PATCH 208/295] number entity can expect default value --- pype/settings/entities/input_entities.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pype/settings/entities/input_entities.py b/pype/settings/entities/input_entities.py index 921171cfffb..f668cfde2d7 100644 --- a/pype/settings/entities/input_entities.py +++ b/pype/settings/entities/input_entities.py @@ -329,12 +329,15 @@ def _item_initalization(self): self.maximum = self.schema_data.get("maximum", 99999) self.decimal = self.schema_data.get("decimal", 0) + value_on_not_set = self.schema_data.get("default", 0) if self.decimal: valid_value_types = (float, ) + value_on_not_set = float(value_on_not_set) else: valid_value_types = (int, ) + value_on_not_set = int(value_on_not_set) self.valid_value_types = valid_value_types - self.value_on_not_set = 0 + self.value_on_not_set = value_on_not_set def _convert_to_valid_type(self, value): if isinstance(value, str): From ea6f21ee808f756f42e4c0870071b00a8029c346 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 14:36:49 +0100 Subject: [PATCH 209/295] resaved defaults of extract review plugin --- pype/settings/defaults/project_settings/global.json | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pype/settings/defaults/project_settings/global.json b/pype/settings/defaults/project_settings/global.json index 63c092ec274..794700bc2b3 100644 --- a/pype/settings/defaults/project_settings/global.json +++ b/pype/settings/defaults/project_settings/global.json @@ -41,7 +41,9 @@ "review", "ftrack" ] - } + }, + "width": 0, + "height": 0 } 
} } From 712670ec60c661c347049ef52174001787e479ce Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 15:37:57 +0100 Subject: [PATCH 210/295] moved project plugin paths to project settings --- .../schemas/projects_schema/schema_project_global.json | 8 ++++++++ .../entities/schemas/system_schema/schema_general.json | 7 ------- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/pype/settings/entities/schemas/projects_schema/schema_project_global.json b/pype/settings/entities/schemas/projects_schema/schema_project_global.json index 1733e04f675..ebc8d08fb83 100644 --- a/pype/settings/entities/schemas/projects_schema/schema_project_global.json +++ b/pype/settings/entities/schemas/projects_schema/schema_project_global.json @@ -22,6 +22,14 @@ { "type": "schema", "name": "schema_project_syncserver" + }, + { + "key": "project_plugins", + "type": "path", + "label": "Additional Project Plugin Paths", + "multiplatform": true, + "multipath": true, + "use_label_wrap": true } ] } diff --git a/pype/settings/entities/schemas/system_schema/schema_general.json b/pype/settings/entities/schemas/system_schema/schema_general.json index b029081c7c6..cf88043cd03 100644 --- a/pype/settings/entities/schemas/system_schema/schema_general.json +++ b/pype/settings/entities/schemas/system_schema/schema_general.json @@ -18,13 +18,6 @@ { "type": "splitter" }, - { - "key": "project_plugins", - "type": "path", - "label": "Additional Project Plugins Path", - "multiplatform": true, - "multipath": false - }, { "key": "studio_soft", "type": "path", From bba39ff457bd43e95663574c287d7ae6211ccc66 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 15:38:17 +0100 Subject: [PATCH 211/295] save defaults of project plugins --- pype/settings/defaults/project_settings/global.json | 5 +++++ pype/settings/defaults/system_settings/general.json | 5 ----- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pype/settings/defaults/project_settings/global.json 
b/pype/settings/defaults/project_settings/global.json index 63c092ec274..e252a103acf 100644 --- a/pype/settings/defaults/project_settings/global.json +++ b/pype/settings/defaults/project_settings/global.json @@ -1,4 +1,9 @@ { + "project_plugins": { + "windows": [], + "darwin": [], + "linux": [] + }, "publish": { "IntegrateHeroVersion": { "enabled": true diff --git a/pype/settings/defaults/system_settings/general.json b/pype/settings/defaults/system_settings/general.json index bf2bb5def08..e03e00aca88 100644 --- a/pype/settings/defaults/system_settings/general.json +++ b/pype/settings/defaults/system_settings/general.json @@ -1,11 +1,6 @@ { "studio_name": "Studio name", "studio_code": "stu", - "project_plugins": { - "windows": "convert from \"PYPE_PROJECT_PLUGINS\"", - "darwin": "", - "linux": "" - }, "studio_soft": { "windows": "convert from \"STUDIO_SOFT\"", "darwin": "", From 2b4890e6594025bfd5d2ccd3bb0a3bf0f19922df Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 17:05:52 +0100 Subject: [PATCH 212/295] removed usage of PYPE_PROJECT_PLUGINS --- pype/__init__.py | 13 ------------- pype/hosts/celaction/api/cli.py | 8 -------- pype/tools/standalonepublish/publish.py | 8 -------- 3 files changed, 29 deletions(-) diff --git a/pype/__init__.py b/pype/__init__.py index fd0ba321ed4..0add849457f 100644 --- a/pype/__init__.py +++ b/pype/__init__.py @@ -99,20 +99,7 @@ def install(): pyblish.register_discovery_filter(filter_pyblish_plugins) avalon.register_plugin_path(avalon.Loader, LOAD_PATH) - # Register project specific plugins project_name = os.environ.get("AVALON_PROJECT") - if PROJECT_PLUGINS_PATH and project_name: - for path in PROJECT_PLUGINS_PATH.split(os.pathsep): - if not path: - continue - plugin_path = os.path.join(path, project_name, "plugins") - if os.path.exists(plugin_path): - pyblish.register_plugin_path(plugin_path) - avalon.register_plugin_path(avalon.Loader, plugin_path) - avalon.register_plugin_path(avalon.Creator, plugin_path) - 
avalon.register_plugin_path( - avalon.InventoryAction, plugin_path - ) # Register studio specific plugins if STUDIO_PLUGINS_PATH and project_name: diff --git a/pype/hosts/celaction/api/cli.py b/pype/hosts/celaction/api/cli.py index 9f2d1a1fdb9..476d2f69a98 100644 --- a/pype/hosts/celaction/api/cli.py +++ b/pype/hosts/celaction/api/cli.py @@ -102,14 +102,6 @@ def main(): pyblish.api.register_host(publish_host) - # Register project specific plugins - project_name = os.environ["AVALON_PROJECT"] - project_plugins_paths = os.getenv("PYPE_PROJECT_PLUGINS", "") - for path in project_plugins_paths.split(os.pathsep): - plugin_path = os.path.join(path, project_name, "plugins") - if os.path.exists(plugin_path): - pyblish.api.register_plugin_path(plugin_path) - return publish.show() diff --git a/pype/tools/standalonepublish/publish.py b/pype/tools/standalonepublish/publish.py index a4bb81ad3cb..cfa9f8b8e88 100644 --- a/pype/tools/standalonepublish/publish.py +++ b/pype/tools/standalonepublish/publish.py @@ -19,14 +19,6 @@ def main(env): continue pyblish.api.register_plugin_path(path) - # Register project specific plugins - project_name = os.environ["AVALON_PROJECT"] - project_plugins_paths = env.get("PYPE_PROJECT_PLUGINS") or "" - for path in project_plugins_paths.split(os.pathsep): - plugin_path = os.path.join(path, project_name, "plugins") - if os.path.exists(plugin_path): - pyblish.api.register_plugin_path(plugin_path) - return publish.show() From 0b3953d39ec2c563c66787f517247cc0825e388f Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 17:06:56 +0100 Subject: [PATCH 213/295] fix PUBLISH_PATH variable in celaction cli.py --- pype/hosts/celaction/api/cli.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/pype/hosts/celaction/api/cli.py b/pype/hosts/celaction/api/cli.py index 476d2f69a98..f77bdea451c 100644 --- a/pype/hosts/celaction/api/cli.py +++ b/pype/hosts/celaction/api/cli.py @@ -91,14 +91,9 @@ def main(): # Registers pype's 
Global pyblish plugins pype.install() - for path in PUBLISH_PATHS: - path = os.path.normpath(path) - - if not os.path.exists(path): - continue - - log.info(f"Registering path: {path}") - pyblish.api.register_plugin_path(path) + if os.path.exists(PUBLISH_PATH): + log.info(f"Registering path: {PUBLISH_PATH}") + pyblish.api.register_plugin_path(PUBLISH_PATH) pyblish.api.register_host(publish_host) From 960304a5dec66633f2bffa237997d0dbe77020c9 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 17:09:28 +0100 Subject: [PATCH 214/295] removed usage of STUDIO_PLUGINS_PATH --- pype/__init__.py | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/pype/__init__.py b/pype/__init__.py index 0add849457f..216ec617840 100644 --- a/pype/__init__.py +++ b/pype/__init__.py @@ -13,8 +13,6 @@ log = logging.getLogger(__name__) -PROJECT_PLUGINS_PATH = os.environ.get("PYPE_PROJECT_PLUGINS") -STUDIO_PLUGINS_PATH = os.environ.get("PYPE_STUDIO_PLUGINS") PACKAGE_DIR = os.path.dirname(os.path.abspath(__file__)) PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins") @@ -101,17 +99,6 @@ def install(): project_name = os.environ.get("AVALON_PROJECT") - # Register studio specific plugins - if STUDIO_PLUGINS_PATH and project_name: - for path in STUDIO_PLUGINS_PATH.split(os.pathsep): - if not path: - continue - if os.path.exists(path): - pyblish.register_plugin_path(path) - avalon.register_plugin_path(avalon.Loader, path) - avalon.register_plugin_path(avalon.Creator, path) - avalon.register_plugin_path(avalon.InventoryAction, path) - if project_name: anatomy = Anatomy(project_name) anatomy.set_root_environments() From 1d279b301d0050fbd3d39ed0b0f4e9bec402853f Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 17:09:43 +0100 Subject: [PATCH 215/295] register plugins from project settings on pype install --- pype/__init__.py | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/pype/__init__.py b/pype/__init__.py index 
216ec617840..edd48a018d6 100644 --- a/pype/__init__.py +++ b/pype/__init__.py @@ -1,12 +1,16 @@ # -*- coding: utf-8 -*- """Pype module.""" import os +import platform import functools import logging from .settings import get_project_settings -from .lib import Anatomy, filter_pyblish_plugins, \ +from .lib import ( + Anatomy, + filter_pyblish_plugins, change_timer_to_current_context +) pyblish = avalon = _original_discover = None @@ -99,10 +103,29 @@ def install(): project_name = os.environ.get("AVALON_PROJECT") + # Register studio specific plugins if project_name: anatomy = Anatomy(project_name) anatomy.set_root_environments() avalon.register_root(anatomy.roots) + + project_settings = get_project_settings(project_name) + platform_name = platform.system().lower() + project_plugins = ( + project_settings + .get("global", {}) + .get("project_plugins", {}) + .get(platform_name) + ) or [] + for path in project_plugins: + if not path or not os.path.exists(path): + continue + + pyblish.register_plugin_path(path) + avalon.register_plugin_path(avalon.Loader, path) + avalon.register_plugin_path(avalon.Creator, path) + avalon.register_plugin_path(avalon.InventoryAction, path) + # apply monkey patched discover to original one log.info("Patching discovery") avalon.discover = patched_discover From 1089c5f3c72655a51ff409f225c2eabcfad03340 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 17:28:15 +0100 Subject: [PATCH 216/295] it is possible to define project specific environments --- pype/settings/defaults/project_settings/global.json | 1 + .../schemas/projects_schema/schema_project_global.json | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/pype/settings/defaults/project_settings/global.json b/pype/settings/defaults/project_settings/global.json index e252a103acf..fc8e7ddaf8a 100644 --- a/pype/settings/defaults/project_settings/global.json +++ b/pype/settings/defaults/project_settings/global.json @@ -4,6 +4,7 @@ "darwin": [], "linux": [] }, + 
"project_environments": {}, "publish": { "IntegrateHeroVersion": { "enabled": true diff --git a/pype/settings/entities/schemas/projects_schema/schema_project_global.json b/pype/settings/entities/schemas/projects_schema/schema_project_global.json index ebc8d08fb83..6e5cf0671c6 100644 --- a/pype/settings/entities/schemas/projects_schema/schema_project_global.json +++ b/pype/settings/entities/schemas/projects_schema/schema_project_global.json @@ -30,6 +30,12 @@ "multiplatform": true, "multipath": true, "use_label_wrap": true + }, + { + "key": "project_environments", + "type": "raw-json", + "label": "Additional Project Environments (set on application launch)", + "use_label_wrap": true } ] } From 71c9fb431a072d1dd5327589a988b325ce3189de Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 17:28:47 +0100 Subject: [PATCH 217/295] implemented function to apply project specific environments --- pype/lib/applications.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/pype/lib/applications.py b/pype/lib/applications.py index abaecf1e9c3..039b122bae0 100644 --- a/pype/lib/applications.py +++ b/pype/lib/applications.py @@ -940,6 +940,19 @@ def prepare_host_environments(data): data["env"].update(final_env) +def apply_project_environments_value(project_name, env, project_settings=None): + import acre + + if project_settings is None: + project_settings = get_project_settings(project_name) + + env_value = project_settings["global"]["project_environments"] + if not env_value: + return env + parsed = acre.parse(env_value) + return _merge_env(parsed, env) + + def prepare_context_environments(data): """Modify launch environemnts with context data for launched host. 
@@ -964,6 +977,12 @@ def prepare_context_environments(data): ) return + # Load project specific environments + project_name = project_doc["name"] + data["env"] = apply_project_environments_value( + project_name, data["env"] + ) + app = data["app"] workdir_data = get_workdir_data( project_doc, asset_doc, task_name, app.host_name From 5793f9827398e5abaa0c8b3847256cee8b849cd3 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 17:32:03 +0100 Subject: [PATCH 218/295] added some docstring --- pype/lib/applications.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/pype/lib/applications.py b/pype/lib/applications.py index 039b122bae0..a03a845770f 100644 --- a/pype/lib/applications.py +++ b/pype/lib/applications.py @@ -941,6 +941,20 @@ def prepare_host_environments(data): def apply_project_environments_value(project_name, env, project_settings=None): + """Apply project specific environments on passed environments. + + Args: + project_name (str): Name of project for which environemnts should be + received. + env (dict): Environment values on which project specific environments + will be applied. + project_settings (dict): Project settings for passed project name. + Optional if project settings are already prepared. + + Raises: + KeyError: If project settings do not contain keys for project specific + environments. 
+ """ import acre if project_settings is None: From c98f4d83e7c57e9a5c9c083c4c5c058a75d9b612 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 17:44:53 +0100 Subject: [PATCH 219/295] standalone publisher sets project environments before publish is started --- .../tools/standalonepublish/widgets/widget_components.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/pype/tools/standalonepublish/widgets/widget_components.py b/pype/tools/standalonepublish/widgets/widget_components.py index d4638ea4374..d09c12f0ab4 100644 --- a/pype/tools/standalonepublish/widgets/widget_components.py +++ b/pype/tools/standalonepublish/widgets/widget_components.py @@ -1,5 +1,4 @@ import os -import sys import json import tempfile import random @@ -10,7 +9,10 @@ from .constants import HOST_NAME from avalon import io from pype.api import execute, Logger -from pype.lib import get_pype_execute_args +from pype.lib import ( + get_pype_execute_args, + apply_project_environments_value +) log = Logger().get_logger("standalonepublisher") @@ -209,6 +211,9 @@ def cli_publish(data, publish_paths, gui=True): if data.get("family", "").lower() == "editorial": envcopy["PYBLISH_SUSPEND_LOGS"] = "1" + project_name = os.environ["AVALON_PROJECT"] + env_copy = apply_project_environments_value(project_name, envcopy) + args = get_pype_execute_args("run", PUBLISH_SCRIPT_PATH) result = execute(args, env=envcopy) From 6c74ea04d4aad48acc8354cbb11585d6eed1ca60 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 17:45:09 +0100 Subject: [PATCH 220/295] added new function to lib's init file --- pype/lib/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pype/lib/__init__.py b/pype/lib/__init__.py index 27dd93c1a1b..2150e53b0eb 100644 --- a/pype/lib/__init__.py +++ b/pype/lib/__init__.py @@ -81,6 +81,7 @@ prepare_host_environments, prepare_context_environments, get_app_environments_for_context, + apply_project_environments_value, compile_list_of_regexes ) 
@@ -174,6 +175,7 @@ "prepare_host_environments", "prepare_context_environments", "get_app_environments_for_context", + "apply_project_environments_value", "compile_list_of_regexes", From 6eb792144ba7ba1a240330fe4e93eb4442e9a41f Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 17:57:58 +0100 Subject: [PATCH 221/295] removed celaction publish variant --- .../system_schema/host_settings/schema_celaction.json | 7 ------- 1 file changed, 7 deletions(-) diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_celaction.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_celaction.json index 6fa596808d5..c5fe824f943 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_celaction.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_celaction.json @@ -34,13 +34,6 @@ "app_name": "celation", "multiplatform": false, "multipath_executables": false - }, - { - "app_variant_label": "Publish", - "app_variant": "Publish", - "app_name": "celation", - "multiplatform": false, - "multipath_executables": false } ] } From 07fb6379263e1916a21f6d1b8b2fdea05bbc7e95 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 17:58:40 +0100 Subject: [PATCH 222/295] dashes are used as version serparators instead of underscore on apps --- .../system_settings/applications.json | 113 ++++++++---------- .../host_settings/schema_blender.json | 4 +- .../host_settings/schema_djv.json | 2 +- .../host_settings/schema_shell.json | 4 +- .../host_settings/schema_tvpaint.json | 4 +- .../host_settings/schema_unreal.json | 2 +- .../host_settings/template_nuke.json | 8 +- 7 files changed, 60 insertions(+), 77 deletions(-) diff --git a/pype/settings/defaults/system_settings/applications.json b/pype/settings/defaults/system_settings/applications.json index 436ef62342c..90e30c8a51c 100644 --- a/pype/settings/defaults/system_settings/applications.json +++ 
b/pype/settings/defaults/system_settings/applications.json @@ -253,7 +253,7 @@ } }, "variants": { - "nuke_12_2": { + "nuke_12-2": { "enabled": true, "label": "", "variant_label": "12.2", @@ -274,11 +274,11 @@ }, "environment": { "__environment_keys__": { - "nuke_12_2": [] + "nuke_12-2": [] } } }, - "nuke_12_0": { + "nuke_12-0": { "enabled": true, "label": "", "variant_label": "12.0", @@ -299,11 +299,11 @@ }, "environment": { "__environment_keys__": { - "nuke_12_0": [] + "nuke_12-0": [] } } }, - "nuke_11_3": { + "nuke_11-3": { "enabled": true, "label": "", "variant_label": "11.3", @@ -324,11 +324,11 @@ }, "environment": { "__environment_keys__": { - "nuke_11_3": [] + "nuke_11-3": [] } } }, - "nuke_11_2": { + "nuke_11-2": { "enabled": true, "label": "", "variant_label": "11.2", @@ -347,7 +347,7 @@ }, "environment": { "__environment_keys__": { - "nuke_11_2": [] + "nuke_11-2": [] } } } @@ -377,7 +377,7 @@ } }, "variants": { - "nukex_12_2": { + "nukex_12-2": { "enabled": true, "label": "", "variant_label": "12.2", @@ -404,11 +404,11 @@ }, "environment": { "__environment_keys__": { - "nukex_12_2": [] + "nukex_12-2": [] } } }, - "nukex_12_0": { + "nukex_12-0": { "enabled": true, "label": "", "variant_label": "12.0", @@ -435,11 +435,11 @@ }, "environment": { "__environment_keys__": { - "nukex_12_0": [] + "nukex_12-0": [] } } }, - "nukex_11_3": { + "nukex_11-3": { "enabled": true, "label": "", "variant_label": "11.3", @@ -466,11 +466,11 @@ }, "environment": { "__environment_keys__": { - "nukex_11_3": [] + "nukex_11-3": [] } } }, - "nukex_11_2": { + "nukex_11-2": { "enabled": true, "label": "", "variant_label": "11.2", @@ -495,7 +495,7 @@ }, "environment": { "__environment_keys__": { - "nukex_11_2": [] + "nukex_11-2": [] } } } @@ -527,7 +527,7 @@ } }, "variants": { - "nukestudio_12_2": { + "nukestudio_12-2": { "enabled": true, "label": "", "variant_label": "12.2", @@ -554,11 +554,11 @@ }, "environment": { "__environment_keys__": { - "nukestudio_12_2": [] + "nukestudio_12-2": 
[] } } }, - "nukestudio_12_0": { + "nukestudio_12-0": { "enabled": true, "label": "", "variant_label": "12.0", @@ -585,11 +585,11 @@ }, "environment": { "__environment_keys__": { - "nukestudio_12_0": [] + "nukestudio_12-0": [] } } }, - "nukestudio_11_3": { + "nukestudio_11-3": { "enabled": true, "label": "", "variant_label": "11.3", @@ -616,11 +616,11 @@ }, "environment": { "__environment_keys__": { - "nukestudio_11_3": [] + "nukestudio_11-3": [] } } }, - "nukestudio_11_2": { + "nukestudio_11-2": { "enabled": true, "label": "", "variant_label": "11.2", @@ -643,7 +643,7 @@ }, "environment": { "__environment_keys__": { - "nukestudio_11_2": [] + "nukestudio_11-2": [] } } } @@ -675,7 +675,7 @@ } }, "variants": { - "hiero_12_2": { + "hiero_12-2": { "enabled": true, "label": "", "variant_label": "12.2", @@ -702,11 +702,11 @@ }, "environment": { "__environment_keys__": { - "hiero_12_2": [] + "hiero_12-2": [] } } }, - "hiero_12_0": { + "hiero_12-0": { "enabled": true, "label": "", "variant_label": "12.0", @@ -733,11 +733,11 @@ }, "environment": { "__environment_keys__": { - "hiero_12_0": [] + "hiero_12-0": [] } } }, - "hiero_11_3": { + "hiero_11-3": { "enabled": true, "label": "", "variant_label": "11.3", @@ -764,11 +764,11 @@ }, "environment": { "__environment_keys__": { - "hiero_11_3": [] + "hiero_11-3": [] } } }, - "hiero_11_2": { + "hiero_11-2": { "enabled": true, "label": "", "variant_label": "11.2", @@ -793,7 +793,7 @@ }, "environment": { "__environment_keys__": { - "hiero_11_2": [] + "hiero_11-2": [] } } } @@ -1057,7 +1057,7 @@ } }, "variants": { - "blender_2_83": { + "blender_2-83": { "enabled": true, "label": "", "variant_label": "2.83", @@ -1082,11 +1082,11 @@ }, "environment": { "__environment_keys__": { - "blender_2_83": [] + "blender_2-83": [] } } }, - "blender_2_90": { + "blender_2-90": { "enabled": true, "label": "", "variant_label": "2.90", @@ -1111,7 +1111,7 @@ }, "environment": { "__environment_keys__": { - "blender_2_90": [] + "blender_2-90": [] } } } @@ 
-1193,7 +1193,7 @@ } }, "variants": { - "tvpaint_animation_11_64bit": { + "tvpaint_animation_11-64bits": { "enabled": true, "label": "", "variant_label": "11 (64bits)", @@ -1212,11 +1212,11 @@ }, "environment": { "__environment_keys__": { - "tvpaint_animation_11_64bit": [] + "tvpaint_animation_11-64bits": [] } } }, - "tvpaint_animation_11_32bit": { + "tvpaint_animation_11-32bits": { "enabled": true, "label": "", "variant_label": "11 (32bits)", @@ -1235,7 +1235,7 @@ }, "environment": { "__environment_keys__": { - "tvpaint_animation_11_32bit": [] + "tvpaint_animation_11-32bits": [] } } } @@ -1407,23 +1407,6 @@ "celation_Local": [] } } - }, - "celation_Publish": { - "enabled": true, - "label": "", - "variant_label": "Pulblish", - "icon": "", - "executables": "", - "arguments": { - "windows": [], - "darwin": [], - "linux": [] - }, - "environment": { - "__environment_keys__": { - "celation_Publish": [] - } - } } } }, @@ -1445,7 +1428,7 @@ } }, "variants": { - "unreal_4_24": { + "unreal_4-24": { "enabled": true, "label": "", "variant_label": "4.24", @@ -1462,7 +1445,7 @@ }, "environment": { "__environment_keys__": { - "unreal_4_24": [] + "unreal_4-24": [] } } } @@ -1476,7 +1459,7 @@ } }, "variants": { - "python_python_3_7": { + "python_python_3-7": { "enabled": true, "label": "Python", "variant_label": "3.7", @@ -1493,11 +1476,11 @@ }, "environment": { "__environment_keys__": { - "python_python_3_7": [] + "python_python_3-7": [] } } }, - "python_python_2_7": { + "python_python_2-7": { "enabled": true, "label": "Python", "variant_label": "2.7", @@ -1514,7 +1497,7 @@ }, "environment": { "__environment_keys__": { - "python_python_2_7": [] + "python_python_2-7": [] } } }, @@ -1552,7 +1535,7 @@ } }, "variants": { - "djvview_1_1": { + "djvview_1-1": { "enabled": true, "label": "", "variant_label": "1.1", @@ -1569,7 +1552,7 @@ }, "environment": { "__environment_keys__": { - "djvview_1_1": [] + "djvview_1-1": [] } } } diff --git 
a/pype/settings/entities/schemas/system_schema/host_settings/schema_blender.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_blender.json index 725a0685b6d..5030f8280f9 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_blender.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_blender.json @@ -30,12 +30,12 @@ "template_data": [ { "app_variant_label": "2.83", - "app_variant": "2_83", + "app_variant": "2-83", "app_name": "blender" }, { "app_variant_label": "2.90", - "app_variant": "2_90", + "app_variant": "2-90", "app_name": "blender" } ] diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_djv.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_djv.json index 8bbdb7ea9b2..3f3af3585a8 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_djv.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_djv.json @@ -29,7 +29,7 @@ "name": "template_host_variant", "template_data": { "app_variant_label": "1.1", - "app_variant": "1_1", + "app_variant": "1-1", "app_name": "djvview" } } diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_shell.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_shell.json index f72450aa080..3288fe2ffb8 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_shell.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_shell.json @@ -25,12 +25,12 @@ "name": "template_host_variant", "template_data": [ { - "app_variant": "python_3_7", + "app_variant": "python_3-7", "app_variant_label": "Python 3.7", "app_name": "python" }, { - "app_variant": "python_2_7", + "app_variant": "python_2-7", "app_variant_label": "Python 2.7", "app_name": "python" }, diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_tvpaint.json 
b/pype/settings/entities/schemas/system_schema/host_settings/schema_tvpaint.json index a569ec0503f..a3cc6188ac0 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_tvpaint.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_tvpaint.json @@ -30,12 +30,12 @@ "template_data": [ { "app_variant_label": "Animation 11 (64bits)", - "app_variant": "animation_11_64bit", + "app_variant": "animation_11-64bits", "app_name": "tvpaint" }, { "app_variant_label": "Animation 11 (32bits)", - "app_variant": "animation_11_32bit", + "app_variant": "animation_11-32bits", "app_name": "tvpaint" } ] diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_unreal.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_unreal.json index b23a21b0fd1..c79f08b71a9 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_unreal.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_unreal.json @@ -29,7 +29,7 @@ "name": "template_host_variant", "template_data": [ { - "app_variant": "4_24", + "app_variant": "4-24", "app_variant_label": "4.24", "app_name": "unreal" } diff --git a/pype/settings/entities/schemas/system_schema/host_settings/template_nuke.json b/pype/settings/entities/schemas/system_schema/host_settings/template_nuke.json index d32f87949d1..c86c2aef61c 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/template_nuke.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/template_nuke.json @@ -30,22 +30,22 @@ "name": "template_host_variant", "template_data": [ { - "app_variant": "12_2", + "app_variant": "12-2", "app_variant_label": "12.2", "app_name": "{nuke_type}" }, { - "app_variant": "12_0", + "app_variant": "12-0", "app_variant_label": "12.0", "app_name": "{nuke_type}" }, { - "app_variant": "11_3", + "app_variant": "11-3", "app_variant_label": "11.3", "app_name": "{nuke_type}" }, { - "app_variant": "11_2", + 
"app_variant": "11-2", "app_variant_label": "11.2", "app_name": "{nuke_type}" } From 2fbe5d816558e7e995bc3ade8a09bb244e898c35 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 19:40:51 +0100 Subject: [PATCH 223/295] tools are dynamic dictionaries --- .../schemas/system_schema/schema_tools.json | 52 +++++++++---------- .../tool_settings/schema_arnold.json | 29 ----------- .../tool_settings/schema_vray.json | 29 ----------- .../tool_settings/schema_yeti.json | 29 ----------- .../tool_settings/template_tool_variant.json | 11 ---- 5 files changed, 25 insertions(+), 125 deletions(-) delete mode 100644 pype/settings/entities/schemas/system_schema/tool_settings/schema_arnold.json delete mode 100644 pype/settings/entities/schemas/system_schema/tool_settings/schema_vray.json delete mode 100644 pype/settings/entities/schemas/system_schema/tool_settings/schema_yeti.json delete mode 100644 pype/settings/entities/schemas/system_schema/tool_settings/template_tool_variant.json diff --git a/pype/settings/entities/schemas/system_schema/schema_tools.json b/pype/settings/entities/schemas/system_schema/schema_tools.json index 2c04abc47c0..188d2fc8e83 100644 --- a/pype/settings/entities/schemas/system_schema/schema_tools.json +++ b/pype/settings/entities/schemas/system_schema/schema_tools.json @@ -1,37 +1,35 @@ { - "key": "tools", "type": "dict", - "label": "Tools", + "key": "tools", "collapsible": true, "is_file": true, "children": [ { - "type": "schema", - "name": "schema_arnold" - }, - { - "type": "schema", - "name": "schema_vray" - }, - { - "type": "schema", - "name": "schema_yeti" - }, - { - "type": "dict", - "key": "other", - "children": [ - { - "type": "schema_template", - "name": "template_tool_variant", - "template_data": [ - { - "tool_name": "othertools", - "tool_label": "Other Tools and Plugins" + "type": "dict-modifiable", + "label": "Tools", + "key": "tool_groups", + "collapsible_key": true, + "object_type": { + "type": "dict", + "children": [ + { + "key": 
"environment", + "label": "Environments", + "type": "raw-json" + }, + { + "type": "separator" + }, + { + "type": "dict-modifiable", + "key": "variants", + "label": "Variants", + "object_type": { + "type": "raw-json" } - ] - } - ] + } + ] + } } ] } diff --git a/pype/settings/entities/schemas/system_schema/tool_settings/schema_arnold.json b/pype/settings/entities/schemas/system_schema/tool_settings/schema_arnold.json deleted file mode 100644 index db2be09c834..00000000000 --- a/pype/settings/entities/schemas/system_schema/tool_settings/schema_arnold.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "type": "dict", - "key": "mtoa", - "label": "Autodesk Arnold", - "collapsible": true, - "checkbox_key": "enabled", - "children": [ - { - "type": "boolean", - "key": "enabled", - "label": "Enabled" - }, - { - "key": "environment", - "label": "Environment (mtoa)", - "type": "raw-json", - "env_group_key": "mtoa" - }, - { - "type": "schema_template", - "name": "template_tool_variant", - "template_data": [ - { - "tool_label": "Arnold Versions" - } - ] - } - ] -} diff --git a/pype/settings/entities/schemas/system_schema/tool_settings/schema_vray.json b/pype/settings/entities/schemas/system_schema/tool_settings/schema_vray.json deleted file mode 100644 index 295b3ccac30..00000000000 --- a/pype/settings/entities/schemas/system_schema/tool_settings/schema_vray.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "type": "dict", - "key": "vray", - "label": "Chaos Group Vray", - "collapsible": true, - "checkbox_key": "enabled", - "children": [ - { - "type": "boolean", - "key": "enabled", - "label": "Enabled" - }, - { - "key": "environment", - "label": "Environment", - "type": "raw-json", - "env_group_key": "vray" - }, - { - "type": "schema_template", - "name": "template_tool_variant", - "template_data": [ - { - "tool_label": "Vray Versions" - } - ] - } - ] -} diff --git a/pype/settings/entities/schemas/system_schema/tool_settings/schema_yeti.json 
b/pype/settings/entities/schemas/system_schema/tool_settings/schema_yeti.json deleted file mode 100644 index 34bb09da8da..00000000000 --- a/pype/settings/entities/schemas/system_schema/tool_settings/schema_yeti.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "type": "dict", - "key": "yeti", - "label": "Pergrine Labs Yeti", - "collapsible": true, - "checkbox_key": "enabled", - "children": [ - { - "type": "boolean", - "key": "enabled", - "label": "Enabled" - }, - { - "key": "environment", - "label": "Environment", - "type": "raw-json", - "env_group_key": "yeti" - }, - { - "type": "schema_template", - "name": "template_tool_variant", - "template_data": [ - { - "tool_label": "Yeti Versions" - } - ] - } - ] -} diff --git a/pype/settings/entities/schemas/system_schema/tool_settings/template_tool_variant.json b/pype/settings/entities/schemas/system_schema/tool_settings/template_tool_variant.json deleted file mode 100644 index b0ba63469cd..00000000000 --- a/pype/settings/entities/schemas/system_schema/tool_settings/template_tool_variant.json +++ /dev/null @@ -1,11 +0,0 @@ -[ - { - "type": "dict-modifiable", - "key": "variants", - "label": "{tool_label}", - "value_is_env_group": true, - "object_type": { - "type": "raw-json" - } - } -] From ddd5cbb13624360064fa19d7f50b9e6bf89e0387 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 19:42:06 +0100 Subject: [PATCH 224/295] changed exception type on root entity --- pype/settings/entities/root_entities.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/settings/entities/root_entities.py b/pype/settings/entities/root_entities.py index 2d336896974..e7cb098c674 100644 --- a/pype/settings/entities/root_entities.py +++ b/pype/settings/entities/root_entities.py @@ -14,7 +14,7 @@ get_project_settings_schema ) from .exceptions import ( - EntitySchemaError, + SchemaError, InvalidKeySymbols ) from pype.settings.constants import ( @@ -154,7 +154,7 @@ def schema_validations(self): "Root entity \"{}\" has 
child with `is_group`" " attribute set to True but root can't save overrides." ).format(self.__class__.__name__) - raise EntitySchemaError(self, reason) + raise SchemaError(reason) child_entity.schema_validations() for key in self.non_gui_children.keys(): From c24ba96bea143802537979dbd2bcd9ada74dd0df Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 19:42:26 +0100 Subject: [PATCH 225/295] added schema validation on object_type key in mutable dict item --- pype/settings/entities/dict_mutable_keys_entity.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pype/settings/entities/dict_mutable_keys_entity.py b/pype/settings/entities/dict_mutable_keys_entity.py index 12a18ad6125..b4651717345 100644 --- a/pype/settings/entities/dict_mutable_keys_entity.py +++ b/pype/settings/entities/dict_mutable_keys_entity.py @@ -202,7 +202,7 @@ def _item_initalization(self): self.schema_data.get("highlight_content") or False ) - object_type = self.schema_data["object_type"] + object_type = self.schema_data.get("object_type") or {} if not isinstance(object_type, dict): # Backwards compatibility object_type = { @@ -226,6 +226,12 @@ def _item_initalization(self): def schema_validations(self): super(DictMutableKeysEntity, self).schema_validations() + if not self.schema_data.get("object_type"): + reason = ( + "Modifiable dictionary must have specified `object_type`." 
+ ) + raise EntitySchemaError(self, reason) + # TODO Ability to store labels should be defined with different key if self.collapsible_key and not self.file_item: reason = ( From 9582d9ffdfb703e5bc35199c9c2baa3893c223a6 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 19:42:38 +0100 Subject: [PATCH 226/295] resaved tools --- .../defaults/system_settings/tools.json | 109 +++++++----------- 1 file changed, 40 insertions(+), 69 deletions(-) diff --git a/pype/settings/defaults/system_settings/tools.json b/pype/settings/defaults/system_settings/tools.json index fce847fb555..4c9a90993f5 100644 --- a/pype/settings/defaults/system_settings/tools.json +++ b/pype/settings/defaults/system_settings/tools.json @@ -1,78 +1,49 @@ { - "mtoa": { - "enabled": true, - "environment": { - "MTOA": "{PYPE_STUDIO_SOFTWARE}/arnold/mtoa_{MAYA_VERSION}_{MTOA_VERSION}", - "MAYA_RENDER_DESC_PATH": "{MTOA}", - "MAYA_MODULE_PATH": "{MTOA}", - "ARNOLD_PLUGIN_PATH": "{MTOA}/shaders", - "MTOA_EXTENSIONS_PATH": { - "darwin": "{MTOA}/extensions", - "linux": "{MTOA}/extensions", - "windows": "{MTOA}/extensions" - }, - "MTOA_EXTENSIONS": { - "darwin": "{MTOA}/extensions", - "linux": "{MTOA}/extensions", - "windows": "{MTOA}/extensions" - }, - "DYLD_LIBRARY_PATH": { - "darwin": "{MTOA}/bin" - }, - "PATH": { - "windows": "{PATH};{MTOA}/bin" - }, - "__environment_keys__": { - "mtoa": [ - "MTOA", - "MAYA_RENDER_DESC_PATH", - "MAYA_MODULE_PATH", - "ARNOLD_PLUGIN_PATH", - "MTOA_EXTENSIONS_PATH", - "MTOA_EXTENSIONS", - "DYLD_LIBRARY_PATH", - "PATH" - ] - } - }, - "variants": { - "mtoa_3_2": { - "MTOA_VERSION": "3.2", - "__environment_keys__": { - "mtoa_3_2": [ - "MTOA_VERSION" - ] + "tool_groups": { + "mtoa": { + "environment": { + "MTOA": "{PYPE_STUDIO_SOFTWARE}/arnold/mtoa_{MAYA_VERSION}_{MTOA_VERSION}", + "MAYA_RENDER_DESC_PATH": "{MTOA}", + "MAYA_MODULE_PATH": "{MTOA}", + "ARNOLD_PLUGIN_PATH": "{MTOA}/shaders", + "MTOA_EXTENSIONS_PATH": { + "darwin": "{MTOA}/extensions", + "linux": 
"{MTOA}/extensions", + "windows": "{MTOA}/extensions" + }, + "MTOA_EXTENSIONS": { + "darwin": "{MTOA}/extensions", + "linux": "{MTOA}/extensions", + "windows": "{MTOA}/extensions" + }, + "DYLD_LIBRARY_PATH": { + "darwin": "{MTOA}/bin" + }, + "PATH": { + "windows": "{PATH};{MTOA}/bin" } }, - "mtoa_3_1": { - "MTOA_VERSION": "3.1", - "__environment_keys__": { - "mtoa_3_1": [ - "MTOA_VERSION" - ] + "variants": { + "mtoa_3-2": { + "MTOA_VERSION": "3.2" + }, + "mtoa_3-1": { + "MTOA_VERSION": "3.1" } } - } - }, - "vray": { - "enabled": true, - "environment": { - "__environment_keys__": { - "vray": [] - } }, - "variants": {} - }, - "yeti": { - "enabled": true, - "environment": { - "__environment_keys__": { - "yeti": [] - } + "vray": { + "environment": {}, + "variants": {} }, - "variants": {} - }, - "other": { - "variants": {} + "yeti": { + "environment": {}, + "variants": {} + }, + "__dynamic_keys_labels__": { + "mtoa": "Autodesk Arnold", + "yeti": "Pergrine Labs Yeti", + "vray": "Chaos Group Vray" + } } } \ No newline at end of file From 76bcca4c44aaada832304769d514dba5ed219209 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 19:42:53 +0100 Subject: [PATCH 227/295] changed how tools are used --- pype/lib/applications.py | 23 +++++++++-------------- pype/settings/entities/enum_entity.py | 12 +++++------- 2 files changed, 14 insertions(+), 21 deletions(-) diff --git a/pype/lib/applications.py b/pype/lib/applications.py index abaecf1e9c3..d62c5deff78 100644 --- a/pype/lib/applications.py +++ b/pype/lib/applications.py @@ -138,20 +138,16 @@ def refresh(self): app_group, app_name, host_name, app_data, self ) - tools_definitions = settings["tools"] + tools_definitions = settings["tools"]["tool_groups"] for tool_group_name, tool_group_data in tools_definitions.items(): - enabled = tool_group_data.get("enabled", True) tool_variants = tool_group_data.get("variants") or {} for tool_name, tool_data in tool_variants.items(): - if tool_name in self.tools: + tool = 
ApplicationTool(tool_name, tool_group_name) + if tool.full_name in self.tools: self.log.warning(( "Duplicated tool name in settings \"{}\"" - ).format(tool_name)) - - _enabled = tool_data.get("enabled", enabled) - self.tools[tool_name] = ApplicationTool( - tool_name, tool_group_name, _enabled - ) + ).format(tool.full_name)) + self.tools[tool.full_name] = tool def launch(self, app_name, **data): """Launch procedure. @@ -196,16 +192,15 @@ class ApplicationTool: Args: tool_name (str): Name of the tool. group_name (str): Name of group which wraps tool. - enabled (bool): Is tool enabled by studio. """ - def __init__(self, tool_name, group_name, enabled): + def __init__(self, tool_name, group_name): self.name = tool_name self.group_name = group_name - self.enabled = enabled - def __bool__(self): - return self.enabled + @property + def full_name(self): + return "/".join((self.group_name, self.name)) class ApplicationExecutable: diff --git a/pype/settings/entities/enum_entity.py b/pype/settings/entities/enum_entity.py index f06ec97f4b0..ca0d5dec219 100644 --- a/pype/settings/entities/enum_entity.py +++ b/pype/settings/entities/enum_entity.py @@ -170,14 +170,12 @@ def _get_enum_values(self): valid_keys = set() enum_items = [] - for tool_group in system_settings_entity["tools"].values(): - enabled_entity = tool_group.get("enabled") - if enabled_entity and not enabled_entity.value: - continue - + tools_entity = system_settings_entity["tools"] + for group_name, tool_group in tools_entity["tool_groups"].items(): for variant_name in tool_group["variants"].keys(): - enum_items.append({variant_name: variant_name}) - valid_keys.add(variant_name) + tool_name = "/".join((group_name, variant_name)) + enum_items.append({tool_name: tool_name}) + valid_keys.add(tool_name) return enum_items, valid_keys def set_override_state(self, *args, **kwargs): From e3c44925d48f4c8f1843a57116d429973f97bfa6 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 25 Mar 2021 19:50:18 +0100 Subject: [PATCH 
228/295] fix tool name in create update attriutes action --- .../event_handlers_user/action_create_cust_attrs.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/pype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py b/pype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py index ae040fd6301..8ff0cade7b0 100644 --- a/pype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py +++ b/pype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py @@ -400,11 +400,10 @@ def applications_attribute(self, event): def tools_attribute(self, event): tools_data = [] - for tool_name, tool in self.app_manager.tools.items(): - if tool.enabled: - tools_data.append({ - tool_name: tool_name - }) + for tool_name in self.app_manager.tools.keys(): + tools_data.append({ + tool_name: tool_name + }) # Make sure there is at least one item if not tools_data: From 18a56ccce872c892de811092b03355b3f9808605 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Thu, 25 Mar 2021 23:11:25 +0100 Subject: [PATCH 229/295] update houdini menu --- pype/hosts/houdini/startup/MainMenuCommon.XML | 2 +- pype/hosts/houdini/startup/scripts/123.py | 4 +-- .../system_settings/applications.json | 25 ++++++++++++++++++- .../host_settings/schema_houdini.json | 4 +++ 4 files changed, 31 insertions(+), 4 deletions(-) diff --git a/pype/hosts/houdini/startup/MainMenuCommon.XML b/pype/hosts/houdini/startup/MainMenuCommon.XML index 16e92be6883..bfc7b6c0bc8 100644 --- a/pype/hosts/houdini/startup/MainMenuCommon.XML +++ b/pype/hosts/houdini/startup/MainMenuCommon.XML @@ -2,7 +2,7 @@ - + Date: Thu, 25 Mar 2021 23:16:34 +0100 Subject: [PATCH 230/295] update workfiles tool in houdini --- pype/hosts/houdini/startup/MainMenuCommon.XML | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/hosts/houdini/startup/MainMenuCommon.XML b/pype/hosts/houdini/startup/MainMenuCommon.XML index bfc7b6c0bc8..ba639a71a14 100644 --- 
a/pype/hosts/houdini/startup/MainMenuCommon.XML +++ b/pype/hosts/houdini/startup/MainMenuCommon.XML @@ -58,7 +58,7 @@ publish.show(parent) From 5d610145c6fe1b28f756bec7439a2ec620e699c1 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 26 Mar 2021 11:32:20 +0100 Subject: [PATCH 231/295] fix houdini defaults --- pype/settings/defaults/system_settings/applications.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/settings/defaults/system_settings/applications.json b/pype/settings/defaults/system_settings/applications.json index 1c080aa6135..ea910e125d7 100644 --- a/pype/settings/defaults/system_settings/applications.json +++ b/pype/settings/defaults/system_settings/applications.json @@ -992,7 +992,7 @@ } }, "variants": { - "houdini_18.5": { + "houdini_18-5": { "enabled": true, "label": "", "variant_label": "18.5", @@ -1011,7 +1011,7 @@ }, "environment": { "__environment_keys__": { - "houdini_18.5": [] + "houdini_18-5": [] } } }, From 8b4f462af9d999024479a565d7ff9518b95d04ae Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 26 Mar 2021 11:33:30 +0100 Subject: [PATCH 232/295] removed "mtoa" from mtoa variants --- pype/settings/defaults/system_settings/tools.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pype/settings/defaults/system_settings/tools.json b/pype/settings/defaults/system_settings/tools.json index 4c9a90993f5..215139468ca 100644 --- a/pype/settings/defaults/system_settings/tools.json +++ b/pype/settings/defaults/system_settings/tools.json @@ -24,10 +24,10 @@ } }, "variants": { - "mtoa_3-2": { + "3-2": { "MTOA_VERSION": "3.2" }, - "mtoa_3-1": { + "3-1": { "MTOA_VERSION": "3.1" } } @@ -42,8 +42,8 @@ }, "__dynamic_keys_labels__": { "mtoa": "Autodesk Arnold", - "yeti": "Pergrine Labs Yeti", - "vray": "Chaos Group Vray" + "vray": "Chaos Group Vray", + "yeti": "Pergrine Labs Yeti" } } } \ No newline at end of file From 227e1aee1b44604aa253f86ece078ddf2ee7016a Mon Sep 17 00:00:00 2001 From: iLLiCiTiT 
Date: Fri, 26 Mar 2021 12:41:45 +0100 Subject: [PATCH 233/295] enhanced labels of tools in enum items --- pype/settings/entities/enum_entity.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/pype/settings/entities/enum_entity.py b/pype/settings/entities/enum_entity.py index ca0d5dec219..8588cbb47c2 100644 --- a/pype/settings/entities/enum_entity.py +++ b/pype/settings/entities/enum_entity.py @@ -172,9 +172,23 @@ def _get_enum_values(self): enum_items = [] tools_entity = system_settings_entity["tools"] for group_name, tool_group in tools_entity["tool_groups"].items(): - for variant_name in tool_group["variants"].keys(): + group_label = None + if hasattr(tool_group, "get_key_label"): + group_label = tool_group.get_key_label(group_name) + + for variant_name, variant in tool_group["variants"].items(): + variant_label = None + if hasattr(variant, "get_key_label"): + variant_label = variant.get_key_label(variant_name) + + tool_label = None + if group_label and variant_label: + tool_label = " ".join((group_label, variant_label)) + tool_name = "/".join((group_name, variant_name)) - enum_items.append({tool_name: tool_name}) + if not tool_label: + tool_label = tool_name + enum_items.append({tool_name: tool_label}) valid_keys.add(tool_name) return enum_items, valid_keys From 604847758033753b6441555fb2ce4e0398384f3b Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 26 Mar 2021 12:49:08 +0100 Subject: [PATCH 234/295] removed variants label and variants are as collapsible keys --- pype/settings/defaults/system_settings/tools.json | 4 ++++ .../settings/entities/schemas/system_schema/schema_tools.json | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/pype/settings/defaults/system_settings/tools.json b/pype/settings/defaults/system_settings/tools.json index 215139468ca..214bfc95e5d 100644 --- a/pype/settings/defaults/system_settings/tools.json +++ b/pype/settings/defaults/system_settings/tools.json @@ -29,6 +29,10 @@ }, 
"3-1": { "MTOA_VERSION": "3.1" + }, + "__dynamic_keys_labels__": { + "3-2": "3.2", + "3-1": "3.2" } } }, diff --git a/pype/settings/entities/schemas/system_schema/schema_tools.json b/pype/settings/entities/schemas/system_schema/schema_tools.json index 188d2fc8e83..2346bef36d2 100644 --- a/pype/settings/entities/schemas/system_schema/schema_tools.json +++ b/pype/settings/entities/schemas/system_schema/schema_tools.json @@ -23,7 +23,7 @@ { "type": "dict-modifiable", "key": "variants", - "label": "Variants", + "collapsible_key": true, "object_type": { "type": "raw-json" } From d9c263be39d10cbd09900e04adb2f44f7d523155 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 26 Mar 2021 12:49:25 +0100 Subject: [PATCH 235/295] fixed tools labels getting --- pype/settings/entities/enum_entity.py | 28 ++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/pype/settings/entities/enum_entity.py b/pype/settings/entities/enum_entity.py index 8588cbb47c2..c486de397ed 100644 --- a/pype/settings/entities/enum_entity.py +++ b/pype/settings/entities/enum_entity.py @@ -170,24 +170,30 @@ def _get_enum_values(self): valid_keys = set() enum_items = [] - tools_entity = system_settings_entity["tools"] - for group_name, tool_group in tools_entity["tool_groups"].items(): + tool_groups_entity = system_settings_entity["tools"]["tool_groups"] + for group_name, tool_group in tool_groups_entity.items(): + # Try to get group label from entity group_label = None - if hasattr(tool_group, "get_key_label"): - group_label = tool_group.get_key_label(group_name) + if hasattr(tool_groups_entity, "get_key_label"): + group_label = tool_groups_entity.get_key_label(group_name) - for variant_name, variant in tool_group["variants"].items(): + variants_entity = tool_group["variants"] + for variant_name in variants_entity.keys(): + # Prepare tool name (used as value) + tool_name = "/".join((group_name, variant_name)) + + # Try to get variant label from entity variant_label = None 
- if hasattr(variant, "get_key_label"): - variant_label = variant.get_key_label(variant_name) + if hasattr(variants_entity, "get_key_label"): + variant_label = variants_entity.get_key_label(variant_name) - tool_label = None + # Tool label that will be shown + # - use tool name itself if labels are not filled if group_label and variant_label: tool_label = " ".join((group_label, variant_label)) - - tool_name = "/".join((group_name, variant_name)) - if not tool_label: + else: tool_label = tool_name + enum_items.append({tool_name: tool_label}) valid_keys.add(tool_name) return enum_items, valid_keys From 5544f933104719ff1bed37396779f97b2bd02113 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 26 Mar 2021 12:58:10 +0100 Subject: [PATCH 236/295] implemented prelaunch hook that will try to install PySide2 to blender --- .../hosts/blender/hooks/pre_pyside_install.py | 138 ++++++++++++++++++ 1 file changed, 138 insertions(+) create mode 100644 pype/hosts/blender/hooks/pre_pyside_install.py diff --git a/pype/hosts/blender/hooks/pre_pyside_install.py b/pype/hosts/blender/hooks/pre_pyside_install.py new file mode 100644 index 00000000000..72a2f0d5c81 --- /dev/null +++ b/pype/hosts/blender/hooks/pre_pyside_install.py @@ -0,0 +1,138 @@ +import os +import subprocess +from pype.lib import PreLaunchHook + + +class InstallPySideToBlender(PreLaunchHook): + """Install Qt binding to blender's python packages. + + Prelaunch hook does 2 things: + 1.) Blender's python packages are pushed to the beginning of PYTHONPATH. + 2.) Check if blender has installed PySide2 and will try to install if not. + + For pipeline implementation is required to have Qt binding installed in + blender's python packages. + + Prelaunch hook can work only on Windows right now. 
+ """ + + app_groups = ["blender"] + platforms = ["windows"] + + def execute(self): + # Get blender's python directory + executable = self.launch_context.executable.executable_path + # Blender installation contain subfolder named with it's version where + # python binaries are stored. + version_subfolder = self.launch_context.app_name.split("_")[1] + pythond_dir = os.path.join( + os.path.dirname(executable), + version_subfolder, + "python" + ) + + # Change PYTHONPATH to contain blender's packages as first + python_paths = [ + os.path.join(pythond_dir, "lib"), + os.path.join(pythond_dir, "lib", "site-packages"), + ] + python_path = self.launch_context.env.get("PYTHONPATH") or "" + for path in python_path.split(os.pathsep): + if path: + python_paths.append(path) + + self.launch_context.env["PYTHONPATH"] = os.pathsep.join(python_paths) + + # Get blender's python executable + python_executable = os.path.join(pythond_dir, "bin", "python.exe") + if not os.path.exists(python_executable): + self.log.warning( + "Couldn't find python executable for blender. {}".format( + executable + ) + ) + return + + # Check if PySide2 is installed and skip if yes + if self.is_pyside_installed(python_executable): + return + + # Install PySide2 in blender's python + self.install_pyside_windows(python_executable) + + def install_pyside_windows(self, python_executable): + """Install PySide2 python module to blender's python. + + Installation requires administration rights that's why it is required + to use "pywin32" module which can execute command's and ask for + administration rights. 
+ """ + try: + import win32api + import win32con + import win32process + import win32event + import pywintypes + from win32comext.shell.shell import ShellExecuteEx + from win32comext.shell import shellcon + except Exception: + self.log.warning("Couldn't import \"pywin32\" modules") + return + + try: + # Parameters + # - use "-m pip" as module pip to install PySide2 and argument + # "--ignore-installed" is to force install module to blender's + # site-packages and make sure it is binary compatible + parameters = "-m pip install --ignore-installed PySide2" + + # Execute command and ask for administrator's rights + process_info = ShellExecuteEx( + nShow=win32con.SW_SHOWNORMAL, + fMask=shellcon.SEE_MASK_NOCLOSEPROCESS, + lpVerb="runas", + lpFile=python_executable, + lpParameters=parameters, + lpDirectory=os.path.dirname(python_executable) + ) + process_handle = process_info["hProcess"] + obj = win32event.WaitForSingleObject( + process_handle, win32event.INFINITE + ) + returncode = win32process.GetExitCodeProcess(process_handle) + if returncode == 0: + self.log.info( + "Successfully installed PySide2 module to blender." + ) + return + except pywintypes.error: + pass + + self.log.warning("Failed to instal PySide2 module to blender.") + + def is_pyside_installed(self, python_executable): + """Check if PySide2 module is in blender's pip list. + + Check that PySide2 is installed directly in blender's site-packages. + It is possible that it is installed in user's site-packages but that + may be incompatible with blender's python. + """ + # Get pip list from blender's python executable + args = [python_executable, "-m", "pip", "list"] + process = subprocess.Popen(args, stdout=subprocess.PIPE) + stdout, _ = process.communicate() + lines = stdout.decode().split("\r\n") + # Second line contain dashes that define maximum length of module name. + # Second column of dashes define maximum length of module version. 
+ package_dashes, *_ = lines[1].split(" ") + package_len = len(package_dashes) + + # Got through printed lines starting at line 3 + for idx in range(2, len(lines)): + line = lines[idx] + if not line: + continue + package_name = line[0:package_len].strip() + if package_name.lower() == "pyside2": + return True + return False From 6520dd1b366fdc93f7c7f88ba511b2153a209fa3 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 26 Mar 2021 13:03:30 +0100 Subject: [PATCH 237/295] make prelaunch hook not critical --- pype/hosts/blender/hooks/pre_pyside_install.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pype/hosts/blender/hooks/pre_pyside_install.py b/pype/hosts/blender/hooks/pre_pyside_install.py index 72a2f0d5c81..935105e8950 100644 --- a/pype/hosts/blender/hooks/pre_pyside_install.py +++ b/pype/hosts/blender/hooks/pre_pyside_install.py @@ -20,6 +20,16 @@ class InstallPySideToBlender(PreLaunchHook): platforms = ["windows"] def execute(self): + # Prelaunch hook is not crutial + try: + self.inner_execute() + except Exception: + self.log.warning( + "Processing of {} crashed.".format(self.__class__.__name__), + exc_info=True + ) + + def inner_execute(self): # Get blender's python directory executable = self.launch_context.executable.executable_path # Blender installation contain subfolder named with it's version where From 6cff8c514618ab2b37e04d4588902c51c239f685 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 26 Mar 2021 18:09:31 +0100 Subject: [PATCH 238/295] Nuke: adding keys and values to settings for Loaders --- .../defaults/project_settings/nuke.json | 52 ++++++++++++++++--- .../schemas/template_loader_plugin.json | 11 ++++ 2 files changed, 57 insertions(+), 6 deletions(-) diff --git a/pype/settings/defaults/project_settings/nuke.json b/pype/settings/defaults/project_settings/nuke.json index f808f9caa59..d0b5ad0a7e1 100644 --- a/pype/settings/defaults/project_settings/nuke.json +++ b/pype/settings/defaults/project_settings/nuke.json @@ -95,20 
+95,60 @@ "load": { "LoadImage": { "enabled": true, - "representations": [] + "families": [ + "render2d", + "source", + "plate", + "render", + "prerender", + "review", + "image" + ], + "representations": [ + "exr", + "dpx", + "jpg", + "jpeg", + "png", + "psd" + ], + "node_name_template": "{class_name}_{ext}" }, "LoadMov": { "enabled": true, - "representations": [] + "families": [ + "source", + "plate", + "render", + "prerender", + "review" + ], + "representations": [ + "mov", + "review", + "mp4", + "h264" + ], + "node_name_template": "{class_name}_{ext}" }, "LoadSequence": { "enabled": true, + "families": [ + "render2d", + "source", + "plate", + "render", + "prerender", + "review" + ], "representations": [ - "png", - "jpg", "exr", - "" - ] + "dpx", + "jpg", + "jpeg", + "png" + ], + "node_name_template": "{class_name}_{ext}" } }, "workfile_build": { diff --git a/pype/settings/entities/schemas/projects_schema/schemas/template_loader_plugin.json b/pype/settings/entities/schemas/projects_schema/schemas/template_loader_plugin.json index 20dca6df179..d01691ed5fd 100644 --- a/pype/settings/entities/schemas/projects_schema/schemas/template_loader_plugin.json +++ b/pype/settings/entities/schemas/projects_schema/schemas/template_loader_plugin.json @@ -11,11 +11,22 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "list", + "key": "families", + "label": "Families", + "object_type": "text" + }, { "type": "list", "key": "representations", "label": "Representations", "object_type": "text" + }, + { + "type": "text", + "key": "node_name_template", + "label": "Node name template" } ] } From 0d0dd262d35cce24c2e22df0f226ba155e3ad010 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 26 Mar 2021 18:09:56 +0100 Subject: [PATCH 239/295] Nuke: adding Loaders with template for name of node --- pype/hosts/nuke/plugins/load/load_image.py | 22 +++++++------ pype/hosts/nuke/plugins/load/load_mov.py | 31 +++++++++---------- pype/hosts/nuke/plugins/load/load_sequence.py | 20 
++++++++---- 3 files changed, 41 insertions(+), 32 deletions(-) diff --git a/pype/hosts/nuke/plugins/load/load_image.py b/pype/hosts/nuke/plugins/load/load_image.py index dcaf31c9e39..7033cca9f86 100644 --- a/pype/hosts/nuke/plugins/load/load_image.py +++ b/pype/hosts/nuke/plugins/load/load_image.py @@ -12,11 +12,7 @@ class LoadImage(api.Loader): """Load still image into Nuke""" - families = [ - "render2d", "source", "plate", - "render", "prerender", "review", - "image" - ] + families = ["render", "source", "plate", "review", "image"] representations = ["exr", "dpx", "jpg", "jpeg", "png", "psd"] label = "Load Image" @@ -24,6 +20,8 @@ class LoadImage(api.Loader): icon = "image" color = "white" + node_name_template = "{class_name}_{ext}" + options = [ qargparse.Integer( "frame_number", @@ -75,10 +73,16 @@ def load(self, context, name, namespace, options): frame, format(frame_number, "0{}".format(padding))) - read_name = "Read_{0}_{1}_{2}".format( - repr_cont["asset"], - repr_cont["subset"], - repr_cont["representation"]) + name_data = { + "asset": repr_cont["asset"], + "subset": repr_cont["subset"], + "representation": context["representation"]["name"], + "ext": repr_cont["representation"], + "id": context["representation"]["_id"], + "class_name": self.__class__.__name__ + } + + read_name = self.node_name_template.format(**name_data) # Create the Loader with the filename path set with viewer_update_and_undo_stop(): diff --git a/pype/hosts/nuke/plugins/load/load_mov.py b/pype/hosts/nuke/plugins/load/load_mov.py index 830359ccf90..0314322609b 100644 --- a/pype/hosts/nuke/plugins/load/load_mov.py +++ b/pype/hosts/nuke/plugins/load/load_mov.py @@ -69,19 +69,8 @@ def add_review_presets_config(): class LoadMov(api.Loader): """Load mov file into Nuke""" - presets = add_review_presets_config() - families = [ - "source", - "plate", - "render", - "prerender", - "review"] + presets["families"] - - representations = [ - "mov", - "preview", - "review", - "mp4"] + 
presets["representations"] + families = ["render", "source", "plate", "review"] + representations = ["mov", "review", "mp4"] label = "Load mov" order = -10 @@ -90,6 +79,8 @@ class LoadMov(api.Loader): script_start = nuke.root()["first_frame"].value() + node_name_template = "{class_name}_{ext}" + def load(self, context, name, namespace, data): from avalon.nuke import ( containerise, @@ -133,10 +124,16 @@ def load(self, context, name, namespace, data): file = file.replace("\\", "/") - read_name = "Read_{0}_{1}_{2}".format( - repr_cont["asset"], - repr_cont["subset"], - repr_cont["representation"]) + name_data = { + "asset": repr_cont["asset"], + "subset": repr_cont["subset"], + "representation": context["representation"]["name"], + "ext": repr_cont["representation"], + "id": context["representation"]["_id"], + "class_name": self.__class__.__name__ + } + + read_name = self.node_name_template.format(**name_data) # Create the Loader with the filename path set with viewer_update_and_undo_stop(): diff --git a/pype/hosts/nuke/plugins/load/load_sequence.py b/pype/hosts/nuke/plugins/load/load_sequence.py index f99b7be52f8..5cc2d019a00 100644 --- a/pype/hosts/nuke/plugins/load/load_sequence.py +++ b/pype/hosts/nuke/plugins/load/load_sequence.py @@ -72,14 +72,16 @@ def loader_shift(node, frame, relative=False): class LoadSequence(api.Loader): """Load image sequence into Nuke""" - families = ["render2d", "source", "plate", "render", "prerender", "review"] - representations = ["exr", "dpx", "jpg", "jpeg", "png"] + families = ["render", "source", "plate", "review"] + representations = ["exr", "dpx"] label = "Load Image Sequence" order = -20 icon = "file-video-o" color = "white" + node_name_template = "{class_name}_{ext}" + def load(self, context, name, namespace, data): from avalon.nuke import ( containerise, @@ -125,10 +127,16 @@ def load(self, context, name, namespace, data): padding = len(frame) file = file.replace(frame, "#" * padding) - read_name = "Read_{0}_{1}_{2}".format( 
- repr_cont["asset"], - repr_cont["subset"], - context["representation"]["name"]) + name_data = { + "asset": repr_cont["asset"], + "subset": repr_cont["subset"], + "representation": context["representation"]["name"], + "ext": repr_cont["representation"], + "id": context["representation"]["_id"], + "class_name": self.__class__.__name__ + } + + read_name = self.node_name_template.format(**name_data) # Create the Loader with the filename path set with viewer_update_and_undo_stop(): From 73aeda9425dbc6cb29bba703c3937079e9c3af64 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 29 Mar 2021 12:18:06 +0200 Subject: [PATCH 240/295] implementing #1114 --- pype/hosts/nuke/api/lib.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pype/hosts/nuke/api/lib.py b/pype/hosts/nuke/api/lib.py index bea5df48cfc..cee558f88fa 100644 --- a/pype/hosts/nuke/api/lib.py +++ b/pype/hosts/nuke/api/lib.py @@ -489,6 +489,9 @@ def create_write_node(name, data, input=None, prenodes=None, review=True): # Deadline tab. 
add_deadline_tab(GN) + # open the AvalonTab as default + GN["AvalonTab"].setFlag(0) + # set tile color tile_color = _data.get("tile_color", "0xff0000ff") GN["tile_color"].setValue(tile_color) From e9b4e7ea4324aa3c9c9ffea7f179de9cd132445e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 29 Mar 2021 13:05:27 +0200 Subject: [PATCH 241/295] show error message as message in error detail --- pype/tools/pyblish_pype/model.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pype/tools/pyblish_pype/model.py b/pype/tools/pyblish_pype/model.py index ab05e7d6658..b537d7724da 100644 --- a/pype/tools/pyblish_pype/model.py +++ b/pype/tools/pyblish_pype/model.py @@ -1013,7 +1013,9 @@ def append(self, record_items): all_record_items = [] for record_item in record_items: record_type = record_item["type"] - + # Add error message to detail + if record_type == "error": + record_item["msg"] = record_item["label"] terminal_item_type = None if record_type == "record": for level, _type in self.level_to_record: From 449334dc624e28e7a11733e1908fe9c9a77c9e4f Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 29 Mar 2021 13:31:48 +0200 Subject: [PATCH 242/295] Deadline: adding limit groups implementing #1167 --- .../plugins/publish/submit_nuke_deadline.py | 42 ++++++++++++++++--- .../defaults/project_settings/deadline.json | 3 +- .../schema_project_deadline.json | 9 ++++ 3 files changed, 48 insertions(+), 6 deletions(-) diff --git a/pype/modules/deadline/plugins/publish/submit_nuke_deadline.py b/pype/modules/deadline/plugins/publish/submit_nuke_deadline.py index 60cc179a9ba..a4653427bb7 100644 --- a/pype/modules/deadline/plugins/publish/submit_nuke_deadline.py +++ b/pype/modules/deadline/plugins/publish/submit_nuke_deadline.py @@ -6,6 +6,7 @@ from avalon.vendor import requests import re import pyblish.api +import nuke class NukeSubmitDeadline(pyblish.api.InstancePlugin): @@ -29,6 +30,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): secondary_pool = "" group 
= "" department = "" + limit_groups = {} def process(self, instance): instance.data["toBeRenderedOn"] = "deadline" @@ -149,6 +151,10 @@ def payload_submit(self, if not priority: priority = self.priority + # resolve any limit groups + limit_groups = self.get_limit_groups() + self.log.info("Limit groups: `{}`".format(limit_groups)) + payload = { "JobInfo": { # Top-level group name @@ -180,7 +186,10 @@ def payload_submit(self, # Optional, enable double-click to preview rendered # frames from Deadline Monitor - "OutputFilename0": output_filename_0.replace("\\", "/") + "OutputFilename0": output_filename_0.replace("\\", "/"), + + # limiting groups + "LimitGroups": ",".join(limit_groups) }, "PluginInfo": { @@ -329,9 +338,7 @@ def preview_fname(self, path): return int(search_results[1]) if "#" in path: self.log.debug("_ path: `{}`".format(path)) - return path - else: - return path + return path def expected_files(self, instance, @@ -339,7 +346,7 @@ def expected_files(self, """ Create expected files in instance data """ if not instance.data.get("expectedFiles"): - instance.data["expectedFiles"] = list() + instance.data["expectedFiles"] = [] dir = os.path.dirname(path) file = os.path.basename(path) @@ -356,3 +363,28 @@ def expected_files(self, for i in range(self._frame_start, (self._frame_end + 1)): instance.data["expectedFiles"].append( os.path.join(dir, (file % i)).replace("\\", "/")) + + def get_limit_groups(self): + """Search for limit group nodes and return group name. + Limit groups will be defined as pairs in Nuke deadline submitter + presents where the key will be name of limit group and value will be + a list of plugin's node class names. Thus, when a plugin uses more + than one node, these will be captured and the triggered process + will add the appropriate limit group to the payload jobinfo attributes. 
+ Returning: + list: captured groups list + """ + captured_groups = [] + for lg_name, list_node_class in self.deadline_limit_groups.items(): + for node_class in list_node_class: + for node in nuke.allNodes(recurseGroups=True): + # ignore all nodes not member of defined class + if node.Class() not in node_class: + continue + # ignore all disabled nodes + if node["disable"].value(): + continue + # add group name if not already added + if lg_name not in captured_groups: + captured_groups.append(lg_name) + return captured_groups diff --git a/pype/settings/defaults/project_settings/deadline.json b/pype/settings/defaults/project_settings/deadline.json index 6d36f38423f..9ff551491c8 100644 --- a/pype/settings/defaults/project_settings/deadline.json +++ b/pype/settings/defaults/project_settings/deadline.json @@ -20,7 +20,8 @@ "primary_pool": "", "secondary_pool": "", "group": "", - "department": "" + "department": "", + "limit_groups": {} }, "HarmonySubmitDeadline": { "enabled": true, diff --git a/pype/settings/entities/schemas/projects_schema/schema_project_deadline.json b/pype/settings/entities/schemas/projects_schema/schema_project_deadline.json index 2070e4c8f5d..f46221ba638 100644 --- a/pype/settings/entities/schemas/projects_schema/schema_project_deadline.json +++ b/pype/settings/entities/schemas/projects_schema/schema_project_deadline.json @@ -127,6 +127,15 @@ "type": "text", "key": "department", "label": "Department" + }, + { + "type": "dict-modifiable", + "key": "limit_groups", + "label": "Limit Groups", + "object_type": { + "type": "list", + "object_type": "text" + } } ] }, From 6f68a7c38a8cca4fd6e81ab2e34a85e9f98c97f2 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 29 Mar 2021 14:25:21 +0200 Subject: [PATCH 243/295] removed usage of settings for ftrack mongo names --- pype/modules/ftrack/lib/settings.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pype/modules/ftrack/lib/settings.py b/pype/modules/ftrack/lib/settings.py index 
4afac9c29fb..46854e81845 100644 --- a/pype/modules/ftrack/lib/settings.py +++ b/pype/modules/ftrack/lib/settings.py @@ -1,5 +1,7 @@ from pype.api import get_system_settings +PYPE_DATABASE_NAME = "pype" + def get_ftrack_settings(): return get_system_settings()["modules"]["ftrack"] @@ -11,6 +13,6 @@ def get_ftrack_url_from_settings(): def get_ftrack_event_mongo_info(): ftrack_settings = get_ftrack_settings() - database_name = ftrack_settings["mongo_database_name"] - collection_name = ftrack_settings["mongo_collection_name"] + database_name = PYPE_DATABASE_NAME + collection_name = "ftrack_events" return database_name, collection_name From b29dbc15b9a38550d2ef7b2b842da48648a170f8 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 29 Mar 2021 14:25:44 +0200 Subject: [PATCH 244/295] removed settings of ftrack mongo variables from settings --- .../defaults/system_settings/modules.json | 2 -- .../module_settings/schema_ftrack.json | 25 ++++--------------- 2 files changed, 5 insertions(+), 22 deletions(-) diff --git a/pype/settings/defaults/system_settings/modules.json b/pype/settings/defaults/system_settings/modules.json index 8c4d7fe196a..e285fce8540 100644 --- a/pype/settings/defaults/system_settings/modules.json +++ b/pype/settings/defaults/system_settings/modules.json @@ -12,8 +12,6 @@ "ftrack_server": "https://pype.ftrackapp.com", "ftrack_actions_path": [], "ftrack_events_path": [], - "mongo_database_name": "pype", - "mongo_collection_name": "ftrack_events", "intent": { "items": { "-": "-", diff --git a/pype/settings/entities/schemas/system_schema/module_settings/schema_ftrack.json b/pype/settings/entities/schemas/system_schema/module_settings/schema_ftrack.json index 5647e14ebfe..50ec330a114 100644 --- a/pype/settings/entities/schemas/system_schema/module_settings/schema_ftrack.json +++ b/pype/settings/entities/schemas/system_schema/module_settings/schema_ftrack.json @@ -35,24 +35,7 @@ "object_type": "text" }, { - "type": "splitter" - }, - { - "type": "label", - 
"label": "Ftrack event server advanced settings" - }, - { - "type": "text", - "key": "mongo_database_name", - "label": "Event Mongo DB" - }, - { - "type": "text", - "key": "mongo_collection_name", - "label": "Events Mongo Collection" - }, - { - "type": "splitter" + "type": "separator" }, { "key": "intent", @@ -71,13 +54,15 @@ "key": "items" }, { - "type": "label", - "label": " " + "type": "separator" }, { "key": "default", "type": "text", "label": "Default Intent" + }, + { + "type": "separator" } ] }, From 1c469d373086011daac423d51708f7396980ebe8 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 29 Mar 2021 16:12:18 +0200 Subject: [PATCH 245/295] catch exceptions on save in settings ui --- .../settings/settings/widgets/categories.py | 31 ++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/pype/tools/settings/settings/widgets/categories.py b/pype/tools/settings/settings/widgets/categories.py index 263012fa52d..f1e154ee4da 100644 --- a/pype/tools/settings/settings/widgets/categories.py +++ b/pype/tools/settings/settings/widgets/categories.py @@ -260,14 +260,43 @@ def add_widget_to_layout(self, widget, label_widget=None): self.content_layout.addWidget(widget, 0) def save(self): - if self.items_are_valid(): + if not self.items_are_valid(): + return + + try: self.entity.save() + # NOTE There are relations to previous entities and C++ callbacks # so it is easier to just use new entity and recreate UI but # would be nice to change this and add cleanup part so this is # not required. 
self.reset() + except Exception as exc: + formatted_traceback = traceback.format_exception(*sys.exc_info()) + dialog = QtWidgets.QMessageBox(self) + msg = "Unexpected error happened!\n\nError: {}".format(str(exc)) + dialog.setText(msg) + dialog.setDetailedText("\n".join(formatted_traceback)) + dialog.setIcon(QtWidgets.QMessageBox.Critical) + + line_widths = set() + metricts = dialog.fontMetrics() + for line in formatted_traceback: + line_widths.add(metricts.width(line)) + max_width = max(line_widths) + + spacer = QtWidgets.QSpacerItem( + max_width, 0, + QtWidgets.QSizePolicy.Minimum, + QtWidgets.QSizePolicy.Expanding + ) + layout = dialog.layout() + layout.addItem( + spacer, layout.rowCount(), 0, 1, layout.columnCount() + ) + dialog.exec_() + def _create_root_entity(self): raise NotImplementedError( "`create_root_entity` method not implemented" From 48faec38032756ce6c9e6936ff48df01a624c9a7 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 30 Mar 2021 10:23:38 +0200 Subject: [PATCH 246/295] bulk mov instance collecting separated from batch instances --- .../publish/collect_batch_instances.py | 11 +-- .../publish/collect_bulk_mov_instances.py | 98 +++++++++++++++++++ 2 files changed, 100 insertions(+), 9 deletions(-) create mode 100644 pype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py diff --git a/pype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py b/pype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py index 545efcb3035..4ca1f72cc41 100644 --- a/pype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py +++ b/pype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py @@ -9,12 +9,11 @@ class CollectBatchInstances(pyblish.api.InstancePlugin): label = "Collect Batch Instances" order = pyblish.api.CollectorOrder + 0.489 hosts = ["standalonepublisher"] - families = ["background_batch", "render_mov_batch"] + families = ["background_batch"] # presets default_subset_task = { 
- "background_batch": "background", - "render_mov_batch": "compositing" + "background_batch": "background" } subsets = { "background_batch": { @@ -30,12 +29,6 @@ class CollectBatchInstances(pyblish.api.InstancePlugin): "task": "background", "family": "workfile" } - }, - "render_mov_batch": { - "renderCompositingDefault": { - "task": "compositing", - "family": "render" - } } } unchecked_by_default = [] diff --git a/pype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py b/pype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py new file mode 100644 index 00000000000..cbb9d95e01c --- /dev/null +++ b/pype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py @@ -0,0 +1,98 @@ +import copy +import json +import pyblish.api + +from avalon import io +from pype.lib import get_subset_name + + +class CollectBulkMovInstances(pyblish.api.InstancePlugin): + """Collect all available instances for batch publish.""" + + label = "Collect Bulk Mov Instances" + order = pyblish.api.CollectorOrder + 0.489 + hosts = ["standalonepublisher"] + families = ["render_mov_batch"] + + new_instance_family = "render" + instance_task_names = [ + "compositing", + "comp" + ] + default_task_name = "compositing" + subset_name_variant = "Default" + + def process(self, instance): + context = instance.context + asset_name = instance.data["asset"] + + asset_doc = io.find_one( + { + "type": "asset", + "name": asset_name + }, + { + "_id": 1, + "data.tasks": 1 + } + ) + if not asset_doc: + raise AssertionError(( + "Couldn't find Asset document with name \"{}\"" + ).format(asset_name)) + + available_task_names = {} + asset_tasks = asset_doc.get("data", {}).get("tasks") or {} + for task_name in asset_tasks.keys(): + available_task_names[task_name.lower()] = task_name + + task_name = self.default_task_name + for _task_name in self.instance_task_names: + _task_name_low = _task_name.lower() + if _task_name_low in available_task_names: + task_name = 
available_task_names[_task_name_low] + break + + subset_name = get_subset_name( + self.new_instance_family, + self.subset_name_variant, + task_name, + asset_doc["_id"], + io.Session["AVALON_PROJECT"] + ) + instance_name = f"{asset_name}_{subset_name}" + + # create new instance + new_instance = context.create_instance(instance_name) + new_instance_data = { + "name": instance_name, + "label": instance_name, + "family": self.new_instance_family, + "subset": subset_name, + "task": task_name + } + new_instance.data.update(new_instance_data) + # add original instance data except name key + for key, value in instance.data.items(): + if key in new_instance_data: + continue + # Make sure value is copy since value may be object which + # can be shared across all new created objects + new_instance.data[key] = copy.deepcopy(value) + + # Add `render_mov_batch` for specific validators + if "families" not in new_instance.data: + new_instance.data["families"] = [] + new_instance.data["families"].append("render_mov_batch") + + # delete original instance + context.remove(instance) + + self.log.info(f"Created new instance: {instance_name}") + + def convertor(value): + return str(value) + + self.log.debug("Instance data: {}".format( + json.dumps(new_instance.data, indent=4, default=convertor) + )) From e22cb6a1f76b3075b74d2d2e50c804db62810f24 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 30 Mar 2021 10:23:47 +0200 Subject: [PATCH 247/295] moved `get_subset_name` logic from pype.plugin to pype.lib --- pype/lib/__init__.py | 4 ++ pype/lib/plugin_tools.py | 81 ++++++++++++++++++++++++++++++++++++++++ pype/plugin.py | 72 ++--------------------------------- 3 files changed, 89 insertions(+), 68 deletions(-) diff --git a/pype/lib/__init__.py b/pype/lib/__init__.py index 2150e53b0eb..f4282f7ea30 100644 --- a/pype/lib/__init__.py +++ b/pype/lib/__init__.py @@ -89,6 +89,8 @@ from .profiles_filtering import filter_profiles from .plugin_tools import ( + TaskNotSetError, + get_subset_name, 
filter_pyblish_plugins, source_hash, get_unique_layer_name, @@ -181,6 +183,8 @@ "filter_profiles", + "TaskNotSetError", + "get_subset_name", "filter_pyblish_plugins", "source_hash", "get_unique_layer_name", diff --git a/pype/lib/plugin_tools.py b/pype/lib/plugin_tools.py index c03d978ad41..0f3a0a2838b 100644 --- a/pype/lib/plugin_tools.py +++ b/pype/lib/plugin_tools.py @@ -8,12 +8,93 @@ import tempfile from .execute import run_subprocess +from .profiles_filtering import filter_profiles from pype.settings import get_project_settings log = logging.getLogger(__name__) +# Subset name template used when plugin does not have defined any +DEFAULT_SUBSET_TEMPLATE = "{family}{Variant}" + + +class TaskNotSetError(KeyError): + def __init__(self, msg=None): + if not msg: + msg = "Creator's subset name template requires task name." + super(TaskNotSetError, self).__init__(msg) + + +def get_subset_name( + family, + variant, + task_name, + asset_id, + project_name=None, + host_name=None, + default_template=None +): + if not family: + return "" + + if not host_name: + host_name = os.environ["AVALON_APP"] + + # Use only last part of class family value split by dot (`.`) + family = family.rsplit(".", 1)[-1] + + # Get settings + tools_settings = get_project_settings(project_name)["global"]["tools"] + profiles = tools_settings["creator"]["subset_name_profiles"] + filtering_criteria = { + "families": family, + "hosts": host_name, + "tasks": task_name + } + + matching_profile = filter_profiles(profiles, filtering_criteria) + template = None + if matching_profile: + template = matching_profile["template"] + + # Make sure template is set (matching may have empty string) + if not template: + template = default_template or DEFAULT_SUBSET_TEMPLATE + + # Simple check of task name existence for template with {task} in + # - missing task should be possible only in Standalone publisher + if not task_name and "{task" in template.lower(): + raise TaskNotSetError() + + fill_pairs = ( + ("variant", 
variant), + ("family", family), + ("task", task_name) + ) + fill_data = {} + for key, value in fill_pairs: + # Handle cases when value is `None` (standalone publisher) + if value is None: + continue + # Keep value as it is + fill_data[key] = value + # Both key and value are with upper case + fill_data[key.upper()] = value.upper() + + # Capitalize only first char of value + # - conditions are because of possible index errors + capitalized = "" + if value: + # Upper first character + capitalized += value[0].upper() + # Append rest of string if there is any + if len(value) > 1: + capitalized += value[1:] + fill_data[key.capitalize()] = capitalized + + return template.format(**fill_data) + def filter_pyblish_plugins(plugins): """Filter pyblish plugins by presets. diff --git a/pype/plugin.py b/pype/plugin.py index 855b3371d3d..111b65ade38 100644 --- a/pype/plugin.py +++ b/pype/plugin.py @@ -2,8 +2,8 @@ import os import pyblish.api import avalon.api -from pype.api import get_project_settings -from pype.lib import filter_profiles + +from pype.lib import get_subset_name ValidatePipelineOrder = pyblish.api.ValidatorOrder + 0.05 ValidateContentsOrder = pyblish.api.ValidatorOrder + 0.1 @@ -11,83 +11,19 @@ ValidateMeshOrder = pyblish.api.ValidatorOrder + 0.3 -class TaskNotSetError(KeyError): - def __init__(self, msg=None): - if not msg: - msg = "Creator's subset name template requires task name." - super(TaskNotSetError, self).__init__(msg) - - class PypeCreatorMixin: """Helper to override avalon's default class methods. Mixin class must be used as first in inheritance order to override methods. 
""" - default_tempate = "{family}{Variant}" @classmethod def get_subset_name( cls, variant, task_name, asset_id, project_name, host_name=None ): - if not cls.family: - return "" - - if not host_name: - host_name = os.environ["AVALON_APP"] - - # Use only last part of class family value split by dot (`.`) - family = cls.family.rsplit(".", 1)[-1] - - # Get settings - tools_settings = get_project_settings(project_name)["global"]["tools"] - profiles = tools_settings["creator"]["subset_name_profiles"] - filtering_criteria = { - "families": family, - "hosts": host_name, - "tasks": task_name - } - - matching_profile = filter_profiles(profiles, filtering_criteria) - template = None - if matching_profile: - template = matching_profile["template"] - - # Make sure template is set (matching may have empty string) - if not template: - template = cls.default_tempate - - # Simple check of task name existence for template with {task} in - # - missing task should be possible only in Standalone publisher - if not task_name and "{task" in template.lower(): - raise TaskNotSetError() - - fill_pairs = ( - ("variant", variant), - ("family", family), - ("task", task_name) + return get_subset_name( + cls.family, variant, task_name, asset_id, project_name, host_name ) - fill_data = {} - for key, value in fill_pairs: - # Handle cases when value is `None` (standalone publisher) - if value is None: - continue - # Keep value as it is - fill_data[key] = value - # Both key and value are with upper case - fill_data[key.upper()] = value.upper() - - # Capitalize only first char of value - # - conditions are because of possible index errors - capitalized = "" - if value: - # Upper first character - capitalized += value[0].upper() - # Append rest of string if there is any - if len(value) > 1: - capitalized += value[1:] - fill_data[key.capitalize()] = capitalized - - return template.format(**fill_data) class Creator(PypeCreatorMixin, avalon.api.Creator): From f34d25f49b5a4a932688890f15813d32c8efbd8d Mon 
Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 30 Mar 2021 10:25:06 +0200 Subject: [PATCH 248/295] implemented validator of task name existence on asset document --- .../publish/validate_task_existence.py | 57 +++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 pype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py diff --git a/pype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py b/pype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py new file mode 100644 index 00000000000..8bd4fb997af --- /dev/null +++ b/pype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py @@ -0,0 +1,57 @@ +import collections +import pyblish.api +from avalon import io + + +class ValidateTaskExistence(pyblish.api.ContextPlugin): + """Validating tasks on instances are filled and existing.""" + + label = "Validate Task Existence" + order = pyblish.api.ValidatorOrder + + hosts = ["standalonepublisher"] + families = ["render_mov_batch"] + + def process(self, context): + asset_names = set() + for instance in context: + asset_names.add(instance.data["asset"]) + + asset_docs = io.find( + { + "type": "asset", + "name": {"$in": list(asset_names)} + }, + { + "name": 1, + "data.tasks": 1 + } + ) + tasks_by_asset_names = {} + for asset_doc in asset_docs: + asset_name = asset_doc["name"] + asset_tasks = asset_doc.get("data", {}).get("tasks") or {} + tasks_by_asset_names[asset_name] = list(asset_tasks.keys()) + + missing_tasks = [] + for instance in context: + asset_name = instance.data["asset"] + task_name = instance.data["task"] + task_names = tasks_by_asset_names.get(asset_name) or [] + if task_name and task_name in task_names: + continue + missing_tasks.append((asset_name, task_name)) + + # Everything is OK + if not missing_tasks: + return + + # Raise an exception + msg = "Couldn't find task name/s required for publishing.\n{}" + pair_msgs = [] + for missing_pair in missing_tasks: + pair_msgs.append( + 
"Asset: \"{}\" Task: \"{}\"".format(*missing_pair) + ) + + raise AssertionError(msg.format("\n".join(pair_msgs))) From 62db507459438e6fd42f52246d585dd8099a37ff Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 30 Mar 2021 10:28:27 +0200 Subject: [PATCH 249/295] removed unused import --- .../plugins/publish/validate_task_existence.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py b/pype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py index 8bd4fb997af..e3b2ae16467 100644 --- a/pype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py +++ b/pype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py @@ -1,4 +1,3 @@ -import collections import pyblish.api from avalon import io From bc09f31ddb96eec8fc754ff2aa57ae329a8d9e64 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 30 Mar 2021 10:48:31 +0200 Subject: [PATCH 250/295] check for full match instead of simple match --- pype/lib/profiles_filtering.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/lib/profiles_filtering.py b/pype/lib/profiles_filtering.py index 32c17cbd125..455bb4cdd57 100644 --- a/pype/lib/profiles_filtering.py +++ b/pype/lib/profiles_filtering.py @@ -87,7 +87,7 @@ def validate_value_by_regexes(value, in_list): regexes = compile_list_of_regexes(in_list) for regex in regexes: - if re.match(regex, value): + if re.fullmatch(regex, value): return 1 return -1 From 687e7c619e74bfe105b2defa7d6caf9a6073acdd Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 30 Mar 2021 11:05:42 +0200 Subject: [PATCH 251/295] fix standalone publisher --- pype/tools/standalonepublish/widgets/widget_family.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/tools/standalonepublish/widgets/widget_family.py b/pype/tools/standalonepublish/widgets/widget_family.py index 31506466240..3b590d3f97d 100644 --- 
a/pype/tools/standalonepublish/widgets/widget_family.py +++ b/pype/tools/standalonepublish/widgets/widget_family.py @@ -9,7 +9,7 @@ get_project_settings, Creator ) -from pype.plugin import TaskNotSetError +from pype.lib import TaskNotSetError from avalon.tools.creator.app import SubsetAllowedSymbols From def94529b4b68f2bd1e2480d79907af5b3d93c31 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 30 Mar 2021 14:04:24 +0200 Subject: [PATCH 252/295] aded python 2 compatibility for fullmatch --- pype/lib/profiles_filtering.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/pype/lib/profiles_filtering.py b/pype/lib/profiles_filtering.py index 455bb4cdd57..c4410204dd2 100644 --- a/pype/lib/profiles_filtering.py +++ b/pype/lib/profiles_filtering.py @@ -59,6 +59,14 @@ def _profile_exclusion(matching_profiles, logger): return matching_profiles[0][0] +def fullmatch(regex, string, flags=0): + """Emulate python-3.4 re.fullmatch().""" + matched = re.match(regex, string, flags=flags) + if matched and matched.span()[1] == len(string): + return matched + return None + + def validate_value_by_regexes(value, in_list): """Validates in any regex from list match entered value. 
@@ -87,7 +95,11 @@ def validate_value_by_regexes(value, in_list): regexes = compile_list_of_regexes(in_list) for regex in regexes: - if re.fullmatch(regex, value): + if hasattr(regex, "fullmatch"): + result = regex.fullmatch(value) + else: + result = fullmatch(regex, value) + if result: return 1 return -1 From d81625cf711befbfe1f4d56d00be3afcddcbcb84 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 30 Mar 2021 14:50:24 +0200 Subject: [PATCH 253/295] reimplemented pype's workfiles tool in nuke --- pype/hosts/nuke/api/__init__.py | 2 +- pype/hosts/nuke/api/menu.py | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/pype/hosts/nuke/api/__init__.py b/pype/hosts/nuke/api/__init__.py index 26a8248f01d..6bf1ce888ad 100644 --- a/pype/hosts/nuke/api/__init__.py +++ b/pype/hosts/nuke/api/__init__.py @@ -3,7 +3,7 @@ import nuke from avalon import api as avalon -from avalon.tools import workfiles +from pype.tools import workfiles from pyblish import api as pyblish from pype.api import Logger import pype.hosts.nuke diff --git a/pype/hosts/nuke/api/menu.py b/pype/hosts/nuke/api/menu.py index 3f97cc228a5..b5c4636d2dc 100644 --- a/pype/hosts/nuke/api/menu.py +++ b/pype/hosts/nuke/api/menu.py @@ -1,15 +1,37 @@ +import os import nuke from avalon.api import Session from .lib import WorkfileSettings from pype.api import Logger, BuildWorkfile, get_current_project_settings +from pype.tools import workfiles log = Logger().get_logger(__name__) +def _show_workfiles(*args, **kwargs): + workfiles.show(os.environ["AVALON_WORKDIR"]) + + def install(): menubar = nuke.menu("Nuke") menu = menubar.findItem(Session["AVALON_LABEL"]) + + # replace reset resolution from avalon core to pype's + name = "Work Files..." 
+ rm_item = [ + (i, item) for i, item in enumerate(menu.items()) if name in item.name() + ][0] + + log.debug("Changing Item: {}".format(rm_item)) + + menu.removeItem(rm_item[1].name()) + menu.addCommand( + name, + _show_workfiles, + index=(rm_item[0]) + ) + # replace reset resolution from avalon core to pype's name = "Reset Resolution" new_name = "Set Resolution" From 6d198ab4fac293fdec8e880214ca1699e38200fe Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 30 Mar 2021 15:00:44 +0200 Subject: [PATCH 254/295] removed unnecessary function --- pype/hosts/nuke/api/menu.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/pype/hosts/nuke/api/menu.py b/pype/hosts/nuke/api/menu.py index b5c4636d2dc..7c3646c587e 100644 --- a/pype/hosts/nuke/api/menu.py +++ b/pype/hosts/nuke/api/menu.py @@ -9,10 +9,6 @@ log = Logger().get_logger(__name__) -def _show_workfiles(*args, **kwargs): - workfiles.show(os.environ["AVALON_WORKDIR"]) - - def install(): menubar = nuke.menu("Nuke") menu = menubar.findItem(Session["AVALON_LABEL"]) @@ -28,7 +24,7 @@ def install(): menu.removeItem(rm_item[1].name()) menu.addCommand( name, - _show_workfiles, + workfiles.show, index=(rm_item[0]) ) From c8e2e7744cd3749ff89e943099dabfdc935893cd Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 30 Mar 2021 15:48:16 +0200 Subject: [PATCH 255/295] remove unused import --- pype/hosts/nuke/api/menu.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pype/hosts/nuke/api/menu.py b/pype/hosts/nuke/api/menu.py index 7c3646c587e..d6380348093 100644 --- a/pype/hosts/nuke/api/menu.py +++ b/pype/hosts/nuke/api/menu.py @@ -1,4 +1,3 @@ -import os import nuke from avalon.api import Session From f8a220d4243e53c9ce3a574813f026dc80f28fe2 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 30 Mar 2021 16:24:11 +0200 Subject: [PATCH 256/295] settings are stored as dictionaries instead of json string --- pype/settings/handlers.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff 
--git a/pype/settings/handlers.py b/pype/settings/handlers.py index 6e93f2f4050..c801e9f17a1 100644 --- a/pype/settings/handlers.py +++ b/pype/settings/handlers.py @@ -320,10 +320,15 @@ def update_data(self, data): self.creation_time = datetime.datetime.now() def update_from_document(self, document): - value = "{}" + data = {} if document: - value = document.get("value") or value - self.data = json.loads(value) + if "data" in document: + data = document["data"] + elif "value" in document: + value = document["value"] + if value: + data = json.loads(value) + self.data = data def to_json_string(self): return json.dumps(self.data or {}) @@ -415,7 +420,7 @@ def save_studio_settings(self, data): }, { "type": SYSTEM_SETTINGS_KEY, - "value": self.system_settings_cache.to_json_string() + "data": self.system_settings_cache.data }, upsert=True ) @@ -550,7 +555,7 @@ def _save_project_data(self, project_name, doc_type, data_cache): } replace_data = { "type": doc_type, - "value": data_cache.to_json_string(), + "data": data_cache.data, "is_default": is_default } if not is_default: @@ -730,7 +735,7 @@ def save_local_settings(self, data): { "type": LOCAL_SETTING_KEY, "site_id": self.local_site_id, - "value": self.local_settings_cache.to_json_string() + "data": self.local_settings_cache.data }, upsert=True ) From fd6466082b86fe3dc5eb43838b9f8fbd89326117 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Tue, 30 Mar 2021 16:38:36 +0200 Subject: [PATCH 257/295] OTIO: adding python-2 version to `pype.vendor` --- .../python_2/opentimelineio/__init__.py | 51 + .../opentimelineio/adapters/__init__.py | 213 ++ .../opentimelineio/adapters/adapter.py | 317 +++ .../builtin_adapters.plugin_manifest.json | 31 + .../opentimelineio/adapters/cmx_3600.py | 1306 +++++++++++ .../opentimelineio/adapters/fcp_xml.py | 1941 +++++++++++++++++ .../opentimelineio/adapters/otio_json.py | 48 + .../opentimelineio/algorithms/__init__.py | 44 + .../opentimelineio/algorithms/filter.py | 275 +++ 
.../opentimelineio/algorithms/stack_algo.py | 138 ++ .../algorithms/timeline_algo.py | 56 + .../opentimelineio/algorithms/track_algo.py | 236 ++ .../opentimelineio/console/__init__.py | 40 + .../console/autogen_serialized_datamodel.py | 302 +++ .../opentimelineio/console/console_utils.py | 72 + .../opentimelineio/console/otiocat.py | 138 ++ .../opentimelineio/console/otioconvert.py | 259 +++ .../opentimelineio/console/otiostat.py | 193 ++ .../python_2/opentimelineio/core/__init__.py | 67 + .../opentimelineio/core/composable.py | 141 ++ .../opentimelineio/core/composition.py | 718 ++++++ .../python_2/opentimelineio/core/item.py | 243 +++ .../opentimelineio/core/json_serializer.py | 218 ++ .../opentimelineio/core/media_reference.py | 102 + .../core/serializable_object.py | 219 ++ .../opentimelineio/core/type_registry.py | 152 ++ .../opentimelineio/core/unknown_schema.py | 50 + .../python_2/opentimelineio/exceptions.py | 89 + .../python/python_2/opentimelineio/hooks.py | 174 ++ .../python_2/opentimelineio/media_linker.py | 169 ++ .../python_2/opentimelineio/opentime.py | 856 ++++++++ .../opentimelineio/plugins/__init__.py | 33 + .../opentimelineio/plugins/manifest.py | 282 +++ .../opentimelineio/plugins/python_plugin.py | 128 ++ .../opentimelineio/schema/__init__.py | 75 + .../python_2/opentimelineio/schema/clip.py | 130 ++ .../python_2/opentimelineio/schema/effect.py | 130 ++ .../schema/external_reference.py | 69 + .../python_2/opentimelineio/schema/gap.py | 82 + .../schema/generator_reference.py | 76 + .../python_2/opentimelineio/schema/marker.py | 128 ++ .../schema/missing_reference.py | 43 + .../opentimelineio/schema/schemadef.py | 65 + .../schema/serializable_collection.py | 149 ++ .../python_2/opentimelineio/schema/stack.py | 120 + .../opentimelineio/schema/timeline.py | 133 ++ .../python_2/opentimelineio/schema/track.py | 242 ++ .../opentimelineio/schema/transition.py | 159 ++ .../opentimelineio/schemadef/__init__.py | 5 + 
.../python_2/opentimelineio/test_utils.py | 54 + .../opentimelineio_contrib/__init__.py | 37 + .../adapters/__init__.py | 0 .../adapters/aaf_adapter/__init__.py | 0 .../adapters/aaf_adapter/aaf_writer.py | 764 +++++++ .../adapters/advanced_authoring_format.py | 979 +++++++++ .../opentimelineio_contrib/adapters/ale.py | 318 +++ .../adapters/burnins.py | 93 + .../contrib_adapters.plugin_manifest.json | 61 + .../adapters/extern_maya_sequencer.py | 261 +++ .../adapters/extern_rv.py | 327 +++ .../adapters/fcpx_xml.py | 1182 ++++++++++ .../adapters/ffmpeg_burnins.py | 424 ++++ .../adapters/hls_playlist.py | 1781 +++++++++++++++ .../adapters/maya_sequencer.py | 132 ++ .../opentimelineio_contrib/adapters/rv.py | 84 + .../opentimelineio_contrib/adapters/xges.py | 819 +++++++ 66 files changed, 18223 insertions(+) create mode 100644 pype/vendor/python/python_2/opentimelineio/__init__.py create mode 100644 pype/vendor/python/python_2/opentimelineio/adapters/__init__.py create mode 100644 pype/vendor/python/python_2/opentimelineio/adapters/adapter.py create mode 100644 pype/vendor/python/python_2/opentimelineio/adapters/builtin_adapters.plugin_manifest.json create mode 100644 pype/vendor/python/python_2/opentimelineio/adapters/cmx_3600.py create mode 100644 pype/vendor/python/python_2/opentimelineio/adapters/fcp_xml.py create mode 100644 pype/vendor/python/python_2/opentimelineio/adapters/otio_json.py create mode 100644 pype/vendor/python/python_2/opentimelineio/algorithms/__init__.py create mode 100644 pype/vendor/python/python_2/opentimelineio/algorithms/filter.py create mode 100644 pype/vendor/python/python_2/opentimelineio/algorithms/stack_algo.py create mode 100644 pype/vendor/python/python_2/opentimelineio/algorithms/timeline_algo.py create mode 100644 pype/vendor/python/python_2/opentimelineio/algorithms/track_algo.py create mode 100644 pype/vendor/python/python_2/opentimelineio/console/__init__.py create mode 100644 
pype/vendor/python/python_2/opentimelineio/console/autogen_serialized_datamodel.py create mode 100644 pype/vendor/python/python_2/opentimelineio/console/console_utils.py create mode 100644 pype/vendor/python/python_2/opentimelineio/console/otiocat.py create mode 100644 pype/vendor/python/python_2/opentimelineio/console/otioconvert.py create mode 100644 pype/vendor/python/python_2/opentimelineio/console/otiostat.py create mode 100644 pype/vendor/python/python_2/opentimelineio/core/__init__.py create mode 100644 pype/vendor/python/python_2/opentimelineio/core/composable.py create mode 100644 pype/vendor/python/python_2/opentimelineio/core/composition.py create mode 100644 pype/vendor/python/python_2/opentimelineio/core/item.py create mode 100644 pype/vendor/python/python_2/opentimelineio/core/json_serializer.py create mode 100644 pype/vendor/python/python_2/opentimelineio/core/media_reference.py create mode 100644 pype/vendor/python/python_2/opentimelineio/core/serializable_object.py create mode 100644 pype/vendor/python/python_2/opentimelineio/core/type_registry.py create mode 100644 pype/vendor/python/python_2/opentimelineio/core/unknown_schema.py create mode 100644 pype/vendor/python/python_2/opentimelineio/exceptions.py create mode 100644 pype/vendor/python/python_2/opentimelineio/hooks.py create mode 100644 pype/vendor/python/python_2/opentimelineio/media_linker.py create mode 100644 pype/vendor/python/python_2/opentimelineio/opentime.py create mode 100644 pype/vendor/python/python_2/opentimelineio/plugins/__init__.py create mode 100644 pype/vendor/python/python_2/opentimelineio/plugins/manifest.py create mode 100644 pype/vendor/python/python_2/opentimelineio/plugins/python_plugin.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/__init__.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/clip.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/effect.py create mode 100644 
pype/vendor/python/python_2/opentimelineio/schema/external_reference.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/gap.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/generator_reference.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/marker.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/missing_reference.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/schemadef.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/serializable_collection.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/stack.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/timeline.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/track.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/transition.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schemadef/__init__.py create mode 100644 pype/vendor/python/python_2/opentimelineio/test_utils.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/__init__.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/__init__.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/aaf_adapter/__init__.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/aaf_adapter/aaf_writer.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/advanced_authoring_format.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/ale.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/burnins.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/contrib_adapters.plugin_manifest.json create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/extern_maya_sequencer.py create mode 100644 
pype/vendor/python/python_2/opentimelineio_contrib/adapters/extern_rv.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/fcpx_xml.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/ffmpeg_burnins.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/hls_playlist.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/maya_sequencer.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/rv.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/xges.py diff --git a/pype/vendor/python/python_2/opentimelineio/__init__.py b/pype/vendor/python/python_2/opentimelineio/__init__.py new file mode 100644 index 00000000000..a8b0a636ad4 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/__init__.py @@ -0,0 +1,51 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. 
+# + +"""An editorial interchange format and library. + +see: http://opentimeline.io + +.. moduleauthor:: Pixar Animation Studios +""" + +# flake8: noqa + +# in dependency hierarchy +from . import ( + opentime, + exceptions, + core, + schema, + schemadef, + plugins, + media_linker, + adapters, + hooks, + algorithms, +) + +__version__ = "0.11.0" +__author__ = "Pixar Animation Studios" +__author_email__ = "opentimelineio@pixar.com" +__license__ = "Modified Apache 2.0 License" diff --git a/pype/vendor/python/python_2/opentimelineio/adapters/__init__.py b/pype/vendor/python/python_2/opentimelineio/adapters/__init__.py new file mode 100644 index 00000000000..afbe3f8e8a5 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/adapters/__init__.py @@ -0,0 +1,213 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Expose the adapter interface to developers. 
+ +To read from an existing representation, use the read_from_string and +read_from_file functions. To query the list of adapters, use the +available_adapter_names function. + +The otio_json adapter is provided as a the canonical, lossless, serialization +of the in-memory otio schema. Other adapters are to varying degrees lossy. +For more information, consult the documentation in the individual adapter +modules. +""" + +import os +import itertools + +from .. import ( + exceptions, + plugins, + media_linker +) + +from .adapter import Adapter # noqa + +# OTIO Json adapter is always available +from . import otio_json # noqa + + +def suffixes_with_defined_adapters(read=False, write=False): + """Return a set of all the suffixes that have adapters defined for them.""" + + if not read and not write: + read = True + write = True + + positive_adapters = [] + for adp in plugins.ActiveManifest().adapters: + if read and adp.has_feature("read"): + positive_adapters.append(adp) + continue + + if write and adp.has_feature("write"): + positive_adapters.append(adp) + + return set( + itertools.chain.from_iterable( + adp.suffixes for adp in positive_adapters + ) + ) + + +def available_adapter_names(): + """Return a string list of the available adapters.""" + + return [str(adp.name) for adp in plugins.ActiveManifest().adapters] + + +def _from_filepath_or_name(filepath, adapter_name): + if adapter_name is not None: + return plugins.ActiveManifest().from_name(adapter_name) + else: + return from_filepath(filepath) + + +def from_filepath(filepath): + """Guess the adapter object to use for a given filepath. + + example: + "foo.otio" returns the "otio_json" adapter. 
+ """ + + outext = os.path.splitext(filepath)[1][1:] + + try: + return plugins.ActiveManifest().from_filepath(outext) + except exceptions.NoKnownAdapterForExtensionError: + raise exceptions.NoKnownAdapterForExtensionError( + "No adapter for suffix '{}' on file '{}'".format( + outext, + filepath + ) + ) + + +def from_name(name): + """Fetch the adapter object by the name of the adapter directly.""" + + try: + return plugins.ActiveManifest().from_name(name) + except exceptions.NotSupportedError: + raise exceptions.NotSupportedError( + "adapter not supported: {}, available: {}".format( + name, + available_adapter_names() + ) + ) + + +def read_from_file( + filepath, + adapter_name=None, + media_linker_name=media_linker.MediaLinkingPolicy.ForceDefaultLinker, + media_linker_argument_map=None, + **adapter_argument_map +): + """Read filepath using adapter_name. + + If adapter_name is None, try and infer the adapter name from the filepath. + + For example: + timeline = read_from_file("example_trailer.otio") + timeline = read_from_file("file_with_no_extension", "cmx_3600") + """ + + adapter = _from_filepath_or_name(filepath, adapter_name) + + return adapter.read_from_file( + filepath=filepath, + media_linker_name=media_linker_name, + media_linker_argument_map=media_linker_argument_map, + **adapter_argument_map + ) + + +def read_from_string( + input_str, + adapter_name='otio_json', + media_linker_name=media_linker.MediaLinkingPolicy.ForceDefaultLinker, + media_linker_argument_map=None, + **adapter_argument_map +): + """Read a timeline from input_str using adapter_name. + + This is useful if you obtain a timeline from someplace other than the + filesystem. 
+ + Example: + raw_text = urlopen(my_url).read() + timeline = read_from_string(raw_text, "otio_json") + """ + + adapter = plugins.ActiveManifest().from_name(adapter_name) + return adapter.read_from_string( + input_str=input_str, + media_linker_name=media_linker_name, + media_linker_argument_map=media_linker_argument_map, + **adapter_argument_map + ) + + +def write_to_file( + input_otio, + filepath, + adapter_name=None, + **adapter_argument_map +): + """Write input_otio to filepath using adapter_name. + + If adapter_name is None, infer the adapter_name to use based on the + filepath. + + Example: + otio.adapters.write_to_file(my_timeline, "output.otio") + """ + + adapter = _from_filepath_or_name(filepath, adapter_name) + + return adapter.write_to_file( + input_otio=input_otio, + filepath=filepath, + **adapter_argument_map + ) + + +def write_to_string( + input_otio, + adapter_name='otio_json', + **adapter_argument_map +): + """Return input_otio written to a string using adapter_name. + + Example: + raw_text = otio.adapters.write_to_string(my_timeline, "otio_json") + """ + + adapter = plugins.ActiveManifest().from_name(adapter_name) + return adapter.write_to_string( + input_otio=input_otio, + **adapter_argument_map + ) diff --git a/pype/vendor/python/python_2/opentimelineio/adapters/adapter.py b/pype/vendor/python/python_2/opentimelineio/adapters/adapter.py new file mode 100644 index 00000000000..82ac4050655 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/adapters/adapter.py @@ -0,0 +1,317 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. 
This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Implementation of the OTIO internal `Adapter` system. + +For information on writing adapters, please consult: + https://opentimelineio.readthedocs.io/en/latest/tutorials/write-an-adapter.html# # noqa +""" + +from .. import ( + core, + plugins, + media_linker, + hooks, +) + + +@core.register_type +class Adapter(plugins.PythonPlugin): + """Adapters convert between OTIO and other formats. + + Note that this class is not subclassed by adapters. Rather, an adapter is + a python module that implements at least one of the following functions: + + write_to_string(input_otio) + write_to_file(input_otio, filepath) (optionally inferred) + read_from_string(input_str) + read_from_file(filepath) (optionally inferred) + + ...as well as a small json file that advertises the features of the adapter + to OTIO. This class serves as the wrapper around these modules internal + to OTIO. You should not need to extend this class to create new adapters + for OTIO. 
+ + For more information: + https://opentimelineio.readthedocs.io/en/latest/tutorials/write-an-adapter.html# # noqa + """ + _serializable_label = "Adapter.1" + + def __init__( + self, + name=None, + execution_scope=None, + filepath=None, + suffixes=None + ): + plugins.PythonPlugin.__init__( + self, + name, + execution_scope, + filepath + ) + + self.suffixes = suffixes or [] + + suffixes = core.serializable_field( + "suffixes", + type([]), + doc="File suffixes associated with this adapter." + ) + + def has_feature(self, feature_string): + """ + return true if adapter supports feature_string, which must be a key + of the _FEATURE_MAP dictionary. + + Will trigger a call to self.module(), which imports the plugin. + """ + + if feature_string.lower() not in _FEATURE_MAP: + return False + + search_strs = _FEATURE_MAP[feature_string] + + try: + return any(hasattr(self.module(), s) for s in search_strs) + except ImportError: + # @TODO: should issue a warning that the plugin was not importable? + return False + + def read_from_file( + self, + filepath, + media_linker_name=media_linker.MediaLinkingPolicy.ForceDefaultLinker, + media_linker_argument_map=None, + hook_function_argument_map={}, + **adapter_argument_map + ): + """Execute the read_from_file function on this adapter. + + If read_from_string exists, but not read_from_file, execute that with + a trivial file object wrapper. 
+ """ + + if media_linker_argument_map is None: + media_linker_argument_map = {} + + result = None + + if ( + not self.has_feature("read_from_file") and + self.has_feature("read_from_string") + ): + with open(filepath, 'r') as fo: + contents = fo.read() + result = self._execute_function( + "read_from_string", + input_str=contents, + **adapter_argument_map + ) + else: + result = self._execute_function( + "read_from_file", + filepath=filepath, + **adapter_argument_map + ) + + hook_function_argument_map['adapter_arguments'] = adapter_argument_map + hook_function_argument_map['media_linker_argument_map'] = \ + media_linker_argument_map + result = hooks.run("post_adapter_read", result, + extra_args=hook_function_argument_map) + + if media_linker_name and ( + media_linker_name != media_linker.MediaLinkingPolicy.DoNotLinkMedia + ): + _with_linked_media_references( + result, + media_linker_name, + media_linker_argument_map + ) + + result = hooks.run("post_media_linker", result, + extra_args=media_linker_argument_map) + + return result + + def write_to_file( + self, + input_otio, + filepath, + hook_function_argument_map={}, + **adapter_argument_map + ): + """Execute the write_to_file function on this adapter. + + If write_to_string exists, but not write_to_file, execute that with + a trivial file object wrapper. 
+ """ + hook_function_argument_map['adapter_arguments'] = adapter_argument_map + input_otio = hooks.run("pre_adapter_write", input_otio, + extra_args=hook_function_argument_map) + + if ( + not self.has_feature("write_to_file") and + self.has_feature("write_to_string") + ): + result = self.write_to_string(input_otio, **adapter_argument_map) + with open(filepath, 'w') as fo: + fo.write(result) + return filepath + + return self._execute_function( + "write_to_file", + input_otio=input_otio, + filepath=filepath, + **adapter_argument_map + ) + + def read_from_string( + self, + input_str, + media_linker_name=media_linker.MediaLinkingPolicy.ForceDefaultLinker, + media_linker_argument_map=None, + hook_function_argument_map={}, + **adapter_argument_map + ): + """Call the read_from_string function on this adapter.""" + + result = self._execute_function( + "read_from_string", + input_str=input_str, + **adapter_argument_map + ) + hook_function_argument_map['adapter_arguments'] = adapter_argument_map + hook_function_argument_map['media_linker_argument_map'] = \ + media_linker_argument_map + + result = hooks.run("post_adapter_read", result, + extra_args=hook_function_argument_map) + + if media_linker_name and ( + media_linker_name != media_linker.MediaLinkingPolicy.DoNotLinkMedia + ): + _with_linked_media_references( + result, + media_linker_name, + media_linker_argument_map + ) + + # @TODO: Should this run *ONLY* if the media linker ran? 
+ result = hooks.run("post_media_linker", result, + extra_args=hook_function_argument_map) + + return result + + def write_to_string( + self, + input_otio, + hook_function_argument_map={}, + **adapter_argument_map + ): + """Call the write_to_string function on this adapter.""" + + hook_function_argument_map['adapter_arguments'] = adapter_argument_map + input_otio = hooks.run("pre_adapter_write", input_otio, + extra_args=hook_function_argument_map) + + return self._execute_function( + "write_to_string", + input_otio=input_otio, + **adapter_argument_map + ) + + def __str__(self): + return ( + "Adapter(" + "{}, " + "{}, " + "{}, " + "{}" + ")".format( + repr(self.name), + repr(self.execution_scope), + repr(self.filepath), + repr(self.suffixes), + ) + ) + + def __repr__(self): + return ( + "otio.adapter.Adapter(" + "name={}, " + "execution_scope={}, " + "filepath={}, " + "suffixes={}" + ")".format( + repr(self.name), + repr(self.execution_scope), + repr(self.filepath), + repr(self.suffixes), + ) + ) + + +def _with_linked_media_references( + read_otio, + media_linker_name, + media_linker_argument_map +): + """Link media references in the read_otio if possible. + + Makes changes in place and returns the read_otio structure back. + """ + + if not read_otio or not media_linker.from_name(media_linker_name): + return read_otio + + # not every object the adapter reads has an "each_clip" method, so this + # skips objects without one. + clpfn = getattr(read_otio, "each_clip", None) + if clpfn is None: + return read_otio + + for cl in read_otio.each_clip(): + new_mr = media_linker.linked_media_reference( + cl, + media_linker_name, + # @TODO: should any context get wired in at this point? 
+ media_linker_argument_map + ) + if new_mr is not None: + cl.media_reference = new_mr + + return read_otio + + +# map of attr to look for vs feature name in the adapter plugin +_FEATURE_MAP = { + 'read_from_file': ['read_from_file'], + 'read_from_string': ['read_from_string'], + 'read': ['read_from_file', 'read_from_string'], + 'write_to_file': ['write_to_file'], + 'write_to_string': ['write_to_string'], + 'write': ['write_to_file', 'write_to_string'] +} diff --git a/pype/vendor/python/python_2/opentimelineio/adapters/builtin_adapters.plugin_manifest.json b/pype/vendor/python/python_2/opentimelineio/adapters/builtin_adapters.plugin_manifest.json new file mode 100644 index 00000000000..5e394a67d82 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/adapters/builtin_adapters.plugin_manifest.json @@ -0,0 +1,31 @@ +{ + "OTIO_SCHEMA" : "PluginManifest.1", + "adapters": [ + { + "OTIO_SCHEMA": "Adapter.1", + "name": "fcp_xml", + "execution_scope": "in process", + "filepath": "fcp_xml.py", + "suffixes": ["xml"] + }, + { + "OTIO_SCHEMA" : "Adapter.1", + "name" : "otio_json", + "execution_scope" : "in process", + "filepath" : "otio_json.py", + "suffixes" : ["otio"] + }, + { + "OTIO_SCHEMA" : "Adapter.1", + "name" : "cmx_3600", + "execution_scope" : "in process", + "filepath" : "cmx_3600.py", + "suffixes" : ["edl"] + } + ], + "hooks": { + "post_adapter_read" : [], + "post_media_linker" : [], + "pre_adapter_write" : [] + } +} diff --git a/pype/vendor/python/python_2/opentimelineio/adapters/cmx_3600.py b/pype/vendor/python/python_2/opentimelineio/adapters/cmx_3600.py new file mode 100644 index 00000000000..f3275e3929e --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/adapters/cmx_3600.py @@ -0,0 +1,1306 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following 
modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""OpenTimelineIO CMX 3600 EDL Adapter""" + +# Note: this adapter is not an ideal model for new adapters, but it works. +# If you want to write your own adapter, please see: +# https://opentimelineio.readthedocs.io/en/latest/tutorials/write-an-adapter.html# + +# TODO: Flesh out Attribute Handler +# TODO: Add line numbers to errors and warnings +# TODO: currently tracks with linked audio/video will lose their linkage when +# read into OTIO. + +import os +import re +import math +import collections + +from .. 
import ( + exceptions, + schema, + opentime, +) + + +class EDLParseError(exceptions.OTIOError): + pass + + +# regex for parsing the playback speed of an M2 event +SPEED_EFFECT_RE = re.compile( + r"(?P.*?)\s*(?P[0-9\.]*)\s*(?P[0-9:]{11})$" +) + + +# these are all CMX_3600 transition codes +# the wipe is written in regex format because it is W### where the ### is +# a 'wipe code' +# @TODO: not currently read by the transition code +transition_regex_map = { + 'C': 'cut', + 'D': 'dissolve', + r'W\d{3}': 'wipe', + 'KB': 'key_background', + 'K': 'key_foreground', + 'KO': 'key_overlay' +} + +# CMX_3600 supports some shorthand for channel assignments +# We name the actual tracks V and A1,A2,A3,etc. +# This channel_map tells you which track to use for each channel shorthand. +# Channels not listed here are used as track names verbatim. +channel_map = { + 'A': ['A1'], + 'AA': ['A1', 'A2'], + 'B': ['V', 'A1'], + 'A2/V': ['V', 'A2'], + 'AA/V': ['V', 'A1', 'A2'] +} + + +# Currently, the 'style' argument determines +# the comment string for the media reference: +# 'avid': '* FROM CLIP:' (default) +# 'nucoda': '* FROM FILE:' +# When adding a new style, please be sure to add sufficient tests +# to verify both the new and existing styles. +VALID_EDL_STYLES = ['avid', 'nucoda'] + + +class EDLParser(object): + def __init__(self, edl_string, rate=24, ignore_timecode_mismatch=False): + self.timeline = schema.Timeline() + + # Start with no tracks. They will be added as we encounter them. + # This dict maps a track name (e.g "A2" or "V") to an OTIO Track. + self.tracks_by_name = {} + + self.ignore_timecode_mismatch = ignore_timecode_mismatch + + self.parse_edl(edl_string, rate=rate) + + # TODO: Sort the tracks V, then A1,A2,etc. 
+ + def add_clip(self, line, comments, rate=24): + comment_handler = CommentHandler(comments) + clip_handler = ClipHandler(line, comment_handler.handled, rate=rate) + clip = clip_handler.clip + if comment_handler.unhandled: + clip.metadata.setdefault("cmx_3600", {}) + clip.metadata['cmx_3600'].setdefault("comments", []) + clip.metadata['cmx_3600']['comments'] += ( + comment_handler.unhandled + ) + + # Add reel name to metadata + # A reel name of `AX` represents an unknown or auxilary source + # We don't currently track these sources outside of this adapter + # So lets skip adding AX reels as metadata for now, + # as that would dirty json outputs with non-relevant information + if clip_handler.reel and clip_handler.reel != 'AX': + clip.metadata.setdefault("cmx_3600", {}) + clip.metadata['cmx_3600']['reel'] = clip_handler.reel + + # each edit point between two clips is a transition. the default is a + # cut in the edl format the transition codes are for the transition + # into the clip + self.add_transition( + clip_handler, + clip_handler.transition_type, + clip_handler.transition_data + ) + + tracks = self.tracks_for_channel(clip_handler.channel_code) + for track in tracks: + + edl_rate = clip_handler.edl_rate + record_in = opentime.from_timecode( + clip_handler.record_tc_in, + edl_rate + ) + record_out = opentime.from_timecode( + clip_handler.record_tc_out, + edl_rate + ) + + src_duration = clip.duration() + rec_duration = record_out - record_in + if rec_duration != src_duration: + motion = comment_handler.handled.get('motion_effect') + freeze = comment_handler.handled.get('freeze_frame') + if motion is not None or freeze is not None: + # Adjust the clip to match the record duration + clip.source_range = opentime.TimeRange( + start_time=clip.source_range.start_time, + duration=rec_duration + ) + + if freeze is not None: + clip.effects.append(schema.FreezeFrame()) + # XXX remove 'FF' suffix (writing edl will add it back) + if clip.name.endswith(' FF'): + clip.name = 
clip.name[:-3] + elif motion is not None: + fps = float( + SPEED_EFFECT_RE.match(motion).group("speed") + ) + time_scalar = fps / rate + clip.effects.append( + schema.LinearTimeWarp(time_scalar=time_scalar) + ) + + elif self.ignore_timecode_mismatch: + # Pretend there was no problem by adjusting the record_out. + # Note that we don't actually use record_out after this + # point in the code, since all of the subsequent math uses + # the clip's source_range. Adjusting the record_out is + # just to document what the implications of ignoring the + # mismatch here entails. + record_out = record_in + src_duration + + else: + raise EDLParseError( + "Source and record duration don't match: {} != {}" + " for clip {}".format( + src_duration, + rec_duration, + clip.name + )) + + if track.source_range is None: + zero = opentime.RationalTime(0, edl_rate) + track.source_range = opentime.TimeRange( + start_time=zero - record_in, + duration=zero + ) + + track_end = track.duration() - track.source_range.start_time + if record_in < track_end: + if self.ignore_timecode_mismatch: + # shift it over + record_in = track_end + record_out = record_in + rec_duration + else: + raise EDLParseError( + "Overlapping record in value: {} for clip {}".format( + clip_handler.record_tc_in, + clip.name + )) + + # If the next clip is supposed to start beyond the end of the + # clips we've accumulated so far, then we need to add a Gap + # to fill that space. This can happen when an EDL has record + # timecodes that are sparse (e.g. from a single track of a + # multi-track composition). 
+ if record_in > track_end and len(track) > 0: + gap = schema.Gap() + gap.source_range = opentime.TimeRange( + start_time=opentime.RationalTime(0, edl_rate), + duration=record_in - track_end + ) + track.append(gap) + track.source_range = opentime.TimeRange( + start_time=track.source_range.start_time, + duration=track.source_range.duration + gap.duration() + ) + + track.append(clip) + track.source_range = opentime.TimeRange( + start_time=track.source_range.start_time, + duration=track.source_range.duration + clip.duration() + ) + + def guess_kind_for_track_name(self, name): + if name.startswith("V"): + return schema.TrackKind.Video + if name.startswith("A"): + return schema.TrackKind.Audio + return schema.TrackKind.Video + + def tracks_for_channel(self, channel_code): + # Expand channel shorthand into a list of track names. + if channel_code in channel_map: + track_names = channel_map[channel_code] + else: + track_names = [channel_code] + + # Create any channels we don't already have + for track_name in track_names: + if track_name not in self.tracks_by_name: + track = schema.Track( + name=track_name, + kind=self.guess_kind_for_track_name(track_name) + ) + self.tracks_by_name[track_name] = track + self.timeline.tracks.append(track) + + # Return a list of actual tracks + return [self.tracks_by_name[c] for c in track_names] + + def add_transition(self, clip_handler, transition, data): + if transition not in ['C']: + md = clip_handler.clip.metadata.setdefault("cmx_3600", {}) + md["transition"] = transition + + def parse_edl(self, edl_string, rate=24): + # edl 'events' can be comprised of an indeterminate amount of lines + # we are to translating 'events' to a single clip and transition + # then we add the transition and the clip to all channels the 'event' + # channel code is mapped to the transition given in the 'event' + # precedes the clip + + # remove all blank lines from the edl + edl_lines = [ + l for l in (l.strip() for l in edl_string.splitlines()) if l + ] + + 
while edl_lines: + # a basic for loop wont work cleanly since we need to look ahead at + # array elements to determine what type of 'event' we are looking + # at + line = edl_lines.pop(0) + + if line.startswith('TITLE:'): + # this is the first line of interest in an edl + # it is required to be in the header + self.timeline.name = line.replace('TITLE:', '').strip() + + elif line.startswith('FCM'): + # this can occur either in the header or before any 'event' + # in both cases we can ignore it since it is meant for tape + # timecode + pass + + elif line.startswith('SPLIT'): + # this is the only comment preceding an 'event' that we care + # about in our context it simply means the next two clips will + # have the same comment data it is for reading purposes only + audio_delay = None + video_delay = None + + if 'AUDIO DELAY' in line: + audio_delay = line.split()[-1].strip() + if 'VIDEO DELAY' in line: + video_delay = line.split()[-1].strip() + if audio_delay and video_delay: + raise EDLParseError( + 'both audio and video delay declared after SPLIT.' + ) + if not (audio_delay or video_delay): + raise EDLParseError( + 'either audio or video delay declared after SPLIT.' + ) + + line_1 = edl_lines.pop(0) + line_2 = edl_lines.pop(0) + + comments = [] + while edl_lines: + if re.match(r'^\D', edl_lines[0]): + comments.append(edl_lines.pop(0)) + else: + break + self.add_clip(line_1, comments, rate=rate) + self.add_clip(line_2, comments, rate=rate) + + elif line[0].isdigit(): + # all 'events' start_time with an edit decision. 
this is + # denoted by the line beginning with a padded integer 000-999 + comments = [] + while edl_lines: + # any non-numbered lines after an edit decision should be + # treated as 'comments' + # comments are string tags used by the reader to get extra + # information not able to be found in the restricted edl + # format + if re.match(r'^\D', edl_lines[0]): + comments.append(edl_lines.pop(0)) + else: + break + + self.add_clip(line, comments, rate=rate) + + else: + raise EDLParseError('Unknown event type') + + for track in self.timeline.tracks: + # if the source_range is the same as the available_range + # then we don't need to set it at all. + if track.source_range == track.available_range(): + track.source_range = None + + +class ClipHandler(object): + + def __init__(self, line, comment_data, rate=24): + self.clip_num = None + self.reel = None + self.channel_code = None + self.edl_rate = rate + self.transition_id = None + self.transition_data = None + self.source_tc_in = None + self.source_tc_out = None + self.record_tc_in = None + self.record_tc_out = None + + self.parse(line) + self.clip = self.make_clip(comment_data) + + def make_clip(self, comment_data): + clip = schema.Clip() + clip.name = str(self.clip_num) + + # BLACK/BL and BARS are called out as "Special Source Identifiers" in + # the documents referenced here: + # https://github.com/PixarAnimationStudios/OpenTimelineIO#cmx3600-edl + if self.reel in ['BL', 'BLACK']: + clip.media_reference = schema.GeneratorReference() + # TODO: Replace with enum, once one exists + clip.media_reference.generator_kind = 'black' + elif self.reel == 'BARS': + clip.media_reference = schema.GeneratorReference() + # TODO: Replace with enum, once one exists + clip.media_reference.generator_kind = 'SMPTEBars' + elif 'media_reference' in comment_data: + clip.media_reference = schema.ExternalReference() + clip.media_reference.target_url = comment_data[ + 'media_reference' + ] + else: + clip.media_reference = 
schema.MissingReference() + + # this could currently break without a 'FROM CLIP' comment. + # Without that there is no 'media_reference' Do we have a default + # clip name? + if 'clip_name' in comment_data: + clip.name = comment_data["clip_name"] + elif ( + clip.media_reference and + hasattr(clip.media_reference, 'target_url') and + clip.media_reference.target_url is not None + ): + clip.name = os.path.splitext( + os.path.basename(clip.media_reference.target_url) + )[0] + + asc_sop = comment_data.get('asc_sop', None) + asc_sat = comment_data.get('asc_sat', None) + if asc_sop or asc_sat: + slope = (1, 1, 1) + offset = (0, 0, 0) + power = (1, 1, 1) + sat = 1.0 + + if asc_sop: + triple = r'([-+]?[\d.]+) ([-+]?[\d.]+) ([-+]?[\d.]+)' + m = re.match( + r'\(' + + triple + + r'\)\s*\(' + + triple + r'\)\s*\(' + + triple + r'\)', + asc_sop + ) + if m: + floats = [float(g) for g in m.groups()] + slope = [floats[0], floats[1], floats[2]] + offset = [floats[3], floats[4], floats[5]] + power = [floats[6], floats[7], floats[8]] + else: + raise EDLParseError( + 'Invalid ASC_SOP found: {}'.format(asc_sop)) + + if asc_sat: + sat = float(asc_sat) + + clip.metadata['cdl'] = { + 'asc_sat': sat, + 'asc_sop': { + 'slope': slope, + 'offset': offset, + 'power': power + } + } + + if 'locator' in comment_data: + # An example EDL locator line looks like this: + # * LOC: 01:00:01:14 RED ANIM FIX NEEDED + # We get the part after "LOC: " as the comment_data entry + # Given the fixed-width nature of these, we could be more + # strict about the field widths, but there are many + # variations of EDL, so if we are lenient then maybe we + # can handle more of them? Only real-world testing will + # determine this for sure... 
+ m = re.match( + r'(\d\d:\d\d:\d\d:\d\d)\s+(\w*)\s+(.*)', + comment_data["locator"] + ) + if m: + marker = schema.Marker() + marker.marked_range = opentime.TimeRange( + start_time=opentime.from_timecode( + m.group(1), + self.edl_rate + ), + duration=opentime.RationalTime() + ) + + # always write the source value into metadata, in case it + # is not a valid enum somehow. + color_parsed_from_file = m.group(2) + + marker.metadata = { + "cmx_3600": { + "color": color_parsed_from_file + } + } + + # @TODO: if it is a valid + if hasattr( + schema.MarkerColor, + color_parsed_from_file.upper() + ): + marker.color = color_parsed_from_file.upper() + else: + marker.color = schema.MarkerColor.RED + + marker.name = m.group(3) + clip.markers.append(marker) + else: + # TODO: Should we report this as a warning somehow? + pass + + clip.source_range = opentime.range_from_start_end_time( + opentime.from_timecode(self.source_tc_in, self.edl_rate), + opentime.from_timecode(self.source_tc_out, self.edl_rate) + ) + + return clip + + def parse(self, line): + fields = tuple(e.strip() for e in line.split() if e.strip()) + field_count = len(fields) + + if field_count == 9: + # has transition data + # this is for edits with timing or other needed info + # transition data for D and W*** transitions is a n integer that + # denotes frame count + # i haven't figured out how the key transitions (K, KB, KO) work + ( + self.clip_num, + self.reel, + self.channel_code, + self.transition_type, + self.transition_data, + self.source_tc_in, + self.source_tc_out, + self.record_tc_in, + self.record_tc_out + ) = fields + + elif field_count == 8: + # no transition data + # this is for basic cuts + ( + self.clip_num, + self.reel, + self.channel_code, + self.transition_type, + self.source_tc_in, + self.source_tc_out, + self.record_tc_in, + self.record_tc_out + ) = fields + + else: + raise EDLParseError( + 'incorrect number of fields [{0}] in form statement: {1}' + ''.format(field_count, line)) + + # Frame 
numbers (not just timecode) are ok + for prop in [ + 'source_tc_in', + 'source_tc_out', + 'record_tc_in', + 'record_tc_out' + ]: + if ':' not in getattr(self, prop): + setattr( + self, + prop, + opentime.to_timecode( + opentime.from_frames( + int(getattr(self, prop)), + self.edl_rate + ), + self.edl_rate + ) + ) + + +class CommentHandler(object): + # this is the for that all comment 'id' tags take + regex_template = r'\*?\s*{id}:?\s*(?P.*)' + + # this should be a map of all known comments that we can read + # 'FROM CLIP' or 'FROM FILE' is a required comment to link media + # An exception is raised if both 'FROM CLIP' and 'FROM FILE' are found + # needs to be ordered so that FROM CLIP NAME gets matched before FROM CLIP + comment_id_map = collections.OrderedDict([ + ('FROM CLIP NAME', 'clip_name'), + ('FROM CLIP', 'media_reference'), + ('FROM FILE', 'media_reference'), + ('LOC', 'locator'), + ('ASC_SOP', 'asc_sop'), + ('ASC_SAT', 'asc_sat'), + ('M2', 'motion_effect'), + ('\\* FREEZE FRAME', 'freeze_frame'), + ]) + + def __init__(self, comments): + self.handled = {} + self.unhandled = [] + for comment in comments: + self.parse(comment) + + def parse(self, comment): + for comment_id, comment_type in self.comment_id_map.items(): + regex = self.regex_template.format(id=comment_id) + match = re.match(regex, comment) + if match: + self.handled[comment_type] = match.group( + 'comment_body' + ).strip() + break + else: + stripped = comment.lstrip('*').strip() + if stripped: + self.unhandled.append(stripped) + + +def _expand_transitions(timeline): + """Convert clips with metadata/transition == 'D' into OTIO transitions.""" + + tracks = timeline.tracks + remove_list = [] + replace_list = [] + append_list = [] + for track in tracks: + track_iter = iter(track) + # avid inserts an extra clip for the source + prev_prev = None + prev = None + clip = next(track_iter, None) + next_clip = next(track_iter, None) + while clip is not None: + transition_type = clip.metadata.get('cmx_3600', 
{}).get( + 'transition', + 'C' + ) + + if transition_type == 'C': + # nothing to do, continue to the next iteration of the loop + prev_prev = prev + prev = clip + clip = next_clip + next_clip = next(track_iter, None) + continue + if transition_type not in ['D']: + raise EDLParseError( + "Transition type '{}' not supported by the CMX EDL reader " + "currently.".format(transition_type) + ) + + transition_duration = clip.duration() + + # EDL doesn't have enough data to know where the cut point was, so + # this arbitrarily puts it in the middle of the transition + pre_cut = math.floor(transition_duration.value / 2) + post_cut = transition_duration.value - pre_cut + mid_tran_cut_pre_duration = opentime.RationalTime( + pre_cut, + transition_duration.rate + ) + mid_tran_cut_post_duration = opentime.RationalTime( + post_cut, + transition_duration.rate + ) + + # expand the previous + expansion_clip = None + if prev and not prev_prev: + expansion_clip = prev + elif prev_prev: + expansion_clip = prev_prev + if prev: + remove_list.append((track, prev)) + + sr = expansion_clip.source_range + expansion_clip.source_range = opentime.TimeRange( + start_time=sr.start_time, + duration=sr.duration + mid_tran_cut_pre_duration + ) + + # rebuild the clip as a transition + new_trx = schema.Transition( + name=clip.name, + # only supported type at the moment + transition_type=schema.TransitionTypes.SMPTE_Dissolve, + metadata=clip.metadata + ) + new_trx.in_offset = mid_tran_cut_pre_duration + new_trx.out_offset = mid_tran_cut_post_duration + + # in from to + replace_list.append((track, clip, new_trx)) + + # expand the next_clip + if next_clip: + next_clip.source_range = opentime.TimeRange( + next_clip.source_range.start_time - mid_tran_cut_post_duration, + next_clip.source_range.duration + mid_tran_cut_post_duration + ) + else: + fill = schema.Gap( + source_range=opentime.TimeRange( + duration=mid_tran_cut_post_duration, + start_time=opentime.RationalTime( + 0, + transition_duration.rate + ) 
+ ) + ) + append_list.append((track, fill)) + + prev = clip + clip = next_clip + next_clip = next(track_iter, None) + + for (track, from_clip, to_transition) in replace_list: + track[track.index(from_clip)] = to_transition + + for (track, clip_to_remove) in list(set(remove_list)): + # if clip_to_remove in track: + track.remove(clip_to_remove) + + for (track, clip) in append_list: + track.append(clip) + + return timeline + + +def read_from_string(input_str, rate=24, ignore_timecode_mismatch=False): + """Reads a CMX Edit Decision List (EDL) from a string. + Since EDLs don't contain metadata specifying the rate they are meant + for, you may need to specify the rate parameter (default is 24). + By default, read_from_string will throw an exception if it discovers + invalid timecode in the EDL. For example, if a clip's record timecode + overlaps with the previous cut. Since this is a common mistake in + many EDLs, you can specify ignore_timecode_mismatch=True, which will + supress these errors and attempt to guess at the correct record + timecode based on the source timecode and adjacent cuts. + For best results, you may wish to do something like this: + + Example: + >>> try: + ... timeline = otio.adapters.read_from_string("mymovie.edl", rate=30) + ... except EDLParseError: + ... print('Log a warning here') + ... try: + ... timeline = otio.adapters.read_from_string( + ... "mymovie.edl", + ... rate=30, + ... ignore_timecode_mismatch=True) + ... except EDLParseError: + ... print('Log an error here') + """ + parser = EDLParser( + input_str, + rate=float(rate), + ignore_timecode_mismatch=ignore_timecode_mismatch + ) + result = parser.timeline + result = _expand_transitions(result) + return result + + +def write_to_string(input_otio, rate=None, style='avid', reelname_len=8): + # TODO: We should have convenience functions in Timeline for this? 
+ # also only works for a single video track at the moment + + video_tracks = [t for t in input_otio.tracks + if t.kind == schema.TrackKind.Video] + audio_tracks = [t for t in input_otio.tracks + if t.kind == schema.TrackKind.Audio] + + if len(video_tracks) != 1: + raise exceptions.NotSupportedError( + "Only a single video track is supported, got: {}".format( + len(video_tracks) + ) + ) + + if len(audio_tracks) > 2: + raise exceptions.NotSupportedError( + "No more than 2 audio tracks are supported." + ) + # if audio_tracks: + # raise exceptions.NotSupportedError( + # "No audio tracks are currently supported." + # ) + + # TODO: We should try to detect the frame rate and output an + # appropriate "FCM: NON-DROP FRAME" etc here. + + writer = EDLWriter( + tracks=input_otio.tracks, + # Assume all rates are the same as the 1st track's + rate=rate or input_otio.tracks[0].duration().rate, + style=style, + reelname_len=reelname_len + ) + + return writer.get_content_for_track_at_index(0, title=input_otio.name) + + +class EDLWriter(object): + def __init__(self, tracks, rate, style, reelname_len=8): + self._tracks = tracks + self._rate = rate + self._style = style + self._reelname_len = reelname_len + + if style not in VALID_EDL_STYLES: + raise exceptions.NotSupportedError( + "The EDL style '{}' is not supported.".format( + style + ) + ) + + def get_content_for_track_at_index(self, idx, title): + track = self._tracks[idx] + + # Add a gap if the last child is a transition. + if isinstance(track[-1], schema.Transition): + gap = schema.Gap( + source_range=opentime.TimeRange( + start_time=track[-1].duration(), + duration=opentime.RationalTime(0.0, self._rate) + ) + ) + track.append(gap) + + # Note: Transitions in EDLs are unconventionally represented. 
+ # + # Where a transition might normally be visualized like: + # |---57.0 Trans 43.0----| + # |------Clip1 102.0------|----------Clip2 143.0----------|Clip3 24.0| + # + # In an EDL it can be thought of more like this: + # |---0.0 Trans 100.0----| + # |Clip1 45.0|----------------Clip2 200.0-----------------|Clip3 24.0| + + # Adjust cut points to match EDL event representation. + for idx, child in enumerate(track): + if isinstance(child, schema.Transition): + if idx != 0: + # Shorten the a-side + sr = track[idx - 1].source_range + track[idx - 1].source_range = opentime.TimeRange( + start_time=sr.start_time, + duration=sr.duration - child.in_offset + ) + + # Lengthen the b-side + sr = track[idx + 1].source_range + track[idx + 1].source_range = opentime.TimeRange( + start_time=sr.start_time - child.in_offset, + duration=sr.duration + child.in_offset + ) + + # Just clean up the transition for goodness sake + in_offset = child.in_offset + child.in_offset = opentime.RationalTime(0.0, self._rate) + child.out_offset += in_offset + + # Group events into either simple clip/a-side or transition and b-side + # to match EDL edit/event representation and edit numbers. + events = [] + for idx, child in enumerate(track): + if isinstance(child, schema.Transition): + # Transition will be captured in subsequent iteration. + continue + + prv = track[idx - 1] if idx > 0 else None + + if isinstance(prv, schema.Transition): + events.append( + DissolveEvent( + events[-1] if len(events) else None, + prv, + child, + self._tracks, + track.kind, + self._rate, + self._style, + self._reelname_len + ) + ) + elif isinstance(child, schema.Clip): + events.append( + Event( + child, + self._tracks, + track.kind, + self._rate, + self._style, + self._reelname_len + ) + ) + elif isinstance(child, schema.Gap): + # Gaps are represented as missing record timecode, no event + # needed. 
+ pass + + content = "TITLE: {}\n\n".format(title) if title else '' + + # Convert each event/dissolve-event into plain text. + for idx, event in enumerate(events): + event.edit_number = idx + 1 + content += event.to_edl_format() + '\n' + + return content + + +def _supported_timing_effects(clip): + return [ + fx for fx in clip.effects + if isinstance(fx, schema.LinearTimeWarp) + ] + + +def _relevant_timing_effect(clip): + # check to see if there is more than one timing effect + effects = _supported_timing_effects(clip) + + if effects != clip.effects: + for thing in clip.effects: + if thing not in effects and isinstance(thing, schema.TimeEffect): + raise exceptions.NotSupportedError( + "Clip contains timing effects not supported by the EDL" + " adapter.\nClip: {}".format(str(clip))) + + timing_effect = None + if effects: + timing_effect = effects[0] + if len(effects) > 1: + raise exceptions.NotSupportedError( + "EDL Adapter only allows one timing effect / clip." + ) + + return timing_effect + + +class Event(object): + def __init__( + self, + clip, + tracks, + kind, + rate, + style, + reelname_len + ): + + line = EventLine(kind, rate, reel=_reel_from_clip(clip, reelname_len)) + line.source_in = clip.source_range.start_time + line.source_out = clip.source_range.end_time_exclusive() + + timing_effect = _relevant_timing_effect(clip) + + if timing_effect: + if timing_effect.effect_name == "FreezeFrame": + line.source_out = line.source_in + opentime.RationalTime( + 1, + line.source_in.rate + ) + elif timing_effect.effect_name == "LinearTimeWarp": + value = clip.trimmed_range().duration.value / timing_effect.time_scalar + line.source_out = ( + line.source_in + opentime.RationalTime(value, rate)) + + range_in_timeline = clip.transformed_time_range( + clip.trimmed_range(), + tracks + ) + line.record_in = range_in_timeline.start_time + line.record_out = range_in_timeline.end_time_exclusive() + self.line = line + + self.comments = _generate_comment_lines( + clip=clip, + 
style=style, + edl_rate=rate, + reelname_len=reelname_len, + from_or_to='FROM' + ) + + self.clip = clip + self.source_out = line.source_out + self.record_out = line.record_out + self.reel = line.reel + + def __str__(self): + return '{type}({name})'.format( + type=self.clip.schema_name(), + name=self.clip.name + ) + + def to_edl_format(self): + """ + Example output: + 002 AX V C 00:00:00:00 00:00:00:05 00:00:00:05 00:00:00:10 + * FROM CLIP NAME: test clip2 + * FROM FILE: S:\\var\\tmp\\test.exr + + """ + lines = [self.line.to_edl_format(self.edit_number)] + lines += self.comments if len(self.comments) else [] + + return "\n".join(lines) + + +class DissolveEvent(object): + + def __init__( + self, + a_side_event, + transition, + b_side_clip, + tracks, + kind, + rate, + style, + reelname_len + ): + # Note: We don't make the A-Side event line here as it is represented + # by its own event (edit number). + + cut_line = EventLine(kind, rate) + + if a_side_event: + cut_line.reel = a_side_event.reel + cut_line.source_in = a_side_event.source_out + cut_line.source_out = a_side_event.source_out + cut_line.record_in = a_side_event.record_out + cut_line.record_out = a_side_event.record_out + + self.from_comments = _generate_comment_lines( + clip=a_side_event.clip, + style=style, + edl_rate=rate, + reelname_len=reelname_len, + from_or_to='FROM' + ) + else: + cut_line.reel = 'BL' + cut_line.source_in = opentime.RationalTime(0.0, rate) + cut_line.source_out = opentime.RationalTime(0.0, rate) + cut_line.record_in = opentime.RationalTime(0.0, rate) + cut_line.record_out = opentime.RationalTime(0.0, rate) + + self.cut_line = cut_line + + dslve_line = EventLine( + kind, + rate, + reel=_reel_from_clip(b_side_clip, reelname_len) + ) + dslve_line.source_in = b_side_clip.source_range.start_time + dslve_line.source_out = b_side_clip.source_range.end_time_exclusive() + range_in_timeline = b_side_clip.transformed_time_range( + b_side_clip.trimmed_range(), + tracks + ) + dslve_line.record_in = 
range_in_timeline.start_time + dslve_line.record_out = range_in_timeline.end_time_exclusive() + dslve_line.dissolve_length = transition.out_offset + self.dissolve_line = dslve_line + + self.to_comments = _generate_comment_lines( + clip=b_side_clip, + style=style, + edl_rate=rate, + reelname_len=reelname_len, + from_or_to='TO' + ) + + self.a_side_event = a_side_event + self.transition = transition + self.b_side_clip = b_side_clip + + # Expose so that any subsequent dissolves can borrow their values. + self.clip = b_side_clip + self.source_out = dslve_line.source_out + self.record_out = dslve_line.record_out + self.reel = dslve_line.reel + + def __str__(self): + a_side = self.a_side_event + return '{a_type}({a_name}) -> {b_type}({b_name})'.format( + a_type=a_side.clip.schema_name() if a_side else '', + a_name=a_side.clip.name if a_side else '', + b_type=self.b_side_clip.schema_name(), + b_name=self.b_side_clip.name + ) + + def to_edl_format(self): + """ + Example output: + + Cross dissolve... + 002 Clip1 V C 00:00:07:08 00:00:07:08 00:00:01:21 00:00:01:21 + 002 Clip2 V D 100 00:00:09:07 00:00:17:15 00:00:01:21 00:00:10:05 + * FROM CLIP NAME: Clip1 + * FROM CLIP: /var/tmp/clip1.001.exr + * TO CLIP NAME: Clip2 + * TO CLIP: /var/tmp/clip2.001.exr + + Fade in... + 001 BL V C 00:00:00:00 00:00:00:00 00:00:00:00 00:00:00:00 + 001 My_Clip V D 012 00:00:02:02 00:00:03:04 00:00:00:00 00:00:01:02 + * TO CLIP NAME: My Clip + * TO FILE: /var/tmp/clip.001.exr + + Fade out... 
+ 002 My_Clip V C 00:00:01:12 00:00:01:12 00:00:00:12 00:00:00:12 + 002 BL V D 012 00:00:00:00 00:00:00:12 00:00:00:12 00:00:01:00 + * FROM CLIP NAME: My Clip + * FROM FILE: /var/tmp/clip.001.exr + """ + + lines = [ + self.cut_line.to_edl_format(self.edit_number), + self.dissolve_line.to_edl_format(self.edit_number) + ] + lines += self.from_comments if hasattr(self, 'from_comments') else [] + lines += self.to_comments if len(self.to_comments) else [] + + return "\n".join(lines) + + +class EventLine(object): + def __init__(self, kind, rate, reel='AX'): + self.reel = reel + self._kind = 'V' if kind == schema.TrackKind.Video else 'A' + self._rate = rate + + self.source_in = opentime.RationalTime(0.0, rate=rate) + self.source_out = opentime.RationalTime(0.0, rate=rate) + self.record_in = opentime.RationalTime(0.0, rate=rate) + self.record_out = opentime.RationalTime(0.0, rate=rate) + + self.dissolve_length = opentime.RationalTime(0.0, rate) + + def to_edl_format(self, edit_number): + ser = { + 'edit': edit_number, + 'reel': self.reel, + 'kind': self._kind, + 'src_in': opentime.to_timecode(self.source_in, self._rate), + 'src_out': opentime.to_timecode(self.source_out, self._rate), + 'rec_in': opentime.to_timecode(self.record_in, self._rate), + 'rec_out': opentime.to_timecode(self.record_out, self._rate), + 'diss': int( + opentime.to_frames(self.dissolve_length, self._rate) + ), + } + + if self.is_dissolve(): + return "{edit:03d} {reel:8} {kind:5} D {diss:03d} " \ + "{src_in} {src_out} {rec_in} {rec_out}".format(**ser) + else: + return "{edit:03d} {reel:8} {kind:5} C " \ + "{src_in} {src_out} {rec_in} {rec_out}".format(**ser) + + def is_dissolve(self): + return self.dissolve_length.value > 0 + + +def _generate_comment_lines( + clip, + style, + edl_rate, + reelname_len, + from_or_to='FROM' +): + lines = [] + url = None + + if not clip or isinstance(clip, schema.Gap): + return [] + + suffix = '' + timing_effect = _relevant_timing_effect(clip) + if timing_effect and 
timing_effect.effect_name == 'FreezeFrame': + suffix = ' FF' + + if clip.media_reference: + if hasattr(clip.media_reference, 'target_url'): + url = clip.media_reference.target_url + + else: + url = clip.name + + if from_or_to not in ['FROM', 'TO']: + raise exceptions.NotSupportedError( + "The clip FROM or TO value '{}' is not supported.".format( + from_or_to + ) + ) + + if timing_effect and isinstance(timing_effect, schema.LinearTimeWarp): + lines.append( + 'M2 {}\t\t{}\t\t\t{}'.format( + clip.name, + timing_effect.time_scalar * edl_rate, + opentime.to_timecode( + clip.trimmed_range().start_time, + edl_rate + ) + ) + ) + + if clip.name: + # Avid Media Composer outputs two spaces before the + # clip name so we match that. + lines.append( + "* {from_or_to} CLIP NAME: {name}{suffix}".format( + from_or_to=from_or_to, + name=clip.name, + suffix=suffix + ) + ) + if timing_effect and timing_effect.effect_name == "FreezeFrame": + lines.append('* * FREEZE FRAME') + if url and style == 'avid': + lines.append("* {from_or_to} CLIP: {url}".format( + from_or_to=from_or_to, + url=url + )) + if url and style == 'nucoda': + lines.append("* {from_or_to} FILE: {url}".format( + from_or_to=from_or_to, + url=url + )) + + if reelname_len and not clip.metadata.get('cmx_3600', {}).get('reel'): + lines.append("* OTIO TRUNCATED REEL NAME FROM: {url}".format( + url=os.path.basename(_flip_windows_slashes(url or clip.name)) + )) + + cdl = clip.metadata.get('cdl') + if cdl: + asc_sop = cdl.get('asc_sop') + asc_sat = cdl.get('asc_sat') + if asc_sop: + lines.append( + "*ASC_SOP ({} {} {}) ({} {} {}) ({} {} {})".format( + asc_sop['slope'][0], + asc_sop['slope'][1], + asc_sop['slope'][2], + asc_sop['offset'][0], + asc_sop['offset'][1], + asc_sop['offset'][2], + asc_sop['power'][0], + asc_sop['power'][1], + asc_sop['power'][2] + )) + if asc_sat: + lines.append("*ASC_SAT {}".format( + asc_sat + )) + + # Output any markers on this clip + for marker in clip.markers: + timecode = opentime.to_timecode( + 
marker.marked_range.start_time, + edl_rate + ) + + color = marker.color + meta = marker.metadata.get("cmx_3600") + if not color and meta and meta.get("color"): + color = meta.get("color").upper() + comment = (marker.name or '').upper() + lines.append("* LOC: {} {:7} {}".format(timecode, color, comment)) + + # If we are carrying any unhandled CMX 3600 comments on this clip + # then output them blindly. + extra_comments = clip.metadata.get('cmx_3600', {}).get('comments', []) + for comment in extra_comments: + lines.append("* {}".format(comment)) + + return lines + + +def _flip_windows_slashes(path): + return re.sub(r'\\', '/', path) + + +def _reel_from_clip(clip, reelname_len): + if isinstance(clip, schema.Gap): + return 'BL' + + elif clip.metadata.get('cmx_3600', {}).get('reel'): + return clip.metadata.get('cmx_3600').get('reel') + + _reel = clip.name or 'AX' + + if isinstance(clip.media_reference, schema.ExternalReference): + _reel = clip.media_reference.name or os.path.basename( + clip.media_reference.target_url + ) + + # Flip Windows slashes + _reel = os.path.basename(_flip_windows_slashes(_reel)) + + # Strip extension + reel = re.sub(r'([.][a-zA-Z]+)$', '', _reel) + + if reelname_len: + # Remove non valid characters + reel = re.sub(r'[^ a-zA-Z0-9]+', '', reel) + + if len(reel) > reelname_len: + reel = reel[:reelname_len] + + elif len(reel) < reelname_len: + reel += ' ' * (reelname_len - len(reel)) + + return reel diff --git a/pype/vendor/python/python_2/opentimelineio/adapters/fcp_xml.py b/pype/vendor/python/python_2/opentimelineio/adapters/fcp_xml.py new file mode 100644 index 00000000000..48f684cc360 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/adapters/fcp_xml.py @@ -0,0 +1,1941 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to 
it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""OpenTimelineIO Final Cut Pro 7 XML Adapter.""" + +import collections +import functools +import itertools +import math +import os +import re +from xml.etree import cElementTree +from xml.dom import minidom + +# urlparse's name changes in Python 3 +try: + # Python 2.7 + import urlparse as urllib_parse +except ImportError: + # Python 3 + import urllib.parse as urllib_parse + +# Same with the ABC classes from collections +try: + # Python 3 + from collections.abc import Mapping +except ImportError: + # Python 2.7 + from collections import Mapping + +from opentimelineio import ( + core, + opentime, + schema, +) + +# namespace to use for metadata +META_NAMESPACE = 'fcp_xml' + +# Regex to match identifiers like clipitem-22 +ID_RE = re.compile(r"^(?P[a-zA-Z]*)-(?P\d*)$") + + +# --------- +# utilities +# --------- + + +class _Context(Mapping): + """ + An inherited value context. + + In FCP XML there is a concept of inheritance down the element heirarchy. + For instance, a ``clip`` element may not specify the ``rate`` locally, but + instead inherit it from the parent ``track`` element. + + This object models that as a stack of elements. 
When a value needs to be + queried from the context, it will be gathered by walking from the top of + the stack until the value is found. + + For example, to find the ``rate`` element as an immediate child most + appropriate to the current context, you would do something like:: + ``my_current_context["./rate"]`` + + This object can be thought of as immutable. You get a new context when you + push an element. This prevents inadvertant tampering with parent contexts + that may be used at levels above. + + This DOES NOT support ``id`` attribute dereferencing, please make sure to + do that prior to using this structure. + + .. seealso:: https://developer.apple.com/library/archive/documentation\ + /AppleApplications/Reference/FinalCutPro_XML/Basics/Basics.html#\ + //apple_ref/doc/uid/TP30001154-TPXREF102 + """ + + def __init__(self, element=None, parent_elements=None): + if parent_elements is not None: + self.elements = parent_elements[:] + else: + self.elements = [] + + if element is not None: + self.elements.append(element) + + def _all_keys(self): + """ + Returns a set of all the keys available in the context stack. + """ + return set( + itertools.chain.fromiterable(e.keys() for e in self.elements) + ) + + def __getitem__(self, key): + # Walk down the contexts until the item is found + for element in reversed(self.elements): + found_element = element.find(key) + if found_element is not None: + return found_element + + raise KeyError(key) + + def __iter__(self): + # This is unlikely to be used, so we'll do it the expensive way + return iter(self._all_keys) + + def __len__(self): + # This is unlikely to be used, so we'll do it the expensive way + return len(self._all_keys) + + def context_pushing_element(self, element): + """ + Pushes an element to the top of the stack. + + :param element: Element to push to the stack. + :return: The new context with the provided element pushed to the top + of the stack. 
+ :raises: :class:`ValueError` if the element is already in the stack. + """ + for context_element in self.elements: + if context_element == element: + raise ValueError( + "element {} already in context".format(element) + ) + + return _Context(element, self.elements) + + +def _url_to_path(url): + parsed = urllib_parse.urlparse(url) + return parsed.path + + +def _bool_value(element): + """ + Given an xml element, returns the tag text converted to a bool. + + :param element: The element to fetch the value from. + + :return: A boolean. + """ + return (element.text.lower() == "true") + + +def _element_identification_string(element): + """ + Gets a string that will hopefully help in identifing an element when there + is an error. + """ + info_string = "tag: {}".format(element.tag) + try: + elem_id = element.attrib["id"] + info_string += " id: {}".format(elem_id) + except KeyError: + pass + + return info_string + + +def _name_from_element(element): + """ + Fetches the name from the ``name`` element child of the provided element. + If no element exists, returns ``None``. + + :param element: The element to find the name for. + + :return: The name string or ``None`` + """ + name_elem = element.find("./name") + if name_elem is not None: + return name_elem.text + + return None + + +def _rate_for_element(element): + """ + Takes an FCP rate element and returns a rate to use with otio. + + :param element: An FCP rate element. + + :return: The float rate. + """ + # rate is encoded as a timebase (int) which can be drop-frame + base = float(element.find("./timebase").text) + if _bool_value(element.find("./ntsc")): + base *= 1000.0 / 1001 + + return base + + +def _rate_from_context(context): + """ + Given the context object, gets the appropriate rate. + + :param context: The :class:`_Context` instance to find the rate in. + + :return: The rate value or ``None`` if no rate is available in the context. 
+ """ + try: + rate_element = context["./rate"] + except KeyError: + return None + + return _rate_for_element(rate_element) + + +def _time_from_timecode_element(tc_element, context=None): + """ + Given a timecode xml element, returns the time that represents. + + .. todo:: Non Drop-Frame timecode is not yet supported by OTIO. + + :param tc_element: The ``timecode`` element. + :param context: The context dict under which this timecode is being gotten. + + :return: The :class:`opentime.RationalTime` representation of the + timecode. + """ + if context is not None: + local_context = context.context_pushing_element(tc_element) + else: + local_context = _Context(tc_element) + + # Resolve the rate + rate = _rate_from_context(local_context) + + # Try using the display format and frame number + frame = tc_element.find("./frame") + + # Use frame number, if available + if frame is not None: + frame_num = int(frame.text) + return opentime.RationalTime(frame_num, rate) + + # If a TC string is provided, parse rate from it + tc_string_element = tc_element.find("./string") + if tc_string_element is None: + raise ValueError("Timecode element missing required elements") + + tc_string = tc_string_element.text + + return opentime.from_timecode(tc_string, rate) + + +def _track_kind_from_element(media_element): + """ + Given an FCP XML media sub-element, returns an appropriate + :class:`schema.TrackKind` value corresponding to that media type. + + :param media_element: An XML element that is a child of the ``media`` tag. + + :return: The corresponding :class`schema.TrackKind` value. + :raises: :class:`ValueError` When the media type is unsupported. 
+ """ + element_tag = media_element.tag.lower() + if element_tag == "audio": + return schema.TrackKind.Audio + elif element_tag == "video": + return schema.TrackKind.Video + + raise ValueError("Unsupported media kind: {}".format(media_element.tag)) + + +def _is_primary_audio_channel(track): + """ + Determines whether or not this is the "primary" audio track. + + audio may be structured in stereo where each channel occupies a separate + track. This importer keeps stereo pairs ganged together as a single track. + + :param track: An XML track element. + + :return: A boolean ``True`` if this is the first track. + """ + exploded_index = track.attrib.get('currentExplodedTrackIndex', '0') + exploded_count = track.attrib.get('totalExplodedTrackCount', '1') + + return (exploded_index == '0' or exploded_count == '1') + + +def _transition_cut_point(transition_item, context): + """ + Returns the end time at which the transition progresses from one clip to + the next. + + :param transition_item: The XML element for the transition. + :param context: The context dictionary applying to this transition. + + :return: The :class:`opentime.RationalTime` the transition cuts at. + """ + alignment = transition_item.find('./alignment').text + start = int(transition_item.find('./start').text) + end = int(transition_item.find('./end').text) + + # start/end time is in the parent context's rate + local_context = context.context_pushing_element(transition_item) + rate = _rate_from_context(local_context) + + if alignment in ('end', 'end-black'): + value = end + elif alignment in ('start', 'start-black'): + value = start + elif alignment in ('center',): + value = int((start + end) / 2) + else: + value = int((start + end) / 2) + + return opentime.RationalTime(value, rate) + + +def _xml_tree_to_dict(node, ignore_tags=None, omit_timing=True): + """ + Translates the tree under a provided node mapping to a dictionary/list + representation. 
XML tag attributes are placed in the dictionary with an + ``@`` prefix. + + .. note:: In addition to the provided ignore tags, this filters a subset of + timing metadata such as ``frame`` and ``string`` elements within timecode + elements. + + .. warning:: This scheme does not allow for leaf elements to have + attributes. for the moment this doesn't seem to be an issue. + + :param node: The root xml element to express childeren of in the + dictionary. + :param ignore_tags: A collection of tagnames to skip when converting. + :param omit_timing: If ``True``, omits timing-specific tags. + + :return: The dictionary representation. + """ + if node.tag == "timecode": + additional_ignore_tags = {"frame", "string"} + else: + additional_ignore_tags = tuple() + + out_dict = collections.OrderedDict() + + # Handle the attributes + out_dict.update( + collections.OrderedDict( + ("@{}".format(k), v) for k, v in node.attrib.items() + ) + ) + + # Now traverse the child tags + encountered_tags = set() + list_tags = set() + for info_node in node: + # Skip tags we were asked to omit + node_tag = info_node.tag + if ignore_tags and node_tag in ignore_tags: + continue + + # Skip some special case tags related to timing information + if node_tag in additional_ignore_tags: + continue + + # If there are children, make this a sub-dictionary by recursing + if len(info_node): + node_value = _xml_tree_to_dict(info_node) + else: + node_value = info_node.text + + # If we've seen this node before, then treat it as a list + if node_tag in list_tags: + # We've established that this tag is a list, append to that + out_dict[node_tag].append(node_value) + elif node_tag in encountered_tags: + # This appears to be a list we didn't know about, convert + out_dict[node_tag] = [ + out_dict[node_tag], node_value + ] + list_tags.add(node_tag) + else: + # Store the value + out_dict[node_tag] = node_value + encountered_tags.add(node_tag) + + return out_dict + + +def _dict_to_xml_tree(data_dict, tag): + """ + 
Given a dictionary, returns an element tree storing the data. This is the + inverse of :func:`_xml_tree_to_dict`. + + Any key/value pairs in the dictionary heirarchy where the key is prefixed + with ``@`` will be treated as attributes on the containing element. + + .. note:: This will automatically omit some kinds of metadata it should + be up to the xml building functions to manage (such as timecode and id). + + :param data_dict: The dictionary to turn into an XML tree. + :param tag: The tag name to use for the top-level element. + + :return: The top element for the dictionary + """ + top_attributes = collections.OrderedDict( + (k[1:], v) for k, v in data_dict.items() + if k != "@id" and k.startswith("@") + ) + top_element = cElementTree.Element(tag, **top_attributes) + + def elements_for_value(python_value, element_tag): + """ Creates a list of appropriate XML elements given a value. """ + if isinstance(python_value, dict): + element = _dict_to_xml_tree(python_value, element_tag) + return [element] + elif isinstance(python_value, list): + return itertools.chain.from_iterable( + elements_for_value(item, element_tag) for item in python_value + ) + else: + element = cElementTree.Element(element_tag) + if python_value is not None: + element.text = str(python_value) + return [element] + + # Drop timecode, rate, and link elements from roundtripping because they + # may become stale with timeline updates. 
+ default_ignore_keys = {"timecode", "rate", "link"} + specific_ignore_keys = {"samplecharacteristics": {"timecode"}} + ignore_keys = specific_ignore_keys.get(tag, default_ignore_keys) + + # push the elements into the tree + for key, value in data_dict.items(): + if key in ignore_keys: + continue + + # We already handled the attributes + if key.startswith("@"): + continue + + elements = elements_for_value(value, key) + top_element.extend(elements) + + return top_element + + +def _element_with_item_metadata(tag, item): + """ + Given a tag name, gets the FCP XML metadata dict and creates a tree of XML + with that metadata under a top element with the provided tag. + + :param tag: The XML tag for the root element. + :param item: An otio object with a metadata dict. + """ + item_meta = item.metadata.get(META_NAMESPACE) + if item_meta: + return _dict_to_xml_tree(item_meta, tag) + + return cElementTree.Element(tag) + + +def _get_or_create_subelement(parent_element, tag): + """ + Given an element and tag name, either gets the direct child of parent with + that tag name or creates a new subelement with that tag and returns it. + + :param parent_element: The element to get or create the subelement from. + :param tag: The tag for the subelement. + """ + sub_element = parent_element.find(tag) + if sub_element is None: + sub_element = cElementTree.SubElement(parent_element, tag) + + return sub_element + + +def _make_pretty_string(tree_e): + # most of the parsing in this adapter is done with cElementTree because it + # is simpler and faster. However, the string representation it returns is + # far from elegant. Therefor we feed it through minidom to provide an xml + # with indentations. + string = cElementTree.tostring(tree_e, encoding="UTF-8", method="xml") + dom = minidom.parseString(string) + return dom.toprettyxml(indent=' ') + + +def marker_for_element(marker_element, rate): + """ + Creates an :class:`schema.Marker` for the provided element. 
+ + :param marker_element: The XML element for the marker. + :param rate: The rate for the object the marker is attached to. + + :return: The :class:`schema.Marker` instance. + """ + # TODO: The spec doc indicates that in and out are required, but doesn't + # say they have to be locally specified, so is it possible they + # could be inherited? + marker_in = opentime.RationalTime( + float(marker_element.find("./in").text), rate + ) + marker_out_value = float(marker_element.find("./out").text) + if marker_out_value > 0: + marker_out = opentime.RationalTime( + marker_out_value, rate + ) + marker_duration = (marker_out - marker_in) + else: + marker_duration = opentime.RationalTime(rate=rate) + + marker_range = opentime.TimeRange(marker_in, marker_duration) + + md_dict = _xml_tree_to_dict(marker_element, {"in", "out", "name"}) + metadata = {META_NAMESPACE: md_dict} if md_dict else None + + return schema.Marker( + name=_name_from_element(marker_element), + marked_range=marker_range, + metadata=metadata + ) + + +def markers_from_element(element, context=None): + """ + Given an element, returns the list of markers attached to it. + + :param element: An element with one or more ``marker`` child elements. + :param context: The context for this element. + + :return: A :class:`list` of :class:`schema.Marker` instances attached + to the provided element. + """ + if context is not None: + local_context = context.context_pushing_element(element) + else: + local_context = _Context(element) + rate = _rate_from_context(local_context) + + return [marker_for_element(e, rate) for e in element.iterfind("./marker")] + + +class FCP7XMLParser: + """ + Implements parsing of an FCP XML file into an OTIO timeline. + + Parsing FCP XML elements include two concepts that require carrying state: + 1. Inheritance + 2. The id Attribute + + .. 
seealso:: https://developer.apple.com/library/archive/documentation/\ + AppleApplications/Reference/FinalCutPro_XML/Basics/Basics.html\ + #//apple_ref/doc/uid/TP30001154-TPXREF102 + + Inheritance is implemented using a _Context object that is pushed down + through layers of parsing. A given parsing method is passed the element to + parse into an otio object along with the context that element exists under + (e.x. a track element parsing method is given the track element and the + sequence context for that track). + + The id attribute dereferencing is handled through a lookup table stored on + parser instances and using the ``_derefed_`` methods to take an element and + find dereference elements. + """ + + _etree = None + """ The root etree for the FCP XML. """ + + _id_map = None + """ A mapping of id to the first element encountered with that id. """ + + def __init__(self, element_tree): + """ + Constructor, must be init with an xml etree. + """ + self._etree = element_tree + + self._id_map = {} + + def _derefed_element(self, element): + """ + Given an element, dereferences it by it's id attribute if needed. If + the element has an id attribute and it's our first time encountering + it, store the id. + """ + if element is None: + return element + + try: + elem_id = element.attrib["id"] + except KeyError: + return element + + return self._id_map.setdefault(elem_id, element) + + def _derefed_iterfind(self, element, path): + """ + Given an elemnt, finds elements with the provided path below and + returns an iterator of the dereferenced versions of those. + + :param element: The XML etree element. + :param path: The path to find subelements. + + :return: iterator of subelements dereferenced by id. + """ + return ( + self._derefed_element(e) for e in element.iterfind(path) + ) + + def top_level_sequences(self): + """" + Returns a list of timelines for the top-level sequences in the file. 
+ """ + context = _Context() + + # If the tree has just sequences at the top level, this will catch them + top_iter = self._derefed_iterfind(self._etree, "./sequence") + + # If there is a project or bin at the top level, this should cath them + project_and_bin_iter = self._derefed_iterfind( + self._etree, ".//children/sequence" + ) + + # Make an iterator that will exhaust both the above + sequence_iter = itertools.chain(top_iter, project_and_bin_iter) + + return [self.timeline_for_sequence(s, context) for s in sequence_iter] + + def timeline_for_sequence(self, sequence_element, context): + """ + Returns either an :class`schema.Timeline` parsed from a sequence + element. + + :param sequence_element: The sequence element. + :param context: The context dictionary. + + :return: The appropriate OTIO object for the element. + """ + local_context = context.context_pushing_element(sequence_element) + + name = _name_from_element(sequence_element) + parsed_tags = {"name", "media", "marker", "duration"} + md_dict = _xml_tree_to_dict(sequence_element, parsed_tags) + + sequence_timecode = self._derefed_element( + sequence_element.find("./timecode") + ) + if sequence_timecode is not None: + seq_start_time = _time_from_timecode_element( + sequence_timecode, local_context + ) + else: + seq_start_time = None + + media_element = self._derefed_element(sequence_element.find("./media")) + if media_element is None: + tracks = None + else: + # Reach down into the media block and escalate metadata to the + # sequence + for media_type in media_element: + media_info_dict = _xml_tree_to_dict(media_type, {"track"}) + if media_info_dict: + media_dict = md_dict.setdefault( + "media", collections.OrderedDict() + ) + media_dict[media_type.tag] = media_info_dict + + tracks = self.stack_for_element(media_element, local_context) + tracks.name = name + + # TODO: Should we be parsing the duration tag and pad out a track with + # gap to match? 
+ + timeline = schema.Timeline( + name=name, + global_start_time=seq_start_time, + metadata={META_NAMESPACE: md_dict} if md_dict else {}, + ) + timeline.tracks = tracks + + # Push the sequence markers onto the top stack + markers = markers_from_element(sequence_element, context) + timeline.tracks.markers.extend(markers) + + return timeline + + def stack_for_element(self, element, context): + """ + Given an element, parses out track information as a stack. + + :param element: The element under which to find the tracks (typically + a ``media`` element. + :param context: The current parser context. + + :return: A :class:`schema.Stack` of the tracks. + """ + # Determine the context + local_context = context.context_pushing_element(element) + + tracks = [] + media_type_elements = self._derefed_iterfind(element, "./") + for media_type_element in media_type_elements: + try: + track_kind = _track_kind_from_element(media_type_element) + except ValueError: + # Unexpected element + continue + + is_audio = (track_kind == schema.TrackKind.Audio) + track_elements = self._derefed_iterfind( + media_type_element, "./track" + ) + for track_element in track_elements: + if is_audio and not _is_primary_audio_channel(track_element): + continue + + tracks.append( + self.track_for_element( + track_element, track_kind, local_context + ) + ) + + markers = markers_from_element(element, context) + + stack = schema.Stack( + children=tracks, + markers=markers, + name=_name_from_element(element), + ) + + return stack + + def track_for_element(self, track_element, track_kind, context): + """ + Given a track element, constructs the OTIO track. + + :param track_element: The track XML element. + :param track_kind: The :class:`schema.TrackKind` for the track. + :param context: The context dict for this track. 
+ """ + local_context = context.context_pushing_element(track_element) + name_element = track_element.find("./name") + track_name = (name_element.text if name_element is not None else None) + + timeline_item_tags = {"clipitem", "generatoritem", "transitionitem"} + + md_dict = _xml_tree_to_dict(track_element, timeline_item_tags) + track_metadata = {META_NAMESPACE: md_dict} if md_dict else None + + track = schema.Track( + name=track_name, + kind=track_kind, + metadata=track_metadata, + ) + + # Iterate through and parse track items + track_rate = _rate_from_context(local_context) + current_timeline_time = opentime.RationalTime(0, track_rate) + head_transition_element = None + for i, item_element in enumerate(track_element): + if item_element.tag not in timeline_item_tags: + continue + + item_element = self._derefed_element(item_element) + + # Do a lookahead to try and find the tail transition item + try: + tail_transition_element = track_element[i + 1] + if tail_transition_element.tag != "transitionitem": + tail_transition_element = None + else: + tail_transition_element = self._derefed_element( + tail_transition_element + ) + except IndexError: + tail_transition_element = None + + track_item, item_range = self.item_and_timing_for_element( + item_element, + head_transition_element, + tail_transition_element, + local_context, + ) + + # Insert gap between timeline cursor and the new item if needed. 
+ if current_timeline_time < item_range.start_time: + gap_duration = (item_range.start_time - current_timeline_time) + gap_range = opentime.TimeRange( + duration=gap_duration.rescaled_to(track_rate) + ) + track.append(schema.Gap(source_range=gap_range)) + + # Add the item and advance the timeline cursor + track.append(track_item) + current_timeline_time = item_range.end_time_exclusive() + + # Stash the element for the next iteration if it's a transition + if item_element.tag == "transitionitem": + head_transition_element = item_element + + return track + + def media_reference_for_file_element(self, file_element, context): + """ + Given a file XML element, returns the + :class`schema.ExternalReference`. + + :param file_element: The file xml element. + :param context: The parent context dictionary. + + :return: An :class:`schema.ExternalReference`. + """ + local_context = context.context_pushing_element(file_element) + media_ref_rate = _rate_from_context(local_context) + + name = _name_from_element(file_element) + + # Get the full metadata + metadata_ignore_keys = {"duration", "name", "pathurl"} + md_dict = _xml_tree_to_dict(file_element, metadata_ignore_keys) + metadata_dict = {META_NAMESPACE: md_dict} if md_dict else None + + # Determine the file path + path_element = file_element.find("./pathurl") + if path_element is not None: + path = path_element.text + else: + path = None + + # Find the timing + timecode_element = file_element.find("./timecode") + if timecode_element is not None: + start_time = _time_from_timecode_element(timecode_element) + start_time = start_time.rescaled_to(media_ref_rate) + else: + start_time = opentime.RationalTime(0, media_ref_rate) + + duration_element = file_element.find("./duration") + if duration_element is not None: + duration = opentime.RationalTime( + float(duration_element.text), media_ref_rate + ) + available_range = opentime.TimeRange(start_time, duration) + elif timecode_element is not None: + available_range = 
opentime.TimeRange( + start_time, + opentime.RationalTime(0, media_ref_rate), + ) + else: + available_range = None + + if path is None: + media_reference = schema.MissingReference( + name=name, + available_range=available_range, + metadata=metadata_dict, + ) + else: + media_reference = schema.ExternalReference( + target_url=path, + available_range=available_range, + metadata=metadata_dict, + ) + media_reference.name = name + + return media_reference + + def media_reference_for_effect_element(self, effect_element): + """ + Given an effect element, returns a generator reference. + + :param effect_element: The effect for the generator. + + :return: An :class:`schema.GeneratorReference` instance. + """ + name = _name_from_element(effect_element) + md_dict = _xml_tree_to_dict(effect_element, {"name"}) + + return schema.GeneratorReference( + name=name, + metadata=({META_NAMESPACE: md_dict} if md_dict else None) + ) + + def item_and_timing_for_element( + self, item_element, head_transition, tail_transition, context + ): + """ + Given a track item, returns a tuple with the appropriate OpenTimelineIO + schema item as the first element and an + :class:`opentime.TimeRange`of theresolved timeline range the clip + occupies. + + :param item_element: The track item XML node. + :param head_transition: The xml element for the transition immediately + before or ``None``. + :param tail_transition: The xml element for the transition immediately + after or ``None``. + :param context: The context dictionary. + + :return: An :class:`core.Item` subclass instance and + :class:`opentime.TimeRange` for the item. 
+ """ + parent_rate = _rate_from_context(context) + + # Establish the start/end time in the timeline + start_value = int(item_element.find("./start").text) + end_value = int(item_element.find("./end").text) + + if start_value == -1: + # determine based on the cut point of the head transition + start = _transition_cut_point(head_transition, context) + + # This offset is needed to determing how much to advance from the + # clip media's in time. Duration accounts for this offset for the + # out time. + transition_rate = _rate_from_context( + context.context_pushing_element(head_transition) + ) + start_offset = start - opentime.RationalTime( + int(head_transition.find('./start').text), transition_rate + ) + else: + start = opentime.RationalTime(start_value, parent_rate) + start_offset = opentime.RationalTime() + + if end_value == -1: + # determine based on the cut point of the tail transition + end = _transition_cut_point(tail_transition, context) + else: + end = opentime.RationalTime(end_value, parent_rate) + + item_range = opentime.TimeRange(start, (end - start)) + + # Get the metadata dictionary for the item + item_metadata_ignore_keys = { + "name", + "start", + "end", + "in", + "out", + "duration", + "file", + "marker", + "effect", + "rate", + "sequence", + } + metadata_dict = _xml_tree_to_dict( + item_element, item_metadata_ignore_keys + ) + + # deserialize the item + if item_element.tag in {"clipitem", "generatoritem"}: + item = self.clip_for_element( + item_element, item_range, start_offset, context + ) + elif item_element.tag == "transitionitem": + item = self.transition_for_element(item_element, context) + else: + name = "unknown-{}".format(item_element.tag) + item = core.Item(name=name, source_range=item_range) + + if metadata_dict: + item.metadata.setdefault(META_NAMESPACE, {}).update(metadata_dict) + + return (item, item_range) + + def clip_for_element( + self, clipitem_element, item_range, start_offset, context + ): + """ + Given a clipitem xml element, 
returns an :class:`schema.Clip`. + + :param clipitem_element: The element to create a clip for. + :param item_range: The time range in the timeline the clip occupies. + :param start_offset: The amount by which the ``in`` time of the clip + source should be advanced (usually due to a transition). + :param context: The parent context for the clip. + + :return: The :class:`schema.Clip` instance. + """ + local_context = context.context_pushing_element(clipitem_element) + + name = _name_from_element(clipitem_element) + + file_element = self._derefed_element(clipitem_element.find("./file")) + sequence_element = self._derefed_element( + clipitem_element.find("./sequence") + ) + if clipitem_element.tag == "generatoritem": + generator_effect_element = clipitem_element.find( + "./effect[effecttype='generator']" + ) + else: + generator_effect_element = None + + media_start_time = opentime.RationalTime() + if sequence_element is not None: + item = self.stack_for_element(sequence_element, local_context) + # TODO: is there an applicable media start time we should be + # using from nested sequences? 
+ elif file_element is not None or generator_effect_element is not None: + if file_element is not None: + media_reference = self.media_reference_for_file_element( + file_element, local_context + ) + # See if there is a start offset + timecode_element = file_element.find("./timecode") + if timecode_element is not None: + media_start_time = _time_from_timecode_element( + timecode_element + ) + elif generator_effect_element is not None: + media_reference = self.media_reference_for_effect_element( + generator_effect_element + ) + + item = schema.Clip( + name=name, + media_reference=media_reference, + ) + else: + raise TypeError( + 'Type of clip item is not supported {}'.format( + _element_identification_string(clipitem_element) + ) + ) + + # Add the markers + markers = markers_from_element(clipitem_element, context) + item.markers.extend(markers) + + # Find the in time (source time relative to media start) + clip_rate = _rate_from_context(local_context) + in_value = float(clipitem_element.find('./in').text) + in_time = opentime.RationalTime(in_value, clip_rate) + + # Offset the "in" time by the start offset of the media + soure_start_time = in_time + media_start_time + start_offset + duration = item_range.duration + + # Source Range is the item range expressed in the clip's rate (for now) + source_range = opentime.TimeRange( + soure_start_time.rescaled_to(clip_rate), + duration.rescaled_to(clip_rate), + ) + + item.source_range = source_range + + # Parse the filters + filter_iter = self._derefed_iterfind(clipitem_element, "./filter") + for filter_element in filter_iter: + item.effects.append( + self.effect_from_filter_element(filter_element) + ) + + return item + + def effect_from_filter_element(self, filter_element): + """ + Given a filter element, creates an :class:`schema.Effect`. + + :param filter_element: The ``filter`` element containing the effect. + + :return: The effect instance. 
+ """ + effect_element = filter_element.find("./effect") + + if effect_element is None: + raise ValueError( + "could not find effect in filter: {}".format(filter_element) + ) + + name = effect_element.find("./name").text + + effect_metadata = _xml_tree_to_dict(effect_element, {"name"}) + + return schema.Effect( + name, + metadata={META_NAMESPACE: effect_metadata}, + ) + + def transition_for_element(self, item_element, context): + """ + Creates an OTIO transition for the provided transition element. + + :param item_element: The element to create a transition for. + :param context: The parent context for the element. + + :return: The :class:`schema.Transition` instance. + """ + # start and end times are in the parent's rate + rate = _rate_from_context(context) + start = opentime.RationalTime( + int(item_element.find('./start').text), + rate + ) + end = opentime.RationalTime( + int(item_element.find('./end').text), + rate + ) + cut_point = _transition_cut_point(item_element, context) + + transition = schema.Transition( + name=item_element.find('./effect/name').text, + transition_type=schema.TransitionTypes.SMPTE_Dissolve, + in_offset=cut_point - start, + out_offset=end - cut_point, + ) + + return transition + + +# ------------------------ +# building single track +# ------------------------ + + +def _backreference_for_item(item, tag, br_map): + """ + Given an item, determines what the id in the backreference map should be. + If the item is already tracked in the map, it will be returned, otherwise + a new id will be minted. + + .. note:: ``br_map`` may be mutated by this function. ``br_map`` is + intended to be an opaque data structure and only accessed through this + function, the structure of data in br_map may change. + + :param item: The :class:`core.SerializableObject` to create an id for. + :param tag: The tag name that will be used for object in xml. + :param br_map: The dictionary containing backreference information + generated so far. 
+ + :return: A 2-tuple of (id_string, is_new_id) where the ``id_string`` is + the value for the xml id attribute and ``is_new_id`` is ``True`` when + this is the first time that id was encountered. + """ + # br_map is structured as a dictionary with tags as keys, and dictionaries + # of hash to id int as values. + + def id_string(id_int): + return "{}-{}".format(tag, id_int) + + # Determine how to uniquely identify the referenced item + if isinstance(item, schema.ExternalReference): + item_hash = hash(str(item.target_url)) + else: + # TODO: This may become a performance issue. It means that every + # non-ref object is serialized to json and hashed each time it's + # encountered. + item_hash = hash( + core.json_serializer.serialize_json_to_string(item) + ) + + is_new_id = False + item_id = br_map.get(tag, {}).get(item_hash) + if item_id is not None: + return (id_string(item_id), is_new_id) + + # This is a new id, figure out what it should be. + is_new_id = True + + # Attempt to preserve the ID from the input metadata. 
+ preferred_id = None + orig_id_string = item.metadata.get(META_NAMESPACE, {}).get("@id") + if orig_id_string is not None: + orig_id_match = ID_RE.match(orig_id_string) + if orig_id_match is not None: + match_groups = orig_id_match.groupdict() + orig_tagname = match_groups["tag"] + if orig_tagname == tag: + preferred_id = int(match_groups["id"]) + + # Generate an id by finding the lowest value in a contiguous range not + # colliding with an existing value + tag_id_map = br_map.setdefault(tag, {}) + existing_ids = set(tag_id_map.values()) + if preferred_id is not None and preferred_id not in existing_ids: + item_id = preferred_id + else: + # Make a range from 1 including the ID after the largest assigned + # (hence the +2 since range is non-inclusive on the upper bound) + max_assigned_id = max(existing_ids) if existing_ids else 0 + max_possible_id = (max_assigned_id + 2) + possible_ids = set(range(1, max_possible_id)) + + # Select the lowest unassigned ID + item_id = min(possible_ids.difference(existing_ids)) + + # Store the created id + tag_id_map[item_hash] = item_id + + return (id_string(item_id), is_new_id) + + +def _backreference_build(tag): + """ + A decorator for functions creating XML elements to implement the id system + described in FCP XML. + + This wrapper determines if the otio item is equivalent to one encountered + before with the provided tag name. If the item hasn't been encountered then + the wrapped function will be invoked and the XML element from that function + will have the ``id`` attribute set and be stored in br_map. + If the item is equivalent to a previously provided item, the wrapped + function won't be invoked and a simple tag with the previous instance's id + will be returned instead. + + The wrapped function must: + - Have the otio item as the first positional argument. + - Have br_map (backreference map, a dictionary) as the last positional + arg. br_map stores the state for encountered items. 
+ + :param tag: The xml tag of the element the wrapped function generates. + """ + # We can also encode these back-references if an item is accessed multiple + # times. To do this we store an id attribute on the element. For back- + # references we then only need to return an empty element of that type with + # the id we logged before + + def singleton_decorator(func): + @functools.wraps(func) + def wrapper(item, *args, **kwargs): + br_map = args[-1] + + item_id, id_is_new = _backreference_for_item(item, tag, br_map) + + # if the item exists in the map already, we should use the + # abbreviated XML element referring to the original + if not id_is_new: + return cElementTree.Element(tag, id=item_id) + + # This is the first time for this unique item, it needs it's full + # XML. Get the element generated by the wrapped function and add + # the id attribute. + elem = func(item, *args, **kwargs) + elem.attrib["id"] = item_id + + return elem + + return wrapper + + return singleton_decorator + + +def _append_new_sub_element(parent, tag, attrib=None, text=None): + """ + Creates a sub-element with the provided tag, attributes, and text. + + This is a convenience because the :class:`SubElement` constructor does not + provide the ability to set ``text``. + + :param parent: The parent element. + :param tag: The tag string for the element. + :param attrib: An optional dictionary of attributes for the element. + :param text: Optional text value for the element. + + :return: The new XML element. + """ + elem = cElementTree.SubElement(parent, tag, **attrib or {}) + if text is not None: + elem.text = text + + return elem + + +def _build_rate(fps): + """ + Given a framerate, makes a ``rate`` xml tree. + + :param fps: The framerate. + :return: The fcp xml ``rate`` tree. 
+ """ + rate = math.ceil(fps) + + rate_e = cElementTree.Element('rate') + _append_new_sub_element(rate_e, 'timebase', text=str(int(rate))) + _append_new_sub_element( + rate_e, + 'ntsc', + text='FALSE' if rate == fps else 'TRUE' + ) + return rate_e + + +def _build_timecode(time, fps, drop_frame=False, additional_metadata=None): + """ + Makes a timecode xml element tree. + + .. warning:: The drop_frame parameter is currently ignored and + auto-determined by rate. This is because the underlying otio timecode + conversion assumes DFTC based on rate. + + :param time: The :class:`opentime.RationalTime` for the timecode. + :param fps: The framerate for the timecode. + :param drop_frame: If True, generates drop-frame timecode. + :param additional_metadata: A dictionary with other metadata items like + ``field``, ``reel``, ``source``, and ``format``. It is assumed this + dictionary is of the form generated by :func:`_xml_tree_to_dict` when + the file was read originally. + + :return: The ``timecode`` element. 
+ """ + if additional_metadata: + # Only allow legal child items for the timecode element + filtered = { + k: v for k, v in additional_metadata.items() + if k in {"field", "reel", "source", "format"} + } + tc_element = _dict_to_xml_tree(filtered, "timecode") + else: + tc_element = cElementTree.Element("timecode") + + tc_element.append(_build_rate(fps)) + rate_is_not_ntsc = (tc_element.find('./rate/ntsc').text == "FALSE") + if drop_frame and rate_is_not_ntsc: + tc_fps = fps * (1000 / 1001.0) + else: + tc_fps = fps + + # Get the time values + tc_time = opentime.RationalTime(time.value_rescaled_to(fps), tc_fps) + tc_string = opentime.to_timecode(tc_time, tc_fps, drop_frame) + + _append_new_sub_element(tc_element, "string", text=tc_string) + + frame_number = int(round(time.value)) + _append_new_sub_element( + tc_element, "frame", text="{:.0f}".format(frame_number) + ) + + drop_frame = (";" in tc_string) + display_format = "DF" if drop_frame else "NDF" + _append_new_sub_element(tc_element, "displayformat", text=display_format) + + return tc_element + + +def _build_item_timings( + item_e, + item, + timeline_range, + transition_offsets, + timecode +): + # source_start is absolute time taking into account the timecode of the + # media. But xml regards the source in point from the start of the media. + # So we subtract the media timecode. 
+ item_rate = item.source_range.start_time.rate + source_start = (item.source_range.start_time - timecode) + source_start = source_start.rescaled_to(item_rate) + + source_end = (item.source_range.end_time_exclusive() - timecode) + source_end = source_end.rescaled_to(item_rate) + + start = '{:.0f}'.format(timeline_range.start_time.value) + end = '{:.0f}'.format(timeline_range.end_time_exclusive().value) + + item_e.append(_build_rate(item_rate)) + + if transition_offsets[0] is not None: + start = '-1' + source_start -= transition_offsets[0] + if transition_offsets[1] is not None: + end = '-1' + source_end += transition_offsets[1] + + _append_new_sub_element( + item_e, 'duration', + text='{:.0f}'.format(item.source_range.duration.value) + ) + _append_new_sub_element(item_e, 'start', text=start) + _append_new_sub_element(item_e, 'end', text=end) + _append_new_sub_element( + item_e, + 'in', + text='{:.0f}'.format(source_start.value) + ) + _append_new_sub_element( + item_e, + 'out', + text='{:.0f}'.format(source_end.value) + ) + + +@_backreference_build('file') +def _build_empty_file(media_ref, parent_range, br_map): + file_e = _element_with_item_metadata("file", media_ref) + _append_new_sub_element(file_e, "name", text=media_ref.name) + + if media_ref.available_range is not None: + available_range = media_ref.available_range + else: + available_range = opentime.TimeRange( + opentime.RationalTime(0, parent_range.start_time.rate), + parent_range.duration, + ) + + ref_rate = available_range.start_time.rate + file_e.append(_build_rate(ref_rate)) + + # Only provide a duration if one came from the media, don't invent one. + # For example, Slugs have no duration specified. 
+ if media_ref.available_range: + duration = available_range.duration.rescaled_to(ref_rate) + _append_new_sub_element( + file_e, + 'duration', + text='{:.0f}'.format(duration.value), + ) + + # timecode + ref_tc_metadata = media_ref.metadata.get(META_NAMESPACE, {}).get( + "timecode" + ) + tc_element = _build_timecode_from_metadata( + available_range.start_time, ref_tc_metadata + ) + file_e.append(tc_element) + + file_media_e = _get_or_create_subelement(file_e, "media") + if file_media_e.find("video") is None: + _append_new_sub_element(file_media_e, "video") + + return file_e + + +@_backreference_build('file') +def _build_file(media_reference, br_map): + file_e = _element_with_item_metadata("file", media_reference) + + available_range = media_reference.available_range + url_path = _url_to_path(media_reference.target_url) + + file_name = ( + media_reference.name if media_reference.name + else os.path.basename(url_path) + ) + _append_new_sub_element(file_e, 'name', text=file_name) + _append_new_sub_element(file_e, 'pathurl', text=media_reference.target_url) + + # timing info + file_e.append(_build_rate(available_range.start_time.rate)) + _append_new_sub_element( + file_e, 'duration', + text='{:.0f}'.format(available_range.duration.value) + ) + + # timecode + ref_tc_metadata = media_reference.metadata.get(META_NAMESPACE, {}).get( + "timecode" + ) + tc_element = _build_timecode_from_metadata( + available_range.start_time, ref_tc_metadata + ) + file_e.append(tc_element) + + # we need to flag the file reference with the content types, otherwise it + # will not get recognized + # TODO: We should use a better method for this. Perhaps pre-walk the + # timeline and find all the track kinds this media is present in? 
+ if not file_e.find("media"): + file_media_e = _get_or_create_subelement(file_e, "media") + + audio_exts = {'.wav', '.aac', '.mp3', '.aif', '.aiff', '.m4a'} + has_video = (os.path.splitext(url_path)[1].lower() not in audio_exts) + if has_video and file_media_e.find("video") is None: + _append_new_sub_element(file_media_e, "video") + + # TODO: This is assuming all files have an audio track. Not sure what + # the implications of that are. + if file_media_e.find("audio") is None: + _append_new_sub_element(file_media_e, "audio") + + return file_e + + +def _build_transition_item( + transition_item, + timeline_range, + transition_offsets, + br_map, +): + transition_e = _element_with_item_metadata( + "transitionitem", transition_item + ) + _append_new_sub_element( + transition_e, + 'start', + text='{:.0f}'.format(timeline_range.start_time.value) + ) + _append_new_sub_element( + transition_e, + 'end', + text='{:.0f}'.format(timeline_range.end_time_exclusive().value) + ) + + # Only add an alignment if it didn't already come in from the metadata dict + if transition_e.find("alignment") is None: + # default center aligned + alignment = "center" + if not transition_item.in_offset.value: + alignment = 'start-black' + elif not transition_item.out_offset.value: + alignment = 'end-black' + + _append_new_sub_element(transition_e, 'alignment', text=alignment) + # todo support 'start' and 'end' alignment + + transition_e.append(_build_rate(timeline_range.start_time.rate)) + + # Only add an effect if it didn't already come in from the metadata dict + if not transition_e.find("./effect"): + try: + effectid = transition_item.metadata[META_NAMESPACE]["effectid"] + except KeyError: + effectid = "Cross Dissolve" + + effect_e = _append_new_sub_element(transition_e, 'effect') + _append_new_sub_element(effect_e, 'name', text=transition_item.name) + _append_new_sub_element(effect_e, 'effectid', text=effectid) + _append_new_sub_element(effect_e, 'effecttype', text='transition') + 
_append_new_sub_element(effect_e, 'mediatype', text='video') + + return transition_e + + +@_backreference_build("clipitem") +def _build_clip_item_without_media( + clip_item, + timeline_range, + transition_offsets, + br_map, +): + # TODO: Does this need to be a separate function or could it be unified + # with _build_clip_item? + clip_item_e = _element_with_item_metadata("clipitem", clip_item) + if "frameBlend" not in clip_item_e.attrib: + clip_item_e.attrib["frameBlend"] = "FALSE" + + if clip_item.media_reference.available_range: + media_start_time = clip_item.media_reference.available_range.start_time + else: + media_start_time = opentime.RationalTime( + 0, timeline_range.start_time.rate + ) + + _append_new_sub_element(clip_item_e, 'name', text=clip_item.name) + clip_item_e.append( + _build_empty_file( + clip_item.media_reference, timeline_range, br_map + ) + ) + clip_item_e.extend([_build_marker(m) for m in clip_item.markers]) + + _build_item_timings( + clip_item_e, + clip_item, + timeline_range, + transition_offsets, + media_start_time, + ) + + return clip_item_e + + +@_backreference_build("clipitem") +def _build_clip_item(clip_item, timeline_range, transition_offsets, br_map): + is_generator = isinstance( + clip_item.media_reference, schema.GeneratorReference + ) + + tagname = "generatoritem" if is_generator else "clipitem" + clip_item_e = _element_with_item_metadata(tagname, clip_item) + if "frameBlend" not in clip_item_e.attrib: + clip_item_e.attrib["frameBlend"] = "FALSE" + + if is_generator: + clip_item_e.append(_build_generator_effect(clip_item, br_map)) + else: + clip_item_e.append(_build_file(clip_item.media_reference, br_map)) + + # set the clip name from the media reference if not defined on the clip + if clip_item.name is not None: + name = clip_item.name + elif is_generator: + name = clip_item.media_reference.name + else: + url_path = _url_to_path(clip_item.media_reference.target_url) + name = os.path.basename(url_path) + + 
_append_new_sub_element(clip_item_e, 'name', text=name) + + if clip_item.media_reference.available_range: + clip_item_e.append( + _build_rate(clip_item.source_range.start_time.rate) + ) + clip_item_e.extend(_build_marker(m) for m in clip_item.markers) + + if clip_item.media_reference.available_range: + timecode = clip_item.media_reference.available_range.start_time + else: + timecode = opentime.RationalTime( + 0, clip_item.source_range.start_time.rate + ) + + _build_item_timings( + clip_item_e, + clip_item, + timeline_range, + transition_offsets, + timecode + ) + + return clip_item_e + + +def _build_generator_effect(clip_item, br_map): + """ + Builds an effect element for the generator ref on the provided clip item. + + :param clip_item: a clip with a :class:`schema.GeneratorReference` as + its ``media_reference``. + :param br_map: The backreference map. + """ + # Since we don't support effects in a standard way, just try and build + # based on the metadata provided at deserialization so we can roundtrip + generator_ref = clip_item.media_reference + try: + fcp_xml_effect_info = generator_ref.metadata[META_NAMESPACE] + except KeyError: + return _build_empty_file( + generator_ref, + clip_item.source_range, + br_map, + ) + + # Get the XML Tree built from the metadata + effect_element = _dict_to_xml_tree(fcp_xml_effect_info, "effect") + + # Validate the metadata and make sure it contains the required elements + for required in ("effectid", "effecttype", "mediatype", "effectcategory"): + if effect_element.find(required) is None: + return _build_empty_file( + generator_ref, + clip_item.source_range, + br_map, + ) + + # Add the name + _append_new_sub_element(effect_element, "name", text=generator_ref.name) + + return effect_element + + +@_backreference_build("clipitem") +def _build_track_item(track, timeline_range, transition_offsets, br_map): + clip_item_e = _element_with_item_metadata("clipitem", track) + if "frameBlend" not in clip_item_e.attrib: + 
clip_item_e.attrib["frameBlend"] = "FALSE" + + _append_new_sub_element( + clip_item_e, + 'name', + text=os.path.basename(track.name) + ) + + track_e = _build_sequence_for_stack(track, timeline_range, br_map) + + clip_item_e.append(_build_rate(track.source_range.start_time.rate)) + clip_item_e.extend([_build_marker(m) for m in track.markers]) + clip_item_e.append(track_e) + timecode = opentime.RationalTime(0, timeline_range.start_time.rate) + + _build_item_timings( + clip_item_e, + track, + timeline_range, + transition_offsets, + timecode + ) + + return clip_item_e + + +def _build_item(item, timeline_range, transition_offsets, br_map): + if isinstance(item, schema.Transition): + return _build_transition_item( + item, + timeline_range, + transition_offsets, + br_map + ) + elif isinstance(item, schema.Clip): + if isinstance( + item.media_reference, + schema.MissingReference + ): + return _build_clip_item_without_media( + item, + timeline_range, + transition_offsets, + br_map + ) + else: + return _build_clip_item( + item, + timeline_range, + transition_offsets, + br_map + ) + elif isinstance(item, schema.Stack): + return _build_track_item( + item, + timeline_range, + transition_offsets, + br_map + ) + else: + raise ValueError('Unsupported item: ' + str(item)) + + +def _build_top_level_track(track, track_rate, br_map): + track_e = _element_with_item_metadata("track", track) + + for n, item in enumerate(track): + if isinstance(item, schema.Gap): + continue + + transition_offsets = [None, None] + previous_item = track[n - 1] if n > 0 else None + next_item = track[n + 1] if n + 1 < len(track) else None + if not isinstance(item, schema.Transition): + # find out if this item has any neighboring transition + if isinstance(previous_item, schema.Transition): + if previous_item.out_offset.value: + transition_offsets[0] = previous_item.in_offset + else: + transition_offsets[0] = None + if isinstance(next_item, schema.Transition): + if next_item.in_offset.value: + 
transition_offsets[1] = next_item.out_offset + else: + transition_offsets[1] = None + + timeline_range = track.range_of_child_at_index(n) + timeline_range = opentime.TimeRange( + timeline_range.start_time.rescaled_to(track_rate), + timeline_range.duration.rescaled_to(track_rate) + ) + track_e.append( + _build_item(item, timeline_range, transition_offsets, br_map) + ) + + return track_e + + +def _build_marker(marker): + marker_e = _element_with_item_metadata("marker", marker) + + marked_range = marker.marked_range + + _append_new_sub_element(marker_e, 'name', text=marker.name) + _append_new_sub_element( + marker_e, 'in', + text='{:.0f}'.format(marked_range.start_time.value) + ) + _append_new_sub_element(marker_e, 'out', text='-1') + + return marker_e + + +def _build_timecode_from_metadata(time, tc_metadata=None): + """ + Makes a timecode element with the given time and (if available) + ```timecode`` metadata stashed on input. + + :param time: The :class:`opentime.RationalTime` to encode. + :param tc_metadata: The xml dict for the ``timecode`` element populated + on read. + + :return: A timecode element. + """ + if tc_metadata is None: + tc_metadata = {} + + try: + # Parse the rate in the preserved metadata, if available + tc_rate = _rate_for_element( + _dict_to_xml_tree(tc_metadata["rate"], "rate") + ) + except KeyError: + # Default to the rate in the start time + tc_rate = time.rate + + drop_frame = (tc_metadata.get("displayformat", "NDF") == "DF") + + return _build_timecode( + time, + tc_rate, + drop_frame, + additional_metadata=tc_metadata, + ) + + +@_backreference_build('sequence') +def _build_sequence_for_timeline(timeline, timeline_range, br_map): + sequence_e = _element_with_item_metadata("sequence", timeline) + + _add_stack_elements_to_sequence( + timeline.tracks, sequence_e, timeline_range, br_map + ) + + # In the case of timelines, use the timeline name rather than the stack + # name. 
+ if timeline.name: + sequence_e.find('./name').text = timeline.name + + # Add the sequence global start + if timeline.global_start_time is not None: + seq_tc_metadata = timeline.metadata.get(META_NAMESPACE, {}).get( + "timecode" + ) + tc_element = _build_timecode_from_metadata( + timeline.global_start_time, seq_tc_metadata + ) + sequence_e.append(tc_element) + + return sequence_e + + +@_backreference_build('sequence') +def _build_sequence_for_stack(stack, timeline_range, br_map): + sequence_e = _element_with_item_metadata("sequence", stack) + + _add_stack_elements_to_sequence(stack, sequence_e, timeline_range, br_map) + + return sequence_e + + +def _add_stack_elements_to_sequence(stack, sequence_e, timeline_range, br_map): + _append_new_sub_element(sequence_e, 'name', text=stack.name) + _append_new_sub_element( + sequence_e, 'duration', + text='{:.0f}'.format(timeline_range.duration.value) + ) + sequence_e.append(_build_rate(timeline_range.start_time.rate)) + track_rate = timeline_range.start_time.rate + + media_e = _get_or_create_subelement(sequence_e, "media") + video_e = _get_or_create_subelement(media_e, 'video') + audio_e = _get_or_create_subelement(media_e, 'audio') + + for track in stack: + track_elements = _build_top_level_track(track, track_rate, br_map) + if track.kind == schema.TrackKind.Video: + video_e.append(track_elements) + elif track.kind == schema.TrackKind.Audio: + audio_e.append(track_elements) + + for marker in stack.markers: + sequence_e.append(_build_marker(marker)) + + +def _build_collection(collection, br_map): + tracks = [] + for item in collection: + if not isinstance(item, schema.Timeline): + continue + + timeline_range = opentime.TimeRange( + start_time=item.global_start_time, + duration=item.duration() + ) + tracks.append( + _build_sequence_for_timeline(item, timeline_range, br_map) + ) + + return tracks + + +# -------------------- +# adapter requirements +# -------------------- + +def read_from_string(input_str): + tree = 
cElementTree.fromstring(input_str) + + parser = FCP7XMLParser(tree) + sequences = parser.top_level_sequences() + + if len(sequences) == 1: + return sequences[0] + elif len(sequences) > 1: + return schema.SerializableCollection( + name="Sequences", + children=sequences, + ) + else: + raise ValueError('No top-level sequences found') + + +def write_to_string(input_otio): + tree_e = cElementTree.Element('xmeml', version="4") + project_e = _append_new_sub_element(tree_e, 'project') + _append_new_sub_element(project_e, 'name', text=input_otio.name) + children_e = _append_new_sub_element(project_e, 'children') + + br_map = collections.defaultdict(dict) + + if isinstance(input_otio, schema.Timeline): + timeline_range = opentime.TimeRange( + start_time=input_otio.global_start_time, + duration=input_otio.duration() + ) + children_e.append( + _build_sequence_for_timeline( + input_otio, timeline_range, br_map + ) + ) + elif isinstance(input_otio, schema.SerializableCollection): + children_e.extend( + _build_collection(input_otio, br_map) + ) + + return _make_pretty_string(tree_e) diff --git a/pype/vendor/python/python_2/opentimelineio/adapters/otio_json.py b/pype/vendor/python/python_2/opentimelineio/adapters/otio_json.py new file mode 100644 index 00000000000..66b8db29044 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/adapters/otio_json.py @@ -0,0 +1,48 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. 
+# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""This adapter lets you read and write native .otio files""" + +from .. import ( + core +) + + +# @TODO: Implement out of process plugins that hand around JSON + + +def read_from_file(filepath): + return core.deserialize_json_from_file(filepath) + + +def read_from_string(input_str): + return core.deserialize_json_from_string(input_str) + + +def write_to_string(input_otio): + return core.serialize_json_to_string(input_otio) + + +def write_to_file(input_otio, filepath): + return core.serialize_json_to_file(input_otio, filepath) diff --git a/pype/vendor/python/python_2/opentimelineio/algorithms/__init__.py b/pype/vendor/python/python_2/opentimelineio/algorithms/__init__.py new file mode 100644 index 00000000000..e211598bb37 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/algorithms/__init__.py @@ -0,0 +1,44 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. 
+# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Algorithms for OTIO objects.""" + +# flake8: noqa +from .track_algo import ( + track_trimmed_to_range, + track_with_expanded_transitions +) + +from .stack_algo import ( + flatten_stack, + top_clip_at_time, +) + +from .filter import ( + filtered_composition, + filtered_with_sequence_context +) +from .timeline_algo import ( + timeline_trimmed_to_range +) diff --git a/pype/vendor/python/python_2/opentimelineio/algorithms/filter.py b/pype/vendor/python/python_2/opentimelineio/algorithms/filter.py new file mode 100644 index 00000000000..8f9e2ed41bb --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/algorithms/filter.py @@ -0,0 +1,275 @@ +#!/usr/bin/env python +# +# Copyright 2018 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. 
+# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Algorithms for filtering OTIO files. """ + +import copy + +from .. import ( + schema +) + + +def _is_in(thing, container): + return any(thing is item for item in container) + + +def _isinstance_in(child, typelist): + return any(isinstance(child, t) for t in typelist) + + +def filtered_composition( + root, + unary_filter_fn, + types_to_prune=None, +): + """Filter a deep copy of root (and children) with unary_filter_fn. + + types_to_prune:: tuple of types, example: (otio.schema.Gap,...) + + 1. Make a deep copy of root + 2. Starting with root, perform a depth first traversal + 3. For each item (including root): + a. if types_to_prune is not None and item is an instance of a type + in types_to_prune, prune it from the copy, continue. + b. Otherwise, pass the copy to unary_filter_fn. If unary_filter_fn: + I. returns an object: add it to the copy, replacing original + II. returns a tuple: insert it into the list, replacing original + III. returns None: prune it + 4. If an item is pruned, do not traverse its children + 5. Return the new deep copy. 
+ + EXAMPLE 1 (filter): + If your unary function is: + def fn(thing): + if thing.name == B: + return thing' # some transformation of B + else: + return thing + + If you have a track: [A,B,C] + + filtered_composition(track, fn) => [A,B',C] + + EXAMPLE 2 (prune): + If your unary function is: + def fn(thing): + if thing.name == B: + return None + else: + return thing + + filtered_composition(track, fn) => [A,C] + + EXAMPLE 3 (expand): + If your unary function is: + def fn(thing): + if thing.name == B: + return tuple(B_1,B_2,B_3) + else: + return thing + + filtered_composition(track, fn) => [A,B_1,B_2,B_3,C] + + EXAMPLE 4 (prune gaps): + track :: [Gap, A, Gap] + filtered_composition( + track, lambda _:_, types_to_prune=(otio.schema.Gap,)) => [A] + """ + + # deep copy everything + mutable_object = copy.deepcopy(root) + + prune_list = set() + + header_list = [mutable_object] + + if isinstance(mutable_object, schema.Timeline): + header_list.append(mutable_object.tracks) + + iter_list = header_list + list(mutable_object.each_child()) + + for child in iter_list: + if _safe_parent(child) is not None and _is_in(child.parent(), prune_list): + prune_list.add(child) + continue + + parent = None + child_index = None + if _safe_parent(child) is not None: + child_index = child.parent().index(child) + parent = child.parent() + del child.parent()[child_index] + + # first try to prune + if (types_to_prune and _isinstance_in(child, types_to_prune)): + result = None + # finally call the user function + else: + result = unary_filter_fn(child) + + if child is mutable_object: + mutable_object = result + + if result is None: + prune_list.add(child) + continue + + if type(result) is not tuple: + result = [result] + + if parent is not None: + parent[child_index:child_index] = result + + return mutable_object + + +def _safe_parent(child): + if hasattr(child, 'parent'): + return child.parent() + return None + + +def filtered_with_sequence_context( + root, + reduce_fn, + types_to_prune=None, +): 
+ """Filter a deep copy of root (and children) with reduce_fn. + + reduce_fn::function(previous_item, current, next_item) (see below) + types_to_prune:: tuple of types, example: (otio.schema.Gap,...) + + 1. Make a deep copy of root + 2. Starting with root, perform a depth first traversal + 3. For each item (including root): + a. if types_to_prune is not None and item is an instance of a type + in types_to_prune, prune it from the copy, continue. + b. Otherwise, pass (prev, copy, and next) to reduce_fn. If reduce_fn: + I. returns an object: add it to the copy, replacing original + II. returns a tuple: insert it into the list, replacing original + III. returns None: prune it + + ** note that reduce_fn is always passed objects from the original + deep copy, not what prior calls return. See below for examples + 4. If an item is pruned, do not traverse its children + 5. Return the new deep copy. + + EXAMPLE 1 (filter): + >>> track = [A,B,C] + >>> def fn(prev_item, thing, next_item): + ... if prev_item.name == A: + ... return D # some new clip + ... else: + ... return thing + >>> filtered_with_sequence_context(track, fn) => [A,D,C] + + order of calls to fn: + fn(None, A, B) => A + fn(A, B, C) => D + fn(B, C, D) => C # !! note that it was passed B instead of D. + + EXAMPLE 2 (prune): + >>> track = [A,B,C] + >>> def fn(prev_item, thing, next_item): + ... if prev_item.name == A: + ... return None # prune the clip + ... else: + ... return thing + >>> filtered_with_sequence_context(track, fn) => [A,C] + + order of calls to fn: + fn(None, A, B) => A + fn(A, B, C) => None + fn(B, C, D) => C # !! note that it was passed B instead of D. + + EXAMPLE 3 (expand): + >>> def fn(prev_item, thing, next_item): + ... if prev_item.name == A: + ... return (D, E) # tuple of new clips + ... else: + ... return thing + >>> filtered_with_sequence_context(track, fn) => [A, D, E, C] + + the order of calls to fn will be: + fn(None, A, B) => A + fn(A, B, C) => (D, E) + fn(B, C, D) => C # !! 
note that it was passed B instead of D. + """ + + # deep copy everything + mutable_object = copy.deepcopy(root) + + prune_list = set() + + header_list = [mutable_object] + + if isinstance(mutable_object, schema.Timeline): + header_list.append(mutable_object.tracks) + + iter_list = header_list + list(mutable_object.each_child()) + + # expand to include prev, next when appropriate + expanded_iter_list = [] + for child in iter_list: + if _safe_parent(child) and isinstance(child.parent(), schema.Track): + prev_item, next_item = child.parent().neighbors_of(child) + expanded_iter_list.append((prev_item, child, next_item)) + else: + expanded_iter_list.append((None, child, None)) + + for prev_item, child, next_item in expanded_iter_list: + if _safe_parent(child) is not None and _is_in(child.parent(), prune_list): + prune_list.add(child) + continue + + parent = None + child_index = None + if _safe_parent(child) is not None: + child_index = child.parent().index(child) + parent = child.parent() + del child.parent()[child_index] + + # first try to prune + if types_to_prune and _isinstance_in(child, types_to_prune): + result = None + # finally call the user function + else: + result = reduce_fn(prev_item, child, next_item) + + if child is mutable_object: + mutable_object = result + + if result is None: + prune_list.add(child) + continue + + if type(result) is not tuple: + result = [result] + + if parent is not None: + parent[child_index:child_index] = result + + return mutable_object diff --git a/pype/vendor/python/python_2/opentimelineio/algorithms/stack_algo.py b/pype/vendor/python/python_2/opentimelineio/algorithms/stack_algo.py new file mode 100644 index 00000000000..cdb6424b462 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/algorithms/stack_algo.py @@ -0,0 +1,138 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in 
+# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +__doc__ = """ Algorithms for stack objects. """ + +import copy + +from .. import ( + schema, + opentime, +) +from . import ( + track_algo +) + + +def top_clip_at_time(in_stack, t): + """Return the topmost visible child that overlaps with time t. + + Example: + tr1: G1, A, G2 + tr2: [B------] + G1, and G2 are gaps, A and B are clips. + + If t is within A, a will be returned. If t is within G1 or G2, B will be + returned. + """ + + # ensure that it only runs on stacks + if not isinstance(in_stack, schema.Stack): + raise ValueError( + "Argument in_stack must be of type otio.schema.Stack, " + "not: '{}'".format( + type(in_stack) + ) + ) + + # build a range to use the `each_child`method. + search_range = opentime.TimeRange( + start_time=t, + # 0 duration so we are just sampling a point in time. + # XXX Should this duration be equal to the length of one sample? + # opentime.RationalTime(1, rate)? + duration=opentime.RationalTime(0, t.rate) + ) + + # walk through the children of the stack in reverse order. 
+ for track in reversed(in_stack): + valid_results = [] + if hasattr(track, "each_child"): + valid_results = list( + c for c in track.each_clip(search_range, shallow_search=True) + if c.visible() + ) + + # XXX doesn't handle nested tracks/stacks at the moment + + for result in valid_results: + return result + + return None + + +def flatten_stack(in_stack): + """Flatten a Stack, or a list of Tracks, into a single Track. + Note that the 1st Track is the bottom one, and the last is the top. + """ + + flat_track = schema.Track() + flat_track.name = "Flattened" + + # map of track to track.range_of_all_children + range_track_map = {} + + def _get_next_item( + in_stack, + track_index=None, + trim_range=None + ): + if track_index is None: + # start with the top-most track + track_index = len(in_stack) - 1 + if track_index < 0: + # if you get to the bottom, you're done + return + + track = in_stack[track_index] + if trim_range is not None: + track = track_algo.track_trimmed_to_range(track, trim_range) + + track_map = range_track_map.get(track) + if track_map is None: + track_map = track.range_of_all_children() + range_track_map[track] = track_map + + for item in track: + if ( + item.visible() + or track_index == 0 + or isinstance(item, schema.Transition) + ): + yield item + else: + trim = track_map[item] + if trim_range is not None: + trim = opentime.TimeRange( + start_time=trim.start_time + trim_range.start_time, + duration=trim.duration + ) + track_map[item] = trim + for more in _get_next_item(in_stack, track_index - 1, trim): + yield more + + for item in _get_next_item(in_stack): + flat_track.append(copy.deepcopy(item)) + + return flat_track diff --git a/pype/vendor/python/python_2/opentimelineio/algorithms/timeline_algo.py b/pype/vendor/python/python_2/opentimelineio/algorithms/timeline_algo.py new file mode 100644 index 00000000000..bbb0ae62755 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/algorithms/timeline_algo.py @@ -0,0 +1,56 @@ +# +# Copyright 
2019 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Algorithms for timeline objects.""" + +import copy + +from . import ( + track_algo +) + + +def timeline_trimmed_to_range(in_timeline, trim_range): + """Returns a new timeline that is a copy of the in_timeline, but with items + outside the trim_range removed and items on the ends trimmed to the + trim_range. Note that the timeline is never expanded, only shortened. + Please note that you could do nearly the same thing non-destructively by + just setting the Track's source_range but sometimes you want to really cut + away the stuff outside and that's what this function is meant for.""" + new_timeline = copy.deepcopy(in_timeline) + + for track_num, child_track in enumerate(in_timeline.tracks): + # @TODO: put the trim_range into the space of the tracks + # new_range = new_timeline.tracks.transformed_time_range( + # trim_range, + # child_track + # ) + + # trim the track and assign it to the new stack. 
+ new_timeline.tracks[track_num] = track_algo.track_trimmed_to_range( + child_track, + trim_range + ) + + return new_timeline diff --git a/pype/vendor/python/python_2/opentimelineio/algorithms/track_algo.py b/pype/vendor/python/python_2/opentimelineio/algorithms/track_algo.py new file mode 100644 index 00000000000..8ac406f1d6d --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/algorithms/track_algo.py @@ -0,0 +1,236 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Algorithms for track objects.""" + +import copy + +from .. import ( + schema, + exceptions, + opentime, +) + + +def track_trimmed_to_range(in_track, trim_range): + """Returns a new track that is a copy of the in_track, but with items + outside the trim_range removed and items on the ends trimmed to the + trim_range. Note that the track is never expanded, only shortened. 
+ Please note that you could do nearly the same thing non-destructively by + just setting the Track's source_range but sometimes you want to really cut + away the stuff outside and that's what this function is meant for.""" + new_track = copy.deepcopy(in_track) + + track_map = new_track.range_of_all_children() + + # iterate backwards so we can delete items + for c, child in reversed(list(enumerate(new_track))): + child_range = track_map[child] + if not trim_range.overlaps(child_range): + # completely outside the trim range, so we discard it + del new_track[c] + elif trim_range.contains(child_range): + # completely contained, keep the whole thing + pass + else: + if isinstance(child, schema.Transition): + raise exceptions.CannotTrimTransitionsError( + "Cannot trim in the middle of a Transition." + ) + + # we need to clip the end(s) + child_source_range = child.trimmed_range() + + # should we trim the start? + if trim_range.start_time > child_range.start_time: + trim_amount = trim_range.start_time - child_range.start_time + child_source_range = opentime.TimeRange( + start_time=child_source_range.start_time + trim_amount, + duration=child_source_range.duration - trim_amount + + ) + + # should we trim the end? + trim_end = trim_range.end_time_exclusive() + child_end = child_range.end_time_exclusive() + if trim_end < child_end: + trim_amount = child_end - trim_end + child_source_range = opentime.TimeRange( + start_time=child_source_range.start_time, + duration=child_source_range.duration - trim_amount + + ) + + # set the new child's trims + child.source_range = child_source_range + + return new_track + + +def track_with_expanded_transitions(in_track): + """Expands transitions such that neighboring clips are trimmed into + regions of overlap. + + For example, if your track is: + Clip1, T, Clip2 + + will return: + Clip1', Clip1_t, T, Clip2_t, Clip2' + + Where Clip1' is the part of Clip1 not in the transition, Clip1_t is the + part inside the transition and so on. 
+ """ + + result_track = [] + + seq_iter = iter(in_track) + prev_thing = None + thing = next(seq_iter, None) + next_thing = next(seq_iter, None) + + while thing is not None: + if isinstance(thing, schema.Transition): + result_track.append(_expand_transition(thing, in_track)) + else: + # not a transition, but might be trimmed by one before or after + # in the track + pre_transition = None + next_transition = None + + if isinstance(prev_thing, schema.Transition): + pre_transition = prev_thing + + if isinstance(next_thing, schema.Transition): + next_transition = next_thing + + result_track.append( + _trim_from_transitions( + thing, + pre=pre_transition, + post=next_transition + ) + ) + + # loop + prev_thing = thing + thing = next_thing + next_thing = next(seq_iter, None) + + return result_track + + +def _expand_transition(target_transition, from_track): + """ Expand transitions into the portions of pre-and-post clips that + overlap with the transition. + """ + + result = from_track.neighbors_of( + target_transition, + schema.NeighborGapPolicy.around_transitions + ) + + trx_duration = target_transition.in_offset + target_transition.out_offset + + # make copies of the before and after, and modify their in/out points + pre = copy.deepcopy(result.previous) + + if isinstance(pre, schema.Transition): + raise exceptions.TransitionFollowingATransitionError( + "cannot put two transitions next to each other in a track: " + "{}, {}".format( + pre, + target_transition + ) + ) + if target_transition.in_offset is None: + raise RuntimeError( + "in_offset is None on: {}".format(target_transition) + ) + + if target_transition.out_offset is None: + raise RuntimeError( + "out_offset is None on: {}".format(target_transition) + ) + + pre.name = (pre.name or "") + "_transition_pre" + + # ensure that pre.source_range is set, because it will get manipulated + tr = pre.trimmed_range() + + pre.source_range = opentime.TimeRange( + start_time=( + tr.end_time_exclusive() - 
target_transition.in_offset + ), + duration=trx_duration.rescaled_to( + tr.start_time + ) + ) + + post = copy.deepcopy(result.next) + if isinstance(post, schema.Transition): + raise exceptions.TransitionFollowingATransitionError( + "cannot put two transitions next to each other in a track: " + "{}, {}".format( + target_transition, + post + ) + ) + + post.name = (post.name or "") + "_transition_post" + + # ensure that post.source_range is set, because it will get manipulated + tr = post.trimmed_range() + + post.source_range = opentime.TimeRange( + start_time=( + tr.start_time - target_transition.in_offset + ).rescaled_to(tr.start_time), + duration=trx_duration.rescaled_to(tr.start_time) + ) + + return pre, target_transition, post + + +def _trim_from_transitions(thing, pre=None, post=None): + """ Trim clips next to transitions. """ + + result = copy.deepcopy(thing) + + # We might not have a source_range yet, + # We can trim to the computed trimmed_range to + # ensure we have something. + new_range = result.trimmed_range() + start_time = new_range.start_time + duration = new_range.duration + + if pre: + start_time += pre.out_offset + duration -= pre.out_offset + + if post: + duration -= post.in_offset + + result.source_range = opentime.TimeRange(start_time, duration) + + return result diff --git a/pype/vendor/python/python_2/opentimelineio/console/__init__.py b/pype/vendor/python/python_2/opentimelineio/console/__init__.py new file mode 100644 index 00000000000..e5f6e869880 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/console/__init__.py @@ -0,0 +1,40 @@ +# +# Copyright 2018 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. 
This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Console scripts for OpenTimelineIO + +.. moduleauthor:: Pixar Animation Studios +""" + +# flake8: noqa + +# in dependency hierarchy +from . import ( + otioconvert, + otiocat, + otiostat, + console_utils, + autogen_serialized_datamodel, +) + diff --git a/pype/vendor/python/python_2/opentimelineio/console/autogen_serialized_datamodel.py b/pype/vendor/python/python_2/opentimelineio/console/autogen_serialized_datamodel.py new file mode 100644 index 00000000000..046e8cbd1c8 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/console/autogen_serialized_datamodel.py @@ -0,0 +1,302 @@ +#!/usr/bin/env python +# +# Copyright 2019 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. 
+# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + + +"""Generates documentation of the serialized data model for OpenTimelineIO.""" + +import argparse +import inspect +import json +import tempfile +import sys + +try: + # python2 + import StringIO as io +except ImportError: + # python3 + import io + +import opentimelineio as otio + + +DOCUMENT_HEADER = """# OpenTimelineIO Serialized Data Documentation + +This document is a list of all the OpenTimelineIO classes that serialize to and +from JSON, omitting SchemaDef plugins. + +This document is automatically generated by running + docs/autogen_serialized_datamodel.py, or by running `make doc-model`. It is + part of the unit tests suite and should be updated whenever the schema changes. + If it needs to be updated, run: `make doc-model-update` and this file should be + regenerated. + +# Classes + +""" + +FIELDS_ONLY_HEADER = """# OpenTimelineIO Serialized Data Documentation + +This document is a list of all the OpenTimelineIO classes that serialize to and +from JSON, omitting plugins classes and docstrings. + +This document is automatically generated by running + docs/autogen_serialized_datamodel.py, or by running `make doc-model`. It is + part of the unit tests suite and should be updated whenever the schema changes. + If it needs to be updated, run: `make doc-model-update` and this file should be + regenerated. 
+ +# Classes + +""" + +CLASS_HEADER_WITH_DOCS = """ +### {classname} + +*full module path*: `{modpath}` + +*documentation*: + +``` +{docstring} +``` + +parameters: +""" + +CLASS_HEADER_ONLY_FIELDS = """ +### {classname} + +parameters: +""" + +MODULE_HEADER = """ +## Module: {modname} +""" + +PROP_HEADER = """- *{propkey}*: {prophelp} +""" + +# @TODO: having type information here would be awesome +PROP_HEADER_NO_HELP = """- *{propkey}* +""" + +# three ways to try and get the property + docstring +PROP_FETCHERS = ( + lambda cl, k: inspect.getdoc(getattr(cl, k)), + lambda cl, k: inspect.getdoc(getattr(cl, "_" + k)), + lambda cl, k: inspect.getdoc(getattr(cl(), k)) and "" or "", +) + + +def _parsed_args(): + """ parse commandline arguments with argparse """ + + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + group = parser.add_mutually_exclusive_group() + group.add_argument( + "-d", + "--dryrun", + action="store_true", + default=False, + help="Dryrun mode - print out instead of perform actions" + ) + group.add_argument( + "-o", + "--output", + type=str, + default=None, + help="Update the baseline with the current version" + ) + + return parser.parse_args() + + +# things to skip +SKIP_CLASSES = [otio.core.SerializableObject, otio.core.UnknownSchema] +SKIP_KEYS = ["OTIO_SCHEMA"] # not data, just for the backing format +SKIP_MODULES = ["opentimelineio.schemadef"] # because these are plugins + + +def _generate_model_for_module(mod, classes, modules): + modules.add(mod) + + # fetch the classes from this module + serializeable_classes = [ + thing for thing in mod.__dict__.values() + if ( + inspect.isclass(thing) + and thing not in classes + and issubclass(thing, otio.core.SerializableObject) + or thing in ( + otio.opentime.RationalTime, + otio.opentime.TimeRange, + otio.opentime.TimeTransform, + ) + ) + ] + + # serialize/deserialize the classes to capture their serialized parameters + model = {} + for cl 
in serializeable_classes: + if cl in SKIP_CLASSES: + continue + + model[cl] = {} + field_dict = json.loads(otio.adapters.otio_json.write_to_string(cl())) + for k in field_dict.keys(): + if k in SKIP_KEYS: + continue + + for fetcher in PROP_FETCHERS: + try: + model[cl][k] = fetcher(cl, k) + break + except AttributeError: + pass + else: + sys.stderr.write("ERROR: could not fetch property: {}".format(k)) + + # Stashing the OTIO_SCHEMA back into the dictionary since the + # documentation uses this information in its header. + model[cl]["OTIO_SCHEMA"] = field_dict["OTIO_SCHEMA"] + + classes.update(model) + + # find new modules to recurse into + new_mods = sorted( + ( + thing for thing in mod.__dict__.values() + if ( + inspect.ismodule(thing) + and thing not in modules + and all(not thing.__name__.startswith(t) for t in SKIP_MODULES) + ) + ), + key=lambda mod: str(mod) + ) + + # recurse into the new modules and update the classes and modules values + [_generate_model_for_module(m, classes, modules) for m in new_mods] + + +def _generate_model(): + classes = {} + modules = set() + _generate_model_for_module(otio, classes, modules) + return classes + + +def _write_documentation(model): + md_with_helpstrings = io.StringIO() + md_only_fields = io.StringIO() + + md_with_helpstrings.write(DOCUMENT_HEADER) + md_only_fields.write(FIELDS_ONLY_HEADER) + + modules = {} + for cl in model: + modules.setdefault(cl.__module__, []).append(cl) + + CURRENT_MODULE = None + for module_list in sorted(modules): + this_mod = ".".join(module_list.split('.')[:2]) + if this_mod != CURRENT_MODULE: + CURRENT_MODULE = this_mod + md_with_helpstrings.write(MODULE_HEADER.format(modname=this_mod)) + md_only_fields.write(MODULE_HEADER.format(modname=this_mod)) + + # because these are classes, they need to sort on their stringified + # names + for cl in sorted(modules[module_list], key=lambda cl: str(cl)): + modname = inspect.getmodule(cl).__name__ + label = model[cl]["OTIO_SCHEMA"] + 
md_with_helpstrings.write( + CLASS_HEADER_WITH_DOCS.format( + classname=label, + modpath=modname + "." + cl.__name__, + docstring=cl.__doc__ + ) + ) + md_only_fields.write( + CLASS_HEADER_ONLY_FIELDS.format( + classname=label, + ) + ) + + for key, helpstr in sorted(model[cl].items()): + if key in SKIP_KEYS: + continue + md_with_helpstrings.write( + PROP_HEADER.format(propkey=key, prophelp=helpstr) + ) + md_only_fields.write( + PROP_HEADER_NO_HELP.format(propkey=key) + ) + + return md_with_helpstrings.getvalue(), md_only_fields.getvalue() + + +def main(): + """ main entry point """ + args = _parsed_args() + with_docs, without_docs = generate_and_write_documentation() + + # print it out somewhere + if args.dryrun: + print(with_docs) + return + + output = args.output + if not output: + output = tempfile.NamedTemporaryFile( + 'w', + suffix="otio_serialized_schema.md", + delete=False + ).name + + with open(output, 'w') as fo: + fo.write(with_docs) + + # write version without docstrings + prefix, suffix = output.rsplit('.', 1) + output_only_fields = prefix + "-only-fields." + suffix + + with open(output_only_fields, 'w') as fo: + fo.write(without_docs) + + print("wrote documentation to {} and {}".format(output, output_only_fields)) + + +def generate_and_write_documentation(): + model = _generate_model() + return _write_documentation(model) + + +if __name__ == '__main__': + main() diff --git a/pype/vendor/python/python_2/opentimelineio/console/console_utils.py b/pype/vendor/python/python_2/opentimelineio/console/console_utils.py new file mode 100644 index 00000000000..9c659433e39 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/console/console_utils.py @@ -0,0 +1,72 @@ +# +# Copyright 2019 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. 
is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +import ast + +from .. import ( + media_linker, +) + +"""Utilities for OpenTimelineIO commandline modules.""" + + +def arg_list_to_map(arg_list, label): + """ + Convert an argument of the form -A foo=bar from the parsed result to a map. + """ + + argument_map = {} + for pair in arg_list: + if '=' not in pair: + raise ValueError( + "error: {} arguments must be in the form key=value" + " got: {}".format(label, pair) + ) + + key, val = pair.split('=', 1) # only split on the 1st '=' + try: + # Sometimes we need to pass a bool, int, list, etc. + parsed_value = ast.literal_eval(val) + except (ValueError, SyntaxError): + # Fall back to a simple string + parsed_value = val + argument_map[key] = parsed_value + + return argument_map + + +def media_linker_name(ml_name_arg): + """ + Parse commandline arguments for the media linker, which can be not set + (fall back to default), "" or "none" (don't link media) or the name of a + media linker to use. 
+ """ + if ml_name_arg.lower() == 'default': + media_linker_name = media_linker.MediaLinkingPolicy.ForceDefaultLinker + elif ml_name_arg.lower() in ['none', '']: + media_linker_name = media_linker.MediaLinkingPolicy.DoNotLinkMedia + else: + media_linker_name = ml_name_arg + + return media_linker_name diff --git a/pype/vendor/python/python_2/opentimelineio/console/otiocat.py b/pype/vendor/python/python_2/opentimelineio/console/otiocat.py new file mode 100644 index 00000000000..95131445120 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/console/otiocat.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. 
+# + +"""Print the contents of an OTIO file to stdout.""" + +import argparse +import sys + +import opentimelineio as otio + + +def _parsed_args(): + """ parse commandline arguments with argparse """ + + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + 'filepath', + type=str, + nargs='+', + help='files to print the contents of' + ) + parser.add_argument( + '-a', + '--adapter-arg', + type=str, + default=[], + action='append', + help='Extra arguments to be passed to input adapter in the form of ' + 'key=value. Values are strings, numbers or Python literals: True, ' + 'False, etc. Can be used multiple times: -a burrito="bar" -a taco=12.' + ) + parser.add_argument( + '-m', + '--media-linker', + type=str, + default="Default", + help=( + "Specify a media linker. 'Default' means use the " + "$OTIO_DEFAULT_MEDIA_LINKER if set, 'None' or '' means explicitly " + "disable the linker, and anything else is interpreted as the name" + " of the media linker to use." + ) + ) + parser.add_argument( + '-M', + '--media-linker-arg', + type=str, + default=[], + action='append', + help='Extra arguments to be passed to the media linker in the form of ' + 'key=value. Values are strings, numbers or Python literals: True, ' + 'False, etc. Can be used multiple times: -M burrito="bar" -M taco=12.' + ) + + return parser.parse_args() + + +def _otio_compatible_file_to_json_string( + fpath, + media_linker_name, + media_linker_argument_map, + adapter_argument_map +): + """Read the file at fpath with the default otio adapter and return the json + as a string. 
+ """ + + adapter = otio.adapters.from_name("otio_json") + return adapter.write_to_string( + otio.adapters.read_from_file( + fpath, + media_linker_name=media_linker_name, + media_linker_argument_map=media_linker_argument_map, + **adapter_argument_map + ) + ) + + +def main(): + """Parse arguments and call _otio_compatible_file_to_json_string.""" + + args = _parsed_args() + + media_linker_name = otio.console.console_utils.media_linker_name( + args.media_linker + ) + + try: + read_adapter_arg_map = otio.console.console_utils.arg_list_to_map( + args.adapter_arg, + "adapter" + ) + media_linker_argument_map = otio.console.console_utils.arg_list_to_map( + args.media_linker_arg, + "media linker" + ) + except ValueError as exc: + sys.stderr.write("\n" + str(exc) + "\n") + sys.exit(1) + + for fpath in args.filepath: + print( + _otio_compatible_file_to_json_string( + fpath, + media_linker_name, + media_linker_argument_map, + read_adapter_arg_map + ) + ) + + +if __name__ == '__main__': + main() diff --git a/pype/vendor/python/python_2/opentimelineio/console/otioconvert.py b/pype/vendor/python/python_2/opentimelineio/console/otioconvert.py new file mode 100644 index 00000000000..9d45a0fcf49 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/console/otioconvert.py @@ -0,0 +1,259 @@ +#!/usr/bin/env python +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. 
+# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +import argparse +import sys +import copy + +import opentimelineio as otio + +__doc__ = """ Python wrapper around OTIO to convert timeline files between \ +formats. + +Available adapters: {} +""".format(otio.adapters.available_adapter_names()) + + +def _parsed_args(): + """ parse commandline arguments with argparse """ + + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '-i', + '--input', + type=str, + required=True, + help='path to input file', + ) + parser.add_argument( + '-o', + '--output', + type=str, + required=True, + help='path to output file', + ) + parser.add_argument( + '-I', + '--input-adapter', + type=str, + default=None, + help="Explicitly use this adapter for reading the input file", + ) + parser.add_argument( + '-O', + '--output-adapter', + type=str, + default=None, + help="Explicitly use this adapter for writing the output file", + ) + parser.add_argument( + '-T', + '--tracks', + type=str, + default=None, + help="Pick one or more tracks, by 0-based index, separated by commas.", + ) + parser.add_argument( + '-m', + '--media-linker', + type=str, + default="Default", + help=( + "Specify a media linker. 'Default' means use the " + "$OTIO_DEFAULT_MEDIA_LINKER if set, 'None' or '' means explicitly " + "disable the linker, and anything else is interpreted as the name" + " of the media linker to use." 
+ ) + ) + parser.add_argument( + '-M', + '--media-linker-arg', + type=str, + default=[], + action='append', + help='Extra arguments to be passed to the media linker in the form of ' + 'key=value. Values are strings, numbers or Python literals: True, ' + 'False, etc. Can be used multiple times: -M burrito="bar" -M taco=12.' + ) + parser.add_argument( + '-a', + '--adapter-arg', + type=str, + default=[], + action='append', + help='Extra arguments to be passed to input adapter in the form of ' + 'key=value. Values are strings, numbers or Python literals: True, ' + 'False, etc. Can be used multiple times: -a burrito="bar" -a taco=12.' + ) + parser.add_argument( + '-A', + '--output-adapter-arg', + type=str, + default=[], + action='append', + help='Extra arguments to be passed to output adapter in the form of ' + 'key=value. Values are strings, numbers or Python literals: True, ' + 'False, etc. Can be used multiple times: -A burrito="bar" -A taco=12.' + ) + trim_args = parser.add_argument_group( + title="Trim Arguments", + description="Arguments that allow you to trim the OTIO file." + ) + trim_args.add_argument( + '--begin', + type=str, + default=None, + help=( + "Trim out everything in the timeline before this time, in the " + "global time frame of the timeline. Argument should be in the form" + ' "VALUE,RATE", eg: --begin "10,24". Requires --end argument.' + ), + ) + trim_args.add_argument( + '--end', + type=str, + default=None, + help=( + "Trim out everything in the timeline after this time, in the " + "global time frame of the timeline. Argument should be in the form" + ' "VALUE,RATE", eg: --begin "10,24". Requires --begin argument.' 
+ ), + ) + + result = parser.parse_args() + + if result.begin is not None and result.end is None: + parser.error("--begin requires --end.") + if result.end is not None and result.begin is None: + parser.error("--end requires --begin.") + + if result.begin is not None: + try: + value, rate = result.begin.split(",") + result.begin = otio.opentime.RationalTime(float(value), float(rate)) + except ValueError: + parser.error( + "--begin argument needs to be of the form: VALUE,RATE where " + "VALUE is the (float) time value of the resulting RationalTime " + "and RATE is the (float) time rate of the resulting RationalTime," + " not '{}'".format(result.begin) + ) + + if result.end is not None: + try: + value, rate = result.end.split(",") + result.end = otio.opentime.RationalTime(float(value), float(rate)) + except ValueError: + parser.error( + "--end argument needs to be of the form: VALUE,RATE where " + "VALUE is the (float) time value of the resulting RationalTime " + "and RATE is the (float) time rate of the resulting RationalTime," + " not '{}'".format(result.begin) + ) + + return result + + +def main(): + """Parse arguments and convert the files.""" + + args = _parsed_args() + + in_adapter = args.input_adapter + if in_adapter is None: + in_adapter = otio.adapters.from_filepath(args.input).name + + out_adapter = args.output_adapter + if out_adapter is None: + out_adapter = otio.adapters.from_filepath(args.output).name + + media_linker_name = otio.console.console_utils.media_linker_name( + args.media_linker + ) + + try: + read_adapter_arg_map = otio.console.console_utils.arg_list_to_map( + args.adapter_arg, + "input adapter" + ) + ml_args = otio.console.console_utils.arg_list_to_map( + args.media_linker_arg, + "media linker" + ) + except ValueError as exc: + sys.stderr.write("\n" + str(exc) + "\n") + sys.exit(1) + + result_tl = otio.adapters.read_from_file( + args.input, + in_adapter, + media_linker_name=media_linker_name, + media_linker_argument_map=ml_args, + 
**read_adapter_arg_map + ) + + if args.tracks: + result_tracks = copy.deepcopy(otio.schema.Stack()) + del result_tracks[:] + for track in args.tracks.split(","): + tr = result_tl.tracks[int(track)] + del result_tl.tracks[int(track)] + print("track {0} is of kind: '{1}'".format(track, tr.kind)) + result_tracks.append(tr) + result_tl.tracks = result_tracks + + # handle trim arguments + if args.begin is not None and args.end is not None: + result_tl = otio.algorithms.timeline_trimmed_to_range( + result_tl, + otio.opentime.range_from_start_end_time(args.begin, args.end) + ) + + try: + write_adapter_arg_map = otio.console.console_utils.arg_list_to_map( + args.output_adapter_arg, + "output adapter" + ) + except ValueError as exc: + sys.stderr.write("\n" + str(exc) + "\n") + sys.exit(1) + + otio.adapters.write_to_file( + result_tl, + args.output, + out_adapter, + **write_adapter_arg_map + ) + + +if __name__ == '__main__': + try: + main() + except otio.exceptions.OTIOError as err: + sys.stderr.write("ERROR: " + str(err) + "\n") + sys.exit(1) diff --git a/pype/vendor/python/python_2/opentimelineio/console/otiostat.py b/pype/vendor/python/python_2/opentimelineio/console/otiostat.py new file mode 100644 index 00000000000..9cd554727ab --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/console/otiostat.py @@ -0,0 +1,193 @@ +#!/usr/bin/env python +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. 
+# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Print statistics about the otio file, including validation information.""" + +import argparse +import sys + +import opentimelineio as otio + + +def _parsed_args(): + """ parse commandline arguments with argparse """ + + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + 'filepath', + type=str, + nargs='+', + help='files to operate on' + ) + + return parser.parse_args() + + +TESTS = [] + + +def stat_check(name): + def real_stat_check(fn): + TESTS.append((name, fn)) + return fn + return real_stat_check + + +@stat_check("parsed") +def _did_parse(input): + return input and True or False + + +@stat_check("top level object") +def _top_level_object(input): + return input._serializable_label + + +@stat_check("number of tracks") +def _num_tracks(input): + try: + return len(input.tracks) + except AttributeError: + return 0 + + +@stat_check("Tracks are the same length") +def _equal_length_tracks(tl): + if not tl.tracks: + return True + for i, track in enumerate(tl.tracks): + if track.duration() != tl.tracks[0].duration(): + raise RuntimeError( + "track {} is not the same duration as the other tracks." 
+ " Track {} duration, vs: {}".format( + i, + track.duration(), + tl.tracks[0].duration() + ) + ) + return True + + +@stat_check("deepest nesting") +def _deepest_nesting(input): + def depth(parent): + if not isinstance(parent, otio.core.Composition): + return 1 + d = 0 + for child in parent: + d = max(d, depth(child) + 1) + return d + if isinstance(input, otio.schema.Timeline): + return depth(input.tracks) + 1 + else: + return depth(input) + + +@stat_check("number of clips") +def _num_clips(input): + return len(list(input.each_clip())) + + +@stat_check("total duration") +def _total_duration(input): + try: + return input.tracks.duration() + except AttributeError: + return "n/a" + + +@stat_check("total duration in timecode") +def _total_duration_timecode(input): + try: + d = input.tracks.duration() + return otio.opentime.to_timecode(d, d.rate) + except AttributeError: + return "n/a" + + +@stat_check("top level rate") +def _top_level_rate(input): + try: + return input.tracks.duration().rate + except AttributeError: + return "n/a" + + +@stat_check("clips with cdl data") +def _clips_with_cdl_data(input): + return len(list(c for c in input.each_clip() if 'cdl' in c.metadata)) + + +@stat_check("Tracks with non standard types") +def _sequences_with_non_standard_types(input): + return len( + list( + c + for c in input.each_child(descended_from_type=otio.schema.Track) + if c.kind not in (otio.schema.TrackKind.__dict__) + ) + ) + + +def _stat_otio(input_otio): + for (test, testfunc) in TESTS: + try: + print("{}: {}".format(test, testfunc(input_otio))) + except (otio.exceptions.OTIOError) as e: + sys.stderr.write( + "There was an OTIO Error: " + " {}\n".format(e), + ) + continue + except (Exception) as e: + sys.stderr.write("There was a system error: {}\n".format(e)) + continue + + +def main(): + """ main entry point """ + args = _parsed_args() + + for fp in args.filepath: + try: + parsed_otio = otio.adapters.read_from_file(fp) + except (otio.exceptions.OTIOError) as e: + 
sys.stderr.write( + "The file did not successfully parse, with error:" + " {}\n".format(e), + ) + continue + except (Exception) as e: + sys.stderr.write("There was a system error: {}\n".format(e)) + continue + + _stat_otio(parsed_otio) + + +if __name__ == '__main__': + main() diff --git a/pype/vendor/python/python_2/opentimelineio/core/__init__.py b/pype/vendor/python/python_2/opentimelineio/core/__init__.py new file mode 100644 index 00000000000..ac5c0bbcc00 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/core/__init__.py @@ -0,0 +1,67 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Internal implementation details of OpenTimelineIO.""" + +# flake8: noqa + +from . import ( + serializable_object +) +from .serializable_object import ( + SerializableObject, + serializable_field, + deprecated_field, +) +from .composable import ( + Composable +) +from .item import ( + Item +) +from . 
import composition +from .composition import ( + Composition, +) +from . import type_registry +from .type_registry import ( + register_type, + upgrade_function_for, + schema_name_from_label, + schema_version_from_label, + instance_from_schema, +) +from .json_serializer import ( + serialize_json_to_string, + serialize_json_to_file, + deserialize_json_from_string, + deserialize_json_from_file, +) +from .media_reference import ( + MediaReference, +) +from . import unknown_schema +from .unknown_schema import ( + UnknownSchema +) diff --git a/pype/vendor/python/python_2/opentimelineio/core/composable.py b/pype/vendor/python/python_2/opentimelineio/core/composable.py new file mode 100644 index 00000000000..78c7fba3492 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/core/composable.py @@ -0,0 +1,141 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Composable class definition. + +An object that can be composed by tracks. 
+""" + +import weakref + +from . import serializable_object +from . import type_registry + +import copy + + +@type_registry.register_type +class Composable(serializable_object.SerializableObject): + """An object that can be composed by tracks. + + Base class of: + Item + Transition + """ + + name = serializable_object.serializable_field( + "name", + doc="Composable name." + ) + metadata = serializable_object.serializable_field( + "metadata", + doc="Metadata dictionary for this Composable." + ) + + _serializable_label = "Composable.1" + _class_path = "core.Composable" + + def __init__(self, name=None, metadata=None): + super(Composable, self).__init__() + self._parent = None + + # initialize the serializable fields + self.name = name + self.metadata = copy.deepcopy(metadata) if metadata else {} + + @staticmethod + def visible(): + """Return the visibility of the Composable. By default True.""" + + return False + + @staticmethod + def overlapping(): + """Return whether an Item is overlapping. By default False.""" + + return False + + # @{ functions to express the composable hierarchy + def _root_parent(self): + return ([self] + self._ancestors())[-1] + + def _ancestors(self): + ancestors = [] + seqi = self + while seqi.parent() is not None: + seqi = seqi.parent() + ancestors.append(seqi) + return ancestors + + def parent(self): + """Return the parent Composable, or None if self has no parent.""" + + return self._parent() if self._parent is not None else None + + def _set_parent(self, new_parent): + if new_parent is not None and self.parent() is not None: + raise ValueError( + "Composable named '{}' is already in a composition named '{}'," + " remove from previous parent before adding to new one." 
+ " Composable: {}, Composition: {}".format( + self.name, + self.parent() is not None and self.parent().name or None, + self, + self.parent() + ) + ) + self._parent = weakref.ref(new_parent) if new_parent is not None else None + + def is_parent_of(self, other): + """Returns true if self is a parent or ancestor of other.""" + + visited = set([]) + while other.parent() is not None and other.parent() not in visited: + if other.parent() is self: + return True + visited.add(other) + other = other.parent() + + return False + + # @} + + def __repr__(self): + return ( + "otio.{}(" + "name={}, " + "metadata={}" + ")".format( + self._class_path, + repr(self.name), + repr(self.metadata) + ) + ) + + def __str__(self): + return "{}({}, {})".format( + self._class_path.split('.')[-1], + self.name, + str(self.metadata) + ) diff --git a/pype/vendor/python/python_2/opentimelineio/core/composition.py b/pype/vendor/python/python_2/opentimelineio/core/composition.py new file mode 100644 index 00000000000..4da5a4b0915 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/core/composition.py @@ -0,0 +1,718 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. 
+# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Composition base class. An object that contains `Items`.""" + +import collections + +from . import ( + serializable_object, + type_registry, + item, + composable, +) + +from .. import ( + opentime, + exceptions +) + + +def _bisect_right( + seq, + tgt, + key_func, + lower_search_bound=0, + upper_search_bound=None +): + """Return the index of the last item in seq such that all e in seq[:index] + have key_func(e) <= tgt, and all e in seq[index:] have key_func(e) > tgt. + + Thus, seq.insert(index, value) will insert value after the rightmost item + such that meets the above condition. + + lower_search_bound and upper_search_bound bound the slice to be searched. + + Assumes that seq is already sorted. + """ + + if lower_search_bound < 0: + raise ValueError('lower_search_bound must be non-negative') + + if upper_search_bound is None: + upper_search_bound = len(seq) + + while lower_search_bound < upper_search_bound: + midpoint_index = (lower_search_bound + upper_search_bound) // 2 + + if tgt < key_func(seq[midpoint_index]): + upper_search_bound = midpoint_index + else: + lower_search_bound = midpoint_index + 1 + + return lower_search_bound + + +def _bisect_left( + seq, + tgt, + key_func, + lower_search_bound=0, + upper_search_bound=None +): + """Return the index of the last item in seq such that all e in seq[:index] + have key_func(e) < tgt, and all e in seq[index:] have key_func(e) >= tgt. 
+ + Thus, seq.insert(index, value) will insert value before the leftmost item + such that meets the above condition. + + lower_search_bound and upper_search_bound bound the slice to be searched. + + Assumes that seq is already sorted. + """ + + if lower_search_bound < 0: + raise ValueError('lower_search_bound must be non-negative') + + if upper_search_bound is None: + upper_search_bound = len(seq) + + while lower_search_bound < upper_search_bound: + midpoint_index = (lower_search_bound + upper_search_bound) // 2 + + if key_func(seq[midpoint_index]) < tgt: + lower_search_bound = midpoint_index + 1 + else: + upper_search_bound = midpoint_index + + return lower_search_bound + + +@type_registry.register_type +class Composition(item.Item, collections.MutableSequence): + """Base class for an OTIO Item that contains other Items. + + Should be subclassed (for example by Track and Stack), not used + directly. + """ + + _serializable_label = "Composition.1" + _composition_kind = "Composition" + _modname = "core" + _composable_base_class = composable.Composable + + def __init__( + self, + name=None, + children=None, + source_range=None, + markers=None, + effects=None, + metadata=None + ): + item.Item.__init__( + self, + name=name, + source_range=source_range, + markers=markers, + effects=effects, + metadata=metadata + ) + collections.MutableSequence.__init__(self) + + # Because we know that all children are unique, we store a set + # of all the children as well to speed up __contain__ checks. + self._child_lookup = set() + + self._children = [] + if children: + # cannot simply set ._children to children since __setitem__ runs + # extra logic (assigning ._parent pointers) and populates the + # internal membership set _child_lookup. + self.extend(children) + + _children = serializable_object.serializable_field( + "children", + list, + "Items contained by this composition." 
+ ) + + @property + def composition_kind(self): + """Returns a label specifying the kind of composition.""" + + return self._composition_kind + + def __str__(self): + return "{}({}, {}, {}, {})".format( + self._composition_kind, + str(self.name), + str(self._children), + str(self.source_range), + str(self.metadata) + ) + + def __repr__(self): + return ( + "otio.{}.{}(" + "name={}, " + "children={}, " + "source_range={}, " + "metadata={}" + ")".format( + self._modname, + self._composition_kind, + repr(self.name), + repr(self._children), + repr(self.source_range), + repr(self.metadata) + ) + ) + + transform = serializable_object.deprecated_field() + + def child_at_time( + self, + search_time, + shallow_search=False, + ): + """Return the child that overlaps with time search_time. + + search_time is in the space of self. + + If shallow_search is false, will recurse into compositions. + """ + + range_map = self.range_of_all_children() + + # find the first item whose end_time_exclusive is after the + first_inside_range = _bisect_left( + seq=self._children, + tgt=search_time, + key_func=lambda child: range_map[child].end_time_exclusive(), + ) + + # find the last item whose start_time is before the + last_in_range = _bisect_right( + seq=self._children, + tgt=search_time, + key_func=lambda child: range_map[child].start_time, + lower_search_bound=first_inside_range, + ) + + # limit the search to children who are in the search_range + possible_matches = self._children[first_inside_range:last_in_range] + + result = None + for thing in possible_matches: + if range_map[thing].overlaps(search_time): + result = thing + break + + # if the search cannot or should not continue + if ( + result is None + or shallow_search + or not hasattr(result, "child_at_time") + ): + return result + + # before you recurse, you have to transform the time into the + # space of the child + child_search_time = self.transformed_time(search_time, result) + + return result.child_at_time(child_search_time, 
shallow_search) + + def each_child( + self, + search_range=None, + descended_from_type=composable.Composable, + shallow_search=False, + ): + """ Generator that returns each child contained in the composition in + the order in which it is found. + + Arguments: + search_range: if specified, only children whose range overlaps with + the search range will be yielded. + descended_from_type: if specified, only children who are a + descendent of the descended_from_type will be yielded. + shallow_search: if True, will only search children of self, not + and not recurse into children of children. + """ + if search_range: + range_map = self.range_of_all_children() + + # find the first item whose end_time_inclusive is after the + # start_time of the search range + first_inside_range = _bisect_left( + seq=self._children, + tgt=search_range.start_time, + key_func=lambda child: range_map[child].end_time_inclusive(), + ) + + # find the last item whose start_time is before the + # end_time_inclusive of the search_range + last_in_range = _bisect_right( + seq=self._children, + tgt=search_range.end_time_inclusive(), + key_func=lambda child: range_map[child].start_time, + lower_search_bound=first_inside_range, + ) + + # limit the search to children who are in the search_range + children = self._children[first_inside_range:last_in_range] + else: + # otherwise search all the children + children = self._children + + for child in children: + # filter out children who are not descended from the specified type + # shortcut the isinstance if descended_from_type is composable + # (since all objects in compositions are already composables) + is_descendant = descended_from_type == composable.Composable + if is_descendant or isinstance(child, descended_from_type): + yield child + + # if not a shallow_search, for children that are compositions, + # recurse into their children + if not shallow_search and hasattr(child, "each_child"): + + if search_range is not None: + search_range = 
self.transformed_time_range(search_range, child) + + for valid_child in child.each_child( + search_range, + descended_from_type, + shallow_search + ): + yield valid_child + + def range_of_child_at_index(self, index): + """Return the range of a child item in the time range of this + composition. + + For example, with a track: + [ClipA][ClipB][ClipC] + + The self.range_of_child_at_index(2) will return: + TimeRange(ClipA.duration + ClipB.duration, ClipC.duration) + + To be implemented by subclass of Composition. + """ + + raise NotImplementedError + + def trimmed_range_of_child_at_index(self, index): + """Return the trimmed range of the child item at index in the time + range of this composition. + + For example, with a track: + + [ ] + + [ClipA][ClipB][ClipC] + + The range of index 2 (ClipC) will be just like + range_of_child_at_index() but trimmed based on this Composition's + source_range. + + To be implemented by child. + """ + + raise NotImplementedError + + def range_of_all_children(self): + """Return a dict mapping children to their range in this object.""" + + raise NotImplementedError + + def __copy__(self): + result = super(Composition, self).__copy__() + + # Children are *not* copied with a shallow copy since the meaning is + # ambiguous - they have a parent pointer which would need to be flipped + # or they would need to be copied, which implies a deepcopy(). + # + # This follows from the python documentation on copy/deepcopy: + # https://docs.python.org/2/library/copy.html + # + # """ + # - A shallow copy constructs a new compound object and then (to the + # extent possible) inserts references into it to the objects found in + # the original. + # - A deep copy constructs a new compound object and then, recursively, + # inserts copies into it of the objects found in the original. 
+ # """ + result._children = [] + + return result + + def __deepcopy__(self, md): + result = super(Composition, self).__deepcopy__(md) + + # deepcopy should have already copied the children, so only parent + # pointers need to be updated. + [c._set_parent(result) for c in result._children] + + # we also need to reconstruct the membership set of _child_lookup. + result._child_lookup.update(result._children) + + return result + + def _path_to_child(self, child): + if not isinstance(child, composable.Composable): + raise TypeError( + "An object child of 'Composable' is required," + " not type '{}'".format( + type(child) + ) + ) + + current = child + parents = [] + + while(current is not self): + try: + current = current.parent() + except AttributeError: + raise exceptions.NotAChildError( + "Item '{}' is not a child of '{}'.".format(child, self) + ) + + parents.append(current) + + return parents + + def range_of_child(self, child, reference_space=None): + """The range of the child in relation to another item + (reference_space), not trimmed based on this + composition's source_range. + + Note that reference_space must be in the same timeline as self. + + For example: + + | [-----] | seq + + [-----------------] Clip A + + If ClipA has duration 17, and seq has source_range: 5, duration 15, + seq.range_of_child(Clip A) will return (0, 17) + ignoring the source range of seq. + + To get the range of the child with the source_range applied, use the + trimmed_range_of_child() method. 
+ """ + + if not reference_space: + reference_space = self + + parents = self._path_to_child(child) + + current = child + result_range = None + + for parent in parents: + index = parent.index(current) + parent_range = parent.range_of_child_at_index(index) + + if not result_range: + result_range = parent_range + current = parent + continue + + result_range = opentime.TimeRange( + start_time=result_range.start_time + parent_range.start_time, + duration=result_range.duration + ) + current = parent + + if reference_space is not self: + result_range = self.transformed_time_range( + result_range, + reference_space + ) + + return result_range + + def handles_of_child(self, child): + """If media beyond the ends of this child are visible due to adjacent + Transitions (only applicable in a Track) then this will return the + head and tail offsets as a tuple of RationalTime objects. If no handles + are present on either side, then None is returned instead of a + RationalTime. + + Example usage: + >>> head, tail = track.handles_of_child(clip) + >>> if head: + ... print('Do something') + >>> if tail: + ... print('Do something else') + """ + return (None, None) + + def trimmed_range_of_child(self, child, reference_space=None): + """Get range of the child in reference_space coordinates, after the + self.source_range is applied. + + Example + | [-----] | seq + [-----------------] Clip A + + If ClipA has duration 17, and seq has source_range: 5, duration 10, + seq.trimmed_range_of_child(Clip A) will return (5, 10) + Which is trimming the range according to the source_range of seq. + + To get the range of the child without the source_range applied, use the + range_of_child() method. 
+ + Another example + | [-----] | seq source range starts on frame 4 and goes to frame 8 + [ClipA][ClipB] (each 6 frames long) + + >>> seq.range_of_child(CLipA) + 0, duration 6 + >>> seq.trimmed_range_of_child(ClipA): + 4, duration 2 + """ + + if not reference_space: + reference_space = self + + if not reference_space == self: + raise NotImplementedError + + parents = self._path_to_child(child) + + current = child + result_range = None + + for parent in parents: + index = parent.index(current) + parent_range = parent.trimmed_range_of_child_at_index(index) + + if not result_range: + result_range = parent_range + current = parent + continue + + result_range.start_time += parent_range.start_time + current = parent + + if not self.source_range or not result_range: + return result_range + + new_start_time = max( + self.source_range.start_time, + result_range.start_time + ) + + # trimmed out + if new_start_time >= result_range.end_time_exclusive(): + return None + + # compute duration + new_duration = min( + result_range.end_time_exclusive(), + self.source_range.end_time_exclusive() + ) - new_start_time + + if new_duration.value < 0: + return None + + return opentime.TimeRange(new_start_time, new_duration) + + def trim_child_range(self, child_range): + if not self.source_range: + return child_range + + # cropped out entirely + past_end_time = self.source_range.start_time >= child_range.end_time_exclusive() + before_start_time = \ + self.source_range.end_time_exclusive() <= child_range.start_time + + if past_end_time or before_start_time: + return None + + if child_range.start_time < self.source_range.start_time: + child_range = opentime.range_from_start_end_time( + self.source_range.start_time, + child_range.end_time_exclusive() + ) + + if ( + child_range.end_time_exclusive() > + self.source_range.end_time_exclusive() + ): + child_range = opentime.range_from_start_end_time( + child_range.start_time, + self.source_range.end_time_exclusive() + ) + + return child_range + + 
# @{ SerializableObject override. + def _update(self, d): + """Like the dictionary .update() method. + + Update the data dictionary of this SerializableObject with the .data + of d if d is a SerializableObject or if d is a dictionary, d itself. + """ + + # use the parent update function + super(Composition, self)._update(d) + + # ...except for the 'children' field, which needs to run through the + # insert method so that _parent pointers are correctly set on children. + self._children = [] + self.extend(d.get('children', [])) + # @} + + # @{ collections.MutableSequence implementation + def __getitem__(self, item): + return self._children[item] + + def _setitem_slice(self, key, value): + set_value = set(value) + + # check if any members in the new slice are repeated + if len(set_value) != len(value): + raise ValueError( + "Instancing not allowed in Compositions, {} contains repeated" + " items.".format(value) + ) + + old = self._children[key] + if old: + set_old = set(old) + set_outside_old = set(self._children).difference(set_old) + + isect = set_outside_old.intersection(set_value) + if isect: + raise ValueError( + "Attempting to insert duplicates of items {} already " + "present in container, instancing not allowed in " + "Compositions".format(isect) + ) + + # update old parent + for val in old: + val._set_parent(None) + self._child_lookup.remove(val) + + # insert into _children + self._children[key] = value + + # update new parent + if value: + for val in value: + val._set_parent(self) + self._child_lookup.add(val) + + def __setitem__(self, key, value): + # fetch the current thing at that index/slice + old = self._children[key] + + # in the case of key being a slice, old and value are both sequences + if old is value: + return + + if isinstance(key, slice): + return self._setitem_slice(key, value) + + if value in self: + raise ValueError( + "Composable {} already present in this container, instancing" + " not allowed in otio compositions.".format(value) + ) + + # 
unset the old child's parent and delete the membership entry. + if old is not None: + old._set_parent(None) + self._child_lookup.remove(old) + + # put it into our list of children + self._children[key] = value + + # set the new parent + if value is not None: + value._set_parent(self) + + # put it into our membership tracking set + self._child_lookup.add(value) + + def insert(self, index, item): + """Insert an item into the composition at location `index`.""" + + if not isinstance(item, self._composable_base_class): + raise TypeError( + "Not allowed to insert an object of type {0} into a {1}, only" + " objects descending from {2}. Tried to insert: {3}".format( + type(item), + type(self), + self._composable_base_class, + str(item) + ) + ) + + if item in self: + raise ValueError( + "Composable {} already present in this container, instancing" + " not allowed in otio compositions.".format(item) + ) + + # set the item's parent and add it to our membership tracking and list + # of children + item._set_parent(self) + self._child_lookup.add(item) + self._children.insert(index, item) + + def __contains__(self, item): + """Use our internal membership tracking set to speed up searches.""" + return item in self._child_lookup + + def __len__(self): + """The len() of a Composition is the # of children in it. 
+ Note that this also means that a Composition with no children + is considered False, so take care to test for "if foo is not None" + versus just "if foo" when the difference matters.""" + return len(self._children) + + def __delitem__(self, key): + # grab the old value + old = self._children[key] + + # remove it from the membership tracking set and clear parent + if old is not None: + if isinstance(key, slice): + for val in old: + self._child_lookup.remove(val) + val._set_parent(None) + else: + self._child_lookup.remove(old) + old._set_parent(None) + + # remove it from our list of children + del self._children[key] diff --git a/pype/vendor/python/python_2/opentimelineio/core/item.py b/pype/vendor/python/python_2/opentimelineio/core/item.py new file mode 100644 index 00000000000..7e035a3a9ec --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/core/item.py @@ -0,0 +1,243 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. 
+# + +"""Implementation of the Item base class. OTIO Objects that contain media.""" + +import copy + +from .. import ( + opentime, + exceptions, +) + +from . import ( + serializable_object, + composable, +) + + +class Item(composable.Composable): + """An Item is a Composable that can be part of a Composition or Timeline. + + More specifically, it is a Composable that has meaningful duration. + + Can also hold effects and markers. + + Base class of: + - Composition (and children) + - Clip + - Gap + """ + + _serializable_label = "Item.1" + _class_path = "core.Item" + + def __init__( + self, + name=None, + source_range=None, + effects=None, + markers=None, + metadata=None, + ): + super(Item, self).__init__(name=name, metadata=metadata) + + self.source_range = copy.deepcopy(source_range) + self.effects = copy.deepcopy(effects) if effects else [] + self.markers = copy.deepcopy(markers) if markers else [] + + name = serializable_object.serializable_field("name", doc="Item name.") + source_range = serializable_object.serializable_field( + "source_range", + opentime.TimeRange, + doc="Range of source to trim to. Can be None or a TimeRange." + ) + + @staticmethod + def visible(): + """Return the visibility of the Item. By default True.""" + + return True + + def duration(self): + """Convience wrapper for the trimmed_range.duration of the item.""" + + return self.trimmed_range().duration + + def available_range(self): + """Implemented by child classes, available range of media.""" + + raise NotImplementedError + + def trimmed_range(self): + """The range after applying the source range.""" + if self.source_range is not None: + return copy.copy(self.source_range) + + return self.available_range() + + def visible_range(self): + """The range of this item's media visible to its parent. + Includes handles revealed by adjacent transitions (if any). 
+ This will always be larger or equal to trimmed_range().""" + result = self.trimmed_range() + if self.parent(): + head, tail = self.parent().handles_of_child(self) + if head: + result = opentime.TimeRange( + start_time=result.start_time - head, + duration=result.duration + head + ) + if tail: + result = opentime.TimeRange( + start_time=result.start_time, + duration=result.duration + tail + ) + return result + + def trimmed_range_in_parent(self): + """Find and return the trimmed range of this item in the parent.""" + if not self.parent(): + raise exceptions.NotAChildError( + "No parent of {}, cannot compute range in parent.".format(self) + ) + + return self.parent().trimmed_range_of_child(self) + + def range_in_parent(self): + """Find and return the untrimmed range of this item in the parent.""" + if not self.parent(): + raise exceptions.NotAChildError( + "No parent of {}, cannot compute range in parent.".format(self) + ) + + return self.parent().range_of_child(self) + + def transformed_time(self, t, to_item): + """Converts time t in the coordinate system of self to coordinate + system of to_item. + + Note that self and to_item must be part of the same timeline (they must + have a common ancestor). 
+ + Example: + + 0 20 + [------t----D----------] + [--A-][t----B---][--C--] + 100 101 110 + 101 in B = 6 in D + + t = t argument + """ + + if not isinstance(t, opentime.RationalTime): + raise ValueError( + "transformed_time only operates on RationalTime, not {}".format( + type(t) + ) + ) + + # does not operate in place + result = copy.copy(t) + + if to_item is None: + return result + + root = self._root_parent() + + # transform t to root parent's coordinate system + item = self + while item != root and item != to_item: + + parent = item.parent() + result -= item.trimmed_range().start_time + result += parent.range_of_child(item).start_time + + item = parent + + ancestor = item + + # transform from root parent's coordinate system to to_item + item = to_item + while item != root and item != ancestor: + + parent = item.parent() + result += item.trimmed_range().start_time + result -= parent.range_of_child(item).start_time + + item = parent + + assert(item is ancestor) + + return result + + def transformed_time_range(self, tr, to_item): + """Transforms the timerange tr to the range of child or self to_item.""" + + return opentime.TimeRange( + self.transformed_time(tr.start_time, to_item), + tr.duration + ) + + markers = serializable_object.serializable_field( + "markers", + doc="List of markers on this item." + ) + effects = serializable_object.serializable_field( + "effects", + doc="List of effects on this item." + ) + metadata = serializable_object.serializable_field( + "metadata", + doc="Metadata dictionary for this item." 
+ ) + + def __repr__(self): + return ( + "otio.{}(" + "name={}, " + "source_range={}, " + "effects={}, " + "markers={}, " + "metadata={}" + ")".format( + self._class_path, + repr(self.name), + repr(self.source_range), + repr(self.effects), + repr(self.markers), + repr(self.metadata) + ) + ) + + def __str__(self): + return "{}({}, {}, {}, {}, {})".format( + self._class_path.split('.')[-1], + self.name, + str(self.source_range), + str(self.effects), + str(self.markers), + str(self.metadata) + ) diff --git a/pype/vendor/python/python_2/opentimelineio/core/json_serializer.py b/pype/vendor/python/python_2/opentimelineio/core/json_serializer.py new file mode 100644 index 00000000000..fee82421439 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/core/json_serializer.py @@ -0,0 +1,218 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. 
+# + +"""Serializer for SerializableObjects to JSON + +Used for the otio_json adapter as well as for plugins and manifests. +""" + +import json + +from . import ( + SerializableObject, + type_registry, +) + +from .unknown_schema import UnknownSchema + +from .. import ( + exceptions, + opentime, +) + + +# @TODO: Handle file version drifting + + +class _SerializableObjectEncoder(json.JSONEncoder): + + """ Encoder for the SerializableObject OTIO Class and its descendents. """ + + def default(self, obj): + for typename, encfn in _ENCODER_LIST: + if isinstance(obj, typename): + return encfn(obj) + + return json.JSONEncoder.default(self, obj) + + +def serialize_json_to_string(root, indent=4): + """Serialize a tree of SerializableObject to JSON. + + Returns a JSON string. + """ + + return _SerializableObjectEncoder( + sort_keys=True, + indent=indent + ).encode(root) + + +def serialize_json_to_file(root, to_file): + """ + Serialize a tree of SerializableObject to JSON. + + Writes the result to the given file path. 
+ """ + + content = serialize_json_to_string(root) + + with open(to_file, 'w') as file_contents: + file_contents.write(content) + +# @{ Encoders + + +def _encoded_serializable_object(input_otio): + if not input_otio._serializable_label: + raise exceptions.InvalidSerializableLabelError( + input_otio._serializable_label + ) + result = { + "OTIO_SCHEMA": input_otio._serializable_label, + } + result.update(input_otio._data) + return result + + +def _encoded_unknown_schema_object(input_otio): + orig_label = input_otio.data.get(UnknownSchema._original_label) + if not orig_label: + raise exceptions.InvalidSerializableLabelError( + orig_label + ) + # result is just a dict, not a SerializableObject + result = {} + result.update(input_otio.data) + result["OTIO_SCHEMA"] = orig_label # override the UnknownSchema label + del result[UnknownSchema._original_label] + return result + + +def _encoded_time(input_otio): + return { + "OTIO_SCHEMA": "RationalTime.1", + 'value': input_otio.value, + 'rate': input_otio.rate + } + + +def _encoded_time_range(input_otio): + return { + "OTIO_SCHEMA": "TimeRange.1", + 'start_time': _encoded_time(input_otio.start_time), + 'duration': _encoded_time(input_otio.duration) + } + + +def _encoded_transform(input_otio): + return { + "OTIO_SCHEMA": "TimeTransform.1", + 'offset': _encoded_time(input_otio.offset), + 'scale': input_otio.scale, + 'rate': input_otio.rate + } +# @} + + +# Ordered list of functions for encoding OTIO objects to JSON. +# More particular cases should precede more general cases. 
+_ENCODER_LIST = [ + (opentime.RationalTime, _encoded_time), + (opentime.TimeRange, _encoded_time_range), + (opentime.TimeTransform, _encoded_transform), + (UnknownSchema, _encoded_unknown_schema_object), + (SerializableObject, _encoded_serializable_object) +] + +# @{ Decoders + + +def _decoded_time(input_otio): + return opentime.RationalTime( + input_otio['value'], + input_otio['rate'] + ) + + +def _decoded_time_range(input_otio): + return opentime.TimeRange( + input_otio['start_time'], + input_otio['duration'] + ) + + +def _decoded_transform(input_otio): + return opentime.TimeTransform( + input_otio['offset'], + input_otio['scale'] + ) +# @} + + +# Map of explicit decoder functions to schema labels (for opentime) +# because opentime is implemented with no knowledge of OTIO, it doesn't use the +# same pattern as SerializableObject. +_DECODER_FUNCTION_MAP = { + 'RationalTime.1': _decoded_time, + 'TimeRange.1': _decoded_time_range, + 'TimeTransform.1': _decoded_transform, +} + + +def _as_otio(dct): + """ Specialized JSON decoder for OTIO base Objects. """ + + if "OTIO_SCHEMA" in dct: + schema_label = dct["OTIO_SCHEMA"] + + if schema_label in _DECODER_FUNCTION_MAP: + return _DECODER_FUNCTION_MAP[schema_label](dct) + + schema_name = type_registry.schema_name_from_label(schema_label) + schema_version = type_registry.schema_version_from_label(schema_label) + del dct["OTIO_SCHEMA"] + + return type_registry.instance_from_schema( + schema_name, + schema_version, + dct + ) + + return dct + + +def deserialize_json_from_string(otio_string): + """ Deserialize a string containing JSON to OTIO objects. """ + + return json.loads(otio_string, object_hook=_as_otio) + + +def deserialize_json_from_file(otio_filepath): + """ Deserialize the file at otio_filepath containing JSON to OTIO. 
""" + + with open(otio_filepath, 'r') as file_contents: + result = deserialize_json_from_string(file_contents.read()) + result._json_path = otio_filepath + return result diff --git a/pype/vendor/python/python_2/opentimelineio/core/media_reference.py b/pype/vendor/python/python_2/opentimelineio/core/media_reference.py new file mode 100644 index 00000000000..ac348526131 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/core/media_reference.py @@ -0,0 +1,102 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Media Reference Classes and Functions.""" + +from .. import ( + opentime, +) +from . import ( + type_registry, + serializable_object, +) + +import copy + + +@type_registry.register_type +class MediaReference(serializable_object.SerializableObject): + """Base Media Reference Class. + + Currently handles string printing the child classes, which expose interface + into its data dictionary. 
+ + The requirement is that the schema is named so that external systems can + fetch the required information correctly. + """ + _serializable_label = "MediaReference.1" + _name = "MediaReference" + + def __init__( + self, + name=None, + available_range=None, + metadata=None + ): + super(MediaReference, self).__init__() + + self.name = name + self.available_range = copy.deepcopy(available_range) + self.metadata = copy.deepcopy(metadata) or {} + + name = serializable_object.serializable_field( + "name", + doc="Name of this media reference." + ) + available_range = serializable_object.serializable_field( + "available_range", + opentime.TimeRange, + doc="Available range of media in this media reference." + ) + metadata = serializable_object.serializable_field( + "metadata", + dict, + doc="Metadata dictionary." + ) + + @property + def is_missing_reference(self): + return False + + def __str__(self): + return "{}({}, {}, {})".format( + self._name, + repr(self.name), + repr(self.available_range), + repr(self.metadata) + ) + + def __repr__(self): + return ( + "otio.schema.{}(" + "name={}," + " available_range={}," + " metadata={}" + ")" + ).format( + self._name, + repr(self.name), + repr(self.available_range), + repr(self.metadata) + ) diff --git a/pype/vendor/python/python_2/opentimelineio/core/serializable_object.py b/pype/vendor/python/python_2/opentimelineio/core/serializable_object.py new file mode 100644 index 00000000000..27032569b0d --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/core/serializable_object.py @@ -0,0 +1,219 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. 
This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Implements the otio.core.SerializableObject""" + +import copy + +from . import ( + type_registry, +) + + +class SerializableObject(object): + """Base object for things that can be [de]serialized to/from .otio files. + + To define a new child class of this, you inherit from it and also use the + register_type decorator. Then you use the serializable_field function + above to create attributes that can be serialized/deserialized. + + You can use the upgrade_function_for decorator to upgrade older schemas + to newer ones. + + Finally, if you're in the process of upgrading schemas and you want to + catch code that refers to old attribute names, you can use the + deprecated_field function. This raises an exception if code attempts to + read or write to that attribute. After testing and before pushing, please + remove references to deprecated_field. + + For example + + >>> import opentimelineio as otio + + >>> @otio.core.register_type + ... class ExampleChild(otio.core.SerializableObject): + ... _serializable_label = "ExampleChild.7" + ... child_data = otio.core.serializable_field("child_data", int) + + # @TODO: delete once testing shows nothing is referencing this. 
+ >>> old_child_data_name = otio.core.deprecated_field() + + >>> @otio.core.upgrade_function_for(ExampleChild, 3) + ... def upgrade_child_to_three(_data): + ... return {"child_data" : _data["old_child_data_name"]} + """ + + # Every child must define a _serializable_label attribute. + # This attribute is a string in the form of: "SchemaName.VersionNumber" + # Where VersionNumber is an integer. + # You can use the classmethods .schema_name() and .schema_version() to + # query these fields. + _serializable_label = None + _class_path = "core.SerializableObject" + + def __init__(self): + self._data = {} + + # @{ "Reference Type" semantics for SerializableObject + # We think of the SerializableObject as a reference type - by default + # comparison is pointer comparison, but you can use 'is_equivalent_to' to + # check if the contents of the SerializableObject are the same as some + # other SerializableObject's contents. + # + # Implicitly: + # def __eq__(self, other): + # return self is other + + def is_equivalent_to(self, other): + """Returns true if the contents of self and other match.""" + + try: + if self._data == other._data: + return True + + # XXX: Gross hack takes OTIO->JSON String->Python Dictionaries + # + # using the serializer ensures that we only compare fields that are + # serializable, which is how we define equivalence. + # + # we use json.loads() to turn the string back into dictionaries + # so we can use python's equivalence for things like floating + # point numbers (ie 5.0 == 5) without having to do string + # processing. + + from . import json_serializer + import json + + lhs_str = json_serializer.serialize_json_to_string(self) + lhs = json.loads(lhs_str) + + rhs_str = json_serializer.serialize_json_to_string(other) + rhs = json.loads(rhs_str) + + return (lhs == rhs) + except AttributeError: + return False + # @} + + def _update(self, d): + """Like the dictionary .update() method. 
+ + Update the _data dictionary of this SerializableObject with the ._data + of d if d is a SerializableObject or if d is a dictionary, d itself. + """ + + if isinstance(d, SerializableObject): + self._data.update(d._data) + else: + self._data.update(d) + + @classmethod + def schema_name(cls): + return type_registry.schema_name_from_label( + cls._serializable_label + ) + + @classmethod + def schema_version(cls): + return type_registry.schema_version_from_label( + cls._serializable_label + ) + + @property + def is_unknown_schema(self): + # in general, SerializableObject will have a known schema + # but UnknownSchema subclass will redefine this property to be True + return False + + def __copy__(self): + raise NotImplementedError( + "Shallow copying is not permitted. Use a deep copy." + ) + + def __deepcopy__(self, md): + result = type(self)() + result._data = copy.deepcopy(self._data, md) + + return result + + def deepcopy(self): + return self.__deepcopy__({}) + + +def serializable_field(name, required_type=None, doc=None): + """Create a serializable_field for child classes of SerializableObject. + + Convienence function for adding attributes to child classes of + SerializableObject in such a way that they will be serialized/deserialized + automatically. + + Use it like this: + class foo(SerializableObject): + bar = serializable_field("bar", required_type=int, doc="example") + + This would indicate that class "foo" has a serializable field "bar". So: + f = foo() + f.bar = "stuff" + + # serialize & deserialize + otio_json = otio.adapters.from_name("otio") + f2 = otio_json.read_from_string(otio_json.write_to_string(f)) + + # fields should be equal + f.bar == f2.bar + + Additionally, the "doc" field will become the documentation for the + property. 
+ """ + + def getter(self): + return self._data[name] + + def setter(self, val): + # always allow None values regardless of value of required_type + if required_type is not None and val is not None: + if not isinstance(val, required_type): + raise TypeError( + "attribute '{}' must be an instance of '{}', not: {}".format( + name, + required_type, + type(val) + ) + ) + + self._data[name] = val + + return property(getter, setter, doc=doc) + + +def deprecated_field(): + """ For marking attributes on a SerializableObject deprecated. """ + + def getter(self): + raise DeprecationWarning + + def setter(self, val): + raise DeprecationWarning + + return property(getter, setter, doc="Deprecated field, do not use.") diff --git a/pype/vendor/python/python_2/opentimelineio/core/type_registry.py b/pype/vendor/python/python_2/opentimelineio/core/type_registry.py new file mode 100644 index 00000000000..de4824c42dc --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/core/type_registry.py @@ -0,0 +1,152 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Core type registry system for registering OTIO types for serialization.""" + +from .. import ( + exceptions +) + + +# Types decorate use register_type() to insert themselves into this map +_OTIO_TYPES = {} + +# maps types to a map of versions to upgrade functions +_UPGRADE_FUNCTIONS = {} + + +def schema_name_from_label(label): + """Return the schema name from the label name.""" + + return label.split(".")[0] + + +def schema_version_from_label(label): + """Return the schema version from the label name.""" + + return int(label.split(".")[1]) + + +def schema_label_from_name_version(schema_name, schema_version): + """Return the serializeable object schema label given the name and version.""" + + return "{}.{}".format(schema_name, schema_version) + + +def register_type(classobj, schemaname=None): + """ Register a class to a Schema Label. + + Normally this is used as a decorator. However, in special cases where a + type has been renamed, you might need to register the new type to multiple + schema names. To do this: + + >>> @core.register_type + ... class MyNewClass(...): + ... pass + + >>> core.register_type(MyNewClass, "MyOldName") + + This will parse the old schema name into the new class type. You may also + need to write an upgrade function if the schema itself has changed. + """ + + if schemaname is None: + schemaname = schema_name_from_label(classobj._serializable_label) + + _OTIO_TYPES[schemaname] = classobj + + return classobj + + +def upgrade_function_for(cls, version_to_upgrade_to): + """Decorator for identifying schema class upgrade functions. + + Example + >>> @upgrade_function_for(MyClass, 5) + ... def upgrade_to_version_five(data): + ... pass + + This will get called to upgrade a schema of MyClass to version 5. My class + must be a class deriving from otio.core.SerializableObject. 
+ + The upgrade function should take a single argument - the dictionary to + upgrade, and return a dictionary with the fields upgraded. + + Remember that you don't need to provide an upgrade function for upgrades + that add or remove fields, only for schema versions that change the field + names. + """ + + def decorator_func(func): + """ Decorator for marking upgrade functions """ + + _UPGRADE_FUNCTIONS.setdefault(cls, {})[version_to_upgrade_to] = func + + return func + + return decorator_func + + +def instance_from_schema(schema_name, schema_version, data_dict): + """Return an instance, of the schema from data in the data_dict.""" + + if schema_name not in _OTIO_TYPES: + from .unknown_schema import UnknownSchema + + # create an object of UnknownSchema type to represent the data + schema_label = schema_label_from_name_version(schema_name, schema_version) + data_dict[UnknownSchema._original_label] = schema_label + unknown_label = UnknownSchema._serializable_label + schema_name = schema_name_from_label(unknown_label) + schema_version = schema_version_from_label(unknown_label) + + cls = _OTIO_TYPES[schema_name] + + schema_version = int(schema_version) + if cls.schema_version() < schema_version: + raise exceptions.UnsupportedSchemaError( + "Schema '{}' has highest version available '{}', which is lower " + "than requested schema version '{}'".format( + schema_name, + cls.schema_version(), + schema_version + ) + ) + + if cls.schema_version() != schema_version: + # since the keys are the versions to upgrade to, sorting the keys + # before iterating through them should ensure that upgrade functions + # are called in order. 
+ for version, upgrade_func in sorted( + _UPGRADE_FUNCTIONS[cls].items() + ): + if version < schema_version: + continue + + data_dict = upgrade_func(data_dict) + + obj = cls() + obj._update(data_dict) + + return obj diff --git a/pype/vendor/python/python_2/opentimelineio/core/unknown_schema.py b/pype/vendor/python/python_2/opentimelineio/core/unknown_schema.py new file mode 100644 index 00000000000..94c187710ed --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/core/unknown_schema.py @@ -0,0 +1,50 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +""" +Implementation of the UnknownSchema schema. 
+""" + +from .serializable_object import SerializableObject +from .type_registry import register_type + + +@register_type +class UnknownSchema(SerializableObject): + """Represents an object whose schema is unknown to us.""" + + _serializable_label = "UnknownSchema.1" + _name = "UnknownSchema" + _original_label = "UnknownSchemaOriginalLabel" + + @property + def is_unknown_schema(self): + return True + + @property + def data(self): + """Exposes the data dictionary of the underlying SerializableObject + directly. + """ + return self._data diff --git a/pype/vendor/python/python_2/opentimelineio/exceptions.py b/pype/vendor/python/python_2/opentimelineio/exceptions.py new file mode 100644 index 00000000000..7726f2ef719 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/exceptions.py @@ -0,0 +1,89 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. 
+# + +"""Exception classes for OpenTimelineIO""" + + +class OTIOError(Exception): + pass + + +class CouldNotReadFileError(OTIOError): + pass + + +class NoKnownAdapterForExtensionError(OTIOError): + pass + + +class ReadingNotSupportedError(OTIOError): + pass + + +class WritingNotSupportedError(OTIOError): + pass + + +class NotSupportedError(OTIOError): + pass + + +class InvalidSerializableLabelError(OTIOError): + pass + + +class CannotComputeAvailableRangeError(OTIOError): + pass + + +class AdapterDoesntSupportFunctionError(OTIOError): + pass + + +class UnsupportedSchemaError(OTIOError): + pass + + +class NotAChildError(OTIOError): + pass + + +class InstancingNotAllowedError(OTIOError): + pass + + +class TransitionFollowingATransitionError(OTIOError): + pass + + +class MisconfiguredPluginError(OTIOError): + pass + + +class CannotTrimTransitionsError(OTIOError): + pass + + +class NoDefaultMediaLinkerError(OTIOError): + pass diff --git a/pype/vendor/python/python_2/opentimelineio/hooks.py b/pype/vendor/python/python_2/opentimelineio/hooks.py new file mode 100644 index 00000000000..311154553d9 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/hooks.py @@ -0,0 +1,174 @@ +# +# Copyright 2018 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. 
+# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +from . import ( + plugins, + core, +) + +__doc__ = """ +HookScripts are plugins that run at defined points ("Hooks"). + +They expose a hook_function with signature: +hook_function :: otio.schema.Timeline, Dict -> otio.schema.Timeline + +Both hook scripts and the hooks they attach to are defined in the plugin +manifest. + +You can attach multiple hook scripts to a hook. They will be executed in list +order, first to last. + +They are defined by the manifests HookScripts and hooks areas. + +>>> +{ + "OTIO_SCHEMA" : "PluginManifest.1", + "hook_scripts" : [ + { + "OTIO_SCHEMA" : "HookScript.1", + "name" : "example hook", + "execution_scope" : "in process", + "filepath" : "example.py" + } + ], + "hooks" : { + "pre_adapter_write" : ["example hook"], + "post_adapter_read" : [] + } +} + +The 'hook_scripts' area loads the python modules with the 'hook_function's to +call in them. The 'hooks' area defines the hooks (and any associated +scripts). You can further query and modify these from python. + +>>> import opentimelineio as otio +... hook_list = otio.hooks.scripts_attached_to("some_hook") # -> ['a','b','c'] +... +... # to run the hook scripts: +... otio.hooks.run("some_hook", some_timeline, optional_argument_dict) + +This will pass (some_timeline, optional_argument_dict) to 'a', which will +a new timeline that will get passed into 'b' with optional_argument_dict, +etc. + +To Edit the order, change the order in the list: + +>>> hook_list[0], hook_list[2] = hook_list[2], hook_list[0] +... 
print hook_list # ['c','b','a'] + +Now c will run, then b, then a. + +To delete a function the list: + +>>> del hook_list[1] +""" + + +@core.register_type +class HookScript(plugins.PythonPlugin): + _serializable_label = "HookScript.1" + + def __init__( + self, + name=None, + execution_scope=None, + filepath=None, + ): + """HookScript plugin constructor.""" + + super(HookScript, self).__init__(name, execution_scope, filepath) + + def run(self, in_timeline, argument_map={}): + """Run the hook_function associated with this plugin.""" + + # @TODO: should in_timeline be passed in place? or should a copy be + # made? + return self._execute_function( + "hook_function", + in_timeline=in_timeline, + argument_map=argument_map + ) + + def __str__(self): + return "HookScript({}, {}, {})".format( + repr(self.name), + repr(self.execution_scope), + repr(self.filepath) + ) + + def __repr__(self): + return ( + "otio.hooks.HookScript(" + "name={}, " + "execution_scope={}, " + "filepath={}" + ")".format( + repr(self.name), + repr(self.execution_scope), + repr(self.filepath) + ) + ) + + +def names(): + """Return a list of all the registered hooks.""" + + return plugins.ActiveManifest().hooks.keys() + + +def available_hookscript_names(): + """Return the names of HookScripts that have been registered.""" + + return [hs.name for hs in plugins.ActiveManifest().hook_scripts] + + +def available_hookscripts(): + """Return the HookScripts objects that have been registered.""" + return plugins.ActiveManifest().hook_scripts + + +def scripts_attached_to(hook): + """Return an editable list of all the hook scriptss that are attached to + the specified hook, in execution order. Changing this list will change the + order that scripts run in, and deleting a script will remove it from + executing + """ + + # @TODO: Should this return a copy? 
+ return plugins.ActiveManifest().hooks[hook] + + +def run(hook, tl, extra_args=None): + """Run all the scripts associated with hook, passing in tl and extra_args. + + Will return the return value of the last hook script. + + If no hookscripts are defined, returns tl. + """ + + hook_scripts = plugins.ActiveManifest().hooks[hook] + for name in hook_scripts: + hs = plugins.ActiveManifest().from_name(name, "hook_scripts") + tl = hs.run(tl, extra_args) + return tl diff --git a/pype/vendor/python/python_2/opentimelineio/media_linker.py b/pype/vendor/python/python_2/opentimelineio/media_linker.py new file mode 100644 index 00000000000..25473ac1d59 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/media_linker.py @@ -0,0 +1,169 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +""" MediaLinker plugins fire after an adapter has read a file in order to +produce MediaReferences that point at valid, site specific media. 
+ +They expose a "link_media_reference" function with the signature: +link_media_reference :: otio.schema.Clip -> otio.core.MediaReference + +or: + def linked_media_reference(from_clip): + result = otio.core.MediaReference() # whichever subclass + # do stuff + return result + +To get context information, they can inspect the metadata on the clip and on +the media reference. The .parent() method can be used to find the containing +track if metadata is stored there. + +Please raise an instance (or child instance) of +otio.exceptions.CannotLinkMediaError() if there is a problem linking the media. + +For example: + for clip in timeline.each_clip(): + try: + new_mr = otio.media_linker.linked_media_reference(clip) + clip.media_reference = new_mr + except otio.exceptions.CannotLinkMediaError: + # or report the error + pass +""" + +import os + +from . import ( + exceptions, + plugins, + core, +) + + +# Enum describing different media linker policies +class MediaLinkingPolicy: + DoNotLinkMedia = "__do_not_link_media" + ForceDefaultLinker = "__default" + + +# @TODO: wrap this up in the plugin system somehow? automatically generate? 
+def available_media_linker_names(): + """Return a string list of the available media linker plugins.""" + + return [str(adp.name) for adp in plugins.ActiveManifest().media_linkers] + + +def from_name(name): + """Fetch the media linker object by the name of the adapter directly.""" + + if name == MediaLinkingPolicy.ForceDefaultLinker or not name: + name = os.environ.get("OTIO_DEFAULT_MEDIA_LINKER", None) + + if not name: + return None + + # @TODO: make this handle the enums + try: + return plugins.ActiveManifest().from_name( + name, + kind_list="media_linkers" + ) + except exceptions.NotSupportedError: + raise exceptions.NotSupportedError( + "media linker not supported: {}, available: {}".format( + name, + available_media_linker_names() + ) + ) + + +def default_media_linker(): + try: + return os.environ['OTIO_DEFAULT_MEDIA_LINKER'] + except KeyError: + raise exceptions.NoDefaultMediaLinkerError( + "No default Media Linker set in $OTIO_DEFAULT_MEDIA_LINKER" + ) + + +def linked_media_reference( + target_clip, + media_linker_name=MediaLinkingPolicy.ForceDefaultLinker, + media_linker_argument_map=None +): + media_linker = from_name(media_linker_name) + + if not media_linker: + return target_clip + + # @TODO: connect this argument map up to the function call through to the + # real linker + if not media_linker_argument_map: + media_linker_argument_map = {} + + return media_linker.link_media_reference( + target_clip, + media_linker_argument_map + ) + + +@core.register_type +class MediaLinker(plugins.PythonPlugin): + _serializable_label = "MediaLinker.1" + + def __init__( + self, + name=None, + execution_scope=None, + filepath=None, + ): + super(MediaLinker, self).__init__(name, execution_scope, filepath) + + def link_media_reference(self, in_clip, media_linker_argument_map=None): + media_linker_argument_map = media_linker_argument_map or {} + + return self._execute_function( + "link_media_reference", + in_clip=in_clip, + 
media_linker_argument_map=media_linker_argument_map + ) + + def __str__(self): + return "MediaLinker({}, {}, {})".format( + repr(self.name), + repr(self.execution_scope), + repr(self.filepath) + ) + + def __repr__(self): + return ( + "otio.media_linker.MediaLinker(" + "name={}, " + "execution_scope={}, " + "filepath={}" + ")".format( + repr(self.name), + repr(self.execution_scope), + repr(self.filepath) + ) + ) diff --git a/pype/vendor/python/python_2/opentimelineio/opentime.py b/pype/vendor/python/python_2/opentimelineio/opentime.py new file mode 100644 index 00000000000..e7e58b9475a --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/opentime.py @@ -0,0 +1,856 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Library for expressing and transforming time. + +NOTE: This module is written specifically with a future port to C in mind. +When ported to C, Time will be a struct and these functions should be very +simple. 
+""" + +import math +import copy + + +VALID_NON_DROPFRAME_TIMECODE_RATES = ( + 1, + 12, + 23.976, + 23.98, + (24000 / 1001.0), + 24, + 25, + 30, + 29.97, + (30000 / 1001.0), + 48, + 50, + 59.94, + (60000 / 1001.0), + 60, +) + +VALID_DROPFRAME_TIMECODE_RATES = ( + 29.97, + (30000 / 1001.0), + 59.94, + (60000 / 1001.0), +) + +VALID_TIMECODE_RATES = ( + VALID_NON_DROPFRAME_TIMECODE_RATES + VALID_DROPFRAME_TIMECODE_RATES) + +_fn_cache = object.__setattr__ + + +class RationalTime(object): + """ Represents an instantaneous point in time, value * (1/rate) seconds + from time 0seconds. + """ + + # Locks RationalTime instances to only these attributes + __slots__ = ['value', 'rate'] + + def __init__(self, value=0.0, rate=1.0): + _fn_cache(self, "value", value) + _fn_cache(self, "rate", rate) + + def __setattr__(self, key, val): + """Enforces immutability """ + raise AttributeError("RationalTime is Immutable.") + + def __copy__(self, memodict=None): + return RationalTime(self.value, self.rate) + + # Always deepcopy, since we want this class to behave like a value type + __deepcopy__ = __copy__ + + def rescaled_to(self, new_rate): + """Returns the time for this time converted to new_rate""" + + try: + new_rate = new_rate.rate + except AttributeError: + pass + + if self.rate == new_rate: + return copy.copy(self) + + return RationalTime( + self.value_rescaled_to(new_rate), + new_rate + ) + + def value_rescaled_to(self, new_rate): + """Returns the time value for self converted to new_rate""" + + try: + new_rate = new_rate.rate + except AttributeError: + pass + + if new_rate == self.rate: + return self.value + + # TODO: This math probably needs some overrun protection + try: + return float(self.value) * float(new_rate) / float(self.rate) + except (AttributeError, TypeError, ValueError): + raise TypeError( + "Sorry, RationalTime cannot be rescaled to a value of type " + "'{}', only RationalTime and numbers are supported.".format( + type(new_rate) + ) + ) + + def almost_equal(self, 
other, delta=0.0): + try: + rescaled_value = self.value_rescaled_to(other.rate) + return abs(rescaled_value - other.value) <= delta + + except AttributeError: + return False + + def __add__(self, other): + """Returns a RationalTime object that is the sum of self and other. + + If self and other have differing time rates, the result will have the + have the rate of the faster time. + """ + + try: + if self.rate == other.rate: + return RationalTime(self.value + other.value, self.rate) + except AttributeError: + if not isinstance(other, RationalTime): + raise TypeError( + "RationalTime may only be added to other objects of type " + "RationalTime, not {}.".format(type(other)) + ) + raise + + if self.rate > other.rate: + scale = self.rate + value = self.value + other.value_rescaled_to(scale) + else: + scale = other.rate + value = self.value_rescaled_to(scale) + other.value + + return RationalTime(value, scale) + + # because RationalTime is immutable, += is sugar around + + __iadd__ = __add__ + + def __sub__(self, other): + """Returns a RationalTime object that is self - other. + + If self and other have differing time rates, the result will have the + have the rate of the faster time. + """ + + try: + if self.rate == other.rate: + return RationalTime(self.value - other.value, self.rate) + except AttributeError: + if not isinstance(other, RationalTime): + raise TypeError( + "RationalTime may only be added to other objects of type " + "RationalTime, not {}.".format(type(other)) + ) + raise + + if self.rate > other.rate: + scale = self.rate + value = self.value - other.value_rescaled_to(scale) + else: + scale = other.rate + value = self.value_rescaled_to(scale) - other.value + + return RationalTime(value=value, rate=scale) + + def _comparable_floats(self, other): + """Returns a tuple of two floats, (self, other), which are suitable + for comparison. 
+ + If other is not of a type that can be compared, TypeError is raised + """ + try: + return ( + float(self.value) / self.rate, + float(other.value) / other.rate + ) + except AttributeError: + if not isinstance(other, RationalTime): + raise TypeError( + "RationalTime can only be compared to other objects of type " + "RationalTime, not {}".format(type(other)) + ) + raise + + def __gt__(self, other): + f_self, f_other = self._comparable_floats(other) + return f_self > f_other + + def __lt__(self, other): + f_self, f_other = self._comparable_floats(other) + return f_self < f_other + + def __le__(self, other): + f_self, f_other = self._comparable_floats(other) + return f_self <= f_other + + def __ge__(self, other): + f_self, f_other = self._comparable_floats(other) + return f_self >= f_other + + def __repr__(self): + return ( + "otio.opentime.RationalTime(value={value}," + " rate={rate})".format( + value=repr(self.value), + rate=repr(self.rate), + ) + ) + + def __str__(self): + return "RationalTime({}, {})".format( + str(self.value), + str(self.rate) + ) + + def __eq__(self, other): + try: + return self.value_rescaled_to(other.rate) == other.value + except AttributeError: + return False + + def __ne__(self, other): + return not (self == other) + + def __hash__(self): + return hash((self.value, self.rate)) + + +class TimeTransform(object): + """1D Transform for RationalTime. 
Has offset and scale.""" + + def __init__(self, offset=RationalTime(), scale=1.0, rate=None): + self.offset = copy.copy(offset) + self.scale = float(scale) + self.rate = float(rate) if rate else None + + def applied_to(self, other): + if isinstance(other, TimeRange): + return range_from_start_end_time( + start_time=self.applied_to(other.start_time), + end_time_exclusive=self.applied_to(other.end_time_exclusive()) + ) + + target_rate = self.rate if self.rate is not None else other.rate + if isinstance(other, TimeTransform): + return TimeTransform( + offset=self.offset + other.offset, + scale=self.scale * other.scale, + rate=target_rate + ) + elif isinstance(other, RationalTime): + value = other.value * self.scale + result = RationalTime(value, other.rate) + self.offset + if target_rate is not None: + result = result.rescaled_to(target_rate) + + return result + else: + raise TypeError( + "TimeTransform can only be applied to a TimeTransform or " + "RationalTime, not a {}".format(type(other)) + ) + + def __repr__(self): + return ( + "otio.opentime.TimeTransform(offset={}, scale={}, rate={})".format( + repr(self.offset), + repr(self.scale), + repr(self.rate) + ) + ) + + def __str__(self): + return ( + "TimeTransform({}, {}, {})".format( + str(self.offset), + str(self.scale), + str(self.rate) + ) + ) + + def __eq__(self, other): + try: + return ( + (self.offset, self.scale, self.rate) == + (other.offset, other.scale, self.rate) + ) + except AttributeError: + return False + + def __ne__(self, other): + return not (self == other) + + def __hash__(self): + return hash((self.offset, self.scale, self.rate)) + + +class BoundStrategy(object): + """Different bounding strategies for TimeRange """ + + Free = 1 + Clamp = 2 + + +class TimeRange(object): + """Contains a range of time, starting (and including) start_time and + lasting duration.value * (1/duration.rate) seconds. 
+ + A 0 duration TimeRange is the same as a RationalTime, and contains only the + start_time of the TimeRange. + """ + + __slots__ = ['start_time', 'duration'] + + def __init__(self, start_time=None, duration=None): + if not isinstance(start_time, RationalTime) and start_time is not None: + raise TypeError( + "start_time must be a RationalTime, not " + "'{}'".format(start_time) + ) + if ( + duration is not None and ( + not isinstance(duration, RationalTime) + or duration.value < 0.0 + ) + ): + raise TypeError( + "duration must be a RationalTime with value >= 0, not " + "'{}'".format(duration) + ) + + # if the start time has not been passed in + if not start_time: + if duration: + # ...get the rate from the duration + start_time = RationalTime(rate=duration.rate) + else: + # otherwise use the default + start_time = RationalTime() + _fn_cache(self, "start_time", copy.copy(start_time)) + + if not duration: + # ...get the rate from the start_time + duration = RationalTime(rate=start_time.rate) + _fn_cache(self, "duration", copy.copy(duration)) + + def __setattr__(self, key, val): + raise AttributeError("TimeRange is Immutable.") + + def __copy__(self, memodict=None): + # Construct a new one directly to avoid the overhead of deepcopy + return TimeRange( + copy.copy(self.start_time), + copy.copy(self.duration) + ) + + # Always deepcopy, since we want this class to behave like a value type + __deepcopy__ = __copy__ + + def end_time_inclusive(self): + """The time of the last sample that contains data in the TimeRange. + + If the TimeRange goes from (0, 24) w/ duration (10, 24), this will be + (9, 24) + + If the TimeRange goes from (0, 24) w/ duration (10.5, 24): + (10, 24) + + In other words, the last frame with data (however fractional). 
+ """ + + if ( + self.end_time_exclusive() - self.start_time.rescaled_to(self.duration) + ).value > 1: + + result = ( + self.end_time_exclusive() - RationalTime(1, self.start_time.rate) + ) + + # if the duration's value has a fractional component + if self.duration.value != math.floor(self.duration.value): + result = RationalTime( + math.floor(self.end_time_exclusive().value), + result.rate + ) + + return result + else: + return copy.deepcopy(self.start_time) + + def end_time_exclusive(self): + """"Time of the first sample outside the time range. + + If Start Frame is 10 and duration is 5, then end_time_exclusive is 15, + even though the last time with data in this range is 14. + + If Start Frame is 10 and duration is 5.5, then end_time_exclusive is + 15.5, even though the last time with data in this range is 15. + """ + + return self.duration + self.start_time.rescaled_to(self.duration) + + def extended_by(self, other): + """Construct a new TimeRange that is this one extended by another.""" + + if not isinstance(other, TimeRange): + raise TypeError( + "extended_by requires rtime be a TimeRange, not a '{}'".format( + type(other) + ) + ) + + start_time = min(self.start_time, other.start_time) + new_end_time = max( + self.end_time_exclusive(), + other.end_time_exclusive() + ) + duration = duration_from_start_end_time(start_time, new_end_time) + return TimeRange(start_time, duration) + + # @TODO: remove? + def clamped( + self, + other, + start_bound=BoundStrategy.Free, + end_bound=BoundStrategy.Free + ): + """Clamp 'other' (either a RationalTime or a TimeRange), according to + self.start_time/end_time_exclusive and the bound arguments. 
+ """ + + if isinstance(other, RationalTime): + if start_bound == BoundStrategy.Clamp: + other = max(other, self.start_time) + if end_bound == BoundStrategy.Clamp: + # @TODO: this should probably be the end_time_inclusive, + # not exclusive + other = min(other, self.end_time_exclusive()) + return other + elif isinstance(other, TimeRange): + start_time = other.start_time + end = other.end_time_exclusive() + if start_bound == BoundStrategy.Clamp: + start_time = max(other.start_time, self.start_time) + if end_bound == BoundStrategy.Clamp: + end = min(self.end_time_exclusive(), end) + duration = duration_from_start_end_time(start_time, end) + return TimeRange(start_time, duration) + else: + raise TypeError( + "TimeRange can only be applied to RationalTime objects, not " + "{}".format(type(other)) + ) + return self + + def contains(self, other): + """Return true if self completely contains other. + + (RationalTime or TimeRange) + """ + + if isinstance(other, RationalTime): + return ( + self.start_time <= other and other < self.end_time_exclusive()) + elif isinstance(other, TimeRange): + return ( + self.start_time <= other.start_time and + self.end_time_exclusive() >= other.end_time_exclusive() + ) + raise TypeError( + "contains only accepts on otio.opentime.RationalTime or " + "otio.opentime.TimeRange, not {}".format(type(other)) + ) + + def overlaps(self, other): + """Return true if self overlaps any part of other. 
+ + (RationalTime or TimeRange) + """ + + if isinstance(other, RationalTime): + return self.contains(other) + elif isinstance(other, TimeRange): + return ( + ( + self.start_time < other.end_time_exclusive() and + other.start_time < self.end_time_exclusive() + ) + ) + raise TypeError( + "overlaps only accepts on otio.opentime.RationalTime or " + "otio.opentime.TimeRange, not {}".format(type(other)) + ) + + def __hash__(self): + return hash((self.start_time, self.duration)) + + def __eq__(self, rhs): + try: + return ( + (self.start_time, self.duration) == + (rhs.start_time, rhs.duration) + ) + except AttributeError: + return False + + def __ne__(self, rhs): + return not (self == rhs) + + def __repr__(self): + return ( + "otio.opentime.TimeRange(start_time={}, duration={})".format( + repr(self.start_time), + repr(self.duration), + ) + ) + + def __str__(self): + return ( + "TimeRange({}, {})".format( + str(self.start_time), + str(self.duration), + ) + ) + + +def from_frames(frame, fps): + """Turn a frame number and fps into a time object. + :param frame: (:class:`int`) Frame number. + :param fps: (:class:`float`) Frame-rate for the (:class:`RationalTime`) instance. + + :return: (:class:`RationalTime`) Instance for the frame and fps provided. + """ + + return RationalTime(int(frame), fps) + + +def to_frames(time_obj, fps=None): + """Turn a RationalTime into a frame number.""" + + if not fps or time_obj.rate == fps: + return int(time_obj.value) + + return int(time_obj.value_rescaled_to(fps)) + + +def validate_timecode_rate(rate): + """Check if rate is of valid type and value. + Raises (:class:`TypeError` for wrong type of rate. + Raises (:class:`VaueError`) for invalid rate value. 
+ + :param rate: (:class:`int`) or (:class:`float`) The frame rate in question + """ + if not isinstance(rate, (int, float)): + raise TypeError( + "rate must be or not {t}".format(t=type(rate))) + + if rate not in VALID_TIMECODE_RATES: + raise ValueError( + '{rate} is not a valid frame rate, ' + 'Please use one of these: {valid}'.format( + rate=rate, + valid=VALID_TIMECODE_RATES)) + + +def from_timecode(timecode_str, rate): + """Convert a timecode string into a RationalTime. + + :param timecode_str: (:class:`str`) A colon-delimited timecode. + :param rate: (:class:`float`) The frame-rate to calculate timecode in + terms of. + + :return: (:class:`RationalTime`) Instance for the timecode provided. + """ + # Validate rate + validate_timecode_rate(rate) + + # Check if rate is drop frame + rate_is_dropframe = rate in VALID_DROPFRAME_TIMECODE_RATES + + # Make sure only DF timecodes are treated as such + treat_as_df = rate_is_dropframe and ';' in timecode_str + + # Check if timecode indicates drop frame + if ';' in timecode_str: + if not rate_is_dropframe: + raise ValueError( + 'Timecode "{}" indicates drop-frame rate ' + 'due to the ";" frame divider. ' + 'Passed rate ({}) is of non-drop-frame rate. ' + 'Valid drop-frame rates are: {}'.format( + timecode_str, + rate, + VALID_DROPFRAME_TIMECODE_RATES)) + else: + timecode_str = timecode_str.replace(';', ':') + + hours, minutes, seconds, frames = timecode_str.split(":") + + # Timecode is declared in terms of nominal fps + nominal_fps = int(math.ceil(rate)) + + if int(frames) >= nominal_fps: + raise ValueError( + 'Frame rate mismatch. 
Timecode "{}" has frames beyond {}.'.format( + timecode_str, nominal_fps - 1)) + + dropframes = 0 + if treat_as_df: + if rate == 29.97: + dropframes = 2 + + elif rate == 59.94: + dropframes = 4 + + # To use for drop frame compensation + total_minutes = int(hours) * 60 + int(minutes) + + # convert to frames + value = ( + ((total_minutes * 60) + int(seconds)) * nominal_fps + int(frames)) - \ + (dropframes * (total_minutes - (total_minutes // 10))) + + return RationalTime(value, rate) + + +def to_timecode(time_obj, rate=None, drop_frame=None): + """Convert a RationalTime into a timecode string. + + :param time_obj: (:class:`RationalTime`) instance to express as timecode. + :param rate: (:class:`float`) The frame-rate to calculate timecode in + terms of. (Default time_obj.rate) + :param drop_frame: (:class:`bool`) ``True`` to make drop-frame timecode, + ``False`` for non-drop. If left ``None``, a format will be guessed + based on rate. + + :return: (:class:`str`) The timecode. + """ + if time_obj is None: + return None + + rate = rate or time_obj.rate + + # Validate rate + validate_timecode_rate(rate) + + # Check if rate is drop frame + rate_is_dropframe = rate in VALID_DROPFRAME_TIMECODE_RATES + if drop_frame and not rate_is_dropframe: + raise ValueError( + "Invalid rate for drop-frame timecode {}".format(time_obj.rate) + ) + + # if in auto-detect for DFTC, use the rate to decide + if drop_frame is None: + drop_frame = rate_is_dropframe + + dropframes = 0 + if drop_frame: + if rate in (29.97, (30000 / 1001.0)): + dropframes = 2 + + elif rate == 59.94: + dropframes = 4 + + # For non-dftc, use the integral frame rate + if not drop_frame: + rate = round(rate) + + # Number of frames in an hour + frames_per_hour = int(round(rate * 60 * 60)) + # Number of frames in a day - timecode rolls over after 24 hours + frames_per_24_hours = frames_per_hour * 24 + # Number of frames per ten minutes + frames_per_10_minutes = int(round(rate * 60 * 10)) + # Number of frames per minute is 
the round of the framerate * 60 minus + # the number of dropped frames + frames_per_minute = int(round(rate) * 60) - dropframes + + value = time_obj.value + + if value < 0: + raise ValueError( + "Negative values are not supported for converting to timecode.") + + # If frame_number is greater than 24 hrs, next operation will rollover + # clock + value %= frames_per_24_hours + + if drop_frame: + d = value // frames_per_10_minutes + m = value % frames_per_10_minutes + if m > dropframes: + value += (dropframes * 9 * d) + \ + dropframes * ((m - dropframes) // frames_per_minute) + else: + value += dropframes * 9 * d + + nominal_fps = int(math.ceil(rate)) + + frames = value % nominal_fps + seconds = (value // nominal_fps) % 60 + minutes = ((value // nominal_fps) // 60) % 60 + hours = (((value // nominal_fps) // 60) // 60) + + tc = "{HH:02d}:{MM:02d}:{SS:02d}{div}{FF:02d}" + + return tc.format( + HH=int(hours), + MM=int(minutes), + SS=int(seconds), + div=drop_frame and ";" or ":", + FF=int(frames)) + + +def from_time_string(time_str, rate): + """Convert a time with microseconds string into a RationalTime. + + :param time_str: (:class:`str`) A HH:MM:ss.ms time. + :param rate: (:class:`float`) The frame-rate to calculate timecode in + terms of. + + :return: (:class:`RationalTime`) Instance for the timecode provided. + """ + + if ';' in time_str: + raise ValueError('Drop-Frame timecodes not supported.') + + hours, minutes, seconds = time_str.split(":") + microseconds = "0" + if '.' 
in seconds: + seconds, microseconds = str(seconds).split('.') + microseconds = microseconds[0:6] + seconds = '.'.join([seconds, microseconds]) + time_obj = from_seconds( + float(seconds) + + (int(minutes) * 60) + + (int(hours) * 60 * 60) + ) + return time_obj.rescaled_to(rate) + + +def to_time_string(time_obj): + """ + Convert this timecode to time with microsecond, as formated in FFMPEG + + :return: Number formated string of time + """ + if time_obj is None: + return None + # convert time object to seconds + seconds = to_seconds(time_obj) + + # reformat in time string + time_units_per_minute = 60 + time_units_per_hour = time_units_per_minute * 60 + time_units_per_day = time_units_per_hour * 24 + + days, hour_units = divmod(seconds, time_units_per_day) + hours, minute_units = divmod(hour_units, time_units_per_hour) + minutes, seconds = divmod(minute_units, time_units_per_minute) + microseconds = "0" + seconds = str(seconds) + if '.' in seconds: + seconds, microseconds = str(seconds).split('.') + + # TODO: There are some rollover policy issues for days and hours, + # We need to research these + + return "{hours}:{minutes}:{seconds}.{microseconds}".format( + hours="{n:0{width}d}".format(n=int(hours), width=2), + minutes="{n:0{width}d}".format(n=int(minutes), width=2), + seconds="{n:0{width}d}".format(n=int(seconds), width=2), + microseconds=microseconds[0:6] + ) + + +def from_seconds(seconds): + """Convert a number of seconds into RationalTime""" + + # Note: in the future we may consider adding a preferred rate arg + time_obj = RationalTime(value=seconds, rate=1) + + return time_obj + + +def to_seconds(time_obj): + """ Convert a RationalTime into float seconds """ + return time_obj.value_rescaled_to(1) + + +def from_footage(footage): + raise NotImplementedError + + +def to_footage(time_obj): + raise NotImplementedError + + +def duration_from_start_end_time(start_time, end_time_exclusive): + """Compute duration of samples from first to last. 
This is not the same as + distance. For example, the duration of a clip from frame 10 to frame 15 + is 6 frames. Result in the rate of start_time. + """ + + # @TODO: what to do when start_time > end_time_exclusive? + + if start_time.rate == end_time_exclusive.rate: + return RationalTime( + end_time_exclusive.value - start_time.value, + start_time.rate + ) + else: + return RationalTime( + ( + end_time_exclusive.value_rescaled_to(start_time) + - start_time.value + ), + start_time.rate + ) + + +# @TODO: create range from start/end [in,ex]clusive +def range_from_start_end_time(start_time, end_time_exclusive): + """Create a TimeRange from start and end RationalTimes.""" + + return TimeRange( + start_time, + duration=duration_from_start_end_time(start_time, end_time_exclusive) + ) diff --git a/pype/vendor/python/python_2/opentimelineio/plugins/__init__.py b/pype/vendor/python/python_2/opentimelineio/plugins/__init__.py new file mode 100644 index 00000000000..dedb3da37e2 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/plugins/__init__.py @@ -0,0 +1,33 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. 
+# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Plugin system for OTIO""" + +# flake8: noqa + +from .python_plugin import PythonPlugin +from .manifest import ( + manifest_from_file, + ActiveManifest, +) diff --git a/pype/vendor/python/python_2/opentimelineio/plugins/manifest.py b/pype/vendor/python/python_2/opentimelineio/plugins/manifest.py new file mode 100644 index 00000000000..2a769effec3 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/plugins/manifest.py @@ -0,0 +1,282 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Implementation of an adapter registry system for OTIO.""" + +import inspect +import logging +import os + +# on some python interpreters, pkg_resources is not available +try: + import pkg_resources +except ImportError: + pkg_resources = None + +from .. import ( + core, + exceptions, +) + + +def manifest_from_file(filepath): + """Read the .json file at filepath into a Manifest object.""" + + result = core.deserialize_json_from_file(filepath) + result.source_files.append(filepath) + result._update_plugin_source(filepath) + return result + + +def manifest_from_string(input_string): + """Deserialize the json string into a manifest object.""" + + result = core.deserialize_json_from_string(input_string) + + # try and get the caller's name + name = "unknown" + stack = inspect.stack() + if len(stack) > 1 and len(stack[1]) > 3: + # filename function name + name = "{}:{}".format(stack[1][1], stack[1][3]) + + # set the value in the manifest + src_string = "call to manifest_from_string() in " + name + result.source_files.append(src_string) + result._update_plugin_source(src_string) + + return result + + +@core.register_type +class Manifest(core.SerializableObject): + """Defines an OTIO plugin Manifest. + + This is an internal OTIO implementation detail. A manifest tracks a + collection of adapters and allows finding specific adapters by suffix + + For writing your own adapters, consult: + https://opentimelineio.readthedocs.io/en/latest/tutorials/write-an-adapter.html# + """ + _serializable_label = "PluginManifest.1" + + def __init__(self): + super(Manifest, self).__init__() + self.adapters = [] + self.schemadefs = [] + self.media_linkers = [] + self.source_files = [] + + # hook system stuff + self.hooks = {} + self.hook_scripts = [] + + adapters = core.serializable_field( + "adapters", + type([]), + "Adapters this manifest describes." 
+ ) + schemadefs = core.serializable_field( + "schemadefs", + type([]), + "Schemadefs this manifest describes." + ) + media_linkers = core.serializable_field( + "media_linkers", + type([]), + "Media Linkers this manifest describes." + ) + hooks = core.serializable_field( + "hooks", + type({}), + "Hooks that hooks scripts can be attached to." + ) + hook_scripts = core.serializable_field( + "hook_scripts", + type([]), + "Scripts that can be attached to hooks." + ) + + def extend(self, another_manifest): + """ + Extend the adapters, schemadefs, and media_linkers lists of this manifest + by appending the contents of the corresponding lists of another_manifest. + """ + if another_manifest: + self.adapters.extend(another_manifest.adapters) + self.schemadefs.extend(another_manifest.schemadefs) + self.media_linkers.extend(another_manifest.media_linkers) + self.hook_scripts.extend(another_manifest.hook_scripts) + + for trigger_name, hooks in another_manifest.hooks.items(): + if trigger_name in self.hooks: + self.hooks[trigger_name].extend(hooks) + + def _update_plugin_source(self, path): + """Track the source .json for a given adapter.""" + + for thing in (self.adapters + self.schemadefs + + self.media_linkers + self.hook_scripts): + thing._json_path = path + + def from_filepath(self, suffix): + """Return the adapter object associated with a given file suffix.""" + + for adapter in self.adapters: + if suffix.lower() in adapter.suffixes: + return adapter + raise exceptions.NoKnownAdapterForExtensionError(suffix) + + def adapter_module_from_suffix(self, suffix): + """Return the adapter module associated with a given file suffix.""" + + adp = self.from_filepath(suffix) + return adp.module() + + def from_name(self, name, kind_list="adapters"): + """Return the adapter object associated with a given adapter name.""" + + for thing in getattr(self, kind_list): + if name == thing.name: + return thing + + raise exceptions.NotSupportedError( + "Could not find plugin: '{}' in 
kind_list: '{}'." + " options: {}".format( + name, + kind_list, + getattr(self, kind_list) + ) + ) + + def adapter_module_from_name(self, name): + """Return the adapter module associated with a given adapter name.""" + + adp = self.from_name(name) + return adp.module() + + def schemadef_module_from_name(self, name): + """Return the schemadef module associated with a given schemadef name.""" + + adp = self.from_name(name, kind_list="schemadefs") + return adp.module() + + +_MANIFEST = None + + +def load_manifest(): + # build the manifest of adapters, starting with builtin adapters + result = manifest_from_file( + os.path.join( + os.path.dirname(os.path.dirname(inspect.getsourcefile(core))), + "adapters", + "builtin_adapters.plugin_manifest.json" + ) + ) + + # layer contrib plugins after built in ones + try: + import opentimelineio_contrib as otio_c + + contrib_manifest = manifest_from_file( + os.path.join( + os.path.dirname(inspect.getsourcefile(otio_c)), + "adapters", + "contrib_adapters.plugin_manifest.json" + ) + ) + result.extend(contrib_manifest) + except ImportError: + pass + + # Discover setuptools-based plugins + if pkg_resources: + for plugin in pkg_resources.iter_entry_points( + "opentimelineio.plugins" + ): + plugin_name = plugin.name + try: + plugin_entry_point = plugin.load() + try: + plugin_manifest = plugin_entry_point.plugin_manifest() + except AttributeError: + if not pkg_resources.resource_exists( + plugin.module_name, + 'plugin_manifest.json' + ): + raise + manifest_stream = pkg_resources.resource_stream( + plugin.module_name, + 'plugin_manifest.json' + ) + plugin_manifest = core.deserialize_json_from_string( + manifest_stream.read().decode('utf-8') + ) + manifest_stream.close() + filepath = pkg_resources.resource_filename( + plugin.module_name, + 'plugin_manifest.json' + ) + plugin_manifest._update_plugin_source(filepath) + + except Exception: + logging.exception( + "could not load plugin: {}".format(plugin_name) + ) + continue + + 
result.extend(plugin_manifest) + else: + # XXX: Should we print some kind of warning that pkg_resources isn't + # available? + pass + + # read local adapter manifests, if they exist + _local_manifest_path = os.environ.get("OTIO_PLUGIN_MANIFEST_PATH", None) + if _local_manifest_path is not None: + for json_path in _local_manifest_path.split(":"): + if not os.path.exists(json_path): + # XXX: In case error reporting is requested + # print( + # "Warning: OpenTimelineIO cannot access path '{}' from " + # "$OTIO_PLUGIN_MANIFEST_PATH".format(json_path) + # ) + continue + + LOCAL_MANIFEST = manifest_from_file(json_path) + result.extend(LOCAL_MANIFEST) + + # force the schemadefs to load and add to schemadef module namespace + for s in result.schemadefs: + s.module() + return result + + +def ActiveManifest(force_reload=False): + global _MANIFEST + if not _MANIFEST or force_reload: + _MANIFEST = load_manifest() + + return _MANIFEST diff --git a/pype/vendor/python/python_2/opentimelineio/plugins/python_plugin.py b/pype/vendor/python/python_2/opentimelineio/plugins/python_plugin.py new file mode 100644 index 00000000000..c749bd5f9d6 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/plugins/python_plugin.py @@ -0,0 +1,128 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. 
+# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Base class for OTIO plugins that are exposed by manifests.""" + +import os +import imp + +from .. import ( + core, + exceptions, +) + + +class PythonPlugin(core.SerializableObject): + """A class of plugin that is encoded in a python module, exposed via a + manifest. + """ + + _serializable_label = "PythonPlugin.1" + + def __init__( + self, + name=None, + execution_scope=None, + filepath=None, + ): + super(PythonPlugin, self).__init__() + self.name = name + self.execution_scope = execution_scope + self.filepath = filepath + self._json_path = None + self._module = None + + name = core.serializable_field("name", doc="Adapter name.") + execution_scope = core.serializable_field( + "execution_scope", + str, + doc=( + "Describes whether this adapter is executed in the current python" + " process or in a subshell. Options are: " + "['in process', 'out of process']." + ) + ) + filepath = core.serializable_field( + "filepath", + str, + doc=( + "Absolute path or relative path to adapter module from location of" + " json." + ) + ) + + def module_abs_path(self): + """Return an absolute path to the module implementing this adapter.""" + + filepath = self.filepath + if not os.path.isabs(filepath): + if not self._json_path: + raise exceptions.MisconfiguredPluginError( + "{} plugin is misconfigured, missing json path. 
" + "plugin: {}".format( + self.name, + repr(self) + ) + ) + + filepath = os.path.join(os.path.dirname(self._json_path), filepath) + + return filepath + + def _imported_module(self, namespace): + """Load the module this plugin points at.""" + + pyname = os.path.splitext(os.path.basename(self.module_abs_path()))[0] + pydir = os.path.dirname(self.module_abs_path()) + + (file_obj, pathname, description) = imp.find_module(pyname, [pydir]) + + with file_obj: + # this will reload the module if it has already been loaded. + mod = imp.load_module( + "opentimelineio.{}.{}".format(namespace, self.name), + file_obj, + pathname, + description + ) + + return mod + + def module(self): + """Return the module object for this adapter. """ + + if not self._module: + self._module = self._imported_module("adapters") + + return self._module + + def _execute_function(self, func_name, **kwargs): + """Execute func_name on this adapter with error checking.""" + + # collects the error handling into a common place. + if not hasattr(self.module(), func_name): + raise exceptions.AdapterDoesntSupportFunctionError( + "Sorry, {} doesn't support {}.".format(self.name, func_name) + ) + return (getattr(self.module(), func_name)(**kwargs)) diff --git a/pype/vendor/python/python_2/opentimelineio/schema/__init__.py b/pype/vendor/python/python_2/opentimelineio/schema/__init__.py new file mode 100644 index 00000000000..419f337bf64 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/__init__.py @@ -0,0 +1,75 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. 
This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +# flake8: noqa + +"""User facing classes.""" + +from .missing_reference import ( + MissingReference +) +from .external_reference import ( + ExternalReference +) +from .clip import ( + Clip, +) +from .track import ( + Track, + TrackKind, + NeighborGapPolicy, +) +from .stack import ( + Stack, +) +from .timeline import ( + Timeline, + timeline_from_clips, +) +from .marker import ( + Marker, + MarkerColor, +) +from .gap import ( + Gap, +) +from .effect import ( + Effect, + TimeEffect, + LinearTimeWarp, + FreezeFrame, +) +from .transition import ( + Transition, + TransitionTypes, +) +from .serializable_collection import ( + SerializableCollection +) +from .generator_reference import ( + GeneratorReference +) +from .schemadef import ( + SchemaDef +) diff --git a/pype/vendor/python/python_2/opentimelineio/schema/clip.py b/pype/vendor/python/python_2/opentimelineio/schema/clip.py new file mode 100644 index 00000000000..44d38dfcf11 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/clip.py @@ -0,0 +1,130 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache 
License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Implementation of the Clip class, for pointing at media.""" + +import copy + +from .. import ( + core, + exceptions, +) +from . import ( + missing_reference +) + + +@core.register_type +class Clip(core.Item): + """The base editable object in OTIO. + + Contains a media reference and a trim on that media reference. + """ + + _serializable_label = "Clip.1" + + def __init__( + self, + name=None, + media_reference=None, + source_range=None, + markers=[], + effects=[], + metadata=None, + ): + core.Item.__init__( + self, + name=name, + source_range=source_range, + markers=markers, + effects=effects, + metadata=metadata + ) + + if not media_reference: + media_reference = missing_reference.MissingReference() + self._media_reference = copy.deepcopy(media_reference) + + name = core.serializable_field("name", doc="Name of this clip.") + transform = core.deprecated_field() + _media_reference = core.serializable_field( + "media_reference", + core.MediaReference, + "Media reference to the media this clip represents." 
+ ) + + @property + def media_reference(self): + if self._media_reference is None: + self._media_reference = missing_reference.MissingReference() + return self._media_reference + + @media_reference.setter + def media_reference(self, val): + if val is None: + val = missing_reference.MissingReference() + self._media_reference = val + + def available_range(self): + if not self.media_reference: + raise exceptions.CannotComputeAvailableRangeError( + "No media reference set on clip: {}".format(self) + ) + + if not self.media_reference.available_range: + raise exceptions.CannotComputeAvailableRangeError( + "No available_range set on media reference on clip: {}".format( + self + ) + ) + + return copy.copy(self.media_reference.available_range) + + def __str__(self): + return 'Clip("{}", {}, {}, {})'.format( + self.name, + self.media_reference, + self.source_range, + self.metadata + ) + + def __repr__(self): + return ( + 'otio.schema.Clip(' + 'name={}, ' + 'media_reference={}, ' + 'source_range={}, ' + 'metadata={}' + ')'.format( + repr(self.name), + repr(self.media_reference), + repr(self.source_range), + repr(self.metadata), + ) + ) + + def each_clip(self, search_range=None): + """Yields self.""" + + yield self diff --git a/pype/vendor/python/python_2/opentimelineio/schema/effect.py b/pype/vendor/python/python_2/opentimelineio/schema/effect.py new file mode 100644 index 00000000000..61eb4204faa --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/effect.py @@ -0,0 +1,130 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. 
This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Implementation of Effect OTIO class.""" + +from .. import ( + core +) + +import copy + + +@core.register_type +class Effect(core.SerializableObject): + _serializable_label = "Effect.1" + + def __init__( + self, + name=None, + effect_name=None, + metadata=None + ): + super(Effect, self).__init__() + self.name = name + self.effect_name = effect_name + self.metadata = copy.deepcopy(metadata) if metadata else {} + + name = core.serializable_field( + "name", + doc="Name of this effect object. Example: 'BlurByHalfEffect'." + ) + effect_name = core.serializable_field( + "effect_name", + doc="Name of the kind of effect (example: 'Blur', 'Crop', 'Flip')." + ) + metadata = core.serializable_field( + "metadata", + dict, + doc="Metadata dictionary." 
+ ) + + def __str__(self): + return ( + "Effect(" + "{}, " + "{}, " + "{}" + ")".format( + str(self.name), + str(self.effect_name), + str(self.metadata), + ) + ) + + def __repr__(self): + return ( + "otio.schema.Effect(" + "name={}, " + "effect_name={}, " + "metadata={}" + ")".format( + repr(self.name), + repr(self.effect_name), + repr(self.metadata), + ) + ) + + +@core.register_type +class TimeEffect(Effect): + "Base Time Effect Class" + _serializable_label = "TimeEffect.1" + pass + + +@core.register_type +class LinearTimeWarp(TimeEffect): + "A time warp that applies a linear scale across the entire clip" + _serializable_label = "LinearTimeWarp.1" + + def __init__(self, name=None, time_scalar=1, metadata=None): + Effect.__init__( + self, + name=name, + effect_name="LinearTimeWarp", + metadata=metadata + ) + self.time_scalar = time_scalar + + time_scalar = core.serializable_field( + "time_scalar", + doc="Linear time scalar applied to clip. " + "2.0 = double speed, 0.5 = half speed." + ) + + +@core.register_type +class FreezeFrame(LinearTimeWarp): + "Hold the first frame of the clip for the duration of the clip." + _serializable_label = "FreezeFrame.1" + + def __init__(self, name=None, metadata=None): + LinearTimeWarp.__init__( + self, + name=name, + time_scalar=0, + metadata=metadata + ) + self.effect_name = "FreezeFrame" diff --git a/pype/vendor/python/python_2/opentimelineio/schema/external_reference.py b/pype/vendor/python/python_2/opentimelineio/schema/external_reference.py new file mode 100644 index 00000000000..87db4d46525 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/external_reference.py @@ -0,0 +1,69 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. 
Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +""" +Implementation of the ExternalReference media reference schema. +""" + +from .. import ( + core, +) + + +@core.register_type +class ExternalReference(core.MediaReference): + """Reference to media via a url, for example "file:///var/tmp/foo.mov" """ + + _serializable_label = "ExternalReference.1" + _name = "ExternalReference" + + def __init__( + self, + target_url=None, + available_range=None, + metadata=None, + ): + core.MediaReference.__init__( + self, + available_range=available_range, + metadata=metadata + ) + + self.target_url = target_url + + target_url = core.serializable_field( + "target_url", + doc=( + "URL at which this media lives. For local references, use the " + "'file://' format." 
+ ) + ) + + def __str__(self): + return 'ExternalReference("{}")'.format(self.target_url) + + def __repr__(self): + return 'otio.schema.ExternalReference(target_url={})'.format( + repr(self.target_url) + ) diff --git a/pype/vendor/python/python_2/opentimelineio/schema/gap.py b/pype/vendor/python/python_2/opentimelineio/schema/gap.py new file mode 100644 index 00000000000..4c8165db8ff --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/gap.py @@ -0,0 +1,82 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +from .. 
import ( + core, + opentime, +) + +"""Gap Item - represents a transparent gap in content.""" + + +@core.register_type +class Gap(core.Item): + _serializable_label = "Gap.1" + _class_path = "schema.Gap" + + def __init__( + self, + name=None, + # note - only one of the following two arguments is accepted + # if neither is provided, source_range will be set to an empty + # TimeRange + # Duration is provided as a convienence for creating a gap of a certain + # length. IE: Gap(duration=otio.opentime.RationalTime(300, 24)) + duration=None, + source_range=None, + effects=None, + markers=None, + metadata=None, + ): + if duration and source_range: + raise RuntimeError( + "Cannot instantiate with both a source range and a duration." + ) + + if duration: + source_range = opentime.TimeRange( + opentime.RationalTime(0, duration.rate), + duration + ) + elif source_range is None: + # if neither is provided, seed TimeRange as an empty Source Range. + source_range = opentime.TimeRange() + + core.Item.__init__( + self, + name=name, + source_range=source_range, + effects=effects, + markers=markers, + metadata=metadata + ) + + @staticmethod + def visible(): + return False + + +# the original name for "gap" was "filler" - this will turn "Filler" found in +# OTIO files into Gap automatically. +core.register_type(Gap, "Filler") diff --git a/pype/vendor/python/python_2/opentimelineio/schema/generator_reference.py b/pype/vendor/python/python_2/opentimelineio/schema/generator_reference.py new file mode 100644 index 00000000000..ef1dde836e2 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/generator_reference.py @@ -0,0 +1,76 @@ +""" +Generators are media references that _produce_ media rather than refer to it. +""" + +from .. import ( + core, +) + + +@core.register_type +class GeneratorReference(core.MediaReference): + """ + Base class for Generators. + + Generators are media references that become "generators" in editorial + systems. 
For example, color bars or a solid color. + """ + + _serializable_label = "GeneratorReference.1" + _name = "GeneratorReference" + + def __init__( + self, + name=None, + generator_kind=None, + available_range=None, + parameters=None, + metadata=None + ): + super(GeneratorReference, self).__init__( + name, + available_range, + metadata + ) + + if parameters is None: + parameters = {} + self.parameters = parameters + self.generator_kind = generator_kind + + parameters = core.serializable_field( + "parameters", + dict, + doc="Dictionary of parameters for generator." + ) + generator_kind = core.serializable_field( + "generator_kind", + required_type=type(""), + # @TODO: need to clarify if this also has an enum of supported types + # / generic + doc="Kind of generator reference, as defined by the " + "schema.generator_reference.GeneratorReferenceTypes enum." + ) + + def __str__(self): + return 'GeneratorReference("{}", "{}", {}, {})'.format( + self.name, + self.generator_kind, + self.parameters, + self.metadata + ) + + def __repr__(self): + return ( + 'otio.schema.GeneratorReference(' + 'name={}, ' + 'generator_kind={}, ' + 'parameters={}, ' + 'metadata={}' + ')'.format( + repr(self.name), + repr(self.generator_kind), + repr(self.parameters), + repr(self.metadata), + ) + ) diff --git a/pype/vendor/python/python_2/opentimelineio/schema/marker.py b/pype/vendor/python/python_2/opentimelineio/schema/marker.py new file mode 100644 index 00000000000..d8b6f1c2720 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/marker.py @@ -0,0 +1,128 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. 
This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Marker class. Holds metadata over regions of time.""" + +from .. import ( + core, + opentime, +) + + +class MarkerColor: + """ Enum encoding colors of markers as strings. """ + + PINK = "PINK" + RED = "RED" + ORANGE = "ORANGE" + YELLOW = "YELLOW" + GREEN = "GREEN" + CYAN = "CYAN" + BLUE = "BLUE" + PURPLE = "PURPLE" + MAGENTA = "MAGENTA" + BLACK = "BLACK" + WHITE = "WHITE" + + +@core.register_type +class Marker(core.SerializableObject): + + """ Holds metadata over time on a timeline """ + + _serializable_label = "Marker.2" + _class_path = "marker.Marker" + + def __init__( + self, + name=None, + marked_range=None, + color=MarkerColor.RED, + metadata=None, + ): + core.SerializableObject.__init__( + self, + ) + self.name = name + self.marked_range = marked_range + self.color = color + self.metadata = metadata or {} + + name = core.serializable_field("name", doc="Name of this marker.") + + marked_range = core.serializable_field( + "marked_range", + opentime.TimeRange, + "Range this marker applies to, relative to the Item this marker is " + "attached to (e.g. the Clip or Track that owns this marker)." 
+ ) + + color = core.serializable_field( + "color", + required_type=type(MarkerColor.RED), + doc="Color string for this marker (for example: 'RED'), based on the " + "otio.schema.marker.MarkerColor enum." + ) + + # old name + range = core.deprecated_field() + + metadata = core.serializable_field( + "metadata", + dict, + "Metadata dictionary." + ) + + def __repr__(self): + return ( + "otio.schema.Marker(" + "name={}, " + "marked_range={}, " + "metadata={}" + ")".format( + repr(self.name), + repr(self.marked_range), + repr(self.metadata), + ) + ) + + def __str__(self): + return ( + "Marker(" + "{}, " + "{}, " + "{}" + ")".format( + str(self.name), + str(self.marked_range), + str(self.metadata), + ) + ) + + +@core.upgrade_function_for(Marker, 2) +def _version_one_to_two(data): + data["marked_range"] = data["range"] + del data["range"] + return data diff --git a/pype/vendor/python/python_2/opentimelineio/schema/missing_reference.py b/pype/vendor/python/python_2/opentimelineio/schema/missing_reference.py new file mode 100644 index 00000000000..88bc1862fc7 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/missing_reference.py @@ -0,0 +1,43 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. 
+# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +""" +Implementation of the MissingReference media reference schema. +""" + +from .. import ( + core, +) + + +@core.register_type +class MissingReference(core.MediaReference): + """Represents media for which a concrete reference is missing.""" + + _serializable_label = "MissingReference.1" + _name = "MissingReference" + + @property + def is_missing_reference(self): + return True diff --git a/pype/vendor/python/python_2/opentimelineio/schema/schemadef.py b/pype/vendor/python/python_2/opentimelineio/schema/schemadef.py new file mode 100644 index 00000000000..5fb4e05abd9 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/schemadef.py @@ -0,0 +1,65 @@ + +from .. import ( + core, + exceptions, + plugins, + schemadef +) + + +@core.register_type +class SchemaDef(plugins.PythonPlugin): + _serializable_label = "SchemaDef.1" + + def __init__( + self, + name=None, + execution_scope=None, + filepath=None, + ): + super(SchemaDef, self).__init__(name, execution_scope, filepath) + + def module(self): + """ + Return the module object for this schemadef plugin. + If the module hasn't already been imported, it is imported and + injected into the otio.schemadefs namespace as a side-effect. 
+ (redefines PythonPlugin.module()) + """ + + if not self._module: + self._module = self._imported_module("schemadef") + if self.name: + schemadef._add_schemadef_module(self.name, self._module) + + return self._module + + +def available_schemadef_names(): + """Return a string list of the available schemadefs.""" + + return [str(sd.name) for sd in plugins.ActiveManifest().schemadefs] + + +def from_name(name): + """Fetch the schemadef plugin object by the name of the schema directly.""" + + try: + return plugins.ActiveManifest().from_name(name, kind_list="schemadefs") + except exceptions.NotSupportedError: + raise exceptions.NotSupportedError( + "schemadef not supported: {}, available: {}".format( + name, + available_schemadef_names() + ) + ) + + +def module_from_name(name): + """Fetch the plugin's module by the name of the schemadef. + + Will load the plugin if it has not already been loaded. Reading a file that + contains the schemadef will also trigger a load of the plugin. + """ + plugin = from_name(name) + return plugin.module() diff --git a/pype/vendor/python/python_2/opentimelineio/schema/serializable_collection.py b/pype/vendor/python/python_2/opentimelineio/schema/serializable_collection.py new file mode 100644 index 00000000000..523ea77ddbb --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/serializable_collection.py @@ -0,0 +1,149 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. 
+# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""A serializable collection of SerializableObjects.""" + +import collections +import copy + +from .. import ( + core +) + +from . import ( + clip +) + + +@core.register_type +class SerializableCollection( + core.SerializableObject, + collections.MutableSequence +): + """A kind of composition which can hold any serializable object. + + This composition approximates the concept of a `bin` - a collection of + SerializableObjects that do not have any compositing meaning, but can + serialize to/from OTIO correctly, with metadata and a named collection. + """ + + _serializable_label = "SerializableCollection.1" + _class_path = "schema.SerializableCollection" + + def __init__( + self, + name=None, + children=None, + metadata=None, + ): + super(SerializableCollection, self).__init__() + + self.name = name + self._children = children or [] + self.metadata = copy.deepcopy(metadata) if metadata else {} + + name = core.serializable_field( + "name", + doc="SerializableCollection name." + ) + _children = core.serializable_field( + "children", + list, + "SerializableObject contained by this container." + ) + metadata = core.serializable_field( + "metadata", + dict, + doc="Metadata dictionary for this SerializableCollection." 
+ ) + + # @{ Stringification + def __str__(self): + return "SerializableCollection({}, {}, {})".format( + str(self.name), + str(self._children), + str(self.metadata) + ) + + def __repr__(self): + return ( + "otio.{}(" + "name={}, " + "children={}, " + "metadata={}" + ")".format( + self._class_path, + repr(self.name), + repr(self._children), + repr(self.metadata) + ) + ) + # @} + + # @{ collections.MutableSequence implementation + def __getitem__(self, item): + return self._children[item] + + def __setitem__(self, key, value): + self._children[key] = value + + def insert(self, index, item): + self._children.insert(index, item) + + def __len__(self): + return len(self._children) + + def __delitem__(self, item): + del self._children[item] + # @} + + def each_child( + self, + search_range=None, + descended_from_type=core.composable.Composable + ): + for i, child in enumerate(self._children): + # filter out children who are not descended from the specified type + is_descendant = descended_from_type == core.composable.Composable + if is_descendant or isinstance(child, descended_from_type): + yield child + + # for children that are compositions, recurse into their children + if hasattr(child, "each_child"): + for valid_child in ( + c for c in child.each_child( + search_range, + descended_from_type + ) + ): + yield valid_child + + def each_clip(self, search_range=None): + return self.each_child(search_range, clip.Clip) + + +# the original name for "SerializableCollection" was "SerializeableCollection" +# this will turn this misspelling found in OTIO files into the correct instance +# automatically. 
+core.register_type(SerializableCollection, 'SerializeableCollection') diff --git a/pype/vendor/python/python_2/opentimelineio/schema/stack.py b/pype/vendor/python/python_2/opentimelineio/schema/stack.py new file mode 100644 index 00000000000..bf67158dc08 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/stack.py @@ -0,0 +1,120 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""A stack represents a series of composable.Composables that are arranged such +that their start times are at the same point. + +Most commonly, this would be a series of schema.Track objects that then +contain clips. The 0 time of those tracks would be coincide with the 0-time of +the stack. + +Stacks are in compositing order, with later children obscuring earlier +children. In other words, from bottom to top. If a stack has three children, +[A, B, C], C is above B which is above A. + +A stack is the length of its longest child. 
If a child ends before the other +children, then an earlier index child would be visible before it. +""" + +from .. import ( + core, + opentime, + exceptions +) + +from . import ( + clip +) + + +@core.register_type +class Stack(core.Composition): + _serializable_label = "Stack.1" + _composition_kind = "Stack" + _modname = "schema" + + def __init__( + self, + name=None, + children=None, + source_range=None, + markers=None, + effects=None, + metadata=None + ): + core.Composition.__init__( + self, + name=name, + children=children, + source_range=source_range, + markers=markers, + effects=effects, + metadata=metadata + ) + + def range_of_child_at_index(self, index): + try: + child = self[index] + except IndexError: + raise exceptions.NoSuchChildAtIndex(index) + + dur = child.duration() + + return opentime.TimeRange( + start_time=opentime.RationalTime(0, dur.rate), + duration=dur + ) + + def each_clip(self, search_range=None): + return self.each_child(search_range, clip.Clip) + + def available_range(self): + if len(self) == 0: + return opentime.TimeRange() + + duration = max(child.duration() for child in self) + + return opentime.TimeRange( + opentime.RationalTime(0, duration.rate), + duration=duration + ) + + def range_of_all_children(self): + child_map = {} + for i, c in enumerate(self._children): + child_map[c] = self.range_of_child_at_index(i) + return child_map + + def trimmed_range_of_child_at_index(self, index, reference_space=None): + range = self.range_of_child_at_index(index) + + if not self.source_range: + return range + + range = opentime.TimeRange( + start_time=self.source_range.start_time, + duration=min(range.duration, self.source_range.duration) + ) + + return range diff --git a/pype/vendor/python/python_2/opentimelineio/schema/timeline.py b/pype/vendor/python/python_2/opentimelineio/schema/timeline.py new file mode 100644 index 00000000000..fe7d6952ab2 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/timeline.py @@ -0,0 +1,133 @@ +# 
+# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Implementation of the OTIO built in schema, Timeline object.""" + +import copy + +from .. import ( + core, + opentime, +) + +from . import stack, track + + +@core.register_type +class Timeline(core.SerializableObject): + _serializable_label = "Timeline.1" + + def __init__( + self, + name=None, + tracks=None, + global_start_time=None, + metadata=None, + ): + super(Timeline, self).__init__() + self.name = name + self.global_start_time = copy.deepcopy(global_start_time) + + if tracks is None: + tracks = [] + self.tracks = stack.Stack(name="tracks", children=tracks) + + self.metadata = copy.deepcopy(metadata) if metadata else {} + + name = core.serializable_field("name", doc="Name of this timeline.") + tracks = core.serializable_field( + "tracks", + core.Composition, + doc="Stack of tracks containing items." + ) + metadata = core.serializable_field( + "metadata", + dict, + "Metadata dictionary." 
+ ) + global_start_time = core.serializable_field( + "global_start_time", + opentime.RationalTime, + doc="Global starting time value and rate of the timeline." + ) + + def __str__(self): + return 'Timeline("{}", {})'.format(str(self.name), str(self.tracks)) + + def __repr__(self): + return ( + "otio.schema.Timeline(name={}, tracks={})".format( + repr(self.name), + repr(self.tracks) + ) + ) + + def each_child(self, search_range=None, descended_from_type=core.Composable): + return self.tracks.each_child(search_range, descended_from_type) + + def each_clip(self, search_range=None): + """Return a flat list of each clip, limited to the search_range.""" + + return self.tracks.each_clip(search_range) + + def duration(self): + """Duration of this timeline.""" + + return self.tracks.duration() + + def range_of_child(self, child): + """Range of the child object contained in this timeline.""" + + return self.tracks.range_of_child(child) + + def video_tracks(self): + """ + This convenience method returns a list of the top-level video tracks in + this timeline. + """ + return [ + trck for trck + in self.tracks + if (isinstance(trck, track.Track) and + trck.kind == track.TrackKind.Video) + ] + + def audio_tracks(self): + """ + This convenience method returns a list of the top-level audio tracks in + this timeline. 
+ """ + return [ + trck for trck + in self.tracks + if (isinstance(trck, track.Track) and + trck.kind == track.TrackKind.Audio) + ] + + +def timeline_from_clips(clips): + """Convenience for making a single track timeline from a list of clips.""" + + trck = track.Track(children=clips) + return Timeline(tracks=[trck]) diff --git a/pype/vendor/python/python_2/opentimelineio/schema/track.py b/pype/vendor/python/python_2/opentimelineio/schema/track.py new file mode 100644 index 00000000000..29b0e7f1aeb --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/track.py @@ -0,0 +1,242 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Implement Track sublcass of composition.""" + +import collections + +from .. import ( + core, + opentime, +) + +from . 
class TrackKind:
    """Enumerates the media kinds a Track can carry (see ``Track.kind``)."""
    Video = "Video"
    Audio = "Audio"


class NeighborGapPolicy:
    """ enum for deciding how to add gap when asking for neighbors """
    # never: a missing neighbor is reported as None.
    never = 0
    # around_transitions: synthesize a Gap so a Transition sitting at either
    # end of the track always has something to overlap with.
    around_transitions = 1


@core.register_type
class Track(core.Composition):
    """A Composition whose children are laid out sequentially in time.

    Items (clips, gaps, nested stacks) occupy consecutive, non-overlapping
    time ranges; Transitions overlap their neighbors and do not add to the
    track's duration on their own.
    """

    _serializable_label = "Track.1"
    _composition_kind = "Track"
    _modname = "schema"

    def __init__(
        self,
        name=None,
        children=None,
        kind=TrackKind.Video,
        source_range=None,
        markers=None,
        effects=None,
        metadata=None,
    ):
        core.Composition.__init__(
            self,
            name=name,
            children=children,
            source_range=source_range,
            markers=markers,
            effects=effects,
            metadata=metadata
        )
        self.kind = kind

    # Serialized after __init__ so the field descriptor replaces the plain
    # attribute set above.
    kind = core.serializable_field(
        "kind",
        doc="Composition kind (Stack, Track)"
    )

    def range_of_child_at_index(self, index):
        """Return the TimeRange of the child at ``index`` in this track."""
        child = self[index]

        # sum the durations of all the children leading up to the chosen one
        # (overlapping children, i.e. Transitions, consume no track time).
        start_time = sum(
            (
                o_c.duration()
                for o_c in (c for c in self[:index] if not c.overlapping())
            ),
            opentime.RationalTime(value=0, rate=child.duration().rate)
        )
        if isinstance(child, transition.Transition):
            # A transition eats into its predecessor, so its start is pulled
            # back by the in_offset.
            start_time -= child.in_offset

        return opentime.TimeRange(start_time, child.duration())

    def trimmed_range_of_child_at_index(self, index, reference_space=None):
        """Range of the child at ``index``, trimmed to this track's
        source_range.

        NOTE(review): ``reference_space`` is accepted but unused here — TODO
        confirm whether it is kept only for API symmetry with Composition.
        """
        child_range = self.range_of_child_at_index(index)

        return self.trim_child_range(child_range)

    def handles_of_child(self, child):
        """If media beyond the ends of this child are visible due to adjacent
        Transitions (only applicable in a Track) then this will return the
        head and tail offsets as a tuple of RationalTime objects. If no handles
        are present on either side, then None is returned instead of a
        RationalTime.

        Example usage

        >>> head, tail = track.handles_of_child(clip)
        >>> if head:
        ...     print('do something')
        >>> if tail:
        ...     print('do something else')
        """
        head, tail = None, None
        before, after = self.neighbors_of(child)
        if isinstance(before, transition.Transition):
            head = before.in_offset
        if isinstance(after, transition.Transition):
            tail = after.out_offset

        return head, tail

    def available_range(self):
        """Total TimeRange of this track, starting at 0."""
        # Sum up our child items' durations (Transitions are not Items and
        # are excluded here).
        duration = sum(
            (c.duration() for c in self if isinstance(c, core.Item)),
            opentime.RationalTime()
        )

        # Add the implicit gap when a Transition hangs off either end of
        # the track.
        if self and isinstance(self[0], transition.Transition):
            duration += self[0].in_offset
        if self and isinstance(self[-1], transition.Transition):
            duration += self[-1].out_offset

        result = opentime.TimeRange(
            start_time=opentime.RationalTime(0, duration.rate),
            duration=duration
        )

        return result

    def each_clip(self, search_range=None, shallow_search=False):
        """Iterate the Clip children, optionally limited to search_range."""
        return self.each_child(search_range, clip.Clip, shallow_search)

    def neighbors_of(self, item, insert_gap=NeighborGapPolicy.never):
        """Returns the neighbors of the item as a namedtuple, (previous, next).

        Can optionally fill in gaps when transitions have no gaps next to them.

        with insert_gap == NeighborGapPolicy.never:
        [A, B, C] :: neighbors_of(B) -> (A, C)
        [A, B, C] :: neighbors_of(A) -> (None, B)
        [A, B, C] :: neighbors_of(C) -> (B, None)
        [A] :: neighbors_of(A) -> (None, None)

        with insert_gap == NeighborGapPolicy.around_transitions:
        (assuming A and C are transitions)
        [A, B, C] :: neighbors_of(B) -> (A, C)
        [A, B, C] :: neighbors_of(A) -> (Gap, B)
        [A, B, C] :: neighbors_of(C) -> (B, Gap)
        [A] :: neighbors_of(A) -> (Gap, Gap)
        """

        try:
            index = self.index(item)
        except ValueError:
            # Re-raise with a message naming both the item and the track.
            raise ValueError(
                "item: {} is not in composition: {}".format(
                    item,
                    self
                )
            )

        previous, next_item = None, None

        # look before index: only a Transition at position 0 gets a
        # synthesized Gap (sized to its in_offset).
        if index == 0:
            if insert_gap == NeighborGapPolicy.around_transitions:
                if isinstance(item, transition.Transition):
                    previous = gap.Gap(
                        source_range=opentime.TimeRange(duration=item.in_offset))
        elif index > 0:
            previous = self[index - 1]

        # same on the tail end, sized to the out_offset.
        if index == len(self) - 1:
            if insert_gap == NeighborGapPolicy.around_transitions:
                if isinstance(item, transition.Transition):
                    next_item = gap.Gap(
                        source_range=opentime.TimeRange(duration=item.out_offset))
        elif index < len(self) - 1:
            next_item = self[index + 1]

        return collections.namedtuple('neighbors', ('previous', 'next'))(
            previous,
            next_item
        )

    def range_of_all_children(self):
        """Return a dict mapping children to their range in this track."""

        if not self._children:
            return {}

        result_map = {}

        # Heuristic to guess what the rate should be set to based on the first
        # thing in the track.
        first_thing = self._children[0]
        if isinstance(first_thing, transition.Transition):
            rate = first_thing.in_offset.rate
        else:
            rate = first_thing.trimmed_range().duration.rate

        last_end_time = opentime.RationalTime(0, rate)

        for thing in self._children:
            if isinstance(thing, transition.Transition):
                # Transitions straddle the running edit point: they reach
                # in_offset back from it and out_offset past it, and do not
                # advance last_end_time.
                result_map[thing] = opentime.TimeRange(
                    last_end_time - thing.in_offset,
                    thing.out_offset + thing.in_offset,
                )
            else:
                last_range = opentime.TimeRange(
                    last_end_time,
                    thing.trimmed_range().duration
                )
                result_map[thing] = last_range
                last_end_time = last_range.end_time_exclusive()

        return result_map


# the original name for "track" was "sequence" - this will turn "Sequence"
# found in OTIO files into Track automatically.
core.register_type(Track, "Sequence")
class TransitionTypes:
    """Enum encoding types of transitions.

    This is for representing "Dissolves" and "Wipes" defined by the
    multi-source effect as defined by SMPTE 258M-2004 7.6.3.2

    Other effects are handled by the `schema.Effect` class.
    """

    # @{ SMPTE transitions.
    SMPTE_Dissolve = "SMPTE_Dissolve"
    # SMPTE_Wipe = "SMPTE_Wipe" -- @TODO
    # @}

    # Non SMPTE transitions.
    Custom = "Custom_Transition"


@core.register_type
class Transition(core.Composable):
    """Represents a transition between two items.

    A Transition overlaps its neighbors in a Track: it reaches ``in_offset``
    into the previous item and ``out_offset`` into the next one.
    """

    _serializable_label = "Transition.1"

    def __init__(
        self,
        name=None,
        transition_type=None,
        # @TODO: parameters will be added later as needed (SMPTE_Wipe will
        # probably require it)
        # parameters=None,
        in_offset=None,
        out_offset=None,
        metadata=None
    ):
        core.Composable.__init__(
            self,
            name=name,
            metadata=metadata
        )

        # init everything as None first, so that we will catch uninitialized
        # values via exceptions
        # if parameters is None:
        #     parameters = {}
        # self.parameters = parameters
        self.transition_type = transition_type
        # Deep-copy the offsets so this transition owns its RationalTimes
        # and later mutation of the caller's objects cannot leak in.
        self.in_offset = copy.deepcopy(in_offset)
        self.out_offset = copy.deepcopy(out_offset)

    transition_type = core.serializable_field(
        "transition_type",
        required_type=type(TransitionTypes.SMPTE_Dissolve),
        doc="Kind of transition, as defined by the "
        "schema.transition.TransitionTypes enum."
    )
    # parameters = core.serializable_field(
    #     "parameters",
    #     doc="Parameters of the transition."
    # )
    in_offset = core.serializable_field(
        "in_offset",
        required_type=opentime.RationalTime,
        doc="Amount of the previous clip this transition overlaps, exclusive."
    )
    out_offset = core.serializable_field(
        "out_offset",
        required_type=opentime.RationalTime,
        doc="Amount of the next clip this transition overlaps, exclusive."
    )

    def __str__(self):
        return 'Transition("{}", "{}", {}, {}, {})'.format(
            self.name,
            self.transition_type,
            self.in_offset,
            self.out_offset,
            # self.parameters,
            self.metadata
        )

    def __repr__(self):
        return (
            'otio.schema.Transition('
            'name={}, '
            'transition_type={}, '
            'in_offset={}, '
            'out_offset={}, '
            # 'parameters={}, '
            'metadata={}'
            ')'.format(
                repr(self.name),
                repr(self.transition_type),
                repr(self.in_offset),
                repr(self.out_offset),
                # repr(self.parameters),
                repr(self.metadata),
            )
        )

    @staticmethod
    def overlapping():
        """Transitions always overlap their neighbors in a Track."""
        return True

    def duration(self):
        """Total span covered by the transition (head + tail overlap)."""
        return self.in_offset + self.out_offset

    def range_in_parent(self):
        """Find and return the range of this item in the parent.

        Raises:
            exceptions.NotAChildError: if this transition has no parent.
        """
        if not self.parent():
            raise exceptions.NotAChildError(
                "No parent of {}, cannot compute range in parent.".format(self)
            )

        return self.parent().range_of_child(self)

    def trimmed_range_in_parent(self):
        """Find and return the trimmed range of this item in the parent.

        Raises:
            exceptions.NotAChildError: if this transition has no parent.
        """
        if not self.parent():
            raise exceptions.NotAChildError(
                "No parent of {}, cannot compute range in parent.".format(self)
            )

        return self.parent().trimmed_range_of_child(self)
mod): + """Insert a new module name and module object into schemadef namespace.""" + ns = globals() # the namespace dict of the schemadef package + ns[name] = mod diff --git a/pype/vendor/python/python_2/opentimelineio/test_utils.py b/pype/vendor/python/python_2/opentimelineio/test_utils.py new file mode 100644 index 00000000000..e173275ff58 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/test_utils.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +# +# Copyright 2018 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Utility assertions for OTIO Unit tests.""" + +import re + +from . 
class OTIOAssertions(object):
    """Mixin of OTIO-specific assertions for unittest.TestCase subclasses."""

    def assertJsonEqual(self, known, test_result):
        """Convert to json and compare that (more readable)."""
        # Show the full diff from assertMultiLineEqual on failure.
        self.maxDiff = None

        def normalize(serialized):
            # "value": 5.0 and "rate": 24.0 are equivalent to their integer
            # spellings; strip the trailing ".0" before the text comparison.
            return re.sub(r'"(value|rate)": (\d+)\.0', r'"\1": \2', serialized)

        expected = normalize(adapters.write_to_string(known, 'otio_json'))
        actual = normalize(adapters.write_to_string(test_result, 'otio_json'))
        self.assertMultiLineEqual(expected, actual)

    def assertIsOTIOEquivalentTo(self, known, test_result):
        """Test using the 'is equivalent to' method on SerializableObject"""
        self.assertTrue(known.is_equivalent_to(test_result))
+# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Unsupported contrib code for OpenTimelineIO.""" + +# flake8: noqa + +from . import ( + adapters +) + +__version__ = "0.11.0" +__author__ = "Pixar Animation Studios" +__author_email__ = "opentimelineio@pixar.com" +__license__ = "Modified Apache 2.0 License" diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/__init__.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/aaf_adapter/__init__.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/aaf_adapter/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/aaf_adapter/aaf_writer.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/aaf_adapter/aaf_writer.py new file mode 100644 index 00000000000..9e283d37472 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/aaf_adapter/aaf_writer.py @@ -0,0 +1,764 @@ +# +# Copyright 2019 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. 
class AAFFileTranscriber(object):
    """
    AAFFileTranscriber

    AAFFileTranscriber manages the file-level knowledge during a conversion from
    otio to aaf. This includes keeping track of unique tapemobs and mastermobs.
    """

    def __init__(self, input_otio, aaf_file, **kwargs):
        """
        AAFFileTranscriber requires an input timeline and an output pyaaf2 file handle.

        Args:
            input_otio: an input OpenTimelineIO timeline
            aaf_file: a pyaaf2 file handle to an output file
        """
        self.aaf_file = aaf_file
        # The top-level composition mob represents the timeline itself.
        self.compositionmob = self.aaf_file.create.CompositionMob()
        self.compositionmob.name = input_otio.name
        self.compositionmob.usage = "Usage_TopLevel"
        self.aaf_file.content.mobs.append(self.compositionmob)
        # Caches keyed by mob id, so each physical/master source is written
        # to the AAF file exactly once.
        self._unique_mastermobs = {}
        self._unique_tapemobs = {}
        self._clip_mob_ids_map = _gather_clip_mob_ids(input_otio, **kwargs)

    def _unique_mastermob(self, otio_clip):
        """Get a unique mastermob, identified by clip metadata mob id."""
        mob_id = self._clip_mob_ids_map.get(otio_clip)
        cached = self._unique_mastermobs.get(mob_id)
        if cached:
            return cached
        mastermob = self.aaf_file.create.MasterMob()
        mastermob.name = otio_clip.name
        mastermob.mob_id = aaf2.mobid.MobID(mob_id)
        self.aaf_file.content.mobs.append(mastermob)
        self._unique_mastermobs[mob_id] = mastermob
        return mastermob

    def _unique_tapemob(self, otio_clip):
        """Get a unique tapemob, identified by clip metadata mob id."""
        mob_id = self._clip_mob_ids_map.get(otio_clip)
        cached = self._unique_tapemobs.get(mob_id)
        if cached:
            return cached
        tapemob = self.aaf_file.create.SourceMob()
        tapemob.name = otio_clip.name
        tapemob.descriptor = self.aaf_file.create.ImportDescriptor()
        # If the edit_rate is not an integer, we need to use drop frame
        # with a nominal integer fps.
        edit_rate = otio_clip.visible_range().duration.rate
        timecode_fps = round(edit_rate)
        tape_timecode_slot = tapemob.create_timecode_slot(
            edit_rate=edit_rate,
            timecode_fps=timecode_fps,
            drop_frame=(edit_rate != timecode_fps)
        )
        available_range = otio_clip.media_reference.available_range
        tape_timecode_slot.segment.start = available_range.start_time.value
        tape_timecode_slot.segment.length = available_range.duration.value
        self.aaf_file.content.mobs.append(tapemob)
        self._unique_tapemobs[mob_id] = tapemob
        return tapemob

    def track_transcriber(self, otio_track):
        """Return an appropriate _TrackTranscriber given an otio track."""
        if otio_track.kind == otio.schema.TrackKind.Video:
            return VideoTrackTranscriber(self, otio_track)
        if otio_track.kind == otio.schema.TrackKind.Audio:
            return AudioTrackTranscriber(self, otio_track)
        raise otio.exceptions.NotSupportedError(
            "Unsupported track kind: {}".format(otio_track.kind))
"['DataDefinition']['Name']"), + __check(child, "metadata['AAF']['OperationGroup']['Operation']" + "['Description']"), + __check(child, "metadata['AAF']['OperationGroup']['Operation']" + "['Name']"), + __check(child, "metadata['AAF']['CutPoint']") + ] + all_checks.extend(checks) + + if any(check.errors for check in all_checks): + raise AAFValidationError("\n" + "\n".join( + sum([check.errors for check in all_checks], []))) + + +def _gather_clip_mob_ids(input_otio, + prefer_file_mob_id=False, + use_empty_mob_ids=False, + **kwargs): + """ + Create dictionary of otio clips with their corresponding mob ids. + """ + + def _from_clip_metadata(clip): + """Get the MobID from the clip.metadata.""" + return clip.metadata.get("AAF", {}).get("SourceID") + + def _from_media_reference_metadata(clip): + """Get the MobID from the media_reference.metadata.""" + return (clip.media_reference.metadata.get("AAF", {}).get("MobID") or + clip.media_reference.metadata.get("AAF", {}).get("SourceID")) + + def _from_aaf_file(clip): + """ Get the MobID from the AAF file itself.""" + mob_id = None + target_url = clip.media_reference.target_url + if os.path.isfile(target_url) and target_url.endswith("aaf"): + with aaf2.open(clip.media_reference.target_url) as aaf_file: + mastermobs = list(aaf_file.content.mastermobs()) + if len(mastermobs) == 1: + mob_id = mastermobs[0].mob_id + return mob_id + + def _generate_empty_mobid(clip): + """Generate a meaningless MobID.""" + return aaf2.mobid.MobID.new() + + strategies = [ + _from_clip_metadata, + _from_media_reference_metadata, + _from_aaf_file + ] + + if prefer_file_mob_id: + strategies.remove(_from_aaf_file) + strategies.insert(0, _from_aaf_file) + + if use_empty_mob_ids: + strategies.append(_generate_empty_mobid) + + clip_mob_ids = {} + + for otio_clip in input_otio.each_clip(): + for strategy in strategies: + mob_id = strategy(otio_clip) + if mob_id: + clip_mob_ids[otio_clip] = mob_id + break + else: + raise AAFAdapterError("Cannot find mob ID for 
clip {}".format(otio_clip)) + + return clip_mob_ids + + +def _stackify_nested_groups(timeline): + """ + Ensure that all nesting in a given timeline is in a stack container. + This conforms with how AAF thinks about nesting, there needs + to be an outer container, even if it's just one object. + """ + copied = copy.deepcopy(timeline) + for track in copied.tracks: + for i, child in enumerate(track.each_child()): + is_nested = isinstance(child, otio.schema.Track) + is_parent_in_stack = isinstance(child.parent(), otio.schema.Stack) + if is_nested and not is_parent_in_stack: + stack = otio.schema.Stack() + track.remove(child) + stack.append(child) + track.insert(i, stack) + return copied + + +class _TrackTranscriber(object): + """ + _TrackTranscriber is the base class for the conversion of a given otio track. + + _TrackTranscriber is not meant to be used by itself. It provides the common + functionality to inherit from. We need an abstract base class because Audio and + Video are handled differently. 
+ """ + __metaclass__ = abc.ABCMeta + + def __init__(self, root_file_transcriber, otio_track): + """ + _TrackTranscriber + + Args: + root_file_transcriber: the corresponding 'parent' AAFFileTranscriber object + otio_track: the given otio_track to convert + """ + self.root_file_transcriber = root_file_transcriber + self.compositionmob = root_file_transcriber.compositionmob + self.aaf_file = root_file_transcriber.aaf_file + self.otio_track = otio_track + self.edit_rate = next(self.otio_track.each_child()).duration().rate + self.timeline_mobslot, self.sequence = self._create_timeline_mobslot() + self.timeline_mobslot.name = self.otio_track.name + + def transcribe(self, otio_child): + """Transcribe otio child to corresponding AAF object""" + if isinstance(otio_child, otio.schema.Gap): + filler = self.aaf_filler(otio_child) + return filler + elif isinstance(otio_child, otio.schema.Transition): + transition = self.aaf_transition(otio_child) + return transition + elif isinstance(otio_child, otio.schema.Clip): + source_clip = self.aaf_sourceclip(otio_child) + return source_clip + elif isinstance(otio_child, otio.schema.Track): + sequence = self.aaf_sequence(otio_child) + return sequence + elif isinstance(otio_child, otio.schema.Stack): + operation_group = self.aaf_operation_group(otio_child) + return operation_group + else: + raise otio.exceptions.NotSupportedError( + "Unsupported otio child type: {}".format(type(otio_child))) + + @property + @abc.abstractmethod + def media_kind(self): + """Return the string for what kind of track this is.""" + pass + + @property + @abc.abstractmethod + def _master_mob_slot_id(self): + """ + Return the MasterMob Slot ID for the corresponding track media kind + """ + # MasterMob's and MasterMob slots have to be unique. We handle unique + # MasterMob's with _unique_mastermob(). We also need to protect against + # duplicate MasterMob slots. 
As of now, we mandate all picture clips to + # be created in MasterMob slot 1 and all sound clips to be created in + # MasterMob slot 2. While this is a little inadequate, it works for now + pass + + @abc.abstractmethod + def _create_timeline_mobslot(self): + """ + Return a timeline_mobslot and sequence for this track. + + In AAF, a TimelineMobSlot is a container for the Sequence. A Sequence is + analogous to an otio track. + + Returns: + Returns a tuple of (TimelineMobSlot, Sequence) + """ + pass + + @abc.abstractmethod + def default_descriptor(self, otio_clip): + pass + + @abc.abstractmethod + def _transition_parameters(self): + pass + + def aaf_filler(self, otio_gap): + """Convert an otio Gap into an aaf Filler""" + length = otio_gap.visible_range().duration.value + filler = self.aaf_file.create.Filler(self.media_kind, length) + return filler + + def aaf_sourceclip(self, otio_clip): + """Convert an otio Clip into an aaf SourceClip""" + tapemob, tapemob_slot = self._create_tapemob(otio_clip) + filemob, filemob_slot = self._create_filemob(otio_clip, tapemob, tapemob_slot) + mastermob, mastermob_slot = self._create_mastermob(otio_clip, + filemob, + filemob_slot) + + # We need both `start_time` and `duration` + # Here `start` is the offset between `first` and `in` values. 
    def aaf_transition(self, otio_transition):
        """Convert an otio Transition into an aaf Transition.

        Only SMPTE_Dissolve transitions are supported; for any other type a
        warning is printed and None is returned, so callers must tolerate a
        None result. All effect parameters are read back out of the clip's
        ["AAF"] metadata (presumably round-tripped from a prior AAF read and
        checked by validate_metadata — confirm for hand-built timelines).
        """
        if (otio_transition.transition_type !=
                otio.schema.transition.TransitionTypes.SMPTE_Dissolve):
            print(
                "Unsupported transition type: {}".format(
                    otio_transition.transition_type))
            return None

        # Track-kind specific parameter definitions plus the VaryingValue
        # that will hold the dissolve's keyframes.
        transition_params, varying_value = self._transition_parameters()

        interpolation_def = self.aaf_file.create.InterpolationDef(
            aaf2.misc.LinearInterp, "LinearInterp", "Linear keyframe interpolation")
        self.aaf_file.dictionary.register_def(interpolation_def)
        # NOTE(review): "interperlationdef" (sic) is the method name the
        # pyaaf2 API exposes — do not "fix" the spelling here.
        varying_value["Interpolation"].value = (
            self.aaf_file.dictionary.lookup_interperlationdef("LinearInterp"))

        # Exactly two control points (start/end of the dissolve) are
        # expected in the metadata PointList.
        pointlist = otio_transition.metadata["AAF"]["PointList"]

        c1 = self.aaf_file.create.ControlPoint()
        c1["EditHint"].value = "Proportional"
        c1.value = pointlist[0]["Value"]
        c1.time = pointlist[0]["Time"]

        c2 = self.aaf_file.create.ControlPoint()
        c2["EditHint"].value = "Proportional"
        c2.value = pointlist[1]["Value"]
        c2.time = pointlist[1]["Time"]

        varying_value["PointList"].extend([c1, c2])

        # Pull the operation description out of the metadata; .get() is used
        # for optional fields, [] for the ones validate_metadata mandates.
        op_group_metadata = otio_transition.metadata["AAF"]["OperationGroup"]
        effect_id = op_group_metadata["Operation"].get("Identification")
        is_time_warp = op_group_metadata["Operation"].get("IsTimeWarp")
        by_pass = op_group_metadata["Operation"].get("Bypass")
        number_inputs = op_group_metadata["Operation"].get("NumberInputs")
        operation_category = op_group_metadata["Operation"].get("OperationCategory")
        data_def_name = op_group_metadata["Operation"]["DataDefinition"]["Name"]
        data_def = self.aaf_file.dictionary.lookup_datadef(str(data_def_name))
        description = op_group_metadata["Operation"]["Description"]
        op_def_name = otio_transition.metadata["AAF"][
            "OperationGroup"
        ]["Operation"]["Name"]

        # Create OperationDefinition
        op_def = self.aaf_file.create.OperationDef(uuid.UUID(effect_id), op_def_name)
        self.aaf_file.dictionary.register_def(op_def)
        op_def.media_kind = self.media_kind
        datadef = self.aaf_file.dictionary.lookup_datadef(self.media_kind)
        op_def["IsTimeWarp"].value = is_time_warp
        op_def["Bypass"].value = by_pass
        op_def["NumberInputs"].value = number_inputs
        op_def["OperationCategory"].value = str(operation_category)
        op_def["ParametersDefined"].extend(transition_params)
        op_def["DataDefinition"].value = data_def
        op_def["Description"].value = str(description)

        # Create OperationGroup
        length = otio_transition.duration().value
        operation_group = self.aaf_file.create.OperationGroup(op_def, length)
        operation_group["DataDefinition"].value = datadef
        operation_group["Parameters"].append(varying_value)

        # Create Transition
        transition = self.aaf_file.create.Transition(self.media_kind, length)
        transition["OperationGroup"].value = operation_group
        transition["CutPoint"].value = otio_transition.metadata["AAF"]["CutPoint"]
        transition["DataDefinition"].value = datadef
        return transition
op_def = self.aaf_file.create.OperationDef(AAF_OPERATIONDEF_SUBMASTER, + "Submaster") + self.aaf_file.dictionary.register_def(op_def) + op_def.media_kind = self.media_kind + datadef = self.aaf_file.dictionary.lookup_datadef(self.media_kind) + + # These values are necessary for pyaaf2 OperationDefinitions + op_def["IsTimeWarp"].value = False + op_def["Bypass"].value = 0 + op_def["NumberInputs"].value = -1 + op_def["OperationCategory"].value = "OperationCategory_Effect" + op_def["DataDefinition"].value = datadef + + # Create OperationGroup + operation_group = self.aaf_file.create.OperationGroup(op_def) + operation_group.media_kind = self.media_kind + operation_group["DataDefinition"].value = datadef + + length = 0 + for nested_otio_child in otio_stack: + result = self.transcribe(nested_otio_child) + length += result.length + operation_group.segments.append(result) + operation_group.length = length + return operation_group + + def _create_tapemob(self, otio_clip): + """ + Return a physical sourcemob for an otio Clip based on the MobID. + + Returns: + Returns a tuple of (TapeMob, TapeMobSlot) + """ + tapemob = self.root_file_transcriber._unique_tapemob(otio_clip) + tapemob_slot = tapemob.create_empty_slot(self.edit_rate, self.media_kind) + tapemob_slot.segment.length = ( + otio_clip.media_reference.available_range.duration.value) + return tapemob, tapemob_slot + + def _create_filemob(self, otio_clip, tapemob, tapemob_slot): + """ + Return a file sourcemob for an otio Clip. Needs a tapemob and tapemob slot. 
+ + Returns: + Returns a tuple of (FileMob, FileMobSlot) + """ + filemob = self.aaf_file.create.SourceMob() + self.aaf_file.content.mobs.append(filemob) + + filemob.descriptor = self.default_descriptor(otio_clip) + filemob_slot = filemob.create_timeline_slot(self.edit_rate) + filemob_clip = filemob.create_source_clip( + slot_id=filemob_slot.slot_id, + length=tapemob_slot.segment.length, + media_kind=tapemob_slot.segment.media_kind) + filemob_clip.mob = tapemob + filemob_clip.slot = tapemob_slot + filemob_clip.slot_id = tapemob_slot.slot_id + filemob_slot.segment = filemob_clip + return filemob, filemob_slot + + def _create_mastermob(self, otio_clip, filemob, filemob_slot): + """ + Return a mastermob for an otio Clip. Needs a filemob and filemob slot. + + Returns: + Returns a tuple of (MasterMob, MasterMobSlot) + """ + mastermob = self.root_file_transcriber._unique_mastermob(otio_clip) + timecode_length = otio_clip.media_reference.available_range.duration.value + + try: + mastermob_slot = mastermob.slot_at(self._master_mob_slot_id) + except IndexError: + mastermob_slot = ( + mastermob.create_timeline_slot(edit_rate=self.edit_rate, + slot_id=self._master_mob_slot_id)) + mastermob_clip = mastermob.create_source_clip( + slot_id=mastermob_slot.slot_id, + length=timecode_length, + media_kind=self.media_kind) + mastermob_clip.mob = filemob + mastermob_clip.slot = filemob_slot + mastermob_clip.slot_id = filemob_slot.slot_id + mastermob_slot.segment = mastermob_clip + return mastermob, mastermob_slot + + +class VideoTrackTranscriber(_TrackTranscriber): + """Video track kind specialization of TrackTranscriber.""" + + @property + def media_kind(self): + return "picture" + + @property + def _master_mob_slot_id(self): + return 1 + + def _create_timeline_mobslot(self): + """ + Create a Sequence container (TimelineMobSlot) and Sequence. 
+ + TimelineMobSlot --> Sequence + """ + timeline_mobslot = self.compositionmob.create_timeline_slot( + edit_rate=self.edit_rate) + sequence = self.aaf_file.create.Sequence(media_kind=self.media_kind) + timeline_mobslot.segment = sequence + return timeline_mobslot, sequence + + def default_descriptor(self, otio_clip): + # TODO: Determine if these values are the correct, and if so, + # maybe they should be in the AAF metadata + descriptor = self.aaf_file.create.CDCIDescriptor() + descriptor["ComponentWidth"].value = 8 + descriptor["HorizontalSubsampling"].value = 2 + descriptor["ImageAspectRatio"].value = "16/9" + descriptor["StoredWidth"].value = 1920 + descriptor["StoredHeight"].value = 1080 + descriptor["FrameLayout"].value = "FullFrame" + descriptor["VideoLineMap"].value = [42, 0] + descriptor["SampleRate"].value = 24 + descriptor["Length"].value = 1 + return descriptor + + def _transition_parameters(self): + """ + Return video transition parameters + """ + # Create ParameterDef for AvidParameterByteOrder + byteorder_typedef = self.aaf_file.dictionary.lookup_typedef("aafUInt16") + param_byteorder = self.aaf_file.create.ParameterDef( + AAF_PARAMETERDEF_AVIDPARAMETERBYTEORDER, + "AvidParameterByteOrder", + "", + byteorder_typedef) + self.aaf_file.dictionary.register_def(param_byteorder) + + # Create ParameterDef for AvidEffectID + avid_effect_typdef = self.aaf_file.dictionary.lookup_typedef("AvidBagOfBits") + param_effect_id = self.aaf_file.create.ParameterDef( + AAF_PARAMETERDEF_AVIDEFFECTID, + "AvidEffectID", + "", + avid_effect_typdef) + self.aaf_file.dictionary.register_def(param_effect_id) + + # Create ParameterDef for AFX_FG_KEY_OPACITY_U + opacity_param_def = self.aaf_file.dictionary.lookup_typedef("Rational") + opacity_param = self.aaf_file.create.ParameterDef( + AAF_PARAMETERDEF_AFX_FG_KEY_OPACITY_U, + "AFX_FG_KEY_OPACITY_U", + "", + opacity_param_def) + self.aaf_file.dictionary.register_def(opacity_param) + + # Create VaryingValue + opacity_u = 
self.aaf_file.create.VaryingValue() + opacity_u.parameterdef = self.aaf_file.dictionary.lookup_parameterdef( + "AFX_FG_KEY_OPACITY_U") + opacity_u["VVal_Extrapolation"].value = AAF_VVAL_EXTRAPOLATION_ID + opacity_u["VVal_FieldCount"].value = 1 + + return [param_byteorder, param_effect_id], opacity_u + + +class AudioTrackTranscriber(_TrackTranscriber): + """Audio track kind specialization of TrackTranscriber.""" + + @property + def media_kind(self): + return "sound" + + @property + def _master_mob_slot_id(self): + return 2 + + def aaf_sourceclip(self, otio_clip): + # Parameter Definition + typedef = self.aaf_file.dictionary.lookup_typedef("Rational") + param_def = self.aaf_file.create.ParameterDef(AAF_PARAMETERDEF_PAN, + "Pan", + "Pan", + typedef) + self.aaf_file.dictionary.register_def(param_def) + interp_def = self.aaf_file.create.InterpolationDef(aaf2.misc.LinearInterp, + "LinearInterp", + "LinearInterp") + self.aaf_file.dictionary.register_def(interp_def) + # PointList + length = otio_clip.duration().value + c1 = self.aaf_file.create.ControlPoint() + c1["ControlPointSource"].value = 2 + c1["Time"].value = aaf2.rational.AAFRational("0/{}".format(length)) + c1["Value"].value = 0 + c2 = self.aaf_file.create.ControlPoint() + c2["ControlPointSource"].value = 2 + c2["Time"].value = aaf2.rational.AAFRational("{}/{}".format(length - 1, length)) + c2["Value"].value = 0 + varying_value = self.aaf_file.create.VaryingValue() + varying_value.parameterdef = param_def + varying_value["Interpolation"].value = interp_def + varying_value["PointList"].extend([c1, c2]) + opgroup = self.timeline_mobslot.segment + opgroup.parameters.append(varying_value) + + return super(AudioTrackTranscriber, self).aaf_sourceclip(otio_clip) + + def _create_timeline_mobslot(self): + """ + Create a Sequence container (TimelineMobSlot) and Sequence. + Sequence needs to be in an OperationGroup. 
+ + TimelineMobSlot --> OperationGroup --> Sequence + """ + # TimelineMobSlot + timeline_mobslot = self.compositionmob.create_sound_slot( + edit_rate=self.edit_rate) + # OperationDefinition + opdef = self.aaf_file.create.OperationDef(AAF_OPERATIONDEF_MONOAUDIOPAN, + "Audio Pan") + opdef.media_kind = self.media_kind + opdef["NumberInputs"].value = 1 + self.aaf_file.dictionary.register_def(opdef) + # OperationGroup + total_length = sum([t.duration().value for t in self.otio_track]) + opgroup = self.aaf_file.create.OperationGroup(opdef) + opgroup.media_kind = self.media_kind + opgroup.length = total_length + timeline_mobslot.segment = opgroup + # Sequence + sequence = self.aaf_file.create.Sequence(media_kind=self.media_kind) + sequence.length = total_length + opgroup.segments.append(sequence) + return timeline_mobslot, sequence + + def default_descriptor(self, otio_clip): + descriptor = self.aaf_file.create.PCMDescriptor() + descriptor["AverageBPS"].value = 96000 + descriptor["BlockAlign"].value = 2 + descriptor["QuantizationBits"].value = 16 + descriptor["AudioSamplingRate"].value = 48000 + descriptor["Channels"].value = 1 + descriptor["SampleRate"].value = 48000 + descriptor["Length"].value = ( + otio_clip.media_reference.available_range.duration.value) + return descriptor + + def _transition_parameters(self): + """ + Return audio transition parameters + """ + # Create ParameterDef for ParameterDef_Level + def_level_typedef = self.aaf_file.dictionary.lookup_typedef("Rational") + param_def_level = self.aaf_file.create.ParameterDef(AAF_PARAMETERDEF_LEVEL, + "ParameterDef_Level", + "", + def_level_typedef) + self.aaf_file.dictionary.register_def(param_def_level) + + # Create VaryingValue + level = self.aaf_file.create.VaryingValue() + level.parameterdef = ( + self.aaf_file.dictionary.lookup_parameterdef("ParameterDef_Level")) + + return [param_def_level], level + + +class __check(object): + """ + __check is a private helper class that safely gets values given to check 
+ for existence and equality + """ + + def __init__(self, obj, tokenpath): + self.orig = obj + self.value = obj + self.errors = [] + self.tokenpath = tokenpath + try: + for token in re.split(r"[\.\[]", tokenpath): + if token.endswith("()"): + self.value = getattr(self.value, token.replace("()", ""))() + elif "]" in token: + self.value = self.value[token.strip("[]'\"")] + else: + self.value = getattr(self.value, token) + except Exception as e: + self.value = None + self.errors.append("{}{} {}.{} does not exist, {}".format( + self.orig.name if hasattr(self.orig, "name") else "", + type(self.orig), + type(self.orig).__name__, + self.tokenpath, e)) + + def equals(self, val): + """Check if the retrieved value is equal to a given value.""" + if self.value is not None and self.value != val: + self.errors.append( + "{}{} {}.{} not equal to {} (expected) != {} (actual)".format( + self.orig.name if hasattr(self.orig, "name") else "", + type(self.orig), + type(self.orig).__name__, self.tokenpath, val, self.value)) + return self diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/advanced_authoring_format.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/advanced_authoring_format.py new file mode 100644 index 00000000000..6c21ea3e55d --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/advanced_authoring_format.py @@ -0,0 +1,979 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. 
This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""OpenTimelineIO Advanced Authoring Format (AAF) Adapter + +Depending on if/where PyAAF is installed, you may need to set this env var: + OTIO_AAF_PYTHON_LIB - should point at the PyAAF module. +""" + +import os +import sys +import numbers +import copy +from collections import Iterable +import opentimelineio as otio + +lib_path = os.environ.get("OTIO_AAF_PYTHON_LIB") +if lib_path and lib_path not in sys.path: + sys.path.insert(0, lib_path) + +import aaf2 # noqa: E402 +import aaf2.content # noqa: E402 +import aaf2.mobs # noqa: E402 +import aaf2.components # noqa: E402 +import aaf2.core # noqa: E402 +from opentimelineio_contrib.adapters.aaf_adapter import aaf_writer # noqa: E402 + + +debug = False +__names = set() + + +def _get_parameter(item, parameter_name): + values = dict((value.name, value) for value in item.parameters.value) + return values.get(parameter_name) + + +def _get_name(item): + if isinstance(item, aaf2.components.SourceClip): + try: + return item.mob.name or "Untitled SourceClip" + except AttributeError: + # Some AAFs produce this error: + # RuntimeError: failed with [-2146303738]: mob not found + return "SourceClip Missing Mob?" 
+ if hasattr(item, 'name'): + name = item.name + if name: + return name + return _get_class_name(item) + + +def _get_class_name(item): + if hasattr(item, "class_name"): + return item.class_name + else: + return item.__class__.__name__ + + +def _transcribe_property(prop): + # XXX: The unicode type doesn't exist in Python 3 (all strings are unicode) + # so we have to use type(u"") which works in both Python 2 and 3. + if isinstance(prop, (str, type(u""), numbers.Integral, float)): + return prop + + elif isinstance(prop, list): + result = {} + for child in prop: + if hasattr(child, "name") and hasattr(child, "value"): + result[child.name] = _transcribe_property(child.value) + else: + # @TODO: There may be more properties that we might want also. + # If you want to see what is being skipped, turn on debug. + if debug: + debug_message = \ + "Skipping unrecognized property: {} of parent {}" + print(debug_message.format(child, prop)) + return result + elif hasattr(prop, "properties"): + result = {} + for child in prop.properties(): + result[child.name] = _transcribe_property(child.value) + return result + else: + return str(prop) + + +def _find_timecode_mobs(item): + mobs = [item.mob] + + for c in item.walk(): + if isinstance(c, aaf2.components.SourceClip): + mob = c.mob + if mob: + mobs.append(mob) + else: + continue + else: + # This could be 'EssenceGroup', 'Pulldown' or other segment + # subclasses + # See also: https://jira.pixar.com/browse/SE-3457 + # For example: + # An EssenceGroup is a Segment that has one or more + # alternate choices, each of which represent different variations + # of one actual piece of content. + # According to the AAF Object Specification and Edit Protocol + # documents: + # "Typically the different representations vary in essence format, + # compression, or frame size. The application is responsible for + # choosing the appropriate implementation of the essence." 
+ # It also says they should all have the same length, but + # there might be nested Sequences inside which we're not attempting + # to handle here (yet). We'll need a concrete example to ensure + # we're doing the right thing. + # TODO: Is the Timecode for an EssenceGroup correct? + # TODO: Try CountChoices() and ChoiceAt(i) + # For now, lets just skip it. + continue + + return mobs + + +def _extract_timecode_info(mob): + """Given a mob with a single timecode slot, return the timecode and length + in that slot as a tuple + """ + timecodes = [slot.segment for slot in mob.slots + if isinstance(slot.segment, aaf2.components.Timecode)] + + if len(timecodes) == 1: + timecode = timecodes[0] + timecode_start = timecode.getvalue('Start') + timecode_length = timecode.getvalue('Length') + + if timecode_start is None or timecode_length is None: + raise otio.exceptions.NotSupportedError( + "Unexpected timecode value(s) in mob named: `{}`." + " `Start`: {}, `Length`: {}".format(mob.name, + timecode_start, + timecode_length) + ) + + return timecode_start, timecode_length + elif len(timecodes) > 1: + raise otio.exceptions.NotSupportedError( + "Error: mob has more than one timecode slots, this is not" + " currently supported by the AAF adapter. found: {} slots, " + " mob name is: '{}'".format(len(timecodes), mob.name) + ) + else: + return None + + +def _add_child(parent, child, source): + if child is None: + if debug: + print("Adding null child? {}".format(source)) + elif isinstance(child, otio.schema.Marker): + parent.markers.append(child) + else: + parent.append(child) + + +def _transcribe(item, parents, editRate, masterMobs): + result = None + metadata = {} + + # First lets grab some standard properties that are present on + # many types of AAF objects... + metadata["Name"] = _get_name(item) + metadata["ClassName"] = _get_class_name(item) + + # Some AAF objects (like TimelineMobSlot) have an edit rate + # which should be used for all of the object's children. 
+ # We will pass it on to any recursive calls to _transcribe() + if hasattr(item, "edit_rate"): + editRate = float(item.edit_rate) + + if isinstance(item, aaf2.components.Component): + metadata["Length"] = item.length + + if isinstance(item, aaf2.core.AAFObject): + for prop in item.properties(): + if hasattr(prop, 'name') and hasattr(prop, 'value'): + key = str(prop.name) + value = prop.value + metadata[key] = _transcribe_property(value) + + # Now we will use the item's class to determine which OTIO type + # to transcribe into. Note that the order of this if/elif/... chain + # is important, because the class hierarchy of AAF objects is more + # complex than OTIO. + + if isinstance(item, aaf2.content.ContentStorage): + result = otio.schema.SerializableCollection() + + # Gather all the Master Mobs, so we can find them later by MobID + # when we parse the SourceClips in the composition + if masterMobs is None: + masterMobs = {} + for mob in item.mastermobs(): + child = _transcribe(mob, parents + [item], editRate, masterMobs) + if child is not None: + mobID = child.metadata.get("AAF", {}).get("MobID") + masterMobs[mobID] = child + + for mob in item.compositionmobs(): + child = _transcribe(mob, parents + [item], editRate, masterMobs) + _add_child(result, child, mob) + + elif isinstance(item, aaf2.mobs.Mob): + result = otio.schema.Timeline() + + for slot in item.slots: + track = _transcribe(slot, parents + [item], editRate, masterMobs) + _add_child(result.tracks, track, slot) + + # Use a heuristic to find the starting timecode from + # this track and use it for the Timeline's global_start_time + start_time = _find_timecode_track_start(track) + if start_time: + result.global_start_time = start_time + + elif isinstance(item, aaf2.components.SourceClip): + result = otio.schema.Clip() + + # Evidently the last mob is the one with the timecode + mobs = _find_timecode_mobs(item) + # Get the Timecode start and length values + last_mob = mobs[-1] if mobs else None + timecode_info 
= _extract_timecode_info(last_mob) if last_mob else None + + source_start = int(metadata.get("StartTime", "0")) + source_length = item.length + media_start = source_start + media_length = item.length + + if timecode_info: + media_start, media_length = timecode_info + source_start += media_start + + # The goal here is to find a source range. Actual editorial opinions are found on SourceClips in the + # CompositionMobs. To figure out whether this clip is directly in the CompositionMob, we detect if our + # parent mobs are only CompositionMobs. If they were anything else - a MasterMob, a SourceMob, we would + # know that this is in some indirect relationship. + parent_mobs = filter(lambda parent: isinstance(parent, aaf2.mobs.Mob), parents) + is_directly_in_composition = all(isinstance(mob, aaf2.mobs.CompositionMob) for mob in parent_mobs) + if is_directly_in_composition: + result.source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(source_start, editRate), + otio.opentime.RationalTime(source_length, editRate) + ) + + # The goal here is to find an available range. Media ranges are stored in the related MasterMob, and there + # should only be one - hence the name "Master" mob. Somewhere down our chain (either a child or our parents) + # is a MasterMob. For SourceClips in the CompositionMob, it is our child. For everything else, it is a + # previously encountered parent. Find the MasterMob in our chain, and then extract the information from that. 
+ child_mastermob = item.mob if isinstance(item.mob, aaf2.mobs.MasterMob) else None + parent_mastermobs = [parent for parent in parents if isinstance(parent, aaf2.mobs.MasterMob)] + parent_mastermob = parent_mastermobs[0] if len(parent_mastermobs) > 1 else None + mastermob = child_mastermob or parent_mastermob or None + + if mastermob: + media = otio.schema.MissingReference() + media.available_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(media_start, editRate), + otio.opentime.RationalTime(media_length, editRate) + ) + # copy the metadata from the master into the media_reference + mastermob_child = masterMobs.get(str(mastermob.mob_id)) + media.metadata["AAF"] = mastermob_child.metadata.get("AAF", {}) + result.media_reference = media + + elif isinstance(item, aaf2.components.Transition): + result = otio.schema.Transition() + + # Does AAF support anything else? + result.transition_type = otio.schema.TransitionTypes.SMPTE_Dissolve + + # Extract value and time attributes of both ControlPoints used for + # creating AAF Transition objects + varying_value = None + for param in item.getvalue('OperationGroup').parameters: + if isinstance(param, aaf2.misc.VaryingValue): + varying_value = param + break + + if varying_value is not None: + for control_point in varying_value.getvalue('PointList'): + value = control_point.value + time = control_point.time + metadata.setdefault('PointList', []).append({'Value': value, + 'Time': time}) + + in_offset = int(metadata.get("CutPoint", "0")) + out_offset = item.length - in_offset + result.in_offset = otio.opentime.RationalTime(in_offset, editRate) + result.out_offset = otio.opentime.RationalTime(out_offset, editRate) + + elif isinstance(item, aaf2.components.Filler): + result = otio.schema.Gap() + + length = item.length + result.source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, editRate), + otio.opentime.RationalTime(length, editRate) + ) + + elif isinstance(item, aaf2.components.NestedScope): + # 
TODO: Is this the right class? + result = otio.schema.Stack() + + for slot in item.slots: + child = _transcribe(slot, parents + [item], editRate, masterMobs) + _add_child(result, child, slot) + + elif isinstance(item, aaf2.components.Sequence): + result = otio.schema.Track() + + for component in item.components: + child = _transcribe(component, parents + [item], editRate, masterMobs) + _add_child(result, child, component) + + elif isinstance(item, aaf2.components.OperationGroup): + result = _transcribe_operation_group( + item, parents, metadata, editRate, masterMobs + ) + + elif isinstance(item, aaf2.mobslots.TimelineMobSlot): + result = otio.schema.Track() + + child = _transcribe(item.segment, parents + [item], editRate, masterMobs) + _add_child(result, child, item.segment) + + elif isinstance(item, aaf2.mobslots.MobSlot): + result = otio.schema.Track() + + child = _transcribe(item.segment, parents + [item], editRate, masterMobs) + _add_child(result, child, item.segment) + + elif isinstance(item, aaf2.components.Timecode): + pass + + elif isinstance(item, aaf2.components.Pulldown): + pass + + elif isinstance(item, aaf2.components.EdgeCode): + pass + + elif isinstance(item, aaf2.components.ScopeReference): + # TODO: is this like FILLER? + + result = otio.schema.Gap() + + length = item.length + result.source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, editRate), + otio.opentime.RationalTime(length, editRate) + ) + + elif isinstance(item, aaf2.components.DescriptiveMarker): + + # Markers come in on their own separate Track. + # TODO: We should consolidate them onto the same track(s) as the clips + # result = otio.schema.Marker() + pass + + elif isinstance(item, aaf2.components.Selector): + # If you mute a clip in media composer, it becomes one of these in the + # AAF. 
+ result = _transcribe( + item.getvalue("Selected"), + parents + [item], + editRate, + masterMobs + ) + + alternates = [ + _transcribe(alt, parents + [item], editRate, masterMobs) + for alt in item.getvalue("Alternates") + ] + + # muted case -- if there is only one item its muted, otherwise its + # a multi cam thing + if alternates and len(alternates) == 1: + metadata['muted_clip'] = True + result.name = str(alternates[0].name) + "_MUTED" + + metadata['alternates'] = alternates + + # @TODO: There are a bunch of other AAF object types that we will + # likely need to add support for. I'm leaving this code here to help + # future efforts to extract the useful information out of these. + + # elif isinstance(item, aaf.storage.File): + # self.extendChildItems([item.header]) + + # elif isinstance(item, aaf.storage.Header): + # self.extendChildItems([item.storage()]) + # self.extendChildItems([item.dictionary()]) + + # elif isinstance(item, aaf.dictionary.Dictionary): + # l = [] + # l.append(DummyItem(list(item.class_defs()), 'ClassDefs')) + # l.append(DummyItem(list(item.codec_defs()), 'CodecDefs')) + # l.append(DummyItem(list(item.container_defs()), 'ContainerDefs')) + # l.append(DummyItem(list(item.data_defs()), 'DataDefs')) + # l.append(DummyItem(list(item.interpolation_defs()), + # 'InterpolationDefs')) + # l.append(DummyItem(list(item.klvdata_defs()), 'KLVDataDefs')) + # l.append(DummyItem(list(item.operation_defs()), 'OperationDefs')) + # l.append(DummyItem(list(item.parameter_defs()), 'ParameterDefs')) + # l.append(DummyItem(list(item.plugin_defs()), 'PluginDefs')) + # l.append(DummyItem(list(item.taggedvalue_defs()), 'TaggedValueDefs')) + # l.append(DummyItem(list(item.type_defs()), 'TypeDefs')) + # self.extendChildItems(l) + # + # elif isinstance(item, pyaaf.AxSelector): + # self.extendChildItems(list(item.EnumAlternateSegments())) + # + # elif isinstance(item, pyaaf.AxScopeReference): + # #print item, item.GetRelativeScope(),item.GetRelativeSlot() + # pass + # + 
# elif isinstance(item, pyaaf.AxEssenceGroup): + # segments = [] + # + # for i in xrange(item.CountChoices()): + # choice = item.GetChoiceAt(i) + # segments.append(choice) + # self.extendChildItems(segments) + # + # elif isinstance(item, pyaaf.AxProperty): + # self.properties['Value'] = str(item.GetValue()) + + elif isinstance(item, Iterable): + result = otio.schema.SerializableCollection() + for child in item: + result.append( + _transcribe( + child, + parents + [item], + editRate, + masterMobs + ) + ) + else: + # For everything else, we just ignore it. + # To see what is being ignored, turn on the debug flag + if debug: + print("SKIPPING: {}: {} -- {}".format(type(item), item, result)) + + # Did we get anything? If not, we're done + if result is None: + return None + + # Okay, now we've turned the AAF thing into an OTIO result + # There's a bit more we can do before we're ready to return the result. + + # If we didn't get a name yet, use the one we have in metadata + if result.name is None: + result.name = metadata["Name"] + + # Attach the AAF metadata + if not result.metadata: + result.metadata = {} + result.metadata["AAF"] = metadata + + # Double check that we got the length we expected + if isinstance(result, otio.core.Item): + length = metadata.get("Length") + if ( + length + and result.source_range is not None + and result.source_range.duration.value != length + ): + raise otio.exceptions.OTIOError( + "Wrong duration? {} should be {} in {}".format( + result.source_range.duration.value, + length, + result + ) + ) + + # Did we find a Track? + if isinstance(result, otio.schema.Track): + # Try to figure out the kind of Track it is + if hasattr(item, 'media_kind'): + media_kind = str(item.media_kind) + result.metadata["AAF"]["MediaKind"] = media_kind + if media_kind == "Picture": + result.kind = otio.schema.TrackKind.Video + elif media_kind in ("SoundMasterTrack", "Sound"): + result.kind = otio.schema.TrackKind.Audio + else: + # Timecode, Edgecode, others? 
+ result.kind = None + + # Done! + return result + + +def _find_timecode_track_start(track): + # See if we can find a starting timecode in here... + aaf_metadata = track.metadata.get("AAF", {}) + + # Is this a Timecode track? + if aaf_metadata.get("MediaKind") == "Timecode": + edit_rate = aaf_metadata.get("EditRate", "0") + fps = aaf_metadata.get("Segment", {}).get("FPS", 0) + start = aaf_metadata.get("Segment", {}).get("Start", "0") + + # Often times there are several timecode tracks, so + # we use a heuristic to only pay attention to Timecode + # tracks with a FPS that matches the edit rate. + if edit_rate == str(fps): + return otio.opentime.RationalTime( + value=int(start), + rate=float(edit_rate) + ) + + # We didn't find anything useful + return None + + +def _transcribe_linear_timewarp(item, parameters): + # this is a linear time warp + effect = otio.schema.LinearTimeWarp() + + offset_map = _get_parameter(item, 'PARAM_SPEED_OFFSET_MAP_U') + + # If we have a LinearInterp with just 2 control points, then + # we can compute the time_scalar. Note that the SpeedRatio is + # NOT correct in many AAFs - we aren't sure why, but luckily we + # can compute the correct value this way. + points = offset_map.get("PointList") + if len(points) > 2: + # This is something complicated... 
try the fancy version + return _transcribe_fancy_timewarp(item, parameters) + elif ( + len(points) == 2 + and float(points[0].time) == 0 + and float(points[0].value) == 0 + ): + # With just two points, we can compute the slope + effect.time_scalar = float(points[1].value) / float(points[1].time) + else: + # Fall back to the SpeedRatio if we didn't understand the points + ratio = parameters.get("SpeedRatio") + if ratio == str(item.length): + # If the SpeedRatio == the length, this is a freeze frame + effect.time_scalar = 0 + elif '/' in ratio: + numerator, denominator = map(float, ratio.split('/')) + # OTIO time_scalar is 1/x from AAF's SpeedRatio + effect.time_scalar = denominator / numerator + else: + effect.time_scalar = 1.0 / float(ratio) + + # Is this is a freeze frame? + if effect.time_scalar == 0: + # Note: we might end up here if any of the code paths above + # produced a 0 time_scalar. + # Use the FreezeFrame class instead of LinearTimeWarp + effect = otio.schema.FreezeFrame() + + return effect + + +def _transcribe_fancy_timewarp(item, parameters): + + # For now, this is an unsupported time effect... + effect = otio.schema.TimeEffect() + effect.effect_name = None # Unsupported + effect.name = item.get("Name") + + return effect + + # TODO: Here is some sample code that pulls out the full + # details of a non-linear speed map. + + # speed_map = item.parameter['PARAM_SPEED_MAP_U'] + # offset_map = item.parameter['PARAM_SPEED_OFFSET_MAP_U'] + # Also? PARAM_OFFSET_MAP_U (without the word "SPEED" in it?) 
+ # print(speed_map['PointList'].value) + # print(speed_map.count()) + # print(speed_map.interpolation_def().name) + # + # for p in speed_map.points(): + # print(" ", float(p.time), float(p.value), p.edit_hint) + # for prop in p.point_properties(): + # print(" ", prop.name, prop.value, float(prop.value)) + # + # print(offset_map.interpolation_def().name) + # for p in offset_map.points(): + # edit_hint = p.edit_hint + # time = p.time + # value = p.value + # + # pass + # # print " ", float(p.time), float(p.value) + # + # for i in range(100): + # float(offset_map.value_at("%i/100" % i)) + # + # # Test file PARAM_SPEED_MAP_U is AvidBezierInterpolator + # # currently no implement for value_at + # try: + # speed_map.value_at(.25) + # except NotImplementedError: + # pass + # else: + # raise + + +def _transcribe_operation_group(item, parents, metadata, editRate, masterMobs): + result = otio.schema.Stack() + + operation = metadata.get("Operation", {}) + parameters = metadata.get("Parameters", {}) + result.name = operation.get("Name") + + # Trust the length that is specified in the AAF + length = metadata.get("Length") + result.source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, editRate), + otio.opentime.RationalTime(length, editRate) + ) + + # Look for speed effects... + effect = None + if operation.get("IsTimeWarp"): + if operation.get("Name") == "Motion Control": + + offset_map = _get_parameter(item, 'PARAM_SPEED_OFFSET_MAP_U') + # TODO: We should also check the PARAM_OFFSET_MAP_U which has + # an interpolation_def().name as well. 
+ if offset_map is not None: + interpolation = offset_map.interpolation.name + else: + interpolation = None + + if interpolation == "LinearInterp": + effect = _transcribe_linear_timewarp(item, parameters) + else: + effect = _transcribe_fancy_timewarp(item, parameters) + + else: + # Unsupported time effect + effect = otio.schema.TimeEffect() + effect.effect_name = None # Unsupported + effect.name = operation.get("Name") + else: + # Unsupported effect + effect = otio.schema.Effect() + effect.effect_name = None # Unsupported + effect.name = operation.get("Name") + + if effect is not None: + result.effects.append(effect) + effect.metadata = { + "AAF": { + "Operation": operation, + "Parameters": parameters + } + } + + for segment in item.getvalue("InputSegments"): + child = _transcribe(segment, parents + [item], editRate, masterMobs) + if child: + _add_child(result, child, segment) + + return result + + +def _fix_transitions(thing): + if isinstance(thing, otio.schema.Timeline): + _fix_transitions(thing.tracks) + elif ( + isinstance(thing, otio.core.Composition) + or isinstance(thing, otio.schema.SerializableCollection) + ): + if isinstance(thing, otio.schema.Track): + for c, child in enumerate(thing): + + # Don't touch the Transitions themselves, + # only the Clips & Gaps next to them. + if not isinstance(child, otio.core.Item): + continue + + # Was the item before us a Transition? + if c > 0 and isinstance( + thing[c - 1], + otio.schema.Transition + ): + pre_trans = thing[c - 1] + + if child.source_range is None: + child.source_range = child.trimmed_range() + csr = child.source_range + child.source_range = otio.opentime.TimeRange( + start_time=csr.start_time + pre_trans.in_offset, + duration=csr.duration - pre_trans.in_offset + ) + + # Is the item after us a Transition? 
+ if c < len(thing) - 1 and isinstance( + thing[c + 1], + otio.schema.Transition + ): + post_trans = thing[c + 1] + + if child.source_range is None: + child.source_range = child.trimmed_range() + csr = child.source_range + child.source_range = otio.opentime.TimeRange( + start_time=csr.start_time, + duration=csr.duration - post_trans.out_offset + ) + + for child in thing: + _fix_transitions(child) + + +def _simplify(thing): + if isinstance(thing, otio.schema.SerializableCollection): + if len(thing) == 1: + return _simplify(thing[0]) + else: + for c, child in enumerate(thing): + thing[c] = _simplify(child) + return thing + + elif isinstance(thing, otio.schema.Timeline): + result = _simplify(thing.tracks) + + # Only replace the Timeline's stack if the simplified result + # was also a Stack. Otherwise leave it (the contents will have + # been simplified in place). + if isinstance(result, otio.schema.Stack): + thing.tracks = result + + return thing + + elif isinstance(thing, otio.core.Composition): + # simplify our children + for c, child in enumerate(thing): + thing[c] = _simplify(child) + + # remove empty children of Stacks + if isinstance(thing, otio.schema.Stack): + for c in reversed(range(len(thing))): + child = thing[c] + if not _contains_something_valuable(child): + # TODO: We're discarding metadata... should we retain it? + del thing[c] + + # Look for Stacks within Stacks + c = len(thing) - 1 + while c >= 0: + child = thing[c] + # Is my child a Stack also? 
(with no effects) + if ( + not _has_effects(child) + and + ( + isinstance(child, otio.schema.Stack) + or ( + isinstance(child, otio.schema.Track) + and len(child) == 1 + and isinstance(child[0], otio.schema.Stack) + and child[0] + and isinstance(child[0][0], otio.schema.Track) + ) + ) + ): + if isinstance(child, otio.schema.Track): + child = child[0] + + # Pull the child's children into the parent + num = len(child) + children_of_child = child[:] + # clear out the ownership of 'child' + del child[:] + thing[c:c + 1] = children_of_child + + # TODO: We may be discarding metadata, should we merge it? + # TODO: Do we need to offset the markers in time? + thing.markers.extend(child.markers) + # Note: we don't merge effects, because we already made + # sure the child had no effects in the if statement above. + + c = c + num + c = c - 1 + + # skip redundant containers + if _is_redundant_container(thing): + # TODO: We may be discarding metadata here, should we merge it? + result = thing[0].deepcopy() + # TODO: Do we need to offset the markers in time? + result.markers.extend(thing.markers) + # TODO: The order of the effects is probably important... + # should they be added to the end or the front? + # Intuitively it seems like the child's effects should come before + # the parent's effects. This will need to be solidified when we + # add more effects support. 
+ result.effects.extend(thing.effects) + # Keep the parent's length, if it has one + if thing.source_range: + # make sure it has a source_range first + if not result.source_range: + try: + result.source_range = result.trimmed_range() + except otio.exceptions.CannotComputeAvailableRangeError: + result.source_range = copy.copy(thing.source_range) + # modify the duration, but leave the start_time as is + result.source_range = otio.opentime.TimeRange( + result.source_range.start_time, + thing.source_range.duration + ) + return result + + # if thing is the top level stack, all of its children must be in tracks + if isinstance(thing, otio.schema.Stack) and thing.parent() is None: + children_needing_tracks = [] + for child in thing: + if isinstance(child, otio.schema.Track): + continue + children_needing_tracks.append(child) + + for child in children_needing_tracks: + orig_index = thing.index(child) + del thing[orig_index] + new_track = otio.schema.Track() + new_track.append(child) + thing.insert(orig_index, new_track) + + return thing + + +def _has_effects(thing): + if isinstance(thing, otio.core.Item): + if len(thing.effects) > 0: + return True + + +def _is_redundant_container(thing): + + is_composition = isinstance(thing, otio.core.Composition) + if not is_composition: + return False + + has_one_child = len(thing) == 1 + if not has_one_child: + return False + + am_top_level_track = ( + type(thing) is otio.schema.Track + and type(thing.parent()) is otio.schema.Stack + and thing.parent().parent() is None + ) + + return ( + not am_top_level_track + # am a top level track but my only child is a track + or ( + type(thing) is otio.schema.Track + and type(thing[0]) is otio.schema.Track + ) + ) + + +def _contains_something_valuable(thing): + if isinstance(thing, otio.core.Item): + if len(thing.effects) > 0 or len(thing.markers) > 0: + return True + + if isinstance(thing, otio.core.Composition): + + if len(thing) == 0: + # NOT valuable because it is empty + return False + + for 
child in thing: + if _contains_something_valuable(child): + # valuable because this child is valuable + return True + + # none of the children were valuable, so thing is NOT valuable + return False + + if isinstance(thing, otio.schema.Gap): + # TODO: Are there other valuable things we should look for on a Gap? + return False + + # anything else is presumed to be valuable + return True + + +def read_from_file(filepath, simplify=True): + + with aaf2.open(filepath) as aaf_file: + + storage = aaf_file.content + + # Note: We're skipping: f.header + # Is there something valuable in there? + + __names.clear() + masterMobs = {} + + result = _transcribe(storage, parents=list(), editRate=None, masterMobs=masterMobs) + top = storage.toplevel() + if top: + # re-transcribe just the top-level mobs + # but use all the master mobs we found in the 1st pass + __names.clear() # reset the names back to 0 + result = _transcribe(top, parents=list(), editRate=None, masterMobs=masterMobs) + + # AAF is typically more deeply nested than OTIO. + # Lets try to simplify the structure by collapsing or removing + # unnecessary stuff. + if simplify: + result = _simplify(result) + + # OTIO represents transitions a bit different than AAF, so + # we need to iterate over them and modify the items on either side. + # Note that we do this *after* simplifying, since the structure + # may change during simplification. 
+ _fix_transitions(result) + + return result + + +def write_to_file(input_otio, filepath, **kwargs): + with aaf2.open(filepath, "w") as f: + + timeline = aaf_writer._stackify_nested_groups(input_otio) + + aaf_writer.validate_metadata(timeline) + + otio2aaf = aaf_writer.AAFFileTranscriber(timeline, f, **kwargs) + + if not isinstance(timeline, otio.schema.Timeline): + raise otio.exceptions.NotSupportedError( + "Currently only supporting top level Timeline") + + for otio_track in timeline.tracks: + # Ensure track must have clip to get the edit_rate + if len(otio_track) == 0: + continue + + transcriber = otio2aaf.track_transcriber(otio_track) + + for otio_child in otio_track: + result = transcriber.transcribe(otio_child) + if result: + transcriber.sequence.components.append(result) diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/ale.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/ale.py new file mode 100644 index 00000000000..150ed6d93d5 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/ale.py @@ -0,0 +1,318 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. 
+# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""OpenTimelineIO Avid Log Exchange (ALE) Adapter""" +import re +import opentimelineio as otio + +DEFAULT_VIDEO_FORMAT = '1080' + + +def AVID_VIDEO_FORMAT_FROM_WIDTH_HEIGHT(width, height): + """Utility function to map a width and height to an Avid Project Format""" + + format_map = { + '1080': "1080", + '720': "720", + '576': "PAL", + '486': "NTSC", + } + mapped = format_map.get(str(height), "CUSTOM") + # check for the 2K DCI 1080 format + if mapped == '1080' and width > 1920: + mapped = "CUSTOM" + return mapped + + +class ALEParseError(otio.exceptions.OTIOError): + pass + + +def _parse_data_line(line, columns, fps): + row = line.split("\t") + + if len(row) < len(columns): + # Fill in blanks for any missing fields in this row + row.extend([""] * (len(columns) - len(row))) + + if len(row) > len(columns): + raise ALEParseError("Too many values on row: " + line) + + try: + + # Gather all the columns into a dictionary + # For expected columns, like Name, Start, etc. we will pop (remove) + # those from metadata, leaving the rest alone. + metadata = dict(zip(columns, row)) + + clip = otio.schema.Clip() + clip.name = metadata.pop("Name", None) + + # When looking for Start, Duration and End, they might be missing + # or blank. Treat None and "" as the same via: get(k,"")!="" + # To have a valid source range, you need Start and either Duration + # or End. If all three are provided, we check to make sure they match. 
+ if metadata.get("Start", "") != "": + value = metadata.pop("Start") + try: + start = otio.opentime.from_timecode(value, fps) + except (ValueError, TypeError): + raise ALEParseError("Invalid Start timecode: {}".format(value)) + duration = None + end = None + if metadata.get("Duration", "") != "": + value = metadata.pop("Duration") + try: + duration = otio.opentime.from_timecode(value, fps) + except (ValueError, TypeError): + raise ALEParseError("Invalid Duration timecode: {}".format( + value + )) + if metadata.get("End", "") != "": + value = metadata.pop("End") + try: + end = otio.opentime.from_timecode(value, fps) + except (ValueError, TypeError): + raise ALEParseError("Invalid End timecode: {}".format( + value + )) + if duration is None: + duration = end - start + if end is None: + end = start + duration + if end != start + duration: + raise ALEParseError( + "Inconsistent Start, End, Duration: " + line + ) + clip.source_range = otio.opentime.TimeRange( + start, + duration + ) + + if metadata.get("Source File"): + source = metadata.pop("Source File") + clip.media_reference = otio.schema.ExternalReference( + target_url=source + ) + + # We've pulled out the key/value pairs that we treat specially. 
+ # Put the remaining key/values into clip.metadata["ALE"] + clip.metadata["ALE"] = metadata + + return clip + except Exception as ex: + raise ALEParseError("Error parsing line: {}\n{}".format( + line, repr(ex) + )) + + +def _video_format_from_metadata(clips): + # Look for clips with Image Size metadata set + max_height = 0 + max_width = 0 + for clip in clips: + fields = clip.metadata.get("ALE", {}) + res = fields.get("Image Size", "") + m = re.search(r'([0-9]{1,})\s*[xX]\s*([0-9]{1,})', res) + if m and len(m.groups()) >= 2: + width = int(m.group(1)) + height = int(m.group(2)) + if height > max_height: + max_height = height + if width > max_width: + max_width = width + + # We don't have any image size information, use the defaut + if max_height == 0: + return DEFAULT_VIDEO_FORMAT + else: + return AVID_VIDEO_FORMAT_FROM_WIDTH_HEIGHT(max_width, max_height) + + +def read_from_string(input_str, fps=24): + + collection = otio.schema.SerializableCollection() + header = {} + columns = [] + + def nextline(lines): + return lines.pop(0) + + lines = input_str.splitlines() + while len(lines): + line = nextline(lines) + + # skip blank lines + if line.strip() == "": + continue + + if line.strip() == "Heading": + while len(lines): + line = nextline(lines) + + if line.strip() == "": + break + + if "\t" not in line: + raise ALEParseError("Invalid Heading line: " + line) + + segments = line.split("\t") + while len(segments) >= 2: + key, val = segments.pop(0), segments.pop(0) + header[key] = val + if len(segments) != 0: + raise ALEParseError("Invalid Heading line: " + line) + + if "FPS" in header: + fps = float(header["FPS"]) + + if line.strip() == "Column": + if len(lines) == 0: + raise ALEParseError("Unexpected end of file after: " + line) + + line = nextline(lines) + columns = line.split("\t") + + if line.strip() == "Data": + while len(lines): + line = nextline(lines) + + if line.strip() == "": + continue + + clip = _parse_data_line(line, columns, fps) + + collection.append(clip) 
+ + collection.metadata["ALE"] = { + "header": header, + "columns": columns + } + + return collection + + +def write_to_string(input_otio, columns=None, fps=None, video_format=None): + + # Get all the clips we're going to export + clips = list(input_otio.each_clip()) + + result = "" + + result += "Heading\n" + header = dict(input_otio.metadata.get("ALE", {}).get("header", {})) + + # Force this, since we've hard coded tab delimiters + header["FIELD_DELIM"] = "TABS" + + if fps is None: + # If we weren't given a FPS, is there one in the header metadata? + if "FPS" in header: + fps = float(header["FPS"]) + else: + # Would it be better to infer this by inspecting the input clips? + fps = 24 + header["FPS"] = str(fps) + else: + # Put the value we were given into the header + header["FPS"] = str(fps) + + # Check if we have been supplied a VIDEO_FORMAT, if not lets set one + if video_format is None: + # Do we already have it in the header? If so, lets leave that as is + if "VIDEO_FORMAT" not in header: + header["VIDEO_FORMAT"] = _video_format_from_metadata(clips) + else: + header["VIDEO_FORMAT"] = str(video_format) + + headers = list(header.items()) + headers.sort() # make the output predictable + for key, val in headers: + result += "{}\t{}\n".format(key, val) + + # If the caller passed in a list of columns, use that, otherwise + # we need to discover the columns that should be output. + if columns is None: + # Is there a hint about the columns we want (and column ordering) + # at the top level? 
+ columns = input_otio.metadata.get("ALE", {}).get("columns", []) + + # Scan all the clips for any extra columns + for clip in clips: + fields = clip.metadata.get("ALE", {}) + for key in fields.keys(): + if key not in columns: + columns.append(key) + + # Always output these + for c in ["Duration", "End", "Start", "Name", "Source File"]: + if c not in columns: + columns.insert(0, c) + + result += "\nColumn\n{}\n".format("\t".join(columns)) + + result += "\nData\n" + + def val_for_column(column, clip): + if column == "Name": + return clip.name + elif column == "Source File": + if ( + clip.media_reference and + hasattr(clip.media_reference, 'target_url') and + clip.media_reference.target_url + ): + return clip.media_reference.target_url + else: + return "" + elif column == "Start": + if not clip.source_range: + return "" + return otio.opentime.to_timecode( + clip.source_range.start_time, fps + ) + elif column == "Duration": + if not clip.source_range: + return "" + return otio.opentime.to_timecode( + clip.source_range.duration, fps + ) + elif column == "End": + if not clip.source_range: + return "" + return otio.opentime.to_timecode( + clip.source_range.end_time_exclusive(), fps + ) + else: + return clip.metadata.get("ALE", {}).get(column) + + for clip in clips: + row = [] + for column in columns: + val = str(val_for_column(column, clip) or "") + val.replace("\t", " ") # don't allow tabs inside a value + row.append(val) + result += "\t".join(row) + "\n" + + return result diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/burnins.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/burnins.py new file mode 100644 index 00000000000..93741bbb146 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/burnins.py @@ -0,0 +1,93 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except 
in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# +"""FFMPEG Burnins Adapter""" +import os +import sys + + +def build_burnins(input_otio): + """ + Generates the burnin objects for each clip within the otio container + + :param input_otio: OTIO container + :rtype: [ffmpeg_burnins.Burnins(), ...] 
+ """ + + if os.path.dirname(__file__) not in sys.path: + sys.path.append(os.path.dirname(__file__)) + + import ffmpeg_burnins + key = 'burnins' + + burnins = [] + for clip in input_otio.each_clip(): + + # per clip burnin data + burnin_data = clip.media_reference.metadata.get(key) + if not burnin_data: + # otherwise default to global burnin + burnin_data = input_otio.metadata.get(key) + + if not burnin_data: + continue + + media = clip.media_reference.target_url + if media.startswith('file://'): + media = media[7:] + streams = burnin_data.get('streams') + burnins.append(ffmpeg_burnins.Burnins(media, + streams=streams)) + burnins[-1].otio_media = media + burnins[-1].otio_overwrite = burnin_data.get('overwrite') + burnins[-1].otio_args = burnin_data.get('args') + + for burnin in burnin_data.get('burnins', []): + align = burnin.pop('align') + function = burnin.pop('function') + if function == 'text': + text = burnin.pop('text') + options = ffmpeg_burnins.TextOptions() + options.update(burnin) + burnins[-1].add_text(text, align, options=options) + elif function == 'frame_number': + options = ffmpeg_burnins.FrameNumberOptions() + options.update(burnin) + burnins[-1].add_frame_numbers(align, options=options) + elif function == 'timecode': + options = ffmpeg_burnins.TimeCodeOptions() + options.update(burnin) + burnins[-1].add_timecode(align, options=options) + else: + raise RuntimeError("Unknown function '%s'" % function) + + return burnins + + +def write_to_file(input_otio, filepath): + """required OTIO function hook""" + + for burnin in build_burnins(input_otio): + burnin.render(os.path.join(filepath, burnin.otio_media), + args=burnin.otio_args, + overwrite=burnin.otio_overwrite) diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/contrib_adapters.plugin_manifest.json b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/contrib_adapters.plugin_manifest.json new file mode 100644 index 00000000000..ceaf0a3067f --- /dev/null +++ 
b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/contrib_adapters.plugin_manifest.json @@ -0,0 +1,61 @@ +{ + "OTIO_SCHEMA" : "PluginManifest.1", + "adapters": [ + { + "OTIO_SCHEMA": "Adapter.1", + "name": "fcpx_xml", + "execution_scope": "in process", + "filepath": "fcpx_xml.py", + "suffixes": ["fcpxml"] + }, + { + "OTIO_SCHEMA": "Adapter.1", + "name": "hls_playlist", + "execution_scope": "in process", + "filepath": "hls_playlist.py", + "suffixes": ["m3u8"] + }, + { + "OTIO_SCHEMA" : "Adapter.1", + "name" : "rv_session", + "execution_scope" : "in process", + "filepath" : "rv.py", + "suffixes" : ["rv"] + }, + { + "OTIO_SCHEMA" : "Adapter.1", + "name" : "maya_sequencer", + "execution_scope" : "in process", + "filepath" : "maya_sequencer.py", + "suffixes" : ["ma","mb"] + }, + { + "OTIO_SCHEMA" : "Adapter.1", + "name" : "ale", + "execution_scope" : "in process", + "filepath" : "ale.py", + "suffixes" : ["ale"] + }, + { + "OTIO_SCHEMA" : "Adapter.1", + "name" : "burnins", + "execution_scope" : "in process", + "filepath" : "burnins.py", + "suffixes" : [] + }, + { + "OTIO_SCHEMA" : "Adapter.1", + "name" : "AAF", + "execution_scope" : "in process", + "filepath" : "advanced_authoring_format.py", + "suffixes" : ["aaf"] + }, + { + "OTIO_SCHEMA": "Adapter.1", + "name": "xges", + "execution_scope": "in process", + "filepath": "xges.py", + "suffixes": ["xges"] + } + ] +} diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/extern_maya_sequencer.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/extern_maya_sequencer.py new file mode 100644 index 00000000000..45d77976cf5 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/extern_maya_sequencer.py @@ -0,0 +1,261 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the 
following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +import os +import sys + +# deal with renaming of default library from python 2 / 3 +try: + import urlparse as urllib_parse +except ImportError: + import urllib.parse as urllib_parse + +# import maya and handle standalone mode +from maya import cmds + +try: + cmds.ls +except AttributeError: + from maya import standalone + standalone.initialize(name='python') + +import opentimelineio as otio + +# Mapping of Maya FPS Enum to rate. 
+FPS = { + 'game': 15, + 'film': 24, + 'pal': 25, + 'ntsc': 30, + 'show': 48, + 'palf': 50, + 'ntscf': 60 +} + + +def _url_to_path(url): + if url is None: + return None + + return urllib_parse.urlparse(url).path + + +def _video_url_for_shot(shot): + current_file = os.path.normpath(cmds.file(q=True, sn=True)) + return os.path.join( + os.path.dirname(current_file), + 'playblasts', + '{base_name}_{shot_name}.mov'.format( + base_name=os.path.basename(os.path.splitext(current_file)[0]), + shot_name=cmds.shot(shot, q=True, shotName=True) + ) + ) + + +def _match_existing_shot(item, existing_shots): + if existing_shots is None: + return None + + if item.media_reference.is_missing_reference: + return None + + url_path = _url_to_path(item.media_reference.target_url) + return next( + ( + shot for shot in existing_shots + if _video_url_for_shot(shot) == url_path + ), + None + ) + + +# ------------------------ +# building single track +# ------------------------ + +def _build_shot(item, track_no, track_range, existing_shot=None): + camera = None + if existing_shot is None: + camera = cmds.camera(name=item.name.split('.')[0] + '_cam')[0] + cmds.shot( + existing_shot or item.name.split('.')[0], + e=existing_shot is not None, + shotName=item.name, + track=track_no, + currentCamera=camera, + startTime=item.trimmed_range().start_time.value, + endTime=item.trimmed_range().end_time_inclusive().value, + sequenceStartTime=track_range.start_time.value, + sequenceEndTime=track_range.end_time_inclusive().value + ) + + +def _build_track(track, track_no, existing_shots=None): + for n, item in enumerate(track): + if not isinstance(item, otio.schema.Clip): + continue + + track_range = track.range_of_child_at_index(n) + if existing_shots is not None: + existing_shot = _match_existing_shot(item, existing_shots) + else: + existing_shot = None + + _build_shot(item, track_no, track_range, existing_shot) + + +def build_sequence(timeline, clean=False): + existing_shots = cmds.ls(type='shot') or [] + 
if clean: + cmds.delete(existing_shots) + existing_shots = [] + + tracks = [ + track for track in timeline.tracks + if track.kind == otio.schema.TrackKind.Video + ] + + for track_no, track in enumerate(reversed(tracks)): + _build_track(track, track_no, existing_shots=existing_shots) + + +def read_from_file(path, clean=True): + timeline = otio.adapters.read_from_file(path) + build_sequence(timeline, clean=clean) + + +# ----------------------- +# parsing single track +# ----------------------- + +def _get_gap(duration): + rate = FPS.get(cmds.currentUnit(q=True, time=True), 25) + gap_range = otio.opentime.TimeRange( + duration=otio.opentime.RationalTime(duration, rate) + ) + return otio.schema.Gap(source_range=gap_range) + + +def _read_shot(shot): + rate = FPS.get(cmds.currentUnit(q=True, time=True), 25) + start = int(cmds.shot(shot, q=True, startTime=True)) + end = int(cmds.shot(shot, q=True, endTime=True)) + 1 + + video_reference = otio.schema.ExternalReference( + target_url=_video_url_for_shot(shot), + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(value=start, rate=rate), + otio.opentime.RationalTime(value=end - start, rate=rate) + ) + ) + + return otio.schema.Clip( + name=cmds.shot(shot, q=True, shotName=True), + media_reference=video_reference, + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(value=start, rate=rate), + otio.opentime.RationalTime(value=end - start, rate=rate) + ) + ) + + +def _read_track(shots): + v = otio.schema.Track(kind=otio.schema.track.TrackKind.Video) + + last_clip_end = 0 + for shot in shots: + seq_start = int(cmds.shot(shot, q=True, sequenceStartTime=True)) + seq_end = int(cmds.shot(shot, q=True, sequenceEndTime=True)) + + # add gap if necessary + fill_time = seq_start - last_clip_end + last_clip_end = seq_end + 1 + if fill_time: + v.append(_get_gap(fill_time)) + + # add clip + v.append(_read_shot(shot)) + + return v + + +def read_sequence(): + rate = FPS.get(cmds.currentUnit(q=True, time=True), 
25) + shots = cmds.ls(type='shot') or [] + per_track = {} + + for shot in shots: + track_no = cmds.shot(shot, q=True, track=True) + if track_no not in per_track: + per_track[track_no] = [] + per_track[track_no].append(shot) + + timeline = otio.schema.Timeline() + timeline.global_start_time = otio.opentime.RationalTime(0, rate) + + for track_no in reversed(sorted(per_track.keys())): + track_shots = per_track[track_no] + timeline.tracks.append(_read_track(track_shots)) + + return timeline + + +def write_to_file(path): + timeline = read_sequence() + otio.adapters.write_to_file(timeline, path) + + +def main(): + read_write_arg = sys.argv[1] + filepath = sys.argv[2] + + write = False + if read_write_arg == "write": + write = True + + if write: + # read the input OTIO off stdin + input_otio = otio.adapters.read_from_string( + sys.stdin.read(), + 'otio_json' + ) + build_sequence(input_otio, clean=True) + cmds.file(rename=filepath) + cmds.file(save=True, type="mayaAscii") + else: + cmds.file(filepath, o=True) + sys.stdout.write( + "\nOTIO_JSON_BEGIN\n" + + otio.adapters.write_to_string( + read_sequence(), + "otio_json" + ) + + "\nOTIO_JSON_END\n" + ) + + cmds.quit(force=True) + + +if __name__ == "__main__": + main() diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/extern_rv.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/extern_rv.py new file mode 100644 index 00000000000..f11295bb60a --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/extern_rv.py @@ -0,0 +1,327 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. 
This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""RV External Adapter component. + +Because the rv adapter requires being run from within the RV py-interp to take +advantage of modules inside of RV, this script gets shelled out to from the +RV OTIO adapter. + +Requires that you set the environment variables: + OTIO_RV_PYTHON_LIB - should point at the parent directory of rvSession + OTIO_RV_PYTHON_BIN - should point at py-interp from within rv +""" + +# python +import sys +import os + +# otio +import opentimelineio as otio + +# rv import +sys.path += [os.path.join(os.environ["OTIO_RV_PYTHON_LIB"], "rvSession")] +import rvSession # noqa + + +def main(): + """ entry point, should be called from the rv adapter in otio """ + + session_file = rvSession.Session() + + output_fname = sys.argv[1] + + # read the input OTIO off stdin + input_otio = otio.adapters.read_from_string(sys.stdin.read(), 'otio_json') + + result = write_otio(input_otio, session_file) + session_file.setViewNode(result) + session_file.write(output_fname) + + +# exception class @{ +class NoMappingForOtioTypeError(otio.exceptions.OTIOError): + pass +# @} + + +def write_otio(otio_obj, to_session, track_kind=None): + WRITE_TYPE_MAP = { + otio.schema.Timeline: _write_timeline, + otio.schema.Stack: _write_stack, + 
otio.schema.Track: _write_track, + otio.schema.Clip: _write_item, + otio.schema.Gap: _write_item, + otio.schema.Transition: _write_transition, + otio.schema.SerializableCollection: _write_collection, + } + + if type(otio_obj) in WRITE_TYPE_MAP: + return WRITE_TYPE_MAP[type(otio_obj)](otio_obj, to_session, track_kind) + + raise NoMappingForOtioTypeError( + str(type(otio_obj)) + " on object: {}".format(otio_obj) + ) + + +def _write_dissolve(pre_item, in_dissolve, post_item, to_session, track_kind=None): + rv_trx = to_session.newNode("CrossDissolve", str(in_dissolve.name)) + + rate = pre_item.trimmed_range().duration.rate + rv_trx.setProperty( + "CrossDissolve", + "", + "parameters", + "startFrame", + rvSession.gto.FLOAT, + 1.0 + ) + rv_trx.setProperty( + "CrossDissolve", + "", + "parameters", + "numFrames", + rvSession.gto.FLOAT, + int( + ( + in_dissolve.in_offset + + in_dissolve.out_offset + ).rescaled_to(rate).value + ) + ) + + rv_trx.setProperty( + "CrossDissolve", + "", + "output", + "fps", + rvSession.gto.FLOAT, + rate + ) + + pre_item_rv = write_otio(pre_item, to_session, track_kind) + rv_trx.addInput(pre_item_rv) + + post_item_rv = write_otio(post_item, to_session, track_kind) + + node_to_insert = post_item_rv + + if ( + hasattr(pre_item, "media_reference") + and pre_item.media_reference + and pre_item.media_reference.available_range + and hasattr(post_item, "media_reference") + and post_item.media_reference + and post_item.media_reference.available_range + and ( + post_item.media_reference.available_range.start_time.rate != + pre_item.media_reference.available_range.start_time.rate + ) + ): + # write a retime to make sure post_item is in the timebase of pre_item + rt_node = to_session.newNode("Retime", "transition_retime") + rt_node.setTargetFps( + pre_item.media_reference.available_range.start_time.rate + ) + + post_item_rv = write_otio(post_item, to_session, track_kind) + + rt_node.addInput(post_item_rv) + node_to_insert = rt_node + + 
rv_trx.addInput(node_to_insert) + + return rv_trx + + +def _write_transition( + pre_item, + in_trx, + post_item, + to_session, + track_kind=None +): + trx_map = { + otio.schema.TransitionTypes.SMPTE_Dissolve: _write_dissolve, + } + + if in_trx.transition_type not in trx_map: + return + + return trx_map[in_trx.transition_type]( + pre_item, + in_trx, + post_item, + to_session, + track_kind + ) + + +def _write_stack(in_stack, to_session, track_kind=None): + new_stack = to_session.newNode("Stack", str(in_stack.name) or "tracks") + + for seq in in_stack: + result = write_otio(seq, to_session, track_kind) + if result: + new_stack.addInput(result) + + return new_stack + + +def _write_track(in_seq, to_session, _=None): + new_seq = to_session.newNode("Sequence", str(in_seq.name) or "track") + + items_to_serialize = otio.algorithms.track_with_expanded_transitions( + in_seq + ) + + track_kind = in_seq.kind + + for thing in items_to_serialize: + if isinstance(thing, tuple): + result = _write_transition(*thing, to_session=to_session, + track_kind=track_kind) + elif thing.duration().value == 0: + continue + else: + result = write_otio(thing, to_session, track_kind) + + if result: + new_seq.addInput(result) + + return new_seq + + +def _write_timeline(tl, to_session, _=None): + result = write_otio(tl.tracks, to_session) + return result + + +def _write_collection(collection, to_session, track_kind=None): + results = [] + for item in collection: + result = write_otio(item, to_session, track_kind) + if result: + results.append(result) + + if results: + return results[0] + + +def _create_media_reference(item, src, track_kind=None): + if hasattr(item, "media_reference") and item.media_reference: + if isinstance(item.media_reference, otio.schema.ExternalReference): + media = [str(item.media_reference.target_url)] + + if track_kind == otio.schema.TrackKind.Audio: + # Create blank video media to accompany audio for valid source + blank = "{},start={},end={},fps={}.movieproc".format( + 
"blank", + item.available_range().start_time.value, + item.available_range().end_time_inclusive().value, + item.available_range().duration.rate + ) + # Inserting blank media here forces all content to only + # produce audio. We do it twice in case we look at this in + # stereo + media = [blank, blank] + media + + src.setMedia(media) + return True + + elif isinstance(item.media_reference, otio.schema.GeneratorReference): + if item.media_reference.generator_kind == "SMPTEBars": + kind = "smptebars" + src.setMedia( + [ + "{},start={},end={},fps={}.movieproc".format( + kind, + item.available_range().start_time.value, + item.available_range().end_time_inclusive().value, + item.available_range().duration.rate + ) + ] + ) + return True + + return False + + +def _write_item(it, to_session, track_kind=None): + src = to_session.newNode("Source", str(it.name) or "clip") + + src.setProperty( + "RVSourceGroup", + "source", + "attributes", + "otio_metadata", + rvSession.gto.STRING, str(it.metadata) + ) + + range_to_read = it.trimmed_range() + + if not range_to_read: + raise otio.exceptions.OTIOError( + "No valid range on clip: {0}.".format( + str(it) + ) + ) + + # because OTIO has no global concept of FPS, the rate of the duration is + # used as the rate for the range of the source. + # RationalTime.value_rescaled_to returns the time value of the object in + # time rate of the argument. 
+ src.setCutIn( + range_to_read.start_time.value_rescaled_to( + range_to_read.duration + ) + ) + src.setCutOut( + range_to_read.end_time_inclusive().value_rescaled_to( + range_to_read.duration + ) + ) + src.setFPS(range_to_read.duration.rate) + + # if the media reference is missing + if not _create_media_reference(it, src, track_kind): + kind = "smptebars" + if isinstance(it, otio.schema.Gap): + kind = "blank" + src.setMedia( + [ + "{},start={},end={},fps={}.movieproc".format( + kind, + range_to_read.start_time.value, + range_to_read.end_time_inclusive().value, + range_to_read.duration.rate + ) + ] + ) + + return src + + +if __name__ == "__main__": + main() diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/fcpx_xml.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/fcpx_xml.py new file mode 100644 index 00000000000..e219b58a1a6 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/fcpx_xml.py @@ -0,0 +1,1182 @@ +# +# Copyright 2018 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""OpenTimelineIO Final Cut Pro X XML Adapter. """ +import os +import subprocess +from xml.etree import cElementTree +from xml.dom import minidom +from fractions import Fraction +from datetime import date + +try: + from urllib import unquote +except ImportError: + from urllib.parse import unquote + +import opentimelineio as otio + +META_NAMESPACE = "fcpx_xml" + +COMPOSABLE_ELEMENTS = ("video", "audio", "ref-clip", "asset-clip") + +FRAMERATE_FRAMEDURATION = {23.98: "1001/24000s", + 24: "25/600s", + 25: "1/25s", + 29.97: "1001/30000s", + 30: "100/3000s", + 50: "1/50s", + 59.94: "1001/60000s", + 60: "1/60s"} + + +def format_name(frame_rate, path): + """ + Helper to get the formatName used in FCP X XML format elements. This + uses ffprobe to get the frame size of the the clip at the provided path. + + Args: + frame_rate (int): The frame rate of the clip at the provided path + path (str): The path to the clip to probe + + Returns: + str: The format name. 
If empty, then ffprobe couldn't find the item + """ + + path = path.replace("file://", "") + path = unquote(path) + if not os.path.exists(path): + return "" + + try: + frame_size = subprocess.check_output( + [ + "ffprobe", + "-v", + "error", + "-select_streams", + "v:0", + "-show_entries", + "stream=height,width", + "-of", + "csv=s=x:p=0", + path + ] + ) + except (subprocess.CalledProcessError, OSError): + frame_size = "" + + if not frame_size: + return "" + + frame_size = frame_size.rstrip() + + if "1920" in frame_size: + frame_size = "1080" + + if frame_size.endswith("1280"): + frame_size = "720" + + return "FFVideoFormat{}p{}".format(frame_size, frame_rate) + + +def to_rational_time(rational_number, fps): + """ + This converts a rational number value to an otio RationalTime object + + Args: + rational_number (str): This is a rational number from an FCP X XML + fps (int): The frame rate to use for calculating the rational time + + Returns: + RationalTime: A RationalTime object + """ + + if rational_number == "0s" or rational_number is None: + frames = 0 + else: + parts = rational_number.split("/") + if len(parts) > 1: + frames = int( + float(parts[0]) / float(parts[1].replace("s", "")) * float(fps) + ) + else: + frames = int(float(parts[0].replace("s", "")) * float(fps)) + + return otio.opentime.RationalTime(frames, int(fps)) + + +def from_rational_time(rational_time): + """ + This converts a RationalTime object to a rational number as a string + + Args: + rational_time (RationalTime): a rational time object + + Returns: + str: A rational number as a string + """ + + if int(rational_time.value) == 0: + return "0s" + result = Fraction( + float(rational_time.value) / float(rational_time.rate) + ).limit_denominator() + if str(result.denominator) == "1": + return "{}s".format(result.numerator) + return "{}/{}s".format(result.numerator, result.denominator) + + +class FcpxOtio(object): + """ + This object is responsible for knowing how to convert an otio into an + FCP 
X XML + """ + + def __init__(self, otio_timeline): + self.otio_timeline = otio_timeline + self.fcpx_xml = cElementTree.Element("fcpxml", version="1.8") + self.resource_element = cElementTree.SubElement( + self.fcpx_xml, + "resources" + ) + if self.otio_timeline.schema_name() == "Timeline": + self.timelines = [self.otio_timeline] + else: + self.timelines = list( + self.otio_timeline.each_child( + descended_from_type=otio.schema.Timeline + ) + ) + + if len(self.timelines) > 1: + self.event_resource = cElementTree.SubElement( + self.fcpx_xml, + "event", + {"name": self._event_name()} + ) + else: + self.event_resource = self.fcpx_xml + + self.resource_count = 0 + + def to_xml(self): + """ + Convert an otio to an FCP X XML + + Returns: + str: FCPX XML content + """ + + for project in self.timelines: + top_sequence = self._stack_to_sequence(project.tracks) + + project_element = cElementTree.Element( + "project", + { + "name": project.name, + "uid": project.metadata.get("fcpx", {}).get("uid", "") + } + ) + project_element.append(top_sequence) + self.event_resource.append(project_element) + + if not self.timelines: + for clip in self._clips(): + if not clip.parent(): + self._add_asset(clip) + + for stack in self._stacks(): + ref_element = self._element_for_item( + stack, + None, + ref_only=True, + compound=True + ) + self.event_resource.append(ref_element) + child_parent_map = {c: p for p in self.fcpx_xml.iter() for c in p} + + for marker in [marker for marker in self.fcpx_xml.iter("marker")]: + parent = child_parent_map.get(marker) + marker_attribs = marker.attrib.copy() + parent.remove(marker) + cElementTree.SubElement( + parent, + "marker", + marker_attribs + ) + + xml = cElementTree.tostring( + self.fcpx_xml, + encoding="UTF-8", + method="xml" + ) + dom = minidom.parseString(xml) + pretty = dom.toprettyxml(indent=" ") + return pretty.replace( + '', + '\n\n' + ) + + def _stack_to_sequence(self, stack, compound_clip=False): + format_element = 
self._find_or_create_format_from(stack) + sequence_element = cElementTree.Element( + "sequence", + { + "duration": self._calculate_rational_number( + stack.duration().value, + stack.duration().rate + ), + "format": str(format_element.get("id")) + } + ) + spine = cElementTree.SubElement(sequence_element, "spine") + video_tracks = [ + t for t in stack + if t.kind == otio.schema.TrackKind.Video + ] + audio_tracks = [ + t for t in stack + if t.kind == otio.schema.TrackKind.Audio + ] + + for idx, track in enumerate(video_tracks): + self._track_for_spine(track, idx, spine, compound_clip) + + for idx, track in enumerate(audio_tracks): + lane_id = -(idx + 1) + self._track_for_spine(track, lane_id, spine, compound_clip) + return sequence_element + + def _track_for_spine(self, track, lane_id, spine, compound): + for child in self._lanable_items(track.each_child()): + if self._item_in_compound_clip(child) and not compound: + continue + child_element = self._element_for_item( + child, + lane_id, + compound=compound + ) + if not lane_id: + spine.append(child_element) + continue + if child.schema_name() == "Gap": + continue + + parent_element = self._find_parent_element( + spine, + track.trimmed_range_of_child(child).start_time, + self._find_or_create_format_from(track).get("id") + ) + offset = self._offset_based_on_parent( + child_element, + parent_element, + self._find_or_create_format_from(track).get("id") + ) + child_element.set( + "offset", + from_rational_time(offset) + ) + + parent_element.append(child_element) + return [] + + def _find_parent_element(self, spine, trimmed_range, format_id): + for item in spine.iter(): + if item.tag not in ("clip", "asset-clip", "gap", "ref-clip"): + continue + if item.get("lane") is not None: + continue + if item.tag == "gap" and item.find("./audio") is not None: + continue + offset = to_rational_time( + item.get("offset"), + self._frame_rate_from_element(item, format_id) + ) + duration = to_rational_time( + item.get("duration"), + 
self._frame_rate_from_element(item, format_id) + ) + total_time = offset + duration + if offset > trimmed_range: + continue + if total_time > trimmed_range: + return item + return None + + def _offset_based_on_parent(self, child, parent, default_format_id): + parent_offset = to_rational_time( + parent.get("offset"), + self._frame_rate_from_element(parent, default_format_id) + ) + child_offset = to_rational_time( + child.get("offset"), + self._frame_rate_from_element(child, default_format_id) + ) + + parent_start = to_rational_time( + parent.get("start"), + self._frame_rate_from_element(parent, default_format_id) + ) + return (child_offset - parent_offset) + parent_start + + def _frame_rate_from_element(self, element, default_format_id): + if element.tag == "gap": + format_id = default_format_id + + if element.tag == "ref-clip": + media_element = self._media_by_id(element.get("ref")) + asset = media_element.find("./sequence") + format_id = asset.get("format") + + if element.tag == "clip": + if element.find("./gap") is not None: + asset_id = element.find("./gap").find("./audio").get("ref") + else: + asset_id = element.find("./video").get("ref") + asset = self._asset_by_id(asset_id) + format_id = asset.get("format") + + if element.tag == "asset-clip": + asset = self._asset_by_id(element.get("ref")) + format_id = asset.get("format") + + format_element = self.resource_element.find( + "./format[@id='{}']".format(format_id) + ) + total, rate = format_element.get("frameDuration").split("/") + rate = rate.replace("s", "") + return int(float(rate) / float(total)) + + def _element_for_item(self, item, lane, ref_only=False, compound=False): + element = None + duration = self._calculate_rational_number( + item.duration().value, + item.duration().rate + ) + if item.schema_name() == "Clip": + asset_id = self._add_asset(item, compound_only=compound) + element = self._element_for_clip(item, asset_id, duration, lane) + + if item.schema_name() == "Gap": + element = 
self._element_for_gap(item, duration) + + if item.schema_name() == "Stack": + element = self._element_for_stack(item, duration, ref_only) + + if element is None: + return None + if lane: + element.set("lane", str(lane)) + for marker in item.markers: + marker_attribs = { + "start": from_rational_time(marker.marked_range.start_time), + "duration": from_rational_time(marker.marked_range.duration), + "value": marker.name + } + marker_element = cElementTree.Element( + "marker", + marker_attribs + ) + if marker.color == otio.schema.MarkerColor.RED: + marker_element.set("completed", "0") + if marker.color == otio.schema.MarkerColor.GREEN: + marker_element.set("completed", "1") + element.append(marker_element) + return element + + def _lanable_items(self, items): + return [ + item for item in items + if item.schema_name() in ["Gap", "Stack", "Clip"] + ] + + def _element_for_clip(self, item, asset_id, duration, lane): + element = cElementTree.Element( + "clip", + { + "name": item.name, + "offset": from_rational_time( + item.trimmed_range_in_parent().start_time + ), + "duration": duration + } + ) + start = from_rational_time(item.source_range.start_time) + if start != "0s": + element.set("start", str(start)) + if item.parent().kind == otio.schema.TrackKind.Video: + cElementTree.SubElement( + element, + "video", + { + "offset": "0s", + "ref": asset_id, + "duration": self._find_asset_duration(item) + } + ) + else: + gap_element = cElementTree.SubElement( + element, + "gap", + { + "name": "Gap", + "offset": "0s", + "duration": self._find_asset_duration(item) + } + ) + audio = cElementTree.SubElement( + gap_element, + "audio", + { + "offset": "0s", + "ref": asset_id, + "duration": self._find_asset_duration(item) + } + ) + if lane: + audio.set("lane", str(lane)) + return element + + def _element_for_gap(self, item, duration): + element = cElementTree.Element( + "gap", + { + "name": "Gap", + "duration": duration, + "offset": from_rational_time( + 
item.trimmed_range_in_parent().start_time + ), + "start": "3600s" + } + ) + return element + + def _element_for_stack(self, item, duration, ref_only): + media_element = self._add_compound_clip(item) + asset_id = media_element.get("id") + element = cElementTree.Element( + "ref-clip", + { + "name": item.name, + "duration": duration, + "ref": str(asset_id) + } + ) + if not ref_only: + element.set( + "offset", + from_rational_time( + item.trimmed_range_in_parent().start_time + ) + ) + element.set( + "start", + from_rational_time(item.source_range.start_time) + ) + if item.parent() and item.parent().kind == otio.schema.TrackKind.Audio: + element.set("srcEnable", "audio") + return element + + def _find_asset_duration(self, item): + if (item.media_reference and + not item.media_reference.is_missing_reference): + return self._calculate_rational_number( + item.media_reference.available_range.duration.value, + item.media_reference.available_range.duration.rate + ) + return self._calculate_rational_number( + item.duration().value, + item.duration().rate + ) + + def _find_asset_start(self, item): + if (item.media_reference and + not item.media_reference.is_missing_reference): + return self._calculate_rational_number( + item.media_reference.available_range.start_time.value, + item.media_reference.available_range.start_time.rate + ) + return self._calculate_rational_number( + item.source_range.start_time.value, + item.source_range.start_time.rate + ) + + def _clip_format_name(self, clip): + if clip.schema_name() in ("Stack", "Track"): + return "" + if not clip.media_reference: + return "" + + if clip.media_reference.is_missing_reference: + return "" + + return format_name( + clip.duration().rate, + clip.media_reference.target_url + ) + + def _find_or_create_format_from(self, clip): + frame_duration = self._framerate_to_frame_duration( + clip.duration().rate + ) + format_element = self._format_by_frame_rate(clip.duration().rate) + if format_element is None: + format_element = 
cElementTree.SubElement( + self.resource_element, + "format", + { + "id": self._resource_id_generator(), + "frameDuration": frame_duration, + "name": self._clip_format_name(clip) + } + ) + if format_element.get("name", "") == "": + format_element.set("name", self._clip_format_name(clip)) + return format_element + + def _add_asset(self, clip, compound_only=False): + format_element = self._find_or_create_format_from(clip) + asset = self._create_asset_element(clip, format_element) + + if not compound_only and not self._asset_clip_by_name(clip.name): + self._create_asset_clip_element( + clip, + format_element, + asset.get("id") + ) + + if not clip.parent(): + asset.set("hasAudio", "1") + asset.set("hasVideo", "1") + return asset.get("id") + if clip.parent().kind == otio.schema.TrackKind.Audio: + asset.set("hasAudio", "1") + if clip.parent().kind == otio.schema.TrackKind.Video: + asset.set("hasVideo", "1") + return asset.get("id") + + def _create_asset_clip_element(self, clip, format_element, resource_id): + duration = self._find_asset_duration(clip) + a_clip = cElementTree.SubElement( + self.event_resource, + "asset-clip", + { + "name": clip.name, + "format": format_element.get("id"), + "ref": resource_id, + "duration": duration + } + ) + if clip.media_reference and not clip.media_reference.is_missing_reference: + fcpx_metadata = clip.media_reference.metadata.get("fcpx", {}) + note_element = self._create_note_element( + fcpx_metadata.get("note", None) + ) + keyword_elements = self._create_keyword_elements( + fcpx_metadata.get("keywords", []) + ) + metadata_element = self._create_metadata_elements( + fcpx_metadata.get("metadata", None) + ) + + if note_element is not None: + a_clip.append(note_element) + if keyword_elements: + for keyword_element in keyword_elements: + a_clip.append(keyword_element) + if metadata_element is not None: + a_clip.append(metadata_element) + + def _create_asset_element(self, clip, format_element): + target_url = 
self._target_url_from_clip(clip) + asset = self._asset_by_path(target_url) + if asset is not None: + return asset + + asset = cElementTree.SubElement( + self.resource_element, + "asset", + { + "name": clip.name, + "src": target_url, + "format": format_element.get("id"), + "id": self._resource_id_generator(), + "duration": self._find_asset_duration(clip), + "start": self._find_asset_start(clip), + "hasAudio": "0", + "hasVideo": "0" + } + ) + return asset + + def _add_compound_clip(self, item): + media_element = self._media_by_name(item.name) + if media_element is not None: + return media_element + resource_id = self._resource_id_generator() + media_element = cElementTree.SubElement( + self.resource_element, + "media", + { + "name": self._compound_clip_name(item, resource_id), + "id": resource_id + } + ) + if item.metadata.get("fcpx", {}).get("uid", False): + media_element.set("uid", item.metadata.get("fcpx", {}).get("uid")) + media_element.append(self._stack_to_sequence(item, compound_clip=True)) + return media_element + + def _stacks(self): + return self.otio_timeline.each_child( + descended_from_type=otio.schema.Stack + ) + + def _clips(self): + return self.otio_timeline.each_child( + descended_from_type=otio.schema.Clip + ) + + def _resource_id_generator(self): + self.resource_count += 1 + return "r{}".format(self.resource_count) + + def _event_name(self): + if self.otio_timeline.name: + return self.otio_timeline.name + return date.strftime(date.today(), "%m-%e-%y") + + def _asset_by_path(self, path): + return self.resource_element.find("./asset[@src='{}']".format(path)) + + def _asset_by_id(self, asset_id): + return self.resource_element.find("./asset[@id='{}']".format(asset_id)) + + def _media_by_name(self, name): + return self.resource_element.find("./media[@name='{}']".format(name)) + + def _media_by_id(self, media_id): + return self.resource_element.find("./media[@id='{}']".format(media_id)) + + def _format_by_frame_rate(self, frame_rate): + frame_duration = 
self._framerate_to_frame_duration(frame_rate) + return self.resource_element.find( + "./format[@frameDuration='{}']".format(frame_duration) + ) + + def _asset_clip_by_name(self, name): + return self.event_resource.find( + "./asset-clip[@name='{}']".format(name) + ) + + # -------------------- + # static methods + # -------------------- + + @staticmethod + def _framerate_to_frame_duration(framerate): + frame_duration = FRAMERATE_FRAMEDURATION.get(int(framerate), "") + if not frame_duration: + frame_duration = FRAMERATE_FRAMEDURATION.get(float(framerate), "") + return frame_duration + + @staticmethod + def _target_url_from_clip(clip): + if (clip.media_reference and + not clip.media_reference.is_missing_reference): + return clip.media_reference.target_url + return "file:///tmp/{}".format(clip.name) + + @staticmethod + def _calculate_rational_number(duration, rate): + if int(duration) == 0: + return "0s" + result = Fraction(float(duration) / float(rate)).limit_denominator() + return "{}/{}s".format(result.numerator, result.denominator) + + @staticmethod + def _compound_clip_name(compound_clip, resource_id): + if compound_clip.name: + return compound_clip.name + return "compound_clip_{}".format(resource_id) + + @staticmethod + def _item_in_compound_clip(item): + stack_count = 0 + parent = item.parent() + while parent is not None: + if parent.schema_name() == "Stack": + stack_count += 1 + parent = parent.parent() + return stack_count > 1 + + @staticmethod + def _create_metadata_elements(metadata): + if metadata is None: + return None + metadata_element = cElementTree.Element( + "metadata" + ) + for metadata_dict in metadata: + cElementTree.SubElement( + metadata_element, + "md", + { + "key": list(metadata_dict.keys())[0], + "value": list(metadata_dict.values())[0] + } + ) + return metadata_element + + @staticmethod + def _create_keyword_elements(keywords): + keyword_elements = [] + for keyword_dict in keywords: + keyword_elements.append( + cElementTree.Element( + 
"keyword", + keyword_dict + ) + ) + return keyword_elements + + @staticmethod + def _create_note_element(note): + if not note: + return None + note_element = cElementTree.Element( + "note" + ) + note_element.text = note + return note_element + + +class FcpxXml(object): + """ + This object is responsible for knowing how to convert an FCP X XML + otio into an otio timeline + """ + + def __init__(self, xml_string): + self.fcpx_xml = cElementTree.fromstring(xml_string) + self.child_parent_map = {c: p for p in self.fcpx_xml.iter() for c in p} + + def to_otio(self): + """ + Convert an FCP X XML to an otio + + Returns: + OpenTimeline: An OpenTimeline Timeline object + """ + + if self.fcpx_xml.find("./library") is not None: + return self._from_library() + if self.fcpx_xml.find("./event") is not None: + return self._from_event(self.fcpx_xml.find("./event")) + if self.fcpx_xml.find("./project") is not None: + return self._from_project(self.fcpx_xml.find("./project")) + if ((self.fcpx_xml.find("./asset-clip") is not None) or + (self.fcpx_xml.find("./ref-clip") is not None)): + return self._from_clips() + + def _from_library(self): + # We are just grabbing the first even in the project for now + return self._from_event(self.fcpx_xml.find("./library/event")) + + def _from_event(self, event_element): + container = otio.schema.SerializableCollection( + name=event_element.get("name") + ) + for project in event_element.findall("./project"): + container.append(self._from_project(project)) + return container + + def _from_project(self, project_element): + timeline = otio.schema.Timeline(name=project_element.get("name", "")) + timeline.tracks = self._squence_to_stack( + project_element.find("./sequence", {}) + ) + return timeline + + def _from_clips(self): + container = otio.schema.SerializableCollection() + if self.fcpx_xml.find("./asset-clip") is not None: + for asset_clip in self.fcpx_xml.findall("./asset-clip"): + container.append( + self._build_composable( + asset_clip, + 
asset_clip.get("format") + ) + ) + + if self.fcpx_xml.find("./ref-clip") is not None: + for ref_clip in self.fcpx_xml.findall("./ref-clip"): + container.append( + self._build_composable( + ref_clip, + "r1" + ) + ) + return container + + def _squence_to_stack(self, sequence_element, name="", source_range=None): + timeline_items = [] + lanes = [] + stack = otio.schema.Stack(name=name, source_range=source_range) + for element in sequence_element.iter(): + if element.tag not in COMPOSABLE_ELEMENTS: + continue + composable = self._build_composable( + element, + sequence_element.get("format") + ) + + offset, lane = self._offset_and_lane( + element, + sequence_element.get("format") + ) + + timeline_items.append( + { + "track": lane, + "offset": offset, + "composable": composable, + "audio_only": self._audio_only(element) + } + ) + + lanes.append(lane) + sorted_lanes = list(set(lanes)) + sorted_lanes.sort() + for lane in sorted_lanes: + sorted_items = self._sorted_items(lane, timeline_items) + track = otio.schema.Track( + name=lane, + kind=self._track_type(sorted_items) + ) + + for item in sorted_items: + frame_diff = ( + int(item["offset"].value) - track.duration().value + ) + if frame_diff > 0: + track.append( + self._create_gap( + 0, + frame_diff, + sequence_element.get("format") + ) + ) + track.append(item["composable"]) + stack.append(track) + return stack + + def _build_composable(self, element, default_format): + timing_clip = self._timing_clip(element) + source_range = self._time_range( + timing_clip, + self._format_id_for_clip(element, default_format) + ) + + if element.tag != "ref-clip": + otio_composable = otio.schema.Clip( + name=timing_clip.get("name"), + media_reference=self._reference_from_id( + element.get("ref"), + default_format + ), + source_range=source_range + ) + else: + media_element = self._compound_clip_by_id(element.get("ref")) + otio_composable = self._squence_to_stack( + media_element.find("./sequence"), + name=media_element.get("name"), + 
source_range=source_range + ) + + for marker in timing_clip.findall(".//marker"): + otio_composable.markers.append( + self._marker(marker, default_format) + ) + + return otio_composable + + def _marker(self, element, default_format): + if element.get("completed", None) and element.get("completed") == "1": + color = otio.schema.MarkerColor.GREEN + if element.get("completed", None) and element.get("completed") == "0": + color = otio.schema.MarkerColor.RED + if not element.get("completed", None): + color = otio.schema.MarkerColor.PURPLE + + otio_marker = otio.schema.Marker( + name=element.get("value", ""), + marked_range=self._time_range(element, default_format), + color=color + ) + return otio_marker + + def _audio_only(self, element): + if element.tag == "audio": + return True + if element.tag == "asset-clip": + asset = self._asset_by_id(element.get("ref", None)) + if asset and asset.get("hasVideo", "0") == "0": + return True + if element.tag == "ref-clip": + if element.get("srcEnable", "video") == "audio": + return True + return False + + def _create_gap(self, start_frame, number_of_frames, defualt_format): + fps = self._format_frame_rate(defualt_format) + source_range = otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime(start_frame, fps), + duration=otio.opentime.RationalTime(number_of_frames, fps) + ) + return otio.schema.Gap(source_range=source_range) + + def _timing_clip(self, clip): + while clip.tag not in ("clip", "asset-clip", "ref-clip"): + clip = self.child_parent_map.get(clip) + return clip + + def _offset_and_lane(self, clip, default_format): + clip_format_id = self._format_id_for_clip(clip, default_format) + clip = self._timing_clip(clip) + parent = self.child_parent_map.get(clip) + + parent_format_id = self._format_id_for_clip(parent, default_format) + + if parent.tag == "spine" and parent.get("lane", None): + lane = parent.get("lane") + parent = self.child_parent_map.get(parent) + spine = True + else: + lane = clip.get("lane", "0") + 
spine = False + + clip_offset_frames = self._number_of_frames( + clip.get("offset"), + clip_format_id + ) + + if spine: + parent_start_frames = 0 + else: + parent_start_frames = self._number_of_frames( + parent.get("start", None), + parent_format_id + ) + + parent_offset_frames = self._number_of_frames( + parent.get("offset", None), + parent_format_id + ) + + clip_offset_frames = ( + (int(clip_offset_frames) - int(parent_start_frames)) + + int(parent_offset_frames) + ) + + offset = otio.opentime.RationalTime( + clip_offset_frames, + self._format_frame_rate(clip_format_id) + ) + + return offset, lane + + def _format_id_for_clip(self, clip, default_format): + if not clip.get("ref", None) or clip.tag == "gap": + return default_format + + resource = self._asset_by_id(clip.get("ref")) + + if resource is None: + resource = self._compound_clip_by_id( + clip.get("ref") + ).find("sequence") + + return resource.get("format", default_format) + + def _reference_from_id(self, asset_id, default_format): + asset = self._asset_by_id(asset_id) + if not asset.get("src", ""): + return otio.schema.MissingReference() + + available_range = otio.opentime.TimeRange( + start_time=to_rational_time( + asset.get("start"), + self._format_frame_rate( + asset.get("format", default_format) + ) + ), + duration=to_rational_time( + asset.get("duration"), + self._format_frame_rate( + asset.get("format", default_format) + ) + ) + ) + asset_clip = self._assetclip_by_ref(asset_id) + metadata = {} + if asset_clip: + metadata = self._create_metadta(asset_clip) + return otio.schema.ExternalReference( + target_url=asset.get("src"), + available_range=available_range, + metadata={"fcpx": metadata} + ) + + def _create_metadta(self, item): + metadata = {} + for element in item.iter(): + if element.tag == "md": + metadata.setdefault("metadata", []).append( + {element.attrib.get("key"): element.attrib.get("value")} + ) + # metadata.update( + # {element.attrib.get("key"): element.attrib.get("value")} + # ) + if 
element.tag == "note": + metadata.update({"note": element.text}) + if element.tag == "keyword": + metadata.setdefault("keywords", []).append(element.attrib) + return metadata + + # -------------------- + # time helpers + # -------------------- + def _format_frame_duration(self, format_id): + media_format = self._format_by_id(format_id) + total, rate = media_format.get("frameDuration").split("/") + rate = rate.replace("s", "") + return total, rate + + def _format_frame_rate(self, format_id): + fd_total, fd_rate = self._format_frame_duration(format_id) + return int(float(fd_rate) / float(fd_total)) + + def _number_of_frames(self, time_value, format_id): + if time_value == "0s" or time_value is None: + return 0 + fd_total, fd_rate = self._format_frame_duration(format_id) + time_value = time_value.split("/") + + if len(time_value) > 1: + time_value_a, time_value_b = time_value + return int( + (float(time_value_a) / float(time_value_b.replace("s", ""))) * + (float(fd_rate) / float(fd_total)) + ) + + return int( + int(time_value[0].replace("s", "")) * + (float(fd_rate) / float(fd_total)) + ) + + def _time_range(self, element, format_id): + return otio.opentime.TimeRange( + start_time=to_rational_time( + element.get("start", "0s"), + self._format_frame_rate(format_id) + ), + duration=to_rational_time( + element.get("duration"), + self._format_frame_rate(format_id) + ) + ) + # -------------------- + # search helpers + # -------------------- + + def _asset_by_id(self, asset_id): + return self.fcpx_xml.find( + "./resources/asset[@id='{}']".format(asset_id) + ) + + def _assetclip_by_ref(self, asset_id): + event = self.fcpx_xml.find("./event") + if event is None: + return self.fcpx_xml.find("./asset-clip[@ref='{}']".format(asset_id)) + else: + return event.find("./asset-clip[@ref='{}']".format(asset_id)) + + def _format_by_id(self, format_id): + return self.fcpx_xml.find( + "./resources/format[@id='{}']".format(format_id) + ) + + def _compound_clip_by_id(self, compound_id): + 
return self.fcpx_xml.find( + "./resources/media[@id='{}']".format(compound_id) + ) + + # -------------------- + # static methods + # -------------------- + @staticmethod + def _track_type(lane_items): + audio_only_items = [l for l in lane_items if l["audio_only"]] + if len(audio_only_items) == len(lane_items): + return otio.schema.TrackKind.Audio + return otio.schema.TrackKind.Video + + @staticmethod + def _sorted_items(lane, otio_objects): + lane_items = [item for item in otio_objects if item["track"] == lane] + return sorted(lane_items, key=lambda k: k["offset"]) + + +# -------------------- +# adapter requirements +# -------------------- +def read_from_string(input_str): + """ + Necessary read method for otio adapter + + Args: + input_str (str): An FCP X XML string + + Returns: + OpenTimeline: An OpenTimeline object + """ + + return FcpxXml(input_str).to_otio() + + +def write_to_string(input_otio): + """ + Necessary write method for otio adapter + + Args: + input_otio (OpenTimeline): An OpenTimeline object + + Returns: + str: The string contents of an FCP X XML + """ + + return FcpxOtio(input_otio).to_xml() diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/ffmpeg_burnins.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/ffmpeg_burnins.py new file mode 100644 index 00000000000..28f0b97f55f --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/ffmpeg_burnins.py @@ -0,0 +1,424 @@ +# MIT License +# +# Copyright (c) 2017 Ed Caspersen +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright 
notice and this permission notice shall be included in +# allcopies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +""" +This module provides an interface to allow users to easily +build out an FFMPEG command with all the correct filters +for applying text (with a background) to the rendered media. +""" +import os +import sys +import json +from subprocess import Popen, PIPE +from PIL import ImageFont + + +def _is_windows(): + """ + queries if the current operating system is Windows + + :rtype: bool + """ + return sys.platform.startswith('win') or \ + sys.platform.startswith('cygwin') + + +def _system_font(): + """ + attempts to determine a default system font + + :rtype: str + """ + if _is_windows(): + font_path = os.path.join(os.environ['WINDIR'], 'Fonts') + fonts = ('arial.ttf', 'calibri.ttf', 'times.ttf') + elif sys.platform.startswith('darwin'): + font_path = '/System/Library/Fonts' + fonts = ('Menlo.ttc',) + else: + # assuming linux + font_path = 'usr/share/fonts/msttcorefonts' + fonts = ('arial.ttf', 'times.ttf', 'couri.ttf') + + system_font = None + backup = None + for font in fonts: + font = os.path.join(font_path, font) + if os.path.exists(font): + system_font = font + break + else: + if os.path.exists(font_path): + for each in os.listdir(font_path): + ext = os.path.splitext(each)[-1] + if ext[1:].startswith('tt'): + system_font = os.path.join(font_path, each) + return system_font or backup + + +# Default valuues +FONT = _system_font() +FONT_SIZE = 16 +FONT_COLOR = 'white' +BG_COLOR = 
'black' +BG_PADDING = 5 + +# FFMPEG command strings +FFMPEG = ('ffmpeg -loglevel panic -i %(input)s ' + '%(filters)s %(args)s%(output)s') +FFPROBE = ('ffprobe -v quiet -print_format json -show_format ' + '-show_streams %(source)s') +BOX = 'box=1:boxborderw=%(border)d:boxcolor=%(color)s@%(opacity).1f' +DRAWTEXT = ("drawtext=text='%(text)s':x=%(x)s:y=%(y)s:fontcolor=" + "%(color)s@%(opacity).1f:fontsize=%(size)d:fontfile='%(font)s'") +TIMECODE = ("drawtext=timecode='%(text)s':timecode_rate=%(fps).2f" + ":x=%(x)s:y=%(y)s:fontcolor=" + "%(color)s@%(opacity).1f:fontsize=%(size)d:fontfile='%(font)s'") + + +# Valid aligment parameters. +TOP_CENTERED = 'top_centered' +BOTTOM_CENTERED = 'bottom_centered' +TOP_LEFT = 'top_left' +BOTTOM_LEFT = 'bottom_left' +TOP_RIGHT = 'top_right' +BOTTOM_RIGHT = 'bottom_right' + + +class Options(dict): + """ + Base options class. + """ + _params = { + 'opacity': 1, + 'x_offset': 0, + 'y_offset': 0, + 'font': FONT, + 'font_size': FONT_SIZE, + 'bg_color': BG_COLOR, + 'bg_padding': BG_PADDING, + 'font_color': FONT_COLOR + } + + def __init__(self, **kwargs): + super(Options, self).__init__() + params = self._params.copy() + params.update(kwargs) + super(Options, self).update(**params) + + def __setitem__(self, key, value): + if key not in self._params: + raise KeyError("Not a valid option key '%s'" % key) + super(Options, self).update({key: value}) + + +class FrameNumberOptions(Options): + """ + :key int frame_offset: offset the frame numbers + :key float opacity: opacity value (0-1) + :key str expression: expression that would be used instead of text + :key bool x_offset: X position offset + (does not apply to centered alignments) + :key bool y_offset: Y position offset + :key str font: path to the font file + :key int font_size: size to render the font in + :key str bg_color: background color of the box + :key int bg_padding: padding between the font and box + :key str font_color: color to render + """ + + def __init__(self, **kwargs): + 
self._params.update({ + 'frame_offset': 0, + 'expression': None + }) + super(FrameNumberOptions, self).__init__(**kwargs) + + +class TextOptions(Options): + """ + :key float opacity: opacity value (0-1) + :key str expression: expression that would be used instead of text + :key bool x_offset: X position offset + (does not apply to centered alignments) + :key bool y_offset: Y position offset + :key str font: path to the font file + :key int font_size: size to render the font in + :key str bg_color: background color of the box + :key int bg_padding: padding between the font and box + :key str font_color: color to render + """ + + +class TimeCodeOptions(Options): + """ + :key int frame_offset: offset the frame numbers + :key float fps: frame rate to calculate the timecode by + :key float opacity: opacity value (0-1) + :key bool x_offset: X position offset + (does not apply to centered alignments) + :key bool y_offset: Y position offset + :key str font: path to the font file + :key int font_size: size to render the font in + :key str bg_color: background color of the box + :key int bg_padding: padding between the font and box + :key str font_color: color to render + """ + + def __init__(self, **kwargs): + self._params.update({ + 'frame_offset': 0, + 'fps': 24 + }) + super(TimeCodeOptions, self).__init__(**kwargs) + + +class Burnins(object): + """ + Class that provides convenience API for building filter + flags for the FFMPEG command. 
+ """ + + def __init__(self, source, streams=None): + """ + :param str source: source media file + :param [] streams: ffprobe stream data if parsed as a pre-process + """ + self.source = source + self.filters = { + 'drawtext': [] + } + self._streams = streams or _streams(self.source) + + def __repr__(self): + return '' % os.path.basename(self.source) + + @property + def start_frame(self): + """ + :rtype: int + """ + start_time = float(self._video_stream['start_time']) + return round(start_time * self.frame_rate) + + @property + def end_frame(self): + """ + :rtype: int + """ + end_time = float(self._video_stream['duration']) + return round(end_time * self.frame_rate) + + @property + def frame_rate(self): + """ + :rtype: int + """ + data = self._video_stream + tokens = data['r_frame_rate'].split('/') + return int(tokens[0]) / int(tokens[1]) + + @property + def _video_stream(self): + video_stream = None + for each in self._streams: + if each.get('codec_type') == 'video': + video_stream = each + break + else: + raise RuntimeError("Failed to locate video stream " + "from '%s'" % self.source) + return video_stream + + @property + def resolution(self): + """ + :rtype: (int, int) + """ + data = self._video_stream + return data['width'], data['height'] + + @property + def filter_string(self): + """ + Generates the filter string that would be applied + to the `-vf` argument + + :rtype: str + """ + return ','.join(self.filters['drawtext']) + + def add_timecode(self, align, options=None): + """ + Convenience method to create the frame number expression. 
+ + :param enum align: alignment, must use provided enum flags + :param dict options: recommended to use TimeCodeOptions + """ + options = options or TimeCodeOptions() + timecode = _frames_to_timecode(options['frame_offset'], + self.frame_rate) + options = options.copy() + if not options.get('fps'): + options['fps'] = self.frame_rate + self._add_burnin(timecode.replace(':', r'\:'), + align, + options, + TIMECODE) + + def add_frame_numbers(self, align, options=None): + """ + Convenience method to create the frame number expression. + + :param enum align: alignment, must use provided enum flags + :param dict options: recommended to use FrameNumberOptions + """ + options = options or FrameNumberOptions() + options['expression'] = r'%%{eif\:n+%d\:d}' % options['frame_offset'] + text = str(int(self.end_frame + options['frame_offset'])) + self._add_burnin(text, align, options, DRAWTEXT) + + def add_text(self, text, align, options=None): + """ + Adding static text to a filter. + + :param str text: text to apply to the drawtext + :param enum align: alignment, must use provided enum flags + :param dict options: recommended to use TextOptions + """ + options = options or TextOptions() + self._add_burnin(text, align, options, DRAWTEXT) + + def _add_burnin(self, text, align, options, draw): + """ + Generic method for building the filter flags. 
+ + :param str text: text to apply to the drawtext + :param enum align: alignment, must use provided enum flags + :param dict options: + """ + resolution = self.resolution + data = { + 'text': options.get('expression') or text, + 'color': options['font_color'], + 'size': options['font_size'] + } + data.update(options) + data.update(_drawtext(align, resolution, text, options)) + if 'font' in data and _is_windows(): + data['font'] = data['font'].replace(os.sep, r'\\' + os.sep) + data['font'] = data['font'].replace(':', r'\:') + self.filters['drawtext'].append(draw % data) + + if options.get('bg_color') is not None: + box = BOX % { + 'border': options['bg_padding'], + 'color': options['bg_color'], + 'opacity': options['opacity'] + } + self.filters['drawtext'][-1] += ':%s' % box + + def command(self, output=None, args=None, overwrite=False): + """ + Generate the entire FFMPEG command. + + :param str output: output file + :param str args: additional FFMPEG arguments + :param bool overwrite: overwrite the output if it exists + :returns: completed command + :rtype: str + """ + output = output or '' + if overwrite: + output = '-y %s' % output + return (FFMPEG % { + 'input': self.source, + 'output': output, + 'args': '%s ' % args if args else '', + 'filters': '-vf "%s"' % self.filter_string + }).strip() + + def render(self, output, args=None, overwrite=False): + """ + Render the media to a specified destination. 
+ + :param str output: output file + :param str args: additional FFMPEG arguments + :param bool overwrite: overwrite the output if it exists + """ + if not overwrite and os.path.exists(output): + raise RuntimeError("Destination '%s' exists, please " + "use overwrite" % output) + command = self.command(output=output, + args=args, + overwrite=overwrite) + proc = Popen(command, shell=True) + proc.communicate() + if proc.returncode != 0: + raise RuntimeError("Failed to render '%s': %s'" + % (output, command)) + if not os.path.exists(output): + raise RuntimeError("Failed to generate '%s'" % output) + + +def _streams(source): + """ + :param str source: source media file + :rtype: [{}, ...] + """ + command = FFPROBE % {'source': source} + proc = Popen(command, shell=True, stdout=PIPE) + out = proc.communicate()[0] + if proc.returncode != 0: + raise RuntimeError("Failed to run: %s" % command) + return json.loads(out)['streams'] + + +def _drawtext(align, resolution, text, options): + """ + :rtype: {'x': int, 'y': int} + """ + x_pos = '0' + if align in (TOP_CENTERED, BOTTOM_CENTERED): + x_pos = 'w/2-tw/2' + elif align in (TOP_RIGHT, BOTTOM_RIGHT): + ifont = ImageFont.truetype(options['font'], + options['font_size']) + box_size = ifont.getsize(text) + x_pos = resolution[0] - (box_size[0] + options['x_offset']) + elif align in (TOP_LEFT, BOTTOM_LEFT): + x_pos = options['x_offset'] + + if align in (TOP_CENTERED, + TOP_RIGHT, + TOP_LEFT): + y_pos = '%d' % options['y_offset'] + else: + y_pos = 'h-text_h-%d' % (options['y_offset']) + return {'x': x_pos, 'y': y_pos} + + +def _frames_to_timecode(frames, framerate): + return '{0:02d}:{1:02d}:{2:02d}:{3:02d}'.format( + int(frames / (3600 * framerate)), + int(frames / (60 * framerate) % 60), + int(frames / framerate % 60), + int(frames % framerate)) diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/hls_playlist.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/hls_playlist.py new file mode 100644 
index 00000000000..e0e3f8f8724 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/hls_playlist.py @@ -0,0 +1,1781 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""HLS Playlist OpenTimelineIO adapter + +This adapter supports authoring of HLS playlists within OpenTimelineIO by using +clips to represent media fragments. 
+ +Status: + - Export of Media Playlists well supported + - Export of Master Playlists supported + - Import of Media Playlists well supported + - Import of Master Playlists unsupported + - Explicit Variant Stream controls in Master Playlists unsupported + +In general, you can author otio as follows: + t = otio.schema.Timeline() + track = otio.schema.Track("v1") + track.metadata['HLS'] = { + "EXT-X-INDEPENDENT-SEGMENTS": None, + "EXT-X-PLAYLIST-TYPE": "VOD" + } + t.tracks.append(track) + + # Make a prototype media ref with the fragment's initialization metadata + fragmented_media_ref = otio.schema.ExternalReference( + target_url='video1.mp4', + metadata={ + "streaming": { + "init_byterange": { + "byte_count": 729, + "byte_offset": 0 + }, + "init_uri": "media-video-1.mp4" + } + } + ) + + # Make a copy of the media ref specifying the byte range for the fragment + media_ref1 = fragmented_media_ref.deepcopy() + media_ref1.available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 1), + otio.opentime.RationalTime(2.002, 1) + ) + media_ref1.metadata['streaming'].update( + { + "byte_count": 534220, + "byte_offset": 1361 + } + ) + + # make the fragment and append it + fragment1 = otio.schema.Clip(media_reference=media_ref1) + track.append(fragment1) + + # (repeat to define each fragment) + +The code above would yield an HLS playlist like: + #EXTM3U + #EXT-X-VERSION:7 + #EXT-X-TARGETDURATION:2 + #EXT-X-PLAYLIST-TYPE:VOD + #EXT-X-INDEPENDENT-SEGMENTS + #EXT-X-MEDIA-SEQUENCE:1 + #EXT-X-MAP:BYTERANGE="729@0",URI="media-video-1.mp4" + #EXTINF:2.00200, + #EXT-X-BYTERANGE:534220@1361 + video1.mp4 + #EXT-X-ENDLIST + +If you add min_segment_duration and max_segment_duration to the timeline's +metadata dictionary as RationalTime objects, you can control the rule set +deciding how many fragments to accumulate into a single segment. When nothing +is specified for these metadata keys, the adapter will create one segment per +fragment. 
+ +In general, any metadata added to the track metadata dict under the HLS +namespace will be included at the top level of the exported playlist (see +``EXT-X-INDEPENDENT-SEGMENTS`` and ``EXT-X-PLAYLIST-TYPE`` in the example +above). Each segment will pass through any metadata in the HLS namespace from +the media_reference. + +If you write a Timeline with more than one track specified, then the adapter +will create an HLS master playlist. + +The following track metadata keys will be used to inform exported master +playlist metadata per variant stream: + bandwidth + codec + language + mimeType + group_id (audio) + autoselect (audio) + default (audio) +These values are translated to EXT-X-STREAM-INF and EXT-X-MEDIA +attributes as defined in sections 4.3.4.2 and 4.3.4.1 of +draft-pantos-http-live-streaming, respectively. +""" + +import re +import copy + +import opentimelineio as otio + +# TODO: determine output version based on features used +OUTPUT_PLAYLIST_VERSION = "7" + +# TODO: make sure all strings get sanitized through encoding and decoding +PLAYLIST_STRING_ENCODING = "utf-8" + +# Enable isinstance(my_instance, basestring) tests in Python 3 +# This can be phased out when Python 2 support is dropped. Replace tests with: +# isinstance(my_instance, str) + +try: + basestring +except NameError: + basestring = str + +""" +Matches a single key/value pair from an HLS Attribute List. +See section 4.2 of draft-pantos-http-live-streaming for more detail. +""" +ATTRIBUTE_RE = re.compile( + r'(?P[A-Z0-9-]+)' + r'\=' + + r'(?P(?:\"[^\r\n"]*\")|[^,]+)' + r',?' +) + +""" +Matches AttributeValue of the above regex into appropriate data types. +Note that these are meant to be joined using regex "or" in this order. 
+""" +_ATTRIBUTE_RE_VALUE_STR_LIST = [ + r'(?P(?P[0-9]+)x(?P[0-9]+))\Z', + r'(?P0[xX](?P[0-9A-F]+))\Z', + r'(?P-?[0-9]+\.[0-9]+)\Z', + r'(?P[0-9]+)\Z', + r'(?P\"(?P[^\r\n"]*)\")\Z', + r'(?P[^",\s]+)\Z' +] +ATTRIBUTE_VALUE_RE = re.compile("|".join(_ATTRIBUTE_RE_VALUE_STR_LIST)) + +""" +Matches a byterange as used in various contexts. +See section 4.3.2.2 of draft-pantos-http-live-streaming for an example use of +this byterange form. +""" +BYTERANGE_RE = re.compile(r'(?P\d+)(?:@(?P\d+))?') + +""" +Matches HLS Playlist tags or comments, respective. +See section 4.1 of draft-pantos-http-live-streaming for more detail. +""" +TAG_RE = re.compile( + r'#(?PEXT[^:\s]+)(?P:?)(?P.*)' +) +COMMENT_RE = re.compile(r'#(?!EXT)(?P.*)') + + +class AttributeListEnum(str): + """ A subclass allowing us to differentiate enums in HLS attribute lists + """ + + +def _value_from_raw_attribute_value(raw_attribute_value): + """ + Takes in a raw AttributeValue and returns an appopritate Python type. + If there is a problem decoding the value, None is returned. + """ + value_match = ATTRIBUTE_VALUE_RE.match(raw_attribute_value) + if not value_match: + return None + + group_dict = value_match.groupdict() + # suss out the match + for k, v in group_dict.items(): + # not a successful group match + if v is None: + continue + + # decode the string + if k == 'resolution': + return v + elif k == 'enumerated': + return AttributeListEnum(v) + elif k == 'hexcidecimal': + return int(group_dict['hex_value'], base=16) + elif k == 'floating_point': + return float(v) + elif k == 'decimal': + return int(v) + elif k == 'string': + # grab only the data within the quotes, excluding the quotes + string_value = group_dict['string_value'] + return string_value + + return None + + +class AttributeList(dict): + """ + Dictionary-like object representing an HLS AttributeList. + See section 4.2 of draft-pantos-http-live-streaming for more detail. 
+ """ + + def __init__(self, other=None): + """ + contstructs an :class:`AttributeList`. + + ``Other`` can be either another dictionary-like object or a list of + key/value pairs + """ + if not other: + return + + try: + items = other.items() + except AttributeError: + items = other + + for k, v in items: + self[k] = v + + def __str__(self): + """ + Construct attribute list string as it would exist in an HLS playlist. + """ + attr_list_entries = [] + # Use a sorted version of the dictionary to ensure consistency + for k, v in sorted(self.items(), key=lambda i: i[0]): + out_value = '' + if isinstance(v, AttributeListEnum): + out_value = v + elif isinstance(v, basestring): + out_value = '"{}"'.format(v) + else: + out_value = str(v) + + attr_list_entries.append('{}={}'.format(k, out_value)) + + return ','.join(attr_list_entries) + + @classmethod + def from_string(cls, attrlist_string): + """ + Accepts an attribute list string and returns an :class:`AttributeList`. + + The values will be transformed to Python types. + """ + attr_list = cls() + match = ATTRIBUTE_RE.search(attrlist_string) + while match: + # unpack the values from the match + group_dict = match.groupdict() + name = group_dict['AttributeName'] + raw_value = group_dict['AttributeValue'] + + # parse the raw value + value = _value_from_raw_attribute_value(raw_value) + attr_list[name] = value + + # search for the next attribute in the string + match_end = match.span()[1] + match = ATTRIBUTE_RE.search(attrlist_string, match_end) + + return attr_list + + +# some special top-levle keys that HLS metadata will be decoded into +FORMAT_METADATA_KEY = 'HLS' +""" +Some concepts are translatable between HLS and other streaming formats (DASH). +These metadata keys are used on OTIO objects outside the HLS namespace because +they are higher level concepts. 
+""" +STREAMING_METADATA_KEY = 'streaming' +INIT_BYTERANGE_KEY = 'init_byterange' +INIT_URI_KEY = 'init_uri' +SEQUENCE_NUM_KEY = 'sequence_num' +BYTE_OFFSET_KEY = 'byte_offset' +BYTE_COUNT_KEY = 'byte_count' + + +class Byterange(object): + """Offers interpretation of HLS byte ranges in various forms.""" + + count = None + """(:class:`int`) Number of bytes included in the range.""" + + offset = None + """(:class:`int`) Byte offset at which the range starts.""" + + def __init__(self, count=None, offset=None): + """Constructs a :class:`Byterange` object. + + :param count: (:class:`int`) Number of bytes included in the range. + :param offset: (:class:`int`) Byte offset at which the range starts. + """ + self.count = (count if count is not None else 0) + self.offset = offset + + def __eq__(self, other): + if not isinstance(other, Byterange): + # fall back on identity, this should always be False + return (self is other) + return (self.count == other.count and self.offset == other.offset) + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return '{}(offset = {}, count = {})'.format( + type(self), + str(self.offset), + str(self.count) + ) + + def __str__(self): + """returns a string in HLS format""" + + out_str = str(self.count) + if self.offset is not None: + out_str += '@{}'.format(str(self.offset)) + + return out_str + + def to_dict(self): + """Returns a dict suitable for storing in otio metadata. + + :return: (:class:`dict`) serializable version of byterange. + """ + range_dict = {BYTE_COUNT_KEY: self.count} + if self.offset is not None: + range_dict[BYTE_OFFSET_KEY] = self.offset + + return range_dict + + @classmethod + def from_string(cls, byterange_string): + """Construct a :class:`Byterange` given a string in HLS format. + + :param byterange_string: (:class:`str`) a byterange string. + :return: (:class:`Byterange`) The instance for the provided string. 
+ """ + m = BYTERANGE_RE.match(byterange_string) + + return cls.from_match_dict(m.groupdict()) + + @classmethod + def from_match_dict(cls, match_dict): + """ + Construct a :class:`Byterange` given a groupdict from ``BYTERANGE_RE`` + + :param match_dict: (:class:`dict`) the ``match_dict``. + :return: (:class:`Byterange`) The instance for the provided string. + """ + byterange = cls(count=int(match_dict['n'])) + + try: + byterange.offset = int(match_dict['o']) + except KeyError: + pass + + return byterange + + @classmethod + def from_dict(cls, info_dict): + """ Creates a :class:`Byterange` given a dictionary containing keys + like generated from the :meth:`to_dict method`. + + :param info_dict: (:class:`dict`) Dictionary byterange. + :return: (:class:`Byterange`) an equivalent instance. + """ + byterange = cls( + count=info_dict.get(BYTE_COUNT_KEY), + offset=info_dict.get(BYTE_OFFSET_KEY) + ) + + return byterange + + +""" +For a given collection of media, HLS has two playlist types: + - Media Playlist + - Master Playlist + +The media playlist refers directly to the individual segments that make up an +audio or video track of a given program. The master playlist refers to a +collection of media playlists and provides ways to use them together +(rendition groups). + +See section 2 of draft-pantos-http-live-streaming for more detail. + +The constants below define which tags belong to which schema. +""" + +""" +Basic tags appear in both media and master playlists. +See section 4.3.1 of draft-pantos-http-live-streaming for more detail. +""" +BASIC_TAGS = set([ + "EXTM3U", + "EXT-X-VERSION" +]) + +""" +Media segment tags apply to either the following media or all subsequent +segments. They MUST NOT appear in master playlists. +See section 4.3.2 of draft-pantos-http-live-streaming for more detail. 
+""" +MEDIA_SEGMENT_TAGS = set([ + 'EXTINF', + 'EXT-X-BYTERANGE', + 'EXT-X-DISCONTINUITY', + 'EXT-X-KEY', + 'EXT-X-MAP', + 'EXT-X-PROGRAM-DATE-TIME', + 'EXT-X-DATERANGE' +]) + +""" The subset of above tags that apply to every segment following them """ +MEDIA_SEGMENT_SUBSEQUENT_TAGS = set([ + 'EXT-X-KEY', + 'EXT-X-MAP', +]) + +""" +Media Playlist tags must only occur once per playlist, and must not appear in +Master Playlists. +See section 4.3.3 of draft-pantos-http-live-streaming for more detail. +""" +MEDIA_PLAYLIST_TAGS = set([ + 'EXT-X-TARGETDURATION', + 'EXT-X-MEDIA-SEQUENCE', + 'EXT-X-DISCONTINUITY-SEQUENCE', + 'EXT-X-ENDLIST', + 'EXT-X-PLAYLIST-TYPE', + 'EXT-X-I-FRAMES-ONLY' +]) + +""" +Master playlist tags declare global parameters for the presentation. +They must not appear in media playlists. +See section 4.3.4 of draft-pantos-http-live-streaming for more detail. +""" +MASTER_PLAYLIST_TAGS = set([ + 'EXT-X-MEDIA', + 'EXT-X-STREAM-INF', + 'EXT-X-I-FRAME-STREAM-INF', + 'EXT-X-SESSION-DATA', + 'EXT-X-SESSION-KEY', +]) + +""" +Media or Master Playlist tags can appear in either media or master playlists. +See section 4.3.5 of draft-pantos-http-live-streaming for more detail. +These tags SHOULD appear in either the media or master playlist. If they occur +in both, their values MUST agree. +These values MUST NOT appear more than once in a playlist. +""" +MEDIA_OR_MASTER_TAGS = set([ + "EXT-X-INDEPENDENT-SEGMENTS", + "EXT-X-START" +]) + +""" +Some special tags used by the parser. +""" +PLAYLIST_START_TAG = "EXTM3U" +PLAYLIST_END_TAG = "EXT-X-ENDLIST" +PLAYLIST_VERSION_TAG = "EXT-X-VERSION" +PLAYLIST_SEGMENT_INF_TAG = "EXTINF" + +""" +attribute list entries to omit from EXT-I-FRAME-STREAM-INF tags +See section 4.3.4.3 of draft-pantos-http-live-streaming for more detail. 
+""" +I_FRAME_OMIT_ATTRS = set([ + 'FRAME-RATE', + 'AUDIO', + 'SUBTITLES', + 'CLOSED-CAPTIONS' +]) + +""" enum for kinds of playlist entries """ +EntryType = type('EntryType', (), { + 'tag': 'tag', + 'comment': 'comment', + 'URI': 'URI' +}) + +""" enum for types of playlists """ +PlaylistType = type('PlaylistType', (), { + 'media': 'media', + 'master': 'master' +}) + +""" mapping from HLS track type to otio ``TrackKind`` """ +HLS_TRACK_TYPE_TO_OTIO_KIND = { + AttributeListEnum('AUDIO'): otio.schema.TrackKind.Audio, + AttributeListEnum('VIDEO'): otio.schema.TrackKind.Video, + # TODO: determine how to handle SUBTITLES and CLOSED-CAPTIONS +} + +""" mapping from otio ``TrackKind`` to HLS track type """ +OTIO_TRACK_KIND_TO_HLS_TYPE = dict(( + (v, k) for k, v in HLS_TRACK_TYPE_TO_OTIO_KIND.items() +)) + + +class HLSPlaylistEntry(object): + """An entry in an HLS playlist. + + Entries can be a tag, a comment, or a URI. All HLS playlists are parsed + into lists of :class:`HLSPlaylistEntry` instances that can then be + interpreted against the HLS schema. + """ + + # TODO: rename this to entry_type to fix builtin masking + # type = None + """ (``EntryType``) the type of entry """ + + comment_string = None + """ + (:class:`str`) value of comment (if the ``entry_type`` is + ``EntryType.comment``). + """ + + tag_name = None + """ + (:class:`str`) Name of tag (if the ``entry_type`` is ``EntryType.tag``). + """ + + tag_value = None + """ + (:class:`str`) Value of tag (if the ``entry_type`` is ``EntryType.tag``). + """ + + uri = None + """ + (:class:`str`) Value of the URI (if the ``entry_type is ``EntryType.uri``). + """ + + def __init__(self, type): + """ + Constructs an :class:`HLSPlaylistEntry`. + + :param type: (``EntryType``) Type of entry. 
+ """ + self.type = type + + def __repr__(self): + base_str = 'otio.adapter.HLSPlaylistEntry(type={}'.format( + self.type) + if self.type == EntryType.tag: + base_str += ', tag_name={}, tag_value={}'.format( + repr(self.tag_name), + repr(self.tag_value) + ) + elif self.type == EntryType.comment: + base_str += ', comment={}'.format(repr(self.comment_string)) + elif self.type == EntryType.URI: + base_str += ', URI={}'.format(repr(self.uri)) + + return base_str + ')' + + def __str__(self): + """ + Returns a string as it would appear in an HLS playlist. + + :return: (:class:`str`) HLS playlist entry string. + """ + if self.type == EntryType.comment and self.comment_string: + return "# {}".format(self.comment_string) + elif self.type == EntryType.comment: + # empty comments are blank lines + return "" + elif self.type == EntryType.URI: + return self.uri + elif self.type == EntryType.tag: + out_tag_name = self.tag_name + if self.tag_value is not None: + return '#{}:{}'.format(out_tag_name, self.tag_value) + else: + return '#{}'.format(out_tag_name) + + @classmethod + def tag_entry(cls, name, value=None): + """ + Creates an ``EntryType.tag`` :class:`HLSPlaylistEntry`. + + :param name: (:class:`str`) tag name. + :param value: (:class:`str`) tag value. + :return: (:class:`HLSPlaylistEntry`) Entry instance. + """ + entry = cls(EntryType.tag) + entry.tag_name = name + entry.tag_value = value + + return entry + + @classmethod + def comment_entry(cls, comment): + """Creates an ``EntryType.comment`` :class:`HLSPlaylistEntry`. + + :param comment: (:class:`str`) the comment. + :return: (:class:`HLSPlaylistEntry`) Entry instance. + """ + entry = cls(EntryType.comment) + entry.comment_string = comment + + return entry + + @classmethod + def uri_entry(cls, uri): + """Creates an ``EntryType.uri`` :class:`HLSPlaylistEntry`. + + :param uri: (:class:`str`) A URI string. + :return: (:class:`HLSPlaylistEntry`) Entry instance. 
+ """ + entry = cls(EntryType.URI) + entry.uri = uri + + return entry + + @classmethod + def from_string(cls, entry_string): + """Creates an `:class:`HLSPlaylistEntry` given a string as it appears + in an HLS playlist. + + :param entry_string: (:class:`str`) String from an HLS playlist. + :return: (:class:`HLSPlaylistEntry`) Entry instance. + """ + # Empty lines are skipped + if not entry_string.strip(): + return None + + # Attempt to parse as a tag + m = TAG_RE.match(entry_string) + if m: + group_dict = m.groupdict() + tag_value = ( + group_dict['tagvalue'] + if group_dict['hasvalue'] else None + ) + entry = cls.tag_entry(group_dict['tagname'], tag_value) + return entry + + # Attempt to parse as a comment + m = COMMENT_RE.match(entry_string) + if m: + entry = cls.comment_entry(m.groupdict()['comment']) + return entry + + # If it's not the others, treat as a URI + entry = cls.uri_entry(entry_string) + + return entry + + """A dispatch dictionary for grabbing the right Regex to parse tags.""" + TAG_VALUE_RE_MAP = { + "EXTINF": re.compile(r'(?P\d+(\.\d*)?),(?P.*$)'), + "EXT-X-BYTERANGE": BYTERANGE_RE, + "EXT-X-KEY": re.compile(r'(?P<attribute_list>.*$)'), + "EXT-X-MAP": re.compile(r'(?P<attribute_list>.*$)'), + "EXT-X-MEDIA-SEQUENCE": re.compile(r'(?P<number>\d+)'), + "EXT-X-PLAYLIST-TYPE": re.compile(r'(?P<type>EVENT|VOD)'), + PLAYLIST_VERSION_TAG: re.compile(r'(?P<n>\d+)') + } + + def parsed_tag_value(self, playlist_version=None): + """Parses and returns ``self.tag_value`` based on the HLS schema. + + The value will be a dictionary where the keys are the names used in the + draft Pantos HTTP Live Streaming doc. When "attribute-list" is + specified, an entry "attribute_list" will be present containing + an :class:`AttributeList` instance. + + :param playlist_version: (:class:`int`) version number of the playlist. + If none is provided, a best guess will be made. + :return: The parsed value. 
+ """ + if self.type != EntryType.tag: + return None + + try: + tag_re = self.TAG_VALUE_RE_MAP[self.tag_name] + except KeyError: + return None + + # parse the tag + m = tag_re.match(self.tag_value) + group_dict = m.groupdict() + + if not m: + return None + + # If the tag value has an attribute list, parse it and add it + try: + attribute_list = group_dict['attribute_list'] + attr_list = AttributeList.from_string(attribute_list) + group_dict['attributes'] = attr_list + except KeyError: + pass + + return group_dict + + +class HLSPlaylistParser(object): + """Bootstraps HLS parsing and hands the playlist string off to the + appropriate parser for the type + """ + + def __init__(self, edl_string): + self.timeline = otio.schema.Timeline() + self.playlist_type = None + + self._parse_playlist(edl_string) + + def _parse_playlist(self, edl_string): + """Parses the HLS Playlist string line-by-line.""" + # parse lines until we encounter one that identifies the playlist type + # then hand off + start_encountered = False + end_encountered = False + playlist_entries = [] + playlist_version = 1 + for line in edl_string.splitlines(): + # attempt to parse the entry + entry = HLSPlaylistEntry.from_string(line) + if entry is None: + continue + + entry_is_tag = (entry.type == EntryType.tag) + + # identify if the playlist start/end is encountered + if (entry_is_tag and not (start_encountered and end_encountered)): + if entry.tag_name == PLAYLIST_START_TAG: + start_encountered = True + elif entry.tag_name == PLAYLIST_END_TAG: + end_encountered = True + + # if the playlist starting tag hasn't been encountered, ignore + if not start_encountered: + continue + + # Store the parsed entry + playlist_entries.append(entry) + + # Determine if this tells us the playlist type + if not self.playlist_type and entry_is_tag: + if entry.tag_name in MASTER_PLAYLIST_TAGS: + self.playlist_type = PlaylistType.master + elif entry.tag_name in MEDIA_PLAYLIST_TAGS: + self.playlist_type = PlaylistType.media + + 
if end_encountered: + break + + # try to grab the version from the playlist + if entry_is_tag and entry.tag_name == PLAYLIST_VERSION_TAG: + playlist_version = int(entry.parsed_tag_value()['n']) + + # dispatch to the appropriate schema interpreter + if self.playlist_type is None: + self.timeline = None + raise otio.exceptions.ReadingNotSupportedError( + "could not determine playlist type" + ) + elif self.playlist_type == PlaylistType.master: + self.timeline = None + raise otio.exceptions.AdapterDoesntSupportFunction( + "HLS master playlists are not yet supported" + ) + elif self.playlist_type == PlaylistType.media: + parser = MediaPlaylistParser(playlist_entries, playlist_version) + if len(parser.track): + self.timeline.tracks.append(parser.track) + + +class MediaPlaylistParser(object): + """Parses an HLS Media playlist returning a SEQUENCE""" + + def __init__(self, playlist_entries, playlist_version=None): + self.track = otio.schema.Track( + metadata={FORMAT_METADATA_KEY: {}} + ) + + self._parse_entries(playlist_entries, playlist_version) + + def _handle_track_metadata(self, entry, playlist_version, clip): + """Stashes the tag value in the track metadata""" + value = entry.tag_value + self.track.metadata[FORMAT_METADATA_KEY][entry.tag_name] = value + + def _handle_discarded_metadata(self, entry, playlist_version, clip): + """Handler for tags that are discarded. This is done when a tag's + information is represented by the native OTIO concepts. + + For instance, the EXT-X-TARGETDURATION tag simply gives a rounded + value for the maximum segment size in the playlist. This can easily + be found in OTIO by examining the clips. 
+ """ + # Do nothing + + def _metadata_dict_for_MAP(self, entry, playlist_version): + entry_data = entry.parsed_tag_value() + attributes = entry_data['attributes'] + map_dict = {} + for attr, value in attributes.items(): + if attr == 'BYTERANGE': + byterange = Byterange.from_string(value) + map_dict[INIT_BYTERANGE_KEY] = byterange.to_dict() + elif attr == 'URI': + map_dict[INIT_URI_KEY] = value + + return map_dict + + def _handle_INF(self, entry, playlist_version, clip): + # This specifies segment duration and optional title + info_dict = entry.parsed_tag_value(playlist_version) + segment_duration = float(info_dict['duration']) + segment_title = info_dict['title'] + available_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 1), + otio.opentime.RationalTime(segment_duration, 1) + ) + + # Push the info to the clip + clip.media_reference.available_range = available_range + clip.source_range = available_range + clip.name = segment_title + + def _handle_BYTERANGE(self, entry, playlist_version, clip): + reference_metadata = clip.media_reference.metadata + ref_streaming_metadata = reference_metadata.setdefault( + STREAMING_METADATA_KEY, + {} + ) + + # Pull out the byte count and offset + byterange = Byterange.from_match_dict( + entry.parsed_tag_value(playlist_version) + ) + ref_streaming_metadata.update(byterange.to_dict()) + + """ + Specifies handlers for specific HLS tags. 
+ """ + TAG_HANDLERS = { + "EXTINF": _handle_INF, + PLAYLIST_VERSION_TAG: _handle_track_metadata, + "EXT-X-TARGETDURATION": _handle_discarded_metadata, + "EXT-X-MEDIA-SEQUENCE": _handle_discarded_metadata, + "EXT-X-PLAYLIST-TYPE": _handle_track_metadata, + "EXT-X-INDEPENDENT-SEGMENTS": _handle_track_metadata, + "EXT-X-BYTERANGE": _handle_BYTERANGE + } + + def _parse_entries(self, playlist_entries, playlist_version): + """Interpret the entries through the lens of the schema""" + current_clip = otio.schema.Clip( + media_reference=otio.schema.ExternalReference( + metadata={ + FORMAT_METADATA_KEY: {}, + STREAMING_METADATA_KEY: {} + } + ) + ) + current_media_ref = current_clip.media_reference + segment_metadata = {} + current_map_data = {} + # per section 4.3.3.2 of Pantos HLS, 0 is default start track + current_track = 0 + for entry in playlist_entries: + if entry.type == EntryType.URI: + # the URI ends the segment definition + current_media_ref.target_url = entry.uri + current_media_ref.metadata[FORMAT_METADATA_KEY].update( + segment_metadata + ) + current_media_ref.metadata[STREAMING_METADATA_KEY].update( + current_map_data + ) + current_clip.metadata.setdefault( + STREAMING_METADATA_KEY, + {} + )[SEQUENCE_NUM_KEY] = current_track + self.track.append(current_clip) + current_track += 1 + + # Set up the next segment definition + current_clip = otio.schema.Clip( + media_reference=otio.schema.ExternalReference( + metadata={ + FORMAT_METADATA_KEY: {}, + STREAMING_METADATA_KEY: {} + } + ) + ) + current_media_ref = current_clip.media_reference + continue + elif entry.type != EntryType.tag: + # the rest of the code deals only with tags + continue + + # Explode the EXT-X-MAP info out + if entry.tag_name == "EXT-X-MAP": + map_data = self._metadata_dict_for_MAP(entry, playlist_version) + current_map_data.update(map_data) + continue + + # Grab the track when it comes around + if entry.tag_name == "EXT-X-MEDIA-SEQUENCE": + entry_data = entry.parsed_tag_value() + current_track = 
int(entry_data['number']) + + # If the segment tag is one that applies to all that follow + # store the value to be applied to each segment + if entry.tag_name in MEDIA_SEGMENT_SUBSEQUENT_TAGS: + segment_metadata[entry.tag_name] = entry.tag_value + continue + + # use a handler if available + try: + handler = self.TAG_HANDLERS[entry.tag_name] + handler(self, entry, playlist_version, current_clip) + continue + except KeyError: + pass + + # add the tag to the reference metadata at the correct level + if entry.tag_name in [PLAYLIST_START_TAG, PLAYLIST_END_TAG]: + continue + elif entry.tag_name in MEDIA_SEGMENT_TAGS: + # Media segments translate into media refs + hls_metadata = current_media_ref.metadata[FORMAT_METADATA_KEY] + hls_metadata[entry.tag_name] = entry.tag_value + elif entry.tag_name in MEDIA_PLAYLIST_TAGS: + # Media playlists translate into tracks + hls_metadata = self.track.metadata[FORMAT_METADATA_KEY] + hls_metadata[entry.tag_name] = entry.tag_value + + +""" +Compatibility version list: + EXT-X-BYTERANGE >= 4 + EXT-X-I-FRAMES-ONLY >= 4 + EXT-X-MAP in media playlist with EXT-X-I-FRAMES-ONLY >= 5 + EXT-X-MAP in media playlist without I-FRAMES-ONLY >= 6 + EXT-X-KEY constrants are by attributes specified: + - IV >= 2 + - KEYFORMAT >= 5 + - KEYFORMATVERSIONS >= 5 + EXTINF with floating point vaules >= 3 + + master playlist: + EXT-X-MEDIA with INSTREAM-ID="SERVICE" +""" + + +def entries_for_segment( + uri, + segment_duration, + segment_name=None, + segment_byterange=None, + segment_tags=None +): + """Creates a set of :class:`HLSPlaylistEntries` with the given parameters. + + :param uri: (:class:`str`) The uri for the segment media. + :param segment_duration: (:class:`opentimelineio.opentime.RationalTime`) + playback duration of the segment. 
+ :param segment_byterange: (:class:`ByteRange`) The data range for the + segment in the media (if required) + :param segment_tags: (:class:`dict`) key/value pairs of to become + additional tags for the segment + + :return: (:class:`list`) a group of :class:`HLSPlaylistEntry` instances for + the segment + """ + # Create the tags dict to build + if segment_tags: + tags = copy.deepcopy(segment_tags) + else: + tags = {} + + # Start building the entries list + segment_entries = [] + + # add the EXTINF + name = segment_name if segment_name is not None else '' + tag_value = '{0:.5f},{1}'.format( + otio.opentime.to_seconds(segment_duration), + name + ) + extinf_entry = HLSPlaylistEntry.tag_entry('EXTINF', tag_value) + segment_entries.append(extinf_entry) + + # add the additional tags + tag_entries = [ + HLSPlaylistEntry.tag_entry(k, v) for k, v in + tags.items() + ] + segment_entries.extend(tag_entries) + + # Now add the byterange for the entry + if segment_byterange: + byterange_entry = HLSPlaylistEntry.tag_entry( + 'EXT-X-BYTERANGE', + str(segment_byterange) + ) + segment_entries.append(byterange_entry) + + # Add the URI + # this method expects all fragments come from the same source file + uri_entry = HLSPlaylistEntry.uri_entry(uri) + segment_entries.append(uri_entry) + + return segment_entries + + +def stream_inf_attr_list_for_track(track): + """ Builds an :class:`AttributeList` instance for use in ``STREAM-INF`` + tags for the provided track. 
+ + :param track: (:class:`otio.schema.Track`) A track representing a + variant stream + :return: (:class:`AttributeList`) The instance from the metadata + """ + streaming_metadata = track.metadata.get(STREAMING_METADATA_KEY, {}) + + attributes = [] + bandwidth = streaming_metadata.get('bandwidth') + if bandwidth is not None: + attributes.append(('BANDWIDTH', bandwidth)) + + codec = streaming_metadata.get('codec') + if codec is not None: + attributes.append(('CODECS', codec)) + + frame_rate = streaming_metadata.get('frame_rate') + if frame_rate is not None: + attributes.append(('FRAME-RATE', frame_rate)) + + if 'width' in streaming_metadata and 'height' in streaming_metadata: + resolution = "{}x{}".format( + streaming_metadata['width'], + streaming_metadata['height'] + ) + attributes.append(('RESOLUTION', AttributeListEnum(resolution))) + + al = AttributeList(attributes) + + return al + + +def master_playlist_to_string(master_timeline): + """Writes a master playlist describing the tracks""" + + # start with a version number of 1, as features are encountered, we will + # update the version accordingly + version_requirements = set([1]) + + # TODO: detect rather than forcing version 6 + version_requirements.add(6) + + header_tags = copy.copy( + master_timeline.metadata.get(FORMAT_METADATA_KEY, {}) + ) + + # Filter out any values from the HLS metadata that aren't meant to become + # tags, such as the directive to force an HLS master playlist + hls_md_blacklist = ['master_playlist'] + for key in hls_md_blacklist: + try: + del(header_tags[key]) + except KeyError: + pass + + playlist_entries = [] + + # First declare the non-visual media + hls_type_count = {} + video_tracks = [] + audio_tracks = [ + t for t in master_timeline.tracks if + t.kind == otio.schema.TrackKind.Audio + ] + for track in master_timeline.tracks: + if track.kind == otio.schema.TrackKind.Video: + # video is done later, skip + video_tracks.append(track) + continue + + # Determine the HLS type + hls_type 
= OTIO_TRACK_KIND_TO_HLS_TYPE[track.kind] + + streaming_metadata = track.metadata.get(STREAMING_METADATA_KEY, {}) + + # Find the group name + try: + group_id = streaming_metadata['group_id'] + except KeyError: + sub_id = hls_type_count.setdefault(hls_type, 1) + group_id = '{}{}'.format(hls_type, sub_id) + hls_type_count[hls_type] += 1 + + media_playlist_default_uri = "{}.m3u8".format(track.name) + try: + track_uri = track.metadata[FORMAT_METADATA_KEY].get( + 'uri', + media_playlist_default_uri + ) + except KeyError: + track_uri = media_playlist_default_uri + + # Build the attribute list + attributes = AttributeList( + [ + ('TYPE', hls_type), + ('GROUP-ID', group_id), + ('URI', track_uri), + ('NAME', track.name), + ] + ) + + if streaming_metadata.get('autoselect'): + attributes['AUTOSELECT'] = AttributeListEnum('YES') + + if streaming_metadata.get('default'): + attributes['DEFAULT'] = AttributeListEnum('YES') + + # Finally, create the tag + entry = HLSPlaylistEntry.tag_entry( + 'EXT-X-MEDIA', + str(attributes) + ) + + playlist_entries.append(entry) + + # Add a blank line in the playlist to separate sections + if playlist_entries: + playlist_entries.append(HLSPlaylistEntry.comment_entry('')) + + # First write any i-frame playlist entires + iframe_list_entries = [] + for track in video_tracks: + try: + iframe_uri = track.metadata[FORMAT_METADATA_KEY]['iframe_uri'] + except KeyError: + # don't include iframe playlist + continue + + # Create the attribute list + attribute_list = stream_inf_attr_list_for_track(track) + + # Remove entries to not be included for I-Frame streams + for attr in I_FRAME_OMIT_ATTRS: + try: + del(attribute_list[attr]) + except KeyError: + pass + + # Add the URI + attribute_list['URI'] = iframe_uri + + iframe_list_entries.append( + HLSPlaylistEntry.tag_entry( + 'EXT-X-I-FRAME-STREAM-INF', + str(attribute_list) + ) + ) + + if iframe_list_entries: + iframe_list_entries.append(HLSPlaylistEntry.comment_entry('')) + + 
playlist_entries.extend(iframe_list_entries) + + # Write an EXT-STREAM-INF for each rendition set + for track in video_tracks: + # create the base attribute list for the video track + al = stream_inf_attr_list_for_track(track) + + # Create the uri + media_playlist_default_uri = "{}.m3u8".format(track.name) + try: + track_uri = track.metadata[FORMAT_METADATA_KEY].get( + 'uri', media_playlist_default_uri + ) + except KeyError: + track_uri = media_playlist_default_uri + uri_entry = HLSPlaylistEntry.uri_entry(track_uri) + + # TODO: this will break when we have subtitle and CC tracks + added_entry = False + for audio_track in audio_tracks: + if track.name not in audio_track.metadata['linked_tracks']: + continue + + # Write an entry for using these together + try: + audio_track_streaming_metadata = audio_track.metadata[ + STREAMING_METADATA_KEY + ] + aud_group = audio_track_streaming_metadata['group_id'] + aud_codec = audio_track_streaming_metadata['codec'] + aud_bandwidth = audio_track_streaming_metadata['bandwidth'] + except KeyError: + raise TypeError( + "HLS audio tracks must have 'codec', 'group_id', and" + " 'bandwidth' specified in metadata" + ) + + combo_al = copy.copy(al) + combo_al['CODECS'] += ',{}'.format(aud_codec) + combo_al['AUDIO'] = aud_group + combo_al['BANDWIDTH'] += aud_bandwidth + + entry = HLSPlaylistEntry.tag_entry( + 'EXT-X-STREAM-INF', + str(combo_al) + ) + playlist_entries.append(entry) + playlist_entries.append(uri_entry) + + added_entry = True + + if not added_entry: + # write out one simple entry + entry = HLSPlaylistEntry.tag_entry( + 'EXT-X-STREAM-INF', + str(al) + ) + playlist_entries.append(entry) + playlist_entries.append(uri_entry) + + # add a break before the next grouping of entries + playlist_entries.append(HLSPlaylistEntry.comment_entry('')) + + out_entries = [HLSPlaylistEntry.tag_entry(PLAYLIST_START_TAG, None)] + + playlist_version = max(version_requirements) + playlist_version_entry = HLSPlaylistEntry.tag_entry( + 
PLAYLIST_VERSION_TAG, + str(playlist_version) + ) + + out_entries.append(playlist_version_entry) + + out_entries += ( + HLSPlaylistEntry.tag_entry(k, v) for k, v in header_tags.items() + ) + + # separate the header entries from the rest of the entries + out_entries.append(HLSPlaylistEntry.comment_entry('')) + + out_entries += playlist_entries + + playlist_string = '\n'.join( + (str(entry) for entry in out_entries) + ) + + return playlist_string + + +class MediaPlaylistWriter(): + + def __init__( + self, + media_track, + min_seg_duration=None, + max_seg_duration=None + ): + # Default to one segment per fragment + if min_seg_duration is None: + min_seg_duration = otio.opentime.RationalTime(0, 1) + if max_seg_duration is None: + max_seg_duration = otio.opentime.RationalTime(0, 1) + + self._min_seg_duration = min_seg_duration + self._max_seg_duration = max_seg_duration + + self._playlist_entries = [] + self._playlist_tags = {} + + # Whenever an entry is added that has a minimum version requirement, + # we add that version to this set. 
The max value from this set is the + # playlist's version requirement + self._versions_used = set([1]) + + # TODO: detect rather than forcing version 7 + self._versions_used.add(7) + + # Start the build + self._build_playlist_with_track(media_track) + + def _build_playlist_with_track(self, media_track): + """ + Executes methods to result in a fully populated _playlist_entries list + """ + self._copy_HLS_metadata(media_track) + self._setup_track_info(media_track) + self._add_segment_entries(media_track) + self._finalize_entries(media_track) + + def _copy_HLS_metadata(self, media_track): + """ + Copies any metadata in the "HLS" namespace from the track to the + playlist-global tags + """ + # Grab any metadata provided on the otio + try: + track_metadata = media_track.metadata[FORMAT_METADATA_KEY] + self._playlist_tags.update(track_metadata) + + # Remove the version tag from the track metadata, we'll compute + # based on what we write out + del(self._playlist_tags[PLAYLIST_VERSION_TAG]) + + except KeyError: + pass + + # additionally remove metadata keys added for providing master + # playlist URIs + for key in ('uri', 'iframe_uri'): + try: + del(self._playlist_tags[key]) + except KeyError: + pass + + def _setup_track_info(self, media_track): + """sets up playlist global metadata""" + + # Setup the track start + if 'EXT-X-I-FRAMES-ONLY' in media_track.metadata.get( + FORMAT_METADATA_KEY, + {} + ): + # I-Frame playlists start at zero no matter what + track_start = 0 + else: + # Pull the track num from the first clip, if provided + first_segment_streaming_metadata = media_track[0].metadata.get( + STREAMING_METADATA_KEY, + {} + ) + track_start = first_segment_streaming_metadata.get( + SEQUENCE_NUM_KEY + ) + + # If we found a track start or one isn't already set in the + # metadata, create the tag for it. 
+ if ( + track_start is not None or + 'EXT-X-MEDIA-SEQUENCE' not in self._playlist_tags + ): + # Choose a reasonable track start default + if track_start is None: + track_start = 1 + self._playlist_tags['EXT-X-MEDIA-SEQUENCE'] = str(track_start) + + def _add_map_entry(self, fragment): + """adds an EXT-X-MAP entry from the given fragment + + returns the added entry + """ + + media_ref = fragment.media_reference + + # Extract useful tag data + media_ref_streaming_metadata = media_ref.metadata[ + STREAMING_METADATA_KEY + ] + uri = media_ref_streaming_metadata[INIT_URI_KEY] + seg_map_byterange_dict = media_ref_streaming_metadata.get( + INIT_BYTERANGE_KEY + ) + + # Create the attrlist + map_attr_list = AttributeList([ + ('URI', uri), + ]) + + # Add the byterange if provided + if seg_map_byterange_dict is not None: + seg_map_byterange = Byterange.from_dict(seg_map_byterange_dict) + map_attr_list['BYTERANGE'] = str(seg_map_byterange) + + # Construct the entry with the attrlist as the value + map_tag_str = str(map_attr_list) + entry = HLSPlaylistEntry.tag_entry("EXT-X-MAP", map_tag_str) + + self._playlist_entries.append(entry) + + return entry + + def _add_entries_for_segment_from_fragments( + self, + fragments, + omit_hls_keys=None, + is_iframe_playlist=False + ): + """ + For the given list of otio clips representing fragments in the mp4, + add playlist entries for single HLS segment. + + :param fragments: (:clas:`list`) :class:`opentimelineio.schema.Clip` + objects to write as a contiguous segment. + :param omit_hls_keys: (:class:`list`) metadata keys from the original + "HLS" metadata namespeaces will not be passed through. 
+ :param is_iframe_playlist: (:class:`bool`) If true, writes one segment + per fragment, otherwise writes all fragments as a single segment + + :return: (:class:`list` the :class:`HLSPlaylistEntry` instances added + to the playlist + """ + if is_iframe_playlist: + entries = [] + for fragment in fragments: + name = '' + fragment_range = Byterange.from_dict( + fragment.media_reference.metadata[STREAMING_METADATA_KEY] + ) + + segment_tags = {} + frag_tags = fragment.media_reference.metadata.get( + FORMAT_METADATA_KEY, + {} + ) + segment_tags.update(copy.deepcopy(frag_tags)) + + # scrub any metadata marked for omission + omit_hls_keys = omit_hls_keys or [] + for key in omit_hls_keys: + try: + del(segment_tags[key]) + except KeyError: + pass + + segment_entries = entries_for_segment( + fragment.media_reference.target_url, + fragment.duration(), + name, + fragment_range, + segment_tags + ) + entries.extend(segment_entries) + + self._playlist_entries.extend(entries) + return entries + + segment_tags = {} + for fragment in fragments: + frag_tags = fragment.media_reference.metadata.get( + FORMAT_METADATA_KEY, + {} + ) + segment_tags.update(copy.deepcopy(frag_tags)) + + # scrub any metadata marked for omission + omit_hls_keys = omit_hls_keys or [] + for key in omit_hls_keys: + try: + del(segment_tags[key]) + except KeyError: + pass + + # Calculate the byterange for the segment (if byteranges are specified) + first_ref = fragments[0].media_reference + first_ref_streaming_md = first_ref.metadata[STREAMING_METADATA_KEY] + if 'byte_offset' in first_ref_streaming_md and len(fragments) == 1: + segment_range = Byterange.from_dict(first_ref_streaming_md) + elif 'byte_offset' in first_ref_streaming_md: + # Find the byterange encapsulating everything + last_ref = fragments[-1].media_reference + last_ref_streaming_md = last_ref.metadata[STREAMING_METADATA_KEY] + first_range = Byterange.from_dict(first_ref_streaming_md) + last_range = Byterange.from_dict(last_ref_streaming_md) + + 
segment_offset = first_range.offset + segment_end = (last_range.offset + last_range.count) + segment_count = segment_end - segment_offset + segment_range = Byterange(segment_count, segment_offset) + else: + segment_range = None + + uri = fragments[0].media_reference.target_url + + # calculate the combined duration + segment_duration = fragments[0].duration() + for frag in fragments[1:]: + segment_duration += frag.duration() + + # TODO: Determine how to pass a segment name in + segment_name = '' + segment_entries = entries_for_segment( + uri, + segment_duration, + segment_name, + segment_range, + segment_tags + ) + + self._playlist_entries.extend(segment_entries) + return segment_entries + + def _fragments_have_same_map(self, fragment, following_fragment): + """ + Given fragment and following_fragment, returns whether or not their + initialization data is the same (what becomes EXT-X-MAP) + """ + media_ref = fragment.media_reference + media_ref_streaming_md = media_ref.metadata.get( + STREAMING_METADATA_KEY, + {} + ) + following_ref = following_fragment.media_reference + following_ref_streaming_md = following_ref.metadata.get( + STREAMING_METADATA_KEY, + {} + ) + # Check the init file + init_uri = media_ref_streaming_md.get(INIT_URI_KEY) + following_init_uri = media_ref_streaming_md.get(INIT_URI_KEY) + if init_uri != following_init_uri: + return False + + # Check the init byterange + init_dict = media_ref_streaming_md.get(INIT_BYTERANGE_KEY) + following_init_dict = following_ref_streaming_md.get( + INIT_BYTERANGE_KEY + ) + + dummy_range = Byterange(0, 0) + init_range = ( + Byterange.from_dict(init_dict) if init_dict else dummy_range + ) + following_range = ( + Byterange.from_dict(following_init_dict) + if following_init_dict else dummy_range + ) + + if init_range != following_range: + return False + + return True + + def _fragments_are_contiguous(self, fragment, following_fragment): + """ Given fragment and following_fragment (otio clips) returns whether + or not 
they are contiguous. + + To be contiguous the fragments must: + 1. have the same file URL + 2. have the same initialization data (what becomes EXT-X-MAP) + 3. be adjacent in the file (follwoing_fragment's first byte directly + follows fragment's last byte) + + Returns True if following_fragment is contiguous from fragment + """ + # Fragments are contiguous if: + # 1. They have the file url + # 2. They have the same map info + # 3. Their byte ranges are contiguous + media_ref = fragment.media_reference + media_ref_streaming_md = media_ref.metadata.get( + STREAMING_METADATA_KEY, + {} + ) + following_ref = following_fragment.media_reference + following_ref_streaming_md = following_ref.metadata.get( + STREAMING_METADATA_KEY, + {} + ) + if media_ref.target_url != following_ref.target_url: + return False + + if ( + media_ref_streaming_md.get(INIT_URI_KEY) != + following_ref_streaming_md.get(INIT_URI_KEY) + ): + return False + + if not self._fragments_have_same_map(fragment, following_fragment): + return False + + # Check if fragments are contiguous in file + try: + frag_end = ( + media_ref_streaming_md['byte_offset'] + + media_ref_streaming_md['byte_count'] + ) + if frag_end != following_ref_streaming_md['byte_offset']: + return False + except KeyError: + return False + + # since we haven't returned yet, all checks must have passed! 
+ return True + + def _add_segment_entries(self, media_track): + """given a media track, generates the segment entries""" + + # Determine whether or not this is an I-Frame playlist + track_hls_metadata = media_track.metadata.get('HLS') + is_iframe_playlist = 'EXT-X-I-FRAMES-ONLY' in track_hls_metadata + + # Make a list copy of the fragments + fragments = [clip for clip in media_track] + + segment_durations = [] + previous_fragment = None + map_changed = True + while fragments: + # There should be at least one fragment per segment + frag_it = iter(fragments) + first_frag = next(frag_it) + gathered_fragments = [first_frag] + gathered_duration = first_frag.duration() + + # Determine this segment will need a new EXT-X-MAP entry + map_changed = ( + True if previous_fragment is None else + not self._fragments_have_same_map( + previous_fragment, + first_frag + ) + ) + + # Iterate through the remaining fragments until a discontinuity + # is found, our time limit is met, or we add all the fragments to + # the segment + for fragment in frag_it: + # Determine whther or not the fragments are contiguous + previous_fragment = gathered_fragments[-1] + contiguous = self._fragments_are_contiguous( + previous_fragment, + fragment + ) + + # Determine if we've hit our segment time conditions + new_duration = gathered_duration + fragment.duration() + segment_full = ( + gathered_duration >= self._min_seg_duration or + new_duration > self._max_seg_duration + ) + + # End condition met, cut the segment + if not contiguous or segment_full: + break + + # Include the fragment + gathered_duration = new_duration + gathered_fragments.append(fragment) + + # Write out the segment and start the next + start_fragment = gathered_fragments[0] + + # If the map for this segment was a change, write it + if map_changed: + self._add_map_entry(start_fragment) + + # add the entries for the segment. 
Omit any EXT-X-MAP metadata + # that may have come in from reading a file (we're updating) + self._add_entries_for_segment_from_fragments( + gathered_fragments, + omit_hls_keys=('EXT-X-MAP'), + is_iframe_playlist=is_iframe_playlist + ) + + duration_seconds = otio.opentime.to_seconds(gathered_duration) + segment_durations.append(duration_seconds) + + # in the next iteration, start where we left off + fragments = fragments[len(gathered_fragments):] + + # Set the max segment duration + max_duration = round(max(segment_durations)) + self._playlist_tags['EXT-X-TARGETDURATION'] = str(int(max_duration)) + + def _finalize_entries(self, media_track): + """Does final wrap-up of playlist entries""" + + self._playlist_tags['EXT-X-PLAYLIST-TYPE'] = 'VOD' + + # add the end + end_entry = HLSPlaylistEntry.tag_entry(PLAYLIST_END_TAG) + self._playlist_entries.append(end_entry) + + # find the maximum HLS feature version we've used + playlist_version = max(self._versions_used) + playlist_version_entry = HLSPlaylistEntry.tag_entry( + PLAYLIST_VERSION_TAG, + str(playlist_version) + ) + + # now that we know what was used, let's prepend the header + playlist_header_entries = [ + HLSPlaylistEntry.tag_entry(PLAYLIST_START_TAG), + playlist_version_entry + ] + + # add in the rest of the header entries in a deterministic order + playlist_header_entries += ( + HLSPlaylistEntry.tag_entry(k, v) + for k, v in sorted(self._playlist_tags.items(), key=lambda i: i[0]) + ) + + # Prepend the entries with the header entries + self._playlist_entries = ( + playlist_header_entries + self._playlist_entries + ) + + def playlist_string(self): + """Returns the string representation of the playlist entries""" + + return '\n'.join( + (str(entry) for entry in self._playlist_entries) + ) + +# Public interface + + +def read_from_string(input_str): + """Adapter entry point for reading.""" + + parser = HLSPlaylistParser(input_str) + return parser.timeline + + +def write_to_string(input_otio): + """Adapter entry point 
for writing.""" + + if len(input_otio.tracks) == 0: + return None + + # Determine whether we should write a media or master playlist + try: + write_master = input_otio.metadata['HLS']['master_playlist'] + except KeyError: + # If no explicit directive, infer + write_master = (len(input_otio.tracks) > 1) + + if write_master: + return master_playlist_to_string(input_otio) + else: + media_track = input_otio.tracks[0] + track_streaming_md = input_otio.metadata.get( + STREAMING_METADATA_KEY, + {} + ) + min_seg_duration = track_streaming_md.get('min_segment_duration') + max_seg_duration = track_streaming_md.get('max_segment_duration') + + writer = MediaPlaylistWriter( + media_track, + min_seg_duration, + max_seg_duration + ) + return writer.playlist_string() diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/maya_sequencer.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/maya_sequencer.py new file mode 100644 index 00000000000..03e6cf87637 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/maya_sequencer.py @@ -0,0 +1,132 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. 
+# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Maya Sequencer Adapter Harness""" + +import os +import subprocess + +from .. import adapters + + +def write_to_file(input_otio, filepath): + if "OTIO_MAYA_PYTHON_BIN" not in os.environ: + raise RuntimeError( + "'OTIO_MAYA_PYTHON_BIN' not set, please set this to path to " + "mayapy within the Maya installation." + ) + maya_python_path = os.environ["OTIO_MAYA_PYTHON_BIN"] + if not os.path.exists(maya_python_path): + raise RuntimeError( + 'Cannot access file at OTIO_MAYA_PYTHON_BIN: "{}"'.format( + maya_python_path + ) + ) + if os.path.isdir(maya_python_path): + raise RuntimeError( + "OTIO_MAYA_PYTHON_BIN contains a path to a directory, not to an " + "executable file: {}".format(maya_python_path) + ) + + input_data = adapters.write_to_string(input_otio, "otio_json") + + os.environ['PYTHONPATH'] = ( + os.pathsep.join( + [ + os.environ.setdefault('PYTHONPATH', ''), + os.path.dirname(__file__) + ] + ) + ) + + proc = subprocess.Popen( + [ + os.environ["OTIO_MAYA_PYTHON_BIN"], + '-m', + 'extern_maya_sequencer', + 'write', + filepath + ], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE, + env=os.environ + ) + proc.stdin.write(input_data) + out, err = proc.communicate() + + if proc.returncode: + raise RuntimeError( + "ERROR: extern_maya_sequencer (called through the maya sequencer " + "file adapter) failed. 
stderr output: " + err + ) + + +def read_from_file(filepath): + if "OTIO_MAYA_PYTHON_BIN" not in os.environ: + raise RuntimeError( + "'OTIO_MAYA_PYTHON_BIN' not set, please set this to path to " + "mayapy within the Maya installation." + ) + + os.environ['PYTHONPATH'] = ( + os.pathsep.join( + [ + os.environ.setdefault('PYTHONPATH', ''), + os.path.dirname(__file__) + ] + ) + ) + + proc = subprocess.Popen( + [ + os.environ["OTIO_MAYA_PYTHON_BIN"], + '-m', + 'extern_maya_sequencer', + 'read', + filepath + ], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE, + env=os.environ + ) + out, err = proc.communicate() + + # maya probably puts a bunch of crap on the stdout + sentinel_str = "OTIO_JSON_BEGIN\n" + end_sentinel_str = "\nOTIO_JSON_END\n" + start = out.find(sentinel_str) + end = out.find(end_sentinel_str) + result = adapters.read_from_string( + out[start + len(sentinel_str):end], + "otio_json" + ) + + if proc.returncode: + raise RuntimeError( + "ERROR: extern_maya_sequencer (called through the maya sequencer " + "file adapter) failed. stderr output: " + err + ) + return result diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/rv.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/rv.py new file mode 100644 index 00000000000..33d00ce8c79 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/rv.py @@ -0,0 +1,84 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. 
This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""RvSession Adapter harness""" + +import subprocess +import os +import copy + +from .. import adapters + + +def write_to_file(input_otio, filepath): + if "OTIO_RV_PYTHON_BIN" not in os.environ: + raise RuntimeError( + "'OTIO_RV_PYTHON_BIN' not set, please set this to path to " + "py-interp within the RV installation." + ) + + if "OTIO_RV_PYTHON_LIB" not in os.environ: + raise RuntimeError( + "'OTIO_RV_PYTHON_LIB' not set, please set this to path to python " + "directory within the RV installation." 
+ ) + + input_data = adapters.write_to_string(input_otio, "otio_json") + + base_environment = copy.deepcopy(os.environ) + + base_environment['PYTHONPATH'] = ( + os.pathsep.join( + [ + base_environment.setdefault('PYTHONPATH', ''), + os.path.dirname(__file__) + ] + ) + ) + + proc = subprocess.Popen( + [ + base_environment["OTIO_RV_PYTHON_BIN"], + '-m', + 'extern_rv', + filepath + ], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE, + env=base_environment + ) + proc.stdin.write(input_data) + out, err = proc.communicate() + + if out.strip(): + print("stdout: {}".format(out)) + if err.strip(): + print("stderr: {}".format(err)) + + if proc.returncode: + raise RuntimeError( + "ERROR: extern_rv (called through the rv session file adapter) " + "failed. stderr output: " + err + ) diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/xges.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/xges.py new file mode 100644 index 00000000000..525a8a4649b --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/xges.py @@ -0,0 +1,819 @@ +# +# Copyright (C) 2019 Igalia S.L +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. 
+# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""OpenTimelineIO GStreamer Editing Services XML Adapter. """ +import re +import unittest + +from decimal import Decimal +from fractions import Fraction +from xml.etree import cElementTree +from xml.dom import minidom +import opentimelineio as otio + +META_NAMESPACE = "XGES" + + +FRAMERATE_FRAMEDURATION = {23.98: "24000/1001", + 24: "600/25", + 25: "25/1", + 29.97: "30000/1001", + 30: "30/1", + 50: "50/1", + 59.94: "60000/1001", + 60: "60/1"} + + +TRANSITION_MAP = { + "crossfade": otio.schema.TransitionTypes.SMPTE_Dissolve +} +# Two way map +TRANSITION_MAP.update(dict([(v, k) for k, v in TRANSITION_MAP.items()])) + + +class GstParseError(otio.exceptions.OTIOError): + pass + + +class GstStructure(object): + """ + GstStructure parser with a "dictionary" like API. 
+ """ + UNESCAPE = re.compile(r'(?<!\\)\\(.)') + INT_TYPES = "".join( + ("int", "uint", "int8", "uint8", "int16", + "uint16", "int32", "uint32", "int64", "uint64") + ) + + def __init__(self, text): + self.text = text + self.modified = False + self.name, self.types, self.values = GstStructure._parse(text + ";") + + def __repr__(self): + if not self.modified: + return self.text + + res = self.name + for key, value in self.values.items(): + value_type = self.types[key] + res += ', %s=(%s)"%s"' % (key, value_type, self.escape(value)) + res += ';' + + return res + + def __getitem__(self, key): + return self.values[key] + + def set(self, key, value_type, value): + if self.types.get(key) == value_type and self.values.get(key) == value: + return + + self.modified = True + self.types[key] = value_type + self.values[key] = value + + def get(self, key, default=None): + return self.values.get(key, default) + + @staticmethod + def _find_eos(s): + # find next '"' without preceeding '\' + line = 0 + while 1: # faster than regexp for '[^\\]\"' + p = s.index('"') + line += p + 1 + if s[p - 1] != '\\': + return line + s = s[(p + 1):] + return -1 + + @staticmethod + def escape(s): + # XXX: The unicode type doesn't exist in Python 3 (all strings are unicode) + # so we have to use type(u"") which works in both Python 2 and 3. 
+ if type(s) not in (str, type(u"")): + return s + return s.replace(" ", "\\ ") + + @staticmethod + def _parse(s): + in_string = s + types = {} + values = {} + scan = True + # parse id + p = s.find(',') + if p == -1: + try: + p = s.index(';') + except ValueError: + p = len(s) + scan = False + name = s[:p] + # parse fields + while scan: + comma_space_it = p + # skip 'name, ' / 'value, ' + while s[comma_space_it] in [' ', ',']: + comma_space_it += 1 + s = s[comma_space_it:] + p = s.index('=') + k = s[:p] + if not s[p + 1] == '(': + raise ValueError("In %s position: %d" % (in_string, p)) + s = s[(p + 2):] # skip 'key=(' + p = s.index(')') + t = s[:p] + s = s[(p + 1):] # skip 'type)' + + if s[0] == '"': + s = s[1:] # skip '"' + p = GstStructure._find_eos(s) + if p == -1: + raise ValueError + v = s[:(p - 1)] + if s[p] == ';': + scan = False + # unescape \., but not \\. (using a backref) + # need a reverse for re.escape() + v = v.replace('\\\\', '\\') + v = GstStructure.UNESCAPE.sub(r'\1', v) + else: + p = s.find(',') + if p == -1: + p = s.index(';') + scan = False + v = s[:p] + + if t == 'structure': + v = GstStructure(v) + elif t == 'string' and len(v) and v[0] == '"': + v = v[1:-1] + elif t == 'boolean': + v = (v == '1') + elif t in GstStructure.INT_TYPES: + v = int(v) + types[k] = t + values[k] = v + + return (name, types, values) + + +class GESTrackType: + UNKNOWN = 1 << 0 + AUDIO = 1 << 1 + VIDEO = 1 << 2 + TEXT = 1 << 3 + CUSTOM = 1 << 4 + + @staticmethod + def to_otio_type(_type): + if _type == GESTrackType.AUDIO: + return otio.schema.TrackKind.Audio + elif _type == GESTrackType.VIDEO: + return otio.schema.TrackKind.Video + + raise GstParseError("Can't translate track type %s" % _type) + + +GST_CLOCK_TIME_NONE = 18446744073709551615 +GST_SECOND = 1000000000 + + +def to_gstclocktime(rational_time): + """ + This converts a RationalTime object to a GstClockTime + + Args: + rational_time (RationalTime): This is a RationalTime object + + Returns: + int: A time in 
nanosecond + """ + + return int(rational_time.value_rescaled_to(1) * GST_SECOND) + + +def get_from_structure(xmlelement, fieldname, default=None, attribute="properties"): + structure = GstStructure(xmlelement.get(attribute, attribute)) + return structure.get(fieldname, default) + + +class XGES: + """ + This object is responsible for knowing how to convert an xGES + project into an otio timeline + """ + + def __init__(self, xml_string): + self.xges_xml = cElementTree.fromstring(xml_string) + self.rate = 25 + + def _set_rate_from_timeline(self, timeline): + metas = GstStructure(timeline.attrib.get("metadatas", "metadatas")) + framerate = metas.get("framerate") + if framerate: + rate = Fraction(framerate) + else: + video_track = timeline.find("./track[@track-type='4']") + rate = None + if video_track is not None: + properties = GstStructure( + video_track.get("properties", "properties;")) + restriction_caps = GstStructure(properties.get( + "restriction-caps", "restriction-caps")) + rate = restriction_caps.get("framerate") + + if rate is None: + return + + self.rate = float(Fraction(rate)) + if self.rate == int(self.rate): + self.rate = int(self.rate) + else: + self.rate = float(round(Decimal(self.rate), 2)) + + def to_rational_time(self, ns_timestamp): + """ + This converts a GstClockTime value to an otio RationalTime object + + Args: + ns_timestamp (int): This is a GstClockTime value (nanosecond absolute value) + + Returns: + RationalTime: A RationalTime object + """ + return otio.opentime.RationalTime(round(int(ns_timestamp) / + (GST_SECOND / self.rate)), self.rate) + + def to_otio(self): + """ + Convert an xges to an otio + + Returns: + OpenTimeline: An OpenTimeline Timeline object + """ + + project = self.xges_xml.find("./project") + metas = GstStructure(project.attrib.get("metadatas", "metadatas")) + otio_project = otio.schema.SerializableCollection( + name=metas.get('name'), + metadata={ + META_NAMESPACE: {"metadatas": project.attrib.get( + "metadatas", 
"metadatas")} + } + ) + timeline = project.find("./timeline") + self._set_rate_from_timeline(timeline) + + otio_timeline = otio.schema.Timeline( + name=metas.get('name', "unnamed"), + metadata={ + META_NAMESPACE: { + "metadatas": timeline.attrib.get("metadatas", "metadatas"), + "properties": timeline.attrib.get("properties", "properties") + } + } + ) + + all_names = set() + self._add_layers(timeline, otio_timeline, all_names) + otio_project.append(otio_timeline) + + return otio_project + + def _add_layers(self, timeline, otio_timeline, all_names): + for layer in timeline.findall("./layer"): + tracks = self._build_tracks_from_layer_clips(layer, all_names) + otio_timeline.tracks.extend(tracks) + + def _get_clips_for_type(self, clips, track_type): + if not clips: + return False + + clips_for_type = [] + for clip in clips: + if int(clip.attrib['track-types']) & track_type: + clips_for_type.append(clip) + + return clips_for_type + + def _build_tracks_from_layer_clips(self, layer, all_names): + all_clips = layer.findall('./clip') + + tracks = [] + for track_type in [GESTrackType.VIDEO, GESTrackType.AUDIO]: + clips = self._get_clips_for_type(all_clips, track_type) + if not clips: + continue + + track = otio.schema.Track() + track.kind = GESTrackType.to_otio_type(track_type) + self._add_clips_in_track(clips, track, all_names) + + tracks.append(track) + + return tracks + + def _add_clips_in_track(self, clips, track, all_names): + for clip in clips: + otio_clip = self._create_otio_clip(clip, all_names) + if otio_clip is None: + continue + + clip_offset = self.to_rational_time(int(clip.attrib['start'])) + if clip_offset > track.duration(): + track.append( + self._create_otio_gap( + 0, + (clip_offset - track.duration()) + ) + ) + + track.append(otio_clip) + + return track + + def _get_clip_name(self, clip, all_names): + i = 0 + tmpname = name = clip.get("name", GstStructure( + clip.get("properties", "properties;")).get("name")) + while True: + if tmpname not in all_names: + 
all_names.add(tmpname) + return tmpname + + i += 1 + tmpname = name + '_%d' % i + + def _create_otio_transition(self, clip, all_names): + start = self.to_rational_time(clip.attrib["start"]) + end = start + self.to_rational_time(clip.attrib["duration"]) + cut_point = otio.opentime.RationalTime((end.value - start.value) / + 2, start.rate) + + return otio.schema.Transition( + name=self._get_clip_name(clip, all_names), + transition_type=TRANSITION_MAP.get( + clip.attrib["asset-id"], otio.schema.TransitionTypes.Custom + ), + in_offset=cut_point, + out_offset=cut_point, + ) + + def _create_otio_uri_clip(self, clip, all_names): + source_range = otio.opentime.TimeRange( + start_time=self.to_rational_time(clip.attrib["inpoint"]), + duration=self.to_rational_time(clip.attrib["duration"]), + ) + + otio_clip = otio.schema.Clip( + name=self._get_clip_name(clip, all_names), + source_range=source_range, + media_reference=self._reference_from_id( + clip.get("asset-id"), clip.get("type-name")), + ) + + return otio_clip + + def _create_otio_clip(self, clip, all_names): + otio_clip = None + + if clip.get("type-name") == "GESTransitionClip": + otio_clip = self._create_otio_transition(clip, all_names) + elif clip.get("type-name") == "GESUriClip": + otio_clip = self._create_otio_uri_clip(clip, all_names) + + if otio_clip is None: + print("Could not represent: %s" % clip.attrib) + return None + + otio_clip.metadata[META_NAMESPACE] = { + "properties": clip.get("properties", "properties;"), + "metadatas": clip.get("metadatas", "metadatas;"), + } + + return otio_clip + + def _create_otio_gap(self, start, duration): + source_range = otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime(start), + duration=duration + ) + return otio.schema.Gap(source_range=source_range) + + def _reference_from_id(self, asset_id, asset_type="GESUriClip"): + asset = self._asset_by_id(asset_id, asset_type) + if asset is None: + return None + if not asset.get("id", ""): + return 
otio.schema.MissingReference() + + duration = GST_CLOCK_TIME_NONE + if asset_type == "GESUriClip": + duration = get_from_structure(asset, "duration", duration) + + available_range = otio.opentime.TimeRange( + start_time=self.to_rational_time(0), + duration=self.to_rational_time(duration) + ) + ref = otio.schema.ExternalReference( + target_url=asset.get("id"), + available_range=available_range + ) + + ref.metadata[META_NAMESPACE] = { + "properties": asset.get("properties"), + "metadatas": asset.get("metadatas"), + } + + return ref + + # -------------------- + # search helpers + # -------------------- + def _asset_by_id(self, asset_id, asset_type): + return self.xges_xml.find( + "./project/ressources/asset[@id='{}'][@extractable-type-name='{}']".format( + asset_id, asset_type) + ) + + def _timeline_element_by_name(self, timeline, name): + for clip in timeline.findall("./layer/clip"): + if get_from_structure(clip, 'name') == name: + return clip + + return None + + +class XGESOtio: + + def __init__(self, input_otio): + self.container = input_otio + self.rate = 25 + + def _insert_new_sub_element(self, into_parent, tag, attrib=None, text=''): + elem = cElementTree.SubElement(into_parent, tag, **attrib or {}) + elem.text = text + return elem + + def _get_element_properties(self, element): + return element.metadata.get(META_NAMESPACE, {}).get("properties", "properties;") + + def _get_element_metadatas(self, element): + return element.metadata.get(META_NAMESPACE, + {"GES": {}}).get("metadatas", "metadatas;") + + def _serialize_ressource(self, ressources, ressource, asset_type): + if isinstance(ressource, otio.schema.MissingReference): + return + + if ressources.find("./asset[@id='%s'][@extractable-type-name='%s']" % ( + ressource.target_url, asset_type)) is not None: + return + + properties = GstStructure(self._get_element_properties(ressource)) + if properties.get('duration') is None: + properties.set('duration', 'guin64', + 
to_gstclocktime(ressource.available_range.duration)) + + self._insert_new_sub_element( + ressources, 'asset', + attrib={ + "id": ressource.target_url, + "extractable-type-name": 'GESUriClip', + "properties": str(properties), + "metadatas": self._get_element_metadatas(ressource), + } + ) + + def _get_transition_times(self, offset, otio_transition): + rational_offset = otio.opentime.RationalTime( + round(int(offset) / (GST_SECOND / self.rate)), + self.rate + ) + start = rational_offset - otio_transition.in_offset + end = rational_offset + otio_transition.out_offset + + return 0, to_gstclocktime(start), to_gstclocktime(end - start) + + def _serialize_clip( + self, + otio_track, + layer, + layer_priority, + ressources, + otio_clip, + clip_id, + offset + ): + + # FIXME - Figure out a proper way to determine clip type! + asset_id = "GESTitleClip" + asset_type = "GESTitleClip" + + if isinstance(otio_clip, otio.schema.Transition): + asset_type = "GESTransitionClip" + asset_id = TRANSITION_MAP.get(otio_clip.transition_type, "crossfade") + inpoint, offset, duration = self._get_transition_times(offset, otio_clip) + else: + inpoint = to_gstclocktime(otio_clip.source_range.start_time) + duration = to_gstclocktime(otio_clip.source_range.duration) + + if not isinstance(otio_clip.media_reference, otio.schema.MissingReference): + asset_id = otio_clip.media_reference.target_url + asset_type = "GESUriClip" + + self._serialize_ressource(ressources, otio_clip.media_reference, + asset_type) + + if otio_track.kind == otio.schema.TrackKind.Audio: + track_types = GESTrackType.AUDIO + elif otio_track.kind == otio.schema.TrackKind.Video: + track_types = GESTrackType.VIDEO + else: + raise ValueError("Unhandled track type: %s" % otio_track.kind) + + properties = otio_clip.metadata.get( + META_NAMESPACE, + { + "properties": 'properties, name=(string)"%s"' % ( + GstStructure.escape(otio_clip.name) + ) + }).get("properties") + return self._insert_new_sub_element( + layer, 'clip', + attrib={ + 
"id": str(clip_id), + "properties": properties, + "asset-id": str(asset_id), + "type-name": str(asset_type), + "track-types": str(track_types), + "layer-priority": str(layer_priority), + "start": str(offset), + "rate": '0', + "inpoint": str(inpoint), + "duration": str(duration), + "metadatas": self._get_element_metadatas(otio_clip), + } + ) + + def _serialize_tracks(self, timeline, otio_timeline): + audio_vals = ( + 'properties', + 'restriction-caps=(string)audio/x-raw(ANY)', + 'framerate=(GstFraction)1', + otio_timeline.duration().rate + ) + + properties = '%s, %s,%s/%s' % audio_vals + self._insert_new_sub_element( + timeline, 'track', + attrib={ + "caps": "audio/x-raw(ANY)", + "track-type": '2', + 'track-id': '0', + 'properties': properties + } + ) + + video_vals = ( + 'properties', + 'restriction-caps=(string)video/x-raw(ANY)', + 'framerate=(GstFraction)1', + otio_timeline.duration().rate + ) + + properties = '%s, %s,%s/%s' % video_vals + for otio_track in otio_timeline.tracks: + if otio_track.kind == otio.schema.TrackKind.Video: + self._insert_new_sub_element( + timeline, 'track', + attrib={ + "caps": "video/x-raw(ANY)", + "track-type": '4', + 'track-id': '1', + 'properties': properties, + } + ) + + return + + def _serialize_layer(self, timeline, layers, layer_priority): + if layer_priority not in layers: + layers[layer_priority] = self._insert_new_sub_element( + timeline, 'layer', + attrib={ + "priority": str(layer_priority), + } + ) + + def _serialize_timeline_element(self, timeline, layers, layer_priority, + offset, otio_track, otio_element, + ressources, all_clips): + self._serialize_layer(timeline, layers, layer_priority) + layer = layers[layer_priority] + if isinstance(otio_element, (otio.schema.Clip, otio.schema.Transition)): + element = self._serialize_clip(otio_track, layer, layer_priority, + ressources, otio_element, + str(len(all_clips)), offset) + all_clips.add(element) + if isinstance(otio_element, otio.schema.Transition): + # Make next clip 
overlap + return int(element.get("start")) - offset + elif not isinstance(otio_element, otio.schema.Gap): + print("FIXME: Add support for %s" % type(otio_element)) + return 0 + + return to_gstclocktime(otio_element.source_range.duration) + + def _make_element_names_unique(self, all_names, otio_element): + if isinstance(otio_element, otio.schema.Gap): + return + + if not isinstance(otio_element, otio.schema.Track): + i = 0 + name = otio_element.name + while True: + if name not in all_names: + otio_element.name = name + break + + i += 1 + name = otio_element.name + '_%d' % i + all_names.add(otio_element.name) + + if isinstance(otio_element, (otio.schema.Stack, otio.schema.Track)): + for sub_element in otio_element: + self._make_element_names_unique(all_names, sub_element) + + def _make_timeline_elements_names_unique(self, otio_timeline): + element_names = set() + for track in otio_timeline.tracks: + for element in track: + self._make_element_names_unique(element_names, element) + + def _serialize_timeline(self, project, ressources, otio_timeline): + metadatas = GstStructure(self._get_element_metadatas(otio_timeline)) + metadatas.set( + "framerate", "fraction", self._framerate_to_frame_duration( + otio_timeline.duration().rate + ) + ) + timeline = self._insert_new_sub_element( + project, 'timeline', + attrib={ + "properties": self._get_element_properties(otio_timeline), + "metadatas": str(metadatas), + } + ) + self._serialize_tracks(timeline, otio_timeline) + + self._make_timeline_elements_names_unique(otio_timeline) + + all_clips = set() + layers = {} + for layer_priority, otio_track in enumerate(otio_timeline.tracks): + self._serialize_layer(timeline, layers, layer_priority) + offset = 0 + for otio_element in otio_track: + offset += self._serialize_timeline_element( + timeline, layers, layer_priority, offset, + otio_track, otio_element, ressources, all_clips, + ) + + for layer in layers.values(): + layer[:] = sorted(layer, key=lambda child: int(child.get("start"))) 
+ + # -------------------- + # static methods + # -------------------- + @staticmethod + def _framerate_to_frame_duration(framerate): + frame_duration = FRAMERATE_FRAMEDURATION.get(int(framerate), "") + if not frame_duration: + frame_duration = FRAMERATE_FRAMEDURATION.get(float(framerate), "") + return frame_duration + + def to_xges(self): + xges = cElementTree.Element('ges', version="0.4") + + metadatas = GstStructure(self._get_element_metadatas(self.container)) + if self.container.name is not None: + metadatas.set("name", "string", self.container.name) + if not isinstance(self.container, otio.schema.Timeline): + project = self._insert_new_sub_element( + xges, 'project', + attrib={ + "properties": self._get_element_properties(self.container), + "metadatas": str(metadatas), + } + ) + + if len(self.container) > 1: + print( + "WARNING: Only one timeline supported, using *only* the first one.") + + otio_timeline = self.container[0] + + else: + project = self._insert_new_sub_element( + xges, 'project', + attrib={ + "metadatas": str(metadatas), + } + ) + otio_timeline = self.container + + ressources = self._insert_new_sub_element(project, 'ressources') + self.rate = otio_timeline.duration().rate + self._serialize_timeline(project, ressources, otio_timeline) + + # with indentations. 
+ string = cElementTree.tostring(xges, encoding="UTF-8") + dom = minidom.parseString(string) + return dom.toprettyxml(indent=' ') + + +# -------------------- +# adapter requirements +# -------------------- +def read_from_string(input_str): + """ + Necessary read method for otio adapter + + Args: + input_str (str): A GStreamer Editing Services formated project + + Returns: + OpenTimeline: An OpenTimeline object + """ + + return XGES(input_str).to_otio() + + +def write_to_string(input_otio): + """ + Necessary write method for otio adapter + + Args: + input_otio (OpenTimeline): An OpenTimeline object + + Returns: + str: The string contents of an FCP X XML + """ + + return XGESOtio(input_otio).to_xges() + + +# -------------------- +# Some unit check for internal types +# -------------------- + +class XGESTests(unittest.TestCase): + + def test_gst_structure_parsing(self): + struct = GstStructure('properties, name=(string)"%s";' % ( + GstStructure.escape("sc01 sh010_anim.mov")) + ) + self.assertEqual(struct["name"], "sc01 sh010_anim.mov") + + def test_gst_structure_editing(self): + struct = GstStructure('properties, name=(string)"%s";' % ( + GstStructure.escape("sc01 sh010_anim.mov")) + ) + self.assertEqual(struct["name"], "sc01 sh010_anim.mov") + + struct.set("name", "string", "test") + self.assertEqual(struct["name"], "test") + self.assertEqual(str(struct), 'properties, name=(string)"test";') + + def test_empty_string(self): + struct = GstStructure('properties, name=(string)"";') + self.assertEqual(struct["name"], "") + + +if __name__ == '__main__': + unittest.main() From 824be75d537947249937f94c90357902fe43ca46 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Tue, 30 Mar 2021 17:04:05 +0200 Subject: [PATCH 258/295] created new project config schema validating new keys --- schema/config-2.0.json | 87 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 schema/config-2.0.json diff --git 
a/schema/config-2.0.json b/schema/config-2.0.json new file mode 100644 index 00000000000..098d1983e28 --- /dev/null +++ b/schema/config-2.0.json @@ -0,0 +1,87 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:config-2.0", + "description": "A project configuration.", + + "type": "object", + + "additionalProperties": false, + "required": [ + "tasks", + "apps" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string" + }, + "templates": { + "type": "object" + }, + "roots": { + "type": "object" + }, + "imageio": { + "type": "object" + }, + "tasks": { + "type": "object", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "icon": {"type": "string"}, + "group": {"type": "string"}, + "label": {"type": "string"} + }, + "required": [ + "short_name" + ] + } + }, + "apps": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "icon": {"type": "string"}, + "group": {"type": "string"}, + "label": {"type": "string"} + }, + "required": ["name"] + } + }, + "families": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "icon": {"type": "string"}, + "label": {"type": "string"}, + "hideFilter": {"type": "boolean"} + }, + "required": ["name"] + } + }, + "groups": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "icon": {"type": "string"}, + "color": {"type": "string"}, + "order": {"type": ["integer", "number"]} + }, + "required": ["name"] + } + }, + "copy": { + "type": "object" + } + } +} From 43ac74c752e7bbd93f788e098091d3b6fcde49ce Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Tue, 30 Mar 2021 17:04:22 +0200 Subject: [PATCH 259/295] created new project schema using new config reference --- schema/project-3.0.json | 59 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) 
create mode 100644 schema/project-3.0.json diff --git a/schema/project-3.0.json b/schema/project-3.0.json new file mode 100644 index 00000000000..d6368d665c7 --- /dev/null +++ b/schema/project-3.0.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:project-3.0", + "description": "A unit of data", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "name", + "data", + "config" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string", + "enum": ["pype:project-3.0"], + "example": "pype:project-3.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["project"], + "example": "project" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Name of directory", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "hulk" + }, + "data": { + "description": "Document metadata", + "type": "object", + "example": { + "fps": 24, + "width": 1920, + "height": 1080 + } + }, + "config": { + "type": "object", + "description": "Document metadata", + "$ref": "config-2.0.json" + } + }, + + "definitions": {} +} From 302a00c9c95a032deb2966f18e7e9f3582b0fdc8 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Tue, 30 Mar 2021 17:04:58 +0200 Subject: [PATCH 260/295] sync to avalon is using new schemas --- pype/modules/ftrack/lib/avalon_sync.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/modules/ftrack/lib/avalon_sync.py b/pype/modules/ftrack/lib/avalon_sync.py index 970a2702906..43e02283c2c 100644 --- a/pype/modules/ftrack/lib/avalon_sync.py +++ b/pype/modules/ftrack/lib/avalon_sync.py @@ -31,9 +31,9 @@ # Current schemas for avalon types EntitySchemas = { - "project": "pype:project-2.1", + "project": "pype:project-3.0", "asset": "pype:asset-3.0", - 
"config": "pype:config-1.1" + "config": "pype:config-2.0" } # Group name of custom attributes From 7a45a1a0e7003c198efa2aa07943b446e5a7b371 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Tue, 30 Mar 2021 17:05:19 +0200 Subject: [PATCH 261/295] anatomy handler can handle not existing projects --- pype/settings/handlers.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pype/settings/handlers.py b/pype/settings/handlers.py index 6e93f2f4050..0a60f6ab2c9 100644 --- a/pype/settings/handlers.py +++ b/pype/settings/handlers.py @@ -610,6 +610,9 @@ def project_doc_to_anatomy_data(self, project_doc): Probably should fill missing keys and values. """ + if not project_doc: + return {} + attributes = {} project_doc_data = project_doc.get("data") or {} for key in self.attribute_keys: From c5b10b9214995e0b3b370cabc1cedfeff449594c Mon Sep 17 00:00:00 2001 From: Petr Kalis <petr.kalis@gmail.com> Date: Tue, 30 Mar 2021 17:47:24 +0200 Subject: [PATCH 262/295] Fix - handle duplication of Task name If Task name is explicitly set in template, it duplicated it here. Task name doesnt need to be in template, but by standard it should be in subset name. This whole replace situation is because of avalon's Creator which modify subset name even if it shouldn't. If Creator app is reworked (could have wide impact!), this should be cleaned up. --- .../harmony/plugins/publish/collect_farm_render.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/pype/hosts/harmony/plugins/publish/collect_farm_render.py b/pype/hosts/harmony/plugins/publish/collect_farm_render.py index 98706ad951a..c283b7f8da4 100644 --- a/pype/hosts/harmony/plugins/publish/collect_farm_render.py +++ b/pype/hosts/harmony/plugins/publish/collect_farm_render.py @@ -124,10 +124,16 @@ def get_instances(self, context): # TODO: handle pixel aspect and frame step # TODO: set Deadline stuff (pools, priority, etc. 
by presets) # because of using 'renderFarm' as a family, replace 'Farm' with - # capitalized task name - subset_name = node.split("/")[1].replace( + # capitalized task name - issue of avalon-core Creator app + subset_name = node.split("/")[1] + task_name = context.data["anatomyData"]["task"].capitalize() + replace_str = "" + if task_name.lower() not in subset_name.lower(): + replace_str = task_name + subset_name = subset_name.replace( 'Farm', - context.data["anatomyData"]["task"].capitalize()) + replace_str) + render_instance = HarmonyRenderInstance( version=version, time=api.time(), From 52b1619b50158baa578607600511c3138c929981 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Tue, 30 Mar 2021 17:52:50 +0200 Subject: [PATCH 263/295] removed env_group_key from application schemas --- .../system_settings/applications.json | 443 +++--------------- .../host_settings/schema_aftereffects.json | 3 +- .../host_settings/schema_blender.json | 3 +- .../host_settings/schema_celaction.json | 3 +- .../host_settings/schema_djv.json | 3 +- .../host_settings/schema_fusion.json | 3 +- .../host_settings/schema_harmony.json | 3 +- .../host_settings/schema_houdini.json | 3 +- .../host_settings/schema_maya.json | 3 +- .../host_settings/schema_mayabatch.json | 3 +- .../host_settings/schema_photoshop.json | 3 +- .../host_settings/schema_resolve.json | 3 +- .../host_settings/schema_shell.json | 3 +- .../host_settings/schema_tvpaint.json | 3 +- .../host_settings/schema_unreal.json | 3 +- .../host_settings/template_host_variant.json | 3 +- .../host_settings/template_nuke.json | 3 +- 17 files changed, 77 insertions(+), 414 deletions(-) diff --git a/pype/settings/defaults/system_settings/applications.json b/pype/settings/defaults/system_settings/applications.json index ea910e125d7..08e7a16599d 100644 --- a/pype/settings/defaults/system_settings/applications.json +++ b/pype/settings/defaults/system_settings/applications.json @@ -16,18 +16,7 @@ "MAYA_DISABLE_CER": "Yes", 
"PYMEL_SKIP_MEL_INIT": "Yes", "LC_ALL": "C", - "PYPE_LOG_NO_COLORS": "Yes", - "__environment_keys__": { - "maya": [ - "PYTHONPATH", - "MAYA_DISABLE_CLIC_IPM", - "MAYA_DISABLE_CIP", - "MAYA_DISABLE_CER", - "PYMEL_SKIP_MEL_INIT", - "LC_ALL", - "PYPE_LOG_NO_COLORS" - ] - } + "PYPE_LOG_NO_COLORS": "Yes" }, "variants": { "maya_2020": { @@ -50,12 +39,7 @@ "linux": [] }, "environment": { - "MAYA_VERSION": "2020", - "__environment_keys__": { - "maya_2020": [ - "MAYA_VERSION" - ] - } + "MAYA_VERSION": "2020" } }, "maya_2019": { @@ -78,12 +62,7 @@ "linux": [] }, "environment": { - "MAYA_VERSION": "2019", - "__environment_keys__": { - "maya_2019": [ - "MAYA_VERSION" - ] - } + "MAYA_VERSION": "2019" } }, "maya_2018": { @@ -106,12 +85,7 @@ "linux": [] }, "environment": { - "MAYA_VERSION": "2018", - "__environment_keys__": { - "maya_2018": [ - "MAYA_VERSION" - ] - } + "MAYA_VERSION": "2018" } } } @@ -134,19 +108,7 @@ "PYMEL_SKIP_MEL_INIT": "Yes", "LC_ALL": "C", "PYPE_LOG_NO_COLORS": "Yes", - "MAYA_TEST": "{MAYA_VERSION}", - "__environment_keys__": { - "mayabatch": [ - "PYTHONPATH", - "MAYA_DISABLE_CLIC_IPM", - "MAYA_DISABLE_CIP", - "MAYA_DISABLE_CER", - "PYMEL_SKIP_MEL_INIT", - "LC_ALL", - "PYPE_LOG_NO_COLORS", - "MAYA_TEST" - ] - } + "MAYA_TEST": "{MAYA_VERSION}" }, "variants": { "mayabatch_2020": { @@ -167,12 +129,7 @@ "linux": [] }, "environment": { - "MAYA_VERSION": "2020", - "__environment_keys__": { - "mayabatch_2020": [ - "MAYA_VERSION" - ] - } + "MAYA_VERSION": "2020" } }, "mayabatch_2019": { @@ -193,12 +150,7 @@ "linux": [] }, "environment": { - "MAYA_VERSION": "2019", - "__environment_keys__": { - "mayabatch_2019": [ - "MAYA_VERSION" - ] - } + "MAYA_VERSION": "2019" } }, "mayabatch_2018": { @@ -219,12 +171,7 @@ "linux": [] }, "environment": { - "MAYA_VERSION": "2018", - "__environment_keys__": { - "mayabatch_2018": [ - "MAYA_VERSION" - ] - } + "MAYA_VERSION": "2018" } } } @@ -243,14 +190,7 @@ "PATH": { "windows": "C:/Program Files (x86)/QuickTime/QTSystem/;{PATH}" }, - 
"LOGLEVEL": "DEBUG", - "__environment_keys__": { - "nuke": [ - "NUKE_PATH", - "PATH", - "LOGLEVEL" - ] - } + "LOGLEVEL": "DEBUG" }, "variants": { "nuke_12-2": { @@ -272,11 +212,7 @@ "darwin": [], "linux": [] }, - "environment": { - "__environment_keys__": { - "nuke_12-2": [] - } - } + "environment": {} }, "nuke_12-0": { "enabled": true, @@ -297,11 +233,7 @@ "darwin": [], "linux": [] }, - "environment": { - "__environment_keys__": { - "nuke_12-0": [] - } - } + "environment": {} }, "nuke_11-3": { "enabled": true, @@ -322,11 +254,7 @@ "darwin": [], "linux": [] }, - "environment": { - "__environment_keys__": { - "nuke_11-3": [] - } - } + "environment": {} }, "nuke_11-2": { "enabled": true, @@ -345,11 +273,7 @@ "darwin": [], "linux": [] }, - "environment": { - "__environment_keys__": { - "nuke_11-2": [] - } - } + "environment": {} } } }, @@ -367,14 +291,7 @@ "PATH": { "windows": "C:/Program Files (x86)/QuickTime/QTSystem/;{PATH}" }, - "LOGLEVEL": "DEBUG", - "__environment_keys__": { - "nukex": [ - "NUKE_PATH", - "PATH", - "LOGLEVEL" - ] - } + "LOGLEVEL": "DEBUG" }, "variants": { "nukex_12-2": { @@ -402,11 +319,7 @@ "--nukex" ] }, - "environment": { - "__environment_keys__": { - "nukex_12-2": [] - } - } + "environment": {} }, "nukex_12-0": { "enabled": true, @@ -433,11 +346,7 @@ "--nukex" ] }, - "environment": { - "__environment_keys__": { - "nukex_12-0": [] - } - } + "environment": {} }, "nukex_11-3": { "enabled": true, @@ -464,11 +373,7 @@ "--nukex" ] }, - "environment": { - "__environment_keys__": { - "nukex_11-3": [] - } - } + "environment": {} }, "nukex_11-2": { "enabled": true, @@ -493,11 +398,7 @@ "--nukex" ] }, - "environment": { - "__environment_keys__": { - "nukex_11-2": [] - } - } + "environment": {} } } }, @@ -515,16 +416,7 @@ }, "WORKFILES_STARTUP": "0", "TAG_ASSETBUILD_STARTUP": "0", - "LOGLEVEL": "DEBUG", - "__environment_keys__": { - "nukestudio": [ - "HIERO_PLUGIN_PATH", - "PATH", - "WORKFILES_STARTUP", - "TAG_ASSETBUILD_STARTUP", - "LOGLEVEL" - ] - } + 
"LOGLEVEL": "DEBUG" }, "variants": { "nukestudio_12-2": { @@ -552,11 +444,7 @@ "--studio" ] }, - "environment": { - "__environment_keys__": { - "nukestudio_12-2": [] - } - } + "environment": {} }, "nukestudio_12-0": { "enabled": true, @@ -583,11 +471,7 @@ "--studio" ] }, - "environment": { - "__environment_keys__": { - "nukestudio_12-0": [] - } - } + "environment": {} }, "nukestudio_11-3": { "enabled": true, @@ -614,11 +498,7 @@ "--studio" ] }, - "environment": { - "__environment_keys__": { - "nukestudio_11-3": [] - } - } + "environment": {} }, "nukestudio_11-2": { "enabled": true, @@ -641,11 +521,7 @@ "--studio" ] }, - "environment": { - "__environment_keys__": { - "nukestudio_11-2": [] - } - } + "environment": {} } } }, @@ -663,16 +539,7 @@ }, "WORKFILES_STARTUP": "0", "TAG_ASSETBUILD_STARTUP": "0", - "LOGLEVEL": "DEBUG", - "__environment_keys__": { - "hiero": [ - "HIERO_PLUGIN_PATH", - "PATH", - "WORKFILES_STARTUP", - "TAG_ASSETBUILD_STARTUP", - "LOGLEVEL" - ] - } + "LOGLEVEL": "DEBUG" }, "variants": { "hiero_12-2": { @@ -700,11 +567,7 @@ "--hiero" ] }, - "environment": { - "__environment_keys__": { - "hiero_12-2": [] - } - } + "environment": {} }, "hiero_12-0": { "enabled": true, @@ -731,11 +594,7 @@ "--hiero" ] }, - "environment": { - "__environment_keys__": { - "hiero_12-0": [] - } - } + "environment": {} }, "hiero_11-3": { "enabled": true, @@ -762,11 +621,7 @@ "--hiero" ] }, - "environment": { - "__environment_keys__": { - "hiero_11-3": [] - } - } + "environment": {} }, "hiero_11-2": { "enabled": true, @@ -791,11 +646,7 @@ "--hiero" ] }, - "environment": { - "__environment_keys__": { - "hiero_11-2": [] - } - } + "environment": {} } } }, @@ -826,17 +677,7 @@ "{PYTHON36}/Scripts", "{PATH}" ], - "PYPE_LOG_NO_COLORS": "Yes", - "__environment_keys__": { - "fusion": [ - "FUSION_UTILITY_SCRIPTS_SOURCE_DIR", - "FUSION_UTILITY_SCRIPTS_DIR", - "PYTHON36", - "PYTHONPATH", - "PATH", - "PYPE_LOG_NO_COLORS" - ] - } + "PYPE_LOG_NO_COLORS": "Yes" }, "variants": { 
"fusion_16": { @@ -854,11 +695,7 @@ "darwin": [], "linux": [] }, - "environment": { - "__environment_keys__": { - "fusion_16": [] - } - } + "environment": {} }, "fusion_9": { "enabled": true, @@ -877,11 +714,7 @@ "darwin": [], "linux": [] }, - "environment": { - "__environment_keys__": { - "fusion_9": [] - } - } + "environment": {} } } }, @@ -926,21 +759,7 @@ ], "PRE_PYTHON_SCRIPT": "{PYPE_ROOT}/pype/resolve/preload_console.py", "PYPE_LOG_NO_COLORS": "True", - "RESOLVE_DEV": "True", - "__environment_keys__": { - "resolve": [ - "RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR", - "RESOLVE_SCRIPT_API", - "RESOLVE_SCRIPT_LIB", - "RESOLVE_UTILITY_SCRIPTS_DIR", - "PYTHON36_RESOLVE", - "PYTHONPATH", - "PATH", - "PRE_PYTHON_SCRIPT", - "PYPE_LOG_NO_COLORS", - "RESOLVE_DEV" - ] - } + "RESOLVE_DEV": "True" }, "variants": { "resolve_16": { @@ -960,11 +779,7 @@ "darwin": [], "linux": [] }, - "environment": { - "__environment_keys__": { - "resolve_16": [] - } - } + "environment": {} } } }, @@ -983,12 +798,6 @@ "darwin": "{PYPE_ROOT}/pype/hosts/houdini/startup:&", "linux": "{PYPE_ROOT}/pype/hosts/houdini/startup:&", "windows": "{PYPE_ROOT}/pype/hosts/houdini/startup;&" - }, - "__environment_keys__": { - "houdini": [ - "HOUDINI_PATH", - "HOUDINI_MENU_PATH" - ] } }, "variants": { @@ -1009,11 +818,7 @@ "darwin": [], "linux": [] }, - "environment": { - "__environment_keys__": { - "houdini_18-5": [] - } - } + "environment": {} }, "houdini_18": { "enabled": true, @@ -1030,11 +835,7 @@ "darwin": [], "linux": [] }, - "environment": { - "__environment_keys__": { - "houdini_18": [] - } - } + "environment": {} }, "houdini_17": { "enabled": true, @@ -1051,11 +852,7 @@ "darwin": [], "linux": [] }, - "environment": { - "__environment_keys__": { - "houdini_17": [] - } - } + "environment": {} } } }, @@ -1070,14 +867,7 @@ "{PYPE_REPOS_ROOT}/avalon-core/setup/blender", "{PYTHONPATH}" ], - "CREATE_NEW_CONSOLE": "yes", - "__environment_keys__": { - "blender": [ - "BLENDER_USER_SCRIPTS", - "PYTHONPATH", - 
"CREATE_NEW_CONSOLE" - ] - } + "CREATE_NEW_CONSOLE": "yes" }, "variants": { "blender_2-83": { @@ -1103,11 +893,7 @@ "--python-use-system-env" ] }, - "environment": { - "__environment_keys__": { - "blender_2-83": [] - } - } + "environment": {} }, "blender_2-90": { "enabled": true, @@ -1132,11 +918,7 @@ "--python-use-system-env" ] }, - "environment": { - "__environment_keys__": { - "blender_2-90": [] - } - } + "environment": {} } } }, @@ -1147,13 +929,7 @@ "host_name": "harmony", "environment": { "AVALON_HARMONY_WORKFILES_ON_LAUNCH": "1", - "LIB_OPENHARMONY_PATH": "{PYPE_ROOT}/pype/vendor/OpenHarmony", - "__environment_keys__": { - "harmony": [ - "AVALON_HARMONY_WORKFILES_ON_LAUNCH", - "LIB_OPENHARMONY_PATH" - ] - } + "LIB_OPENHARMONY_PATH": "{PYPE_ROOT}/pype/vendor/OpenHarmony" }, "variants": { "harmony_20": { @@ -1171,11 +947,7 @@ "darwin": [], "linux": [] }, - "environment": { - "__environment_keys__": { - "harmony_20": [] - } - } + "environment": {} }, "harmony_17": { "enabled": true, @@ -1194,11 +966,7 @@ "darwin": [], "linux": [] }, - "environment": { - "__environment_keys__": { - "harmony_17": [] - } - } + "environment": {} } } }, @@ -1208,12 +976,7 @@ "icon": "{}/app_icons/tvpaint.png", "host_name": "tvpaint", "environment": { - "PYPE_LOG_NO_COLORS": "True", - "__environment_keys__": { - "tvpaint": [ - "PYPE_LOG_NO_COLORS" - ] - } + "PYPE_LOG_NO_COLORS": "True" }, "variants": { "tvpaint_animation_11-64bits": { @@ -1233,11 +996,7 @@ "darwin": [], "linux": [] }, - "environment": { - "__environment_keys__": { - "tvpaint_animation_11-64bits": [] - } - } + "environment": {} }, "tvpaint_animation_11-32bits": { "enabled": true, @@ -1256,11 +1015,7 @@ "darwin": [], "linux": [] }, - "environment": { - "__environment_keys__": { - "tvpaint_animation_11-32bits": [] - } - } + "environment": {} } } }, @@ -1273,15 +1028,7 @@ "AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH": "1", "PYPE_LOG_NO_COLORS": "Yes", "WEBSOCKET_URL": "ws://localhost:8099/ws/", - "WORKFILES_SAVE_AS": "Yes", - 
"__environment_keys__": { - "photoshop": [ - "AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH", - "PYPE_LOG_NO_COLORS", - "WEBSOCKET_URL", - "WORKFILES_SAVE_AS" - ] - } + "WORKFILES_SAVE_AS": "Yes" }, "variants": { "photoshop_2020": { @@ -1301,11 +1048,7 @@ "darwin": [], "linux": [] }, - "environment": { - "__environment_keys__": { - "photoshop_2020": [] - } - } + "environment": {} }, "photoshop_2021": { "enabled": true, @@ -1324,11 +1067,7 @@ "darwin": [], "linux": [] }, - "environment": { - "__environment_keys__": { - "photoshop_2021": [] - } - } + "environment": {} } } }, @@ -1341,15 +1080,7 @@ "AVALON_AFTEREFFECTS_WORKFILES_ON_LAUNCH": "1", "PYPE_LOG_NO_COLORS": "Yes", "WEBSOCKET_URL": "ws://localhost:8097/ws/", - "WORKFILES_SAVE_AS": "Yes", - "__environment_keys__": { - "aftereffects": [ - "AVALON_AFTEREFFECTS_WORKFILES_ON_LAUNCH", - "PYPE_LOG_NO_COLORS", - "WEBSOCKET_URL", - "WORKFILES_SAVE_AS" - ] - } + "WORKFILES_SAVE_AS": "Yes" }, "variants": { "aftereffects_2020": { @@ -1369,11 +1100,7 @@ "darwin": [], "linux": [] }, - "environment": { - "__environment_keys__": { - "aftereffects_2020": [] - } - } + "environment": {} }, "aftereffects_2021": { "enabled": true, @@ -1392,11 +1119,7 @@ "darwin": [], "linux": [] }, - "environment": { - "__environment_keys__": { - "aftereffects_2021": [] - } - } + "environment": {} } } }, @@ -1406,12 +1129,7 @@ "icon": "app_icons/celaction.png", "host_name": "celaction", "environment": { - "CELACTION_TEMPLATE": "{PYPE_ROOT}/pype/hosts/celaction/celaction_template_scene.scn", - "__environment_keys__": { - "celaction": [ - "CELACTION_TEMPLATE" - ] - } + "CELACTION_TEMPLATE": "{PYPE_ROOT}/pype/hosts/celaction/celaction_template_scene.scn" }, "variants": { "celation_Local": { @@ -1425,11 +1143,7 @@ "darwin": [], "linux": [] }, - "environment": { - "__environment_keys__": { - "celation_Local": [] - } - } + "environment": {} } } }, @@ -1441,14 +1155,7 @@ "environment": { "AVALON_UNREAL_PLUGIN": "{PYPE_REPOS_ROOT}/avalon-unreal-integration", 
"PYPE_LOG_NO_COLORS": "True", - "QT_PREFERRED_BINDING": "PySide", - "__environment_keys__": { - "unreal": [ - "AVALON_UNREAL_PLUGIN", - "PYPE_LOG_NO_COLORS", - "QT_PREFERRED_BINDING" - ] - } + "QT_PREFERRED_BINDING": "PySide" }, "variants": { "unreal_4-24": { @@ -1466,21 +1173,13 @@ "darwin": [], "linux": [] }, - "environment": { - "__environment_keys__": { - "unreal_4-24": [] - } - } + "environment": {} } } }, "shell": { "enabled": true, - "environment": { - "__environment_keys__": { - "shell": [] - } - }, + "environment": {}, "variants": { "python_python_3-7": { "enabled": true, @@ -1497,11 +1196,7 @@ "darwin": [], "linux": [] }, - "environment": { - "__environment_keys__": { - "python_python_3-7": [] - } - } + "environment": {} }, "python_python_2-7": { "enabled": true, @@ -1518,11 +1213,7 @@ "darwin": [], "linux": [] }, - "environment": { - "__environment_keys__": { - "python_python_2-7": [] - } - } + "environment": {} }, "terminal_terminal": { "enabled": true, @@ -1539,11 +1230,7 @@ "darwin": [], "linux": [] }, - "environment": { - "__environment_keys__": { - "terminal_terminal": [] - } - } + "environment": {} } } }, @@ -1552,11 +1239,7 @@ "label": "DJV View", "icon": "{}/app_icons/djvView.png", "host_name": "", - "environment": { - "__environment_keys__": { - "djvview": [] - } - }, + "environment": {}, "variants": { "djvview_1-1": { "enabled": true, @@ -1573,11 +1256,7 @@ "darwin": [], "linux": [] }, - "environment": { - "__environment_keys__": { - "djvview_1-1": [] - } - } + "environment": {} } } } diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_aftereffects.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_aftereffects.json index 6e1ba352fc6..cd080bf0f2b 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_aftereffects.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_aftereffects.json @@ -17,8 +17,7 @@ { "key": "environment", "label": "Environment", - 
"type": "raw-json", - "env_group_key": "aftereffects" + "type": "raw-json" }, { "type": "dict", diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_blender.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_blender.json index 5030f8280f9..2501e94b50e 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_blender.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_blender.json @@ -17,8 +17,7 @@ { "key": "environment", "label": "Environment", - "type": "raw-json", - "env_group_key": "blender" + "type": "raw-json" }, { "type": "dict", diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_celaction.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_celaction.json index c5fe824f943..fbdad62a92e 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_celaction.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_celaction.json @@ -17,8 +17,7 @@ { "key": "environment", "label": "Environment", - "type": "raw-json", - "env_group_key": "celaction" + "type": "raw-json" }, { "type": "dict", diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_djv.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_djv.json index 3f3af3585a8..381437d4ff4 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_djv.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_djv.json @@ -17,8 +17,7 @@ { "key": "environment", "label": "Environment", - "type": "raw-json", - "env_group_key": "djvview" + "type": "raw-json" }, { "type": "dict", diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_fusion.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_fusion.json index d693c39ffef..8661916d064 100644 --- 
a/pype/settings/entities/schemas/system_schema/host_settings/schema_fusion.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_fusion.json @@ -17,8 +17,7 @@ { "key": "environment", "label": "Environment", - "type": "raw-json", - "env_group_key": "fusion" + "type": "raw-json" }, { "type": "dict", diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_harmony.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_harmony.json index 8ad07c95ba3..7c59b0febdf 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_harmony.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_harmony.json @@ -17,8 +17,7 @@ { "key": "environment", "label": "Environment", - "type": "raw-json", - "env_group_key": "harmony" + "type": "raw-json" }, { "type": "dict", diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_houdini.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_houdini.json index 7698cb4cc18..70e06d170d6 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_houdini.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_houdini.json @@ -17,8 +17,7 @@ { "key": "environment", "label": "Environment", - "type": "raw-json", - "env_group_key": "houdini" + "type": "raw-json" }, { "type": "dict", diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_maya.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_maya.json index d8396b16cb2..07c8aa01060 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_maya.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_maya.json @@ -17,8 +17,7 @@ { "key": "environment", "label": "Environment", - "type": "raw-json", - "env_group_key": "maya" + "type": "raw-json" }, { "type": "dict", diff --git 
a/pype/settings/entities/schemas/system_schema/host_settings/schema_mayabatch.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_mayabatch.json index af7cc3d301d..bea59656afd 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_mayabatch.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_mayabatch.json @@ -17,8 +17,7 @@ { "key": "environment", "label": "Environment", - "type": "raw-json", - "env_group_key": "mayabatch" + "type": "raw-json" }, { "type": "dict", diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_photoshop.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_photoshop.json index a8e3574aa33..6f67e29df27 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_photoshop.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_photoshop.json @@ -17,8 +17,7 @@ { "key": "environment", "label": "Environment", - "type": "raw-json", - "env_group_key": "photoshop" + "type": "raw-json" }, { "type": "dict", diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_resolve.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_resolve.json index 052a9354100..644e3046cea 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_resolve.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_resolve.json @@ -17,8 +17,7 @@ { "key": "environment", "label": "Environment", - "type": "raw-json", - "env_group_key": "resolve" + "type": "raw-json" }, { "type": "dict", diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_shell.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_shell.json index 3288fe2ffb8..f2f9376029c 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_shell.json +++ 
b/pype/settings/entities/schemas/system_schema/host_settings/schema_shell.json @@ -13,8 +13,7 @@ { "key": "environment", "label": "Environment", - "type": "raw-json", - "env_group_key": "shell" + "type": "raw-json" }, { "type": "dict", diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_tvpaint.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_tvpaint.json index a3cc6188ac0..fa28c4448c9 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_tvpaint.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_tvpaint.json @@ -17,8 +17,7 @@ { "key": "environment", "label": "Environment", - "type": "raw-json", - "env_group_key": "tvpaint" + "type": "raw-json" }, { "type": "dict", diff --git a/pype/settings/entities/schemas/system_schema/host_settings/schema_unreal.json b/pype/settings/entities/schemas/system_schema/host_settings/schema_unreal.json index c79f08b71a9..e9d1b681308 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/schema_unreal.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/schema_unreal.json @@ -17,8 +17,7 @@ { "key": "environment", "label": "Environment", - "type": "raw-json", - "env_group_key": "unreal" + "type": "raw-json" }, { "type": "dict", diff --git a/pype/settings/entities/schemas/system_schema/host_settings/template_host_variant.json b/pype/settings/entities/schemas/system_schema/host_settings/template_host_variant.json index c809891b307..cf43dca6b5b 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/template_host_variant.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/template_host_variant.json @@ -77,8 +77,7 @@ { "key": "environment", "label": "Environment", - "type": "raw-json", - "env_group_key": "{app_name}_{app_variant}" + "type": "raw-json" } ] } diff --git a/pype/settings/entities/schemas/system_schema/host_settings/template_nuke.json 
b/pype/settings/entities/schemas/system_schema/host_settings/template_nuke.json index c86c2aef61c..d99e0b9a85a 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/template_nuke.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/template_nuke.json @@ -18,8 +18,7 @@ { "key": "environment", "label": "Environment", - "type": "raw-json", - "env_group_key": "{nuke_type}" + "type": "raw-json" }, { "type": "dict", From 6fa65a9bf8b26d544e6ab6c08d3ab1373f3fccff Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Tue, 30 Mar 2021 19:29:17 +0200 Subject: [PATCH 264/295] removed previous implementation of Application object --- pype/lib/applications.py | 88 ---------------------------------------- 1 file changed, 88 deletions(-) diff --git a/pype/lib/applications.py b/pype/lib/applications.py index 4b2cab99aa4..a006538db15 100644 --- a/pype/lib/applications.py +++ b/pype/lib/applications.py @@ -235,94 +235,6 @@ def exists(self): return bool(self._realpath()) -class Application: - """Hold information about application. - - Object by itself does nothing special. - - Args: - app_group (str): App group name. - e.g. "maya", "nuke", "photoshop", etc. - app_name (str): Specific version (or variant) of host. - e.g. "maya2020", "nuke11.3", etc. - host_name (str): Name of host implementation. - app_data (dict): Data for the version containing information about - executables, label, variant label, icon or if is enabled. - Only required key is `executables`. - manager (ApplicationManager): Application manager that created object. 
- """ - - def __init__(self, app_group, app_name, host_name, app_data, manager): - self.app_group = app_group - self.app_name = app_name - self.host_name = host_name - self.app_data = app_data - self.manager = manager - - self.label = app_data.get("label") or app_name - self.variant_label = app_data.get("variant_label") or None - self.icon = app_data.get("icon") or None - self.enabled = app_data.get("enabled", True) - self.is_host = app_data.get("is_host", False) - - _executables = app_data["executables"] - if not _executables: - _executables = [] - - elif isinstance(_executables, dict): - _executables = _executables.get(platform.system().lower()) or [] - - _arguments = app_data["arguments"] - if not _arguments: - _arguments = [] - - elif isinstance(_arguments, dict): - _arguments = _arguments.get(platform.system().lower()) or [] - - executables = [] - for executable in _executables: - executables.append(ApplicationExecutable(executable)) - - self.executables = executables - self.arguments = _arguments - - @property - def full_label(self): - """Full label of application. - - Concatenate `label` and `variant_label` attributes if `variant_label` - is set. - """ - if self.variant_label: - return "{} {}".format(self.label, self.variant_label) - return str(self.label) - - def find_executable(self): - """Try to find existing executable for application. - - Returns (str): Path to executable from `executables` or None if any - exists. - """ - for executable in self.executables: - if executable.exists(): - return executable - return None - - def launch(self, *args, **kwargs): - """Launch the application. - - For this purpose is used manager's launch method to keep logic at one - place. - - Arguments must match with manager's launch method. That's why *args - **kwargs are used. - - Returns: - subprocess.Popen: Return executed process as Popen object. 
- """ - return self.manager.launch(self.app_name, *args, **kwargs) - - @six.add_metaclass(ABCMeta) class LaunchHook: """Abstract base class of launch hook.""" From dcdbae5156efa4717814f040c58537b2b90734d1 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Tue, 30 Mar 2021 19:30:04 +0200 Subject: [PATCH 265/295] implemented application group class it's object holds environments --- pype/lib/applications.py | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/pype/lib/applications.py b/pype/lib/applications.py index a006538db15..e2596cbacb0 100644 --- a/pype/lib/applications.py +++ b/pype/lib/applications.py @@ -90,6 +90,37 @@ class ApplicationLaunchFailed(Exception): pass +class ApplicationGroup: + def __init__(self, name, data, manager): + self.name = name + self.manager = manager + self._data = data + + self.enabled = data.get("enabled", True) + self.label = data.get("label") or name + self.icon = data.get("icon") or None + self._environment = data.get("environment") or {} + + host_name = data.get("host_name", None) + self.is_host = host_name is not None + self.host_name = host_name + + variants = data.get("variants") or {} + + self.variants = variants + + def __repr__(self): + return "<{}> - {}".format(self.__class__.__name__, self.name) + + def __iter__(self): + for variant in self.variants.values(): + yield variant + + @property + def environment(self): + return copy.deepcopy(self._environment) + + class ApplicationManager: def __init__(self): self.log = PypeLogger().get_logger(self.__class__.__name__) From 68954e034e4edd24b060b84d938839cdf8a44d37 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Tue, 30 Mar 2021 19:37:07 +0200 Subject: [PATCH 266/295] implemented new Application class which use more data from group and keeps only it's environments --- pype/lib/applications.py | 75 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/pype/lib/applications.py 
b/pype/lib/applications.py index e2596cbacb0..3f573ba6c69 100644 --- a/pype/lib/applications.py +++ b/pype/lib/applications.py @@ -121,6 +121,81 @@ def environment(self): return copy.deepcopy(self._environment) +class Application: + """Hold information about application. + + Object by itself does nothing special. + + Args: + name (str): Specific version (or variant) of application. + e.g. "maya2020", "nuke11.3", etc. + data (dict): Data for the version containing information about + executables, variant label or if is enabled. + Only required key is `executables`. + group (ApplicationGroup): App group object that created the applicaiton + and under which application belongs. + """ + + def __init__(self, name, data, group): + self.name = name + self.group = group + self._data = data + + enabled = False + if group.enabled: + enabled = data.get("enabled", True) + self.enabled = enabled + + self.label = data.get("variant_label") or name + self.full_name = "/".join((group.name, name)) + self.full_label = " ".join((group.label, self.label)) + self._environment = data.get("environment") or {} + + _executables = data["executables"] + if not _executables: + _executables = [] + + elif isinstance(_executables, dict): + _executables = _executables.get(platform.system().lower()) or [] + + _arguments = data["arguments"] + if not _arguments: + _arguments = [] + + elif isinstance(_arguments, dict): + _arguments = _arguments.get(platform.system().lower()) or [] + + executables = [] + for executable in _executables: + executables.append(ApplicationExecutable(executable)) + + self.executables = executables + self.arguments = _arguments + + def __repr__(self): + return "<{}> - {}".format(self.__class__.__name__, self.full_name) + + @property + def environment(self): + return copy.deepcopy(self._environment) + + @property + def manager(self): + return self.group.manager + + @property + def host_name(self): + return self.group.host_name + + @property + def icon(self): + return 
self.group.icon + + @property + def is_host(self): + return self.group.is_host + + class ApplicationManager: def __init__(self): self.log = PypeLogger().get_logger(self.__class__.__name__) From be66e4cf70ad85df2b7aca47f5d3f9e950f1d3f3 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Tue, 30 Mar 2021 19:37:23 +0200 Subject: [PATCH 267/295] added docstring to ApplicationGroup --- pype/lib/applications.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/pype/lib/applications.py b/pype/lib/applications.py index 3f573ba6c69..efe8d5f73e7 100644 --- a/pype/lib/applications.py +++ b/pype/lib/applications.py @@ -91,6 +91,22 @@ class ApplicationLaunchFailed(Exception): class ApplicationGroup: + """Hold information about application group. + + Application group wraps different versions(variants) of application. + e.g. "maya" is group and "maya_2020" is variant. + + Group hold `host_name` which is implementation name used in pype. Also + holds `enabled` if whole app group is enabled or `icon` for application + icon path in resources. + + Group has also `environment` which hold same environments for all variants. + + Args: + name (str): Groups' name. + data (dict): Group defying data loaded from settings. + manager (ApplicationManager): Manager that created the group. 
+ """ def __init__(self, name, data, manager): self.name = name self.manager = manager From f5a046bbcbf07b2100faa9eda5de0331091f2086 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Tue, 30 Mar 2021 19:37:43 +0200 Subject: [PATCH 268/295] ApplicationGroup creates objects of Application class --- pype/lib/applications.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pype/lib/applications.py b/pype/lib/applications.py index efe8d5f73e7..77dc62f8c9a 100644 --- a/pype/lib/applications.py +++ b/pype/lib/applications.py @@ -4,6 +4,7 @@ import json import platform import getpass +import collections import inspect import subprocess import distutils.spawn @@ -122,6 +123,10 @@ def __init__(self, name, data, manager): self.host_name = host_name variants = data.get("variants") or {} + for variant_name, variant_data in variants.items(): + variants[variant_name] = Application( + variant_name, variant_data, self + ) self.variants = variants From aa0b3211fc4b27837beef7f1c08429168676b88a Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Tue, 30 Mar 2021 19:40:29 +0200 Subject: [PATCH 269/295] ApplicationManager is using new classes to get applications --- pype/lib/applications.py | 40 +++++++++------------------------------- 1 file changed, 9 insertions(+), 31 deletions(-) diff --git a/pype/lib/applications.py b/pype/lib/applications.py index 77dc62f8c9a..f53194ea722 100644 --- a/pype/lib/applications.py +++ b/pype/lib/applications.py @@ -221,6 +221,7 @@ class ApplicationManager: def __init__(self): self.log = PypeLogger().get_logger(self.__class__.__name__) + self.app_groups = {} self.applications = {} self.tools = {} @@ -228,42 +229,19 @@ def __init__(self): def refresh(self): """Refresh applications from settings.""" + self.app_groups.clear() self.applications.clear() self.tools.clear() settings = get_system_settings() - hosts_definitions = settings["applications"] - for app_group, variant_definitions in 
hosts_definitions.items(): - enabled = variant_definitions["enabled"] - label = variant_definitions.get("label") or app_group - variants = variant_definitions.get("variants") or {} - icon = variant_definitions.get("icon") - group_host_name = variant_definitions.get("host_name") or None - for app_name, app_data in variants.items(): - if app_name in self.applications: - raise AssertionError(( - "BUG: Duplicated application name in settings \"{}\"" - ).format(app_name)) - - # If host is disabled then disable all variants - if not enabled: - app_data["enabled"] = enabled - - # Pass label from host definition - if not app_data.get("label"): - app_data["label"] = label - - if not app_data.get("icon"): - app_data["icon"] = icon - - host_name = app_data.get("host_name") or group_host_name - - app_data["is_host"] = host_name is not None - - self.applications[app_name] = Application( - app_group, app_name, host_name, app_data, self - ) + app_defs = settings["applications"] + for group_name, variant_defs in app_defs.items(): + group = ApplicationGroup(group_name, variant_defs, self) + self.app_groups[group_name] = group + for app in group: + # TODO This should be replaced with `full_name` in future + self.applications[app.name] = app tools_definitions = settings["tools"]["tool_groups"] for tool_group_name, tool_group_data in tools_definitions.items(): From b6b1ab4b3a1257ca74c61c33276ee16fced6b29f Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Tue, 30 Mar 2021 19:40:43 +0200 Subject: [PATCH 270/295] reimplemented require Application methods --- pype/lib/applications.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/pype/lib/applications.py b/pype/lib/applications.py index f53194ea722..161198a85de 100644 --- a/pype/lib/applications.py +++ b/pype/lib/applications.py @@ -216,6 +216,31 @@ def icon(self): def is_host(self): return self.group.is_host + def find_executable(self): + """Try to find existing executable for 
application. + + Returns (str): Path to executable from `executables` or None if any + exists. + """ + for executable in self.executables: + if executable.exists(): + return executable + return None + + def launch(self, *args, **kwargs): + """Launch the application. + + For this purpose is used manager's launch method to keep logic at one + place. + + Arguments must match with manager's launch method. That's why *args + **kwargs are used. + + Returns: + subprocess.Popen: Return executed process as Popen object. + """ + return self.manager.launch(self.name, *args, **kwargs) + class ApplicationManager: def __init__(self): From a507d78f9c19bf481333a0e85283ebabae0d362c Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Tue, 30 Mar 2021 19:43:05 +0200 Subject: [PATCH 271/295] fixed attributes usage --- pype/hooks/pre_global_host_data.py | 2 +- pype/lib/applications.py | 13 +++++++------ .../event_handlers_user/action_applications.py | 4 ++-- pype/tools/launcher/models.py | 6 +++--- 4 files changed, 13 insertions(+), 12 deletions(-) diff --git a/pype/hooks/pre_global_host_data.py b/pype/hooks/pre_global_host_data.py index 74be2083674..876c5840e70 100644 --- a/pype/hooks/pre_global_host_data.py +++ b/pype/hooks/pre_global_host_data.py @@ -32,7 +32,7 @@ def execute(self): "project_name": self.data["project_name"], "asset_name": self.data["asset_name"], "task_name": self.data["task_name"], - "app_name": app.app_name, + "app": app, "dbcon": self.data["dbcon"], diff --git a/pype/lib/applications.py b/pype/lib/applications.py index 161198a85de..36d37227016 100644 --- a/pype/lib/applications.py +++ b/pype/lib/applications.py @@ -418,7 +418,7 @@ def class_validation(cls, launch_context): return False if cls.app_groups: - if launch_context.app_group not in cls.app_groups: + if launch_context.app_group.name not in cls.app_groups: return False if cls.app_names: @@ -445,11 +445,11 @@ def host_name(self): @property def app_group(self): - return 
getattr(self.application, "app_group", None) + return getattr(self.application, "group", None) @property def app_name(self): - return getattr(self.application, "app_name", None) + return getattr(self.application, "name", None) def validate(self): """Optional validation of launch hook on initialization. @@ -718,7 +718,7 @@ def discover_launch_hooks(self, force=False): @property def app_name(self): - return self.application.app_name + return self.application.name @property def host_name(self): @@ -726,7 +726,7 @@ def host_name(self): @property def app_group(self): - return self.application.app_group + return self.application.group @property def manager(self): @@ -1060,7 +1060,8 @@ def prepare_context_environments(data): "AVALON_ASSET": asset_doc["name"], "AVALON_TASK": task_name, "AVALON_APP": app.host_name, - "AVALON_APP_NAME": app.app_name, + # TODO this hould be `app.full_name` in future PRs + "AVALON_APP_NAME": app.name, "AVALON_WORKDIR": workdir } log.debug( diff --git a/pype/modules/ftrack/event_handlers_user/action_applications.py b/pype/modules/ftrack/event_handlers_user/action_applications.py index 5b6657793ac..2c42cadfb75 100644 --- a/pype/modules/ftrack/event_handlers_user/action_applications.py +++ b/pype/modules/ftrack/event_handlers_user/action_applications.py @@ -133,8 +133,8 @@ def discover(self, session, entities, event): app_icon = None items.append({ - "label": app.label, - "variant": app.variant_label, + "label": app.group.label, + "variant": app.label, "description": None, "actionIdentifier": self.identifier + app_name, "icon": app_icon diff --git a/pype/tools/launcher/models.py b/pype/tools/launcher/models.py index d1742014ef8..c617c7cace8 100644 --- a/pype/tools/launcher/models.py +++ b/pype/tools/launcher/models.py @@ -162,9 +162,9 @@ def get_application_actions(self): (ApplicationAction,), { "application": app, - "name": app.app_name, - "label": app.label, - "label_variant": app.variant_label, + "name": app.name, + "label": app.group.label, + 
"label_variant": app.label, "group": None, "icon": app.icon, "color": getattr(app, "color", None), From 9a18da32ac8ccab1cff76392e861c2208d8bf7ae Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Tue, 30 Mar 2021 19:50:33 +0200 Subject: [PATCH 272/295] Implemented EnvironmentToolGroup to hold tool group --- pype/lib/applications.py | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/pype/lib/applications.py b/pype/lib/applications.py index 36d37227016..56432ec3574 100644 --- a/pype/lib/applications.py +++ b/pype/lib/applications.py @@ -314,6 +314,46 @@ def launch(self, app_name, **data): return context.launch() +class EnvironmentToolGroup: + """Hold information about environment tool group. + + Environment tool group may hold different variants of same tool and set + environments that are same for all of them. + + e.g. "mtoa" may have different versions but all environments except one + are same. + + Args: + name (str): Name of the tool group. + data (dict): Group's information with it's variants. + manager (ApplicationManager): Manager that creates the group. + """ + + def __init__(self, name, data, manager): + self.name = name + self._data = data + self.manager = manager + self._environment = data["environment"] + + variants = data.get("variants") or {} + variants_by_name = {} + for variant_name, variant_env in variants.items(): + tool = EnvironmentTool(variant_name, variant_env, self) + variants_by_name[variant_name] = tool + self.variants = variants_by_name + + def __repr__(self): + return "<{}> - {}".format(self.__class__.__name__, self.name) + + def __iter__(self): + for variant in self.variants.values(): + yield variant + + @property + def environment(self): + return copy.deepcopy(self._environment) + + class ApplicationTool: """Hold information about application tool. 
From ed148c7c1135145d8b855e2f475e239d1de12c4a Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Tue, 30 Mar 2021 19:51:09 +0200 Subject: [PATCH 273/295] renamed ApplicationTool to EnvironmentTool and changed what data holds --- pype/lib/applications.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/pype/lib/applications.py b/pype/lib/applications.py index 56432ec3574..5b0e61b9094 100644 --- a/pype/lib/applications.py +++ b/pype/lib/applications.py @@ -354,23 +354,29 @@ def environment(self): return copy.deepcopy(self._environment) -class ApplicationTool: +class EnvironmentTool: """Hold information about application tool. Structure of tool information. Args: - tool_name (str): Name of the tool. - group_name (str): Name of group which wraps tool. + name (str): Name of the tool. + environment (dict): Variant environments. + group (str): Name of group which wraps tool. """ - def __init__(self, tool_name, group_name): - self.name = tool_name - self.group_name = group_name + def __init__(self, name, environment, group): + self.name = name + self.group = group + self._environment = environment + self.full_name = "/".join((group.name, name)) + + def __repr__(self): + return "<{}> - {}".format(self.__class__.__name__, self.full_name) @property - def full_name(self): - return "/".join((self.group_name, self.name)) + def environment(self): + return copy.deepcopy(self._environment) class ApplicationExecutable: From 19313b06bdc2722eab88571921055cb6a357510b Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Tue, 30 Mar 2021 19:51:30 +0200 Subject: [PATCH 274/295] ApplicationManager use new tools classes to define it's tools --- pype/lib/applications.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/pype/lib/applications.py b/pype/lib/applications.py index 5b0e61b9094..37021e0a5c6 100644 --- a/pype/lib/applications.py +++ b/pype/lib/applications.py @@ -248,6 
+248,7 @@ def __init__(self): self.app_groups = {} self.applications = {} + self.tool_groups = {} self.tools = {} self.refresh() @@ -256,6 +257,7 @@ def refresh(self): """Refresh applications from settings.""" self.app_groups.clear() self.applications.clear() + self.tool_groups.clear() self.tools.clear() settings = get_system_settings() @@ -270,13 +272,13 @@ def refresh(self): tools_definitions = settings["tools"]["tool_groups"] for tool_group_name, tool_group_data in tools_definitions.items(): - tool_variants = tool_group_data.get("variants") or {} - for tool_name, tool_data in tool_variants.items(): - tool = ApplicationTool(tool_name, tool_group_name) - if tool.full_name in self.tools: - self.log.warning(( - "Duplicated tool name in settings \"{}\"" - ).format(tool.full_name)) + if not tool_group_name: + continue + group = EnvironmentToolGroup( + tool_group_name, tool_group_data, self + ) + self.tool_groups[tool_group_name] = group + for tool in group: self.tools[tool.full_name] = tool def launch(self, app_name, **data): From 1a4abd712838ca1c3d80f9a3d00598a57748bc0f Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Tue, 30 Mar 2021 19:52:50 +0200 Subject: [PATCH 275/295] changed how environments for apps and tools are retrieved --- pype/lib/applications.py | 37 +++++++++++++++++++++++++------------ 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/pype/lib/applications.py b/pype/lib/applications.py index 37021e0a5c6..cfb79f3a186 100644 --- a/pype/lib/applications.py +++ b/pype/lib/applications.py @@ -988,29 +988,42 @@ def prepare_host_environments(data): app = data["app"] log = data["log"] - # Keys for getting environments - env_keys = [app.app_group, app.app_name] + # `added_env_keys` has debug purpose + added_env_keys = {app.group.name, app.name} + # Environments for application + environments = [ + app.group.environment, + app.environment + ] asset_doc = data.get("asset_doc") + # Add tools environments + groups_by_name = {} + 
tool_by_group_name = collections.defaultdict(list) if asset_doc: - # Add tools environments + # Make sure each tool group can be added only once for key in asset_doc["data"].get("tools_env") or []: tool = app.manager.tools.get(key) - if tool: - if tool.group_name not in env_keys: - env_keys.append(tool.group_name) + if not tool: + continue + groups_by_name[tool.group.name] = tool.group + tool_by_group_name[tool.group.name].append(tool) - if tool.name not in env_keys: - env_keys.append(tool.name) + for group_name, group in groups_by_name.items(): + environments.append(group.environment) + added_env_keys.add(group_name) + for tool in tool_by_group_name[group_name]: + environments.append(tool.environment) + added_env_keys.add(tool.name) log.debug( - "Finding environment groups for keys: {}".format(env_keys) + "Will add environments for apps and tools: {}".format( + ", ".join(added_env_keys) + ) ) - settings_env = data["settings_env"] env_values = {} - for env_key in env_keys: - _env_values = settings_env.get(env_key) + for _env_values in environments: if not _env_values: continue From 9f14f89173260c47827d18f748f0671b9fdaaa07 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Tue, 30 Mar 2021 19:53:47 +0200 Subject: [PATCH 276/295] removed usage of "settings_env" key in launch context --- pype/hooks/pre_global_host_data.py | 1 - pype/lib/applications.py | 9 --------- 2 files changed, 10 deletions(-) diff --git a/pype/hooks/pre_global_host_data.py b/pype/hooks/pre_global_host_data.py index 876c5840e70..5405bc0894b 100644 --- a/pype/hooks/pre_global_host_data.py +++ b/pype/hooks/pre_global_host_data.py @@ -41,7 +41,6 @@ def execute(self): "anatomy": self.data["anatomy"], - "settings_env": self.data.get("settings_env"), "env": self.launch_context.env, "log": self.log diff --git a/pype/lib/applications.py b/pype/lib/applications.py index cfb79f3a186..e7e161b0a9f 100644 --- a/pype/lib/applications.py +++ b/pype/lib/applications.py @@ -572,12 +572,6 @@ 
def __init__(self, application, executable, **data): self.data = dict(data) - # Load settings if were not passed in data - settings_env = self.data.get("settings_env") - if settings_env is None: - settings_env = get_environments() - self.data["settings_env"] = settings_env - # subprocess.Popen launch arguments (first argument in constructor) self.launch_args = executable.as_args() self.launch_args.extend(application.arguments) @@ -896,9 +890,6 @@ def __init__(self, data): if data.get("env") is None: data["env"] = os.environ.copy() - if data.get("settings_env") is None: - data["settings_env"] = get_environments() - super(EnvironmentPrepData, self).__init__(data) From 0caa77261ac2ec30efe03fc9c75daccb747062b7 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Tue, 30 Mar 2021 20:02:20 +0200 Subject: [PATCH 277/295] removed label and icon from variants --- .../system_settings/applications.json | 88 ------------------- .../host_settings/template_host_variant.json | 14 --- 2 files changed, 102 deletions(-) diff --git a/pype/settings/defaults/system_settings/applications.json b/pype/settings/defaults/system_settings/applications.json index 08e7a16599d..5eccdfb83d8 100644 --- a/pype/settings/defaults/system_settings/applications.json +++ b/pype/settings/defaults/system_settings/applications.json @@ -21,9 +21,7 @@ "variants": { "maya_2020": { "enabled": true, - "label": "", "variant_label": "2020", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Autodesk\\Maya2020\\bin\\maya.exe" @@ -44,9 +42,7 @@ }, "maya_2019": { "enabled": true, - "label": "", "variant_label": "2019", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Autodesk\\Maya2019\\bin\\maya.exe" @@ -67,9 +63,7 @@ }, "maya_2018": { "enabled": true, - "label": "", "variant_label": "2018", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Autodesk\\Maya2018\\bin\\maya.exe" @@ -113,9 +107,7 @@ "variants": { "mayabatch_2020": { "enabled": true, - "label": 
"", "variant_label": "2020", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Autodesk\\Maya2020\\bin\\mayabatch.exe" @@ -134,9 +126,7 @@ }, "mayabatch_2019": { "enabled": true, - "label": "", "variant_label": "2019", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Autodesk\\Maya2019\\bin\\mayabatch.exe" @@ -155,9 +145,7 @@ }, "mayabatch_2018": { "enabled": true, - "label": "", "variant_label": "2018", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Autodesk\\Maya2018\\bin\\mayabatch.exe" @@ -195,9 +183,7 @@ "variants": { "nuke_12-2": { "enabled": true, - "label": "", "variant_label": "12.2", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Nuke12.2v3\\Nuke12.2.exe" @@ -216,9 +202,7 @@ }, "nuke_12-0": { "enabled": true, - "label": "", "variant_label": "12.0", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Nuke12.0v1\\Nuke12.0.exe" @@ -237,9 +221,7 @@ }, "nuke_11-3": { "enabled": true, - "label": "", "variant_label": "11.3", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Nuke11.3v1\\Nuke11.3.exe" @@ -258,9 +240,7 @@ }, "nuke_11-2": { "enabled": true, - "label": "", "variant_label": "11.2", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Nuke11.2v2\\Nuke11.2.exe" @@ -296,9 +276,7 @@ "variants": { "nukex_12-2": { "enabled": true, - "label": "", "variant_label": "12.2", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Nuke12.2v3\\Nuke12.2.exe" @@ -323,9 +301,7 @@ }, "nukex_12-0": { "enabled": true, - "label": "", "variant_label": "12.0", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Nuke12.0v1\\Nuke12.0.exe" @@ -350,9 +326,7 @@ }, "nukex_11-3": { "enabled": true, - "label": "", "variant_label": "11.3", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Nuke11.3v1\\Nuke11.3.exe" @@ -377,9 +351,7 @@ }, "nukex_11-2": { "enabled": true, - "label": "", "variant_label": "11.2", - "icon": "", "executables": { "windows": 
[ "C:\\Program Files\\Nuke11.2v2\\Nuke11.2.exe" @@ -421,9 +393,7 @@ "variants": { "nukestudio_12-2": { "enabled": true, - "label": "", "variant_label": "12.2", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Nuke12.2v3\\Nuke12.2.exe" @@ -448,9 +418,7 @@ }, "nukestudio_12-0": { "enabled": true, - "label": "", "variant_label": "12.0", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Nuke12.0v1\\Nuke12.0.exe" @@ -475,9 +443,7 @@ }, "nukestudio_11-3": { "enabled": true, - "label": "", "variant_label": "11.3", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Nuke11.3v1\\Nuke11.3.exe" @@ -502,9 +468,7 @@ }, "nukestudio_11-2": { "enabled": true, - "label": "", "variant_label": "11.2", - "icon": "", "executables": { "windows": [], "darwin": [], @@ -544,9 +508,7 @@ "variants": { "hiero_12-2": { "enabled": true, - "label": "", "variant_label": "12.2", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Nuke12.2v3\\Nuke12.2.exe" @@ -571,9 +533,7 @@ }, "hiero_12-0": { "enabled": true, - "label": "", "variant_label": "12.0", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Nuke12.0v1\\Nuke12.0.exe" @@ -598,9 +558,7 @@ }, "hiero_11-3": { "enabled": true, - "label": "", "variant_label": "11.3", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Nuke11.3v1\\Nuke11.3.exe" @@ -625,9 +583,7 @@ }, "hiero_11-2": { "enabled": true, - "label": "", "variant_label": "11.2", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Nuke11.2v2\\Nuke11.2.exe" @@ -682,9 +638,7 @@ "variants": { "fusion_16": { "enabled": true, - "label": "", "variant_label": "16", - "icon": "", "executables": { "windows": [], "darwin": [], @@ -699,9 +653,7 @@ }, "fusion_9": { "enabled": true, - "label": "", "variant_label": "9", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Blackmagic Design\\Fusion 9\\Fusion.exe" @@ -764,9 +716,7 @@ "variants": { "resolve_16": { "enabled": true, - "label": "", 
"variant_label": "16", - "icon": "", "executables": { "windows": [ "C:/Program Files/Blackmagic Design/DaVinci Resolve/Resolve.exe" @@ -803,9 +753,7 @@ "variants": { "houdini_18-5": { "enabled": true, - "label": "", "variant_label": "18.5", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Side Effects Software\\Houdini 18.5.499\\bin\\houdini.exe" @@ -822,9 +770,7 @@ }, "houdini_18": { "enabled": true, - "label": "", "variant_label": "18", - "icon": "", "executables": { "windows": [], "darwin": [], @@ -839,9 +785,7 @@ }, "houdini_17": { "enabled": true, - "label": "", "variant_label": "17", - "icon": "", "executables": { "windows": [], "darwin": [], @@ -872,9 +816,7 @@ "variants": { "blender_2-83": { "enabled": true, - "label": "", "variant_label": "2.83", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Blender Foundation\\Blender 2.83\\blender.exe" @@ -897,9 +839,7 @@ }, "blender_2-90": { "enabled": true, - "label": "", "variant_label": "2.90", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Blender Foundation\\Blender 2.90\\blender.exe" @@ -934,9 +874,7 @@ "variants": { "harmony_20": { "enabled": true, - "label": "", "variant_label": "20", - "icon": "", "executables": { "windows": [], "darwin": [], @@ -951,9 +889,7 @@ }, "harmony_17": { "enabled": true, - "label": "", "variant_label": "17", - "icon": "", "executables": { "windows": [], "darwin": [ @@ -981,9 +917,7 @@ "variants": { "tvpaint_animation_11-64bits": { "enabled": true, - "label": "", "variant_label": "11 (64bits)", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\TVPaint Developpement\\TVPaint Animation 11 (64bits)\\TVPaint Animation 11 (64bits).exe" @@ -1000,9 +934,7 @@ }, "tvpaint_animation_11-32bits": { "enabled": true, - "label": "", "variant_label": "11 (32bits)", - "icon": "", "executables": { "windows": [ "C:\\Program Files (x86)\\TVPaint Developpement\\TVPaint Animation 11 (32bits)\\TVPaint Animation 11 (32bits).exe" @@ -1033,9 +965,7 
@@ "variants": { "photoshop_2020": { "enabled": true, - "label": "", "variant_label": "2020", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Adobe\\Adobe Photoshop 2020\\Photoshop.exe" @@ -1052,9 +982,7 @@ }, "photoshop_2021": { "enabled": true, - "label": "", "variant_label": "2021", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Adobe\\Adobe Photoshop 2021\\Photoshop.exe" @@ -1085,9 +1013,7 @@ "variants": { "aftereffects_2020": { "enabled": true, - "label": "", "variant_label": "2020", - "icon": "", "executables": { "windows": [ "" @@ -1104,9 +1030,7 @@ }, "aftereffects_2021": { "enabled": true, - "label": "", "variant_label": "2021", - "icon": "", "executables": { "windows": [ "C:\\Program Files\\Adobe\\Adobe After Effects 2021\\Support Files\\AfterFX.exe" @@ -1134,9 +1058,7 @@ "variants": { "celation_Local": { "enabled": true, - "label": "", "variant_label": "Local", - "icon": "{}/app_icons/celaction_local.png", "executables": "", "arguments": { "windows": [], @@ -1160,9 +1082,7 @@ "variants": { "unreal_4-24": { "enabled": true, - "label": "", "variant_label": "4.24", - "icon": "", "executables": { "windows": [], "darwin": [], @@ -1183,9 +1103,7 @@ "variants": { "python_python_3-7": { "enabled": true, - "label": "Python", "variant_label": "3.7", - "icon": "{}/app_icons/python.png", "executables": { "windows": [], "darwin": [], @@ -1200,9 +1118,7 @@ }, "python_python_2-7": { "enabled": true, - "label": "Python", "variant_label": "2.7", - "icon": "{}/app_icons/python.png", "executables": { "windows": [], "darwin": [], @@ -1217,9 +1133,7 @@ }, "terminal_terminal": { "enabled": true, - "label": "Terminal", "variant_label": "", - "icon": "", "executables": { "windows": [], "darwin": [], @@ -1243,9 +1157,7 @@ "variants": { "djvview_1-1": { "enabled": true, - "label": "", "variant_label": "1.1", - "icon": "", "executables": { "windows": [], "darwin": [], diff --git 
a/pype/settings/entities/schemas/system_schema/host_settings/template_host_variant.json b/pype/settings/entities/schemas/system_schema/host_settings/template_host_variant.json index cf43dca6b5b..10aab064665 100644 --- a/pype/settings/entities/schemas/system_schema/host_settings/template_host_variant.json +++ b/pype/settings/entities/schemas/system_schema/host_settings/template_host_variant.json @@ -17,13 +17,6 @@ "key": "enabled", "label": "Enabled" }, - { - "type": "text", - "key": "label", - "label": "Label", - "placeholder": "Used from host label if not filled.", - "roles": ["developer"] - }, { "type": "text", "key": "variant_label", @@ -31,13 +24,6 @@ "placeholder": "Only \"Label\" is used if not filled.", "roles": ["developer"] }, - { - "type": "text", - "key": "icon", - "label": "Icon", - "placeholder": "Host icon path template. Used from host if not filled.", - "roles": ["developer"] - }, { "type": "path", "key": "executables", From f1b154a94f70527d7bd833137e0383747be5c762 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Tue, 30 Mar 2021 20:04:34 +0200 Subject: [PATCH 278/295] group does not have to have set label --- pype/lib/applications.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/pype/lib/applications.py b/pype/lib/applications.py index e7e161b0a9f..6df296db956 100644 --- a/pype/lib/applications.py +++ b/pype/lib/applications.py @@ -114,7 +114,7 @@ def __init__(self, name, data, manager): self._data = data self.enabled = data.get("enabled", True) - self.label = data.get("label") or name + self.label = data.get("label") or None self.icon = data.get("icon") or None self._environment = data.get("environment") or {} @@ -169,7 +169,12 @@ def __init__(self, name, data, group): self.label = data.get("variant_label") or name self.full_name = "/".join((group.name, name)) - self.full_label = " ".join((group.label, self.label)) + + if group.label: + full_label = " ".join((group.label, self.label)) + else: + 
full_label = self.label + self.full_label = full_label self._environment = data.get("environment") or {} _executables = data["executables"] From 140457610e342c1753916a6e2e3bb5bd7a794005 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Wed, 31 Mar 2021 10:51:25 +0200 Subject: [PATCH 279/295] removed SettingsFileHandler which is not and can't be used --- pype/settings/handlers.py | 177 -------------------------------------- 1 file changed, 177 deletions(-) diff --git a/pype/settings/handlers.py b/pype/settings/handlers.py index 6e93f2f4050..fa8a97869bd 100644 --- a/pype/settings/handlers.py +++ b/pype/settings/handlers.py @@ -126,183 +126,6 @@ def get_local_settings(self): pass -class SettingsFileHandler(SettingsHandler): - def __init__(self): - self.log = logging.getLogger("SettingsFileHandler") - - # Folder where studio overrides are stored - studio_overrides_dir = os.getenv("PYPE_PROJECT_CONFIGS") - if not studio_overrides_dir: - studio_overrides_dir = os.path.dirname(os.path.dirname( - os.path.abspath(pype.__file__) - )) - system_settings_path = os.path.join( - studio_overrides_dir, SYSTEM_SETTINGS_KEY + ".json" - ) - - # File where studio's default project overrides are stored - project_settings_filename = PROJECT_SETTINGS_KEY + ".json" - project_settings_path = os.path.join( - studio_overrides_dir, project_settings_filename - ) - - project_anatomy_filename = PROJECT_ANATOMY_KEY + ".json" - project_anatomy_path = os.path.join( - studio_overrides_dir, project_anatomy_filename - ) - - self.studio_overrides_dir = studio_overrides_dir - self.system_settings_path = system_settings_path - - self.project_settings_filename = project_settings_filename - self.project_anatomy_filename = project_anatomy_filename - - self.project_settings_path = project_settings_path - self.project_anatomy_path = project_anatomy_path - - def path_to_project_settings(self, project_name): - if not project_name: - return self.project_settings_path - return os.path.join( - 
self.studio_overrides_dir, - project_name, - self.project_settings_filename - ) - - def path_to_project_anatomy(self, project_name): - if not project_name: - return self.project_anatomy_path - return os.path.join( - self.studio_overrides_dir, - project_name, - self.project_anatomy_filename - ) - - def save_studio_settings(self, data): - """Save studio overrides of system settings. - - Do not use to store whole system settings data with defaults but only - it's overrides with metadata defining how overrides should be applied - in load function. For loading should be used function - `studio_system_settings`. - - Args: - data(dict): Data of studio overrides with override metadata. - """ - dirpath = os.path.dirname(self.system_settings_path) - if not os.path.exists(dirpath): - os.makedirs(dirpath) - - self.log.debug( - "Saving studio overrides. Output path: {}".format( - self.system_settings_path - ) - ) - with open(self.system_settings_path, "w") as file_stream: - json.dump(data, file_stream, indent=4) - - def save_project_settings(self, project_name, overrides): - """Save studio overrides of project settings. - - Data are saved for specific project or as defaults for all projects. - - Do not use to store whole project settings data with defaults but only - it's overrides with metadata defining how overrides should be applied - in load function. For loading should be used function - `get_studio_project_settings_overrides` for global project settings - and `get_project_settings_overrides` for project specific settings. - - Args: - project_name(str, null): Project name for which overrides are - or None for global settings. - data(dict): Data of project overrides with override metadata. - """ - project_overrides_json_path = self.path_to_project_settings( - project_name - ) - dirpath = os.path.dirname(project_overrides_json_path) - if not os.path.exists(dirpath): - os.makedirs(dirpath) - - self.log.debug( - "Saving overrides of project \"{}\". 
Output path: {}".format( - project_name, project_overrides_json_path - ) - ) - with open(project_overrides_json_path, "w") as file_stream: - json.dump(overrides, file_stream, indent=4) - - def save_project_anatomy(self, project_name, anatomy_data): - """Save studio overrides of project anatomy data. - - Args: - project_name(str, null): Project name for which overrides are - or None for global settings. - data(dict): Data of project overrides with override metadata. - """ - project_anatomy_json_path = self.path_to_project_anatomy(project_name) - dirpath = os.path.dirname(project_anatomy_json_path) - if not os.path.exists(dirpath): - os.makedirs(dirpath) - - self.log.debug( - "Saving anatomy of project \"{}\". Output path: {}".format( - project_name, project_anatomy_json_path - ) - ) - with open(project_anatomy_json_path, "w") as file_stream: - json.dump(anatomy_data, file_stream, indent=4) - - def get_studio_system_settings_overrides(self): - """Studio overrides of system settings.""" - if os.path.exists(self.system_settings_path): - return load_json_file(self.system_settings_path) - return {} - - def get_studio_project_settings_overrides(self): - """Studio overrides of default project settings.""" - if os.path.exists(self.project_settings_path): - return load_json_file(self.project_settings_path) - return {} - - def get_studio_project_anatomy_overrides(self): - """Studio overrides of default project anatomy data.""" - if os.path.exists(self.project_anatomy_path): - return load_json_file(self.project_anatomy_path) - return {} - - def get_project_settings_overrides(self, project_name): - """Studio overrides of project settings for specific project. - - Args: - project_name(str): Name of project for which data should be loaded. - - Returns: - dict: Only overrides for entered project, may be empty dictionary. 
- """ - path_to_json = self.path_to_project_settings(project_name) - if not os.path.exists(path_to_json): - return {} - return load_json_file(path_to_json) - - def get_project_anatomy_overrides(self, project_name): - """Studio overrides of project anatomy for specific project. - - Args: - project_name(str): Name of project for which data should be loaded. - - Returns: - dict: Only overrides for entered project, may be empty dictionary. - """ - if not project_name: - return {} - - path_to_json = self.path_to_project_anatomy(project_name) - if not os.path.exists(path_to_json): - return {} - return load_json_file(path_to_json) - - class CacheValues: cache_lifetime = 10 From 6221b6aa1e782416a86f3f6f9d8749380552d76a Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Wed, 31 Mar 2021 11:02:32 +0200 Subject: [PATCH 280/295] hide expainding button if content layout does not have any widget --- pype/tools/settings/settings/widgets/item_widgets.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pype/tools/settings/settings/widgets/item_widgets.py b/pype/tools/settings/settings/widgets/item_widgets.py index e2e31a7588e..6045b05227d 100644 --- a/pype/tools/settings/settings/widgets/item_widgets.py +++ b/pype/tools/settings/settings/widgets/item_widgets.py @@ -59,6 +59,9 @@ def create_ui(self): ) ) + if self.entity.use_label_wrap and self.content_layout.count() == 0: + self.body_widget.hide_toolbox(True) + self.entity_widget.add_widget_to_layout(self, label) def _prepare_entity_layouts(self, children, widget): From 56c4ae7eb7b81b400693a75d277a0c1609fe5aa6 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Wed, 31 Mar 2021 14:57:36 +0200 Subject: [PATCH 281/295] removed studio soft key from general and replaced with pype_path key --- .../schemas/system_schema/schema_general.json | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/pype/settings/entities/schemas/system_schema/schema_general.json 
b/pype/settings/entities/schemas/system_schema/schema_general.json index cf88043cd03..26dc251acf3 100644 --- a/pype/settings/entities/schemas/system_schema/schema_general.json +++ b/pype/settings/entities/schemas/system_schema/schema_general.json @@ -19,20 +19,20 @@ "type": "splitter" }, { - "key": "studio_soft", - "type": "path", - "label": "Studio Software Location", - "multiplatform": true, - "multipath": false + "key": "environment", + "label": "Environment", + "type": "raw-json", + "env_group_key": "global" }, { "type": "splitter" }, { - "key": "environment", - "label": "Environment", - "type": "raw-json", - "env_group_key": "global" + "type": "path", + "key": "pype_path", + "label": "Pype Path", + "multiplatform": true, + "multipath": true } ] } From 85863015a38cfa6218d094c5c30fa3122878f085 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Wed, 31 Mar 2021 14:57:51 +0200 Subject: [PATCH 282/295] defined constant for global settings type --- pype/settings/constants.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pype/settings/constants.py b/pype/settings/constants.py index f6077e826ef..a53e88a91e6 100644 --- a/pype/settings/constants.py +++ b/pype/settings/constants.py @@ -15,6 +15,7 @@ ) # File where studio's system overrides are stored +GLOBAL_SETTINGS_KEY = "global_settings" SYSTEM_SETTINGS_KEY = "system_settings" PROJECT_SETTINGS_KEY = "project_settings" PROJECT_ANATOMY_KEY = "project_anatomy" From 969b2dedef573575c00eb408541b7d3232783835 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Wed, 31 Mar 2021 14:58:19 +0200 Subject: [PATCH 283/295] resaved defaults --- pype/settings/defaults/system_settings/general.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pype/settings/defaults/system_settings/general.json b/pype/settings/defaults/system_settings/general.json index e03e00aca88..99db4e85c6f 100644 --- a/pype/settings/defaults/system_settings/general.json +++ 
b/pype/settings/defaults/system_settings/general.json @@ -1,11 +1,6 @@ { "studio_name": "Studio name", "studio_code": "stu", - "studio_soft": { - "windows": "convert from \"STUDIO_SOFT\"", - "darwin": "", - "linux": "" - }, "environment": { "FFMPEG_PATH": { "windows": "{PYPE_ROOT}/vendor/bin/ffmpeg_exec/windows/bin", @@ -19,5 +14,10 @@ "PYPE_OCIO_CONFIG" ] } + }, + "pype_path": { + "windows": [], + "darwin": [], + "linux": [] } } \ No newline at end of file From cd9254b27d897fdf97313cb126528a958a819c84 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Wed, 31 Mar 2021 14:58:50 +0200 Subject: [PATCH 284/295] settings handler saves pype_path from system settings to global settings on save --- pype/settings/handlers.py | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/pype/settings/handlers.py b/pype/settings/handlers.py index 5a4e507547d..0891ac39b59 100644 --- a/pype/settings/handlers.py +++ b/pype/settings/handlers.py @@ -8,6 +8,7 @@ import six import pype from .constants import ( + GLOBAL_SETTINGS_KEY, SYSTEM_SETTINGS_KEY, PROJECT_SETTINGS_KEY, PROJECT_ANATOMY_KEY, @@ -401,6 +402,11 @@ def attribute_keys(self): self._prepare_project_settings_keys() return self._attribute_keys + def _prepare_global_settings(self, data): + if "general" not in data: + return {} + return data["general"].get("pype_path") or {} + def save_studio_settings(self, data): """Save studio overrides of system settings. @@ -412,8 +418,8 @@ def save_studio_settings(self, data): Args: data(dict): Data of studio overrides with override metadata. 
""" + # Store system settings self.system_settings_cache.update_data(data) - self.collection.replace_one( { "type": SYSTEM_SETTINGS_KEY @@ -425,6 +431,22 @@ def save_studio_settings(self, data): upsert=True ) + # Get global settings from system settings + global_settings = self._prepare_global_settings( + self.system_settings_cache.data + ) + # Store global settings + self.collection.replace_one( + { + "type": GLOBAL_SETTINGS_KEY + }, + { + "type": GLOBAL_SETTINGS_KEY, + "data": global_settings + }, + upsert=True + ) + def save_project_settings(self, project_name, overrides): """Save studio overrides of project settings. From a9442274ec4c533f8fd4aac1e672d0063ed33e70 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Wed, 31 Mar 2021 14:59:23 +0200 Subject: [PATCH 285/295] removed unused function load_environments from igniter --- igniter/tools.py | 32 -------------------------------- 1 file changed, 32 deletions(-) diff --git a/igniter/tools.py b/igniter/tools.py index 4ed4ae67f4c..43cea8382a2 100644 --- a/igniter/tools.py +++ b/igniter/tools.py @@ -185,38 +185,6 @@ def validate_path_string(path: str) -> (bool, str): return True, "valid path" -def load_environments(sections: list = None) -> dict: - """Load environments from Pype. - - This will load environments from database, process them with - :mod:`acre` and return them as flattened dictionary. - - Args: - sections (list, optional): load specific types - - Returns; - dict of str: loaded and processed environments. - - """ - import acre - - from pype import settings - - all_env = settings.get_environments() - merged_env = {} - - sections = sections or all_env.keys() - - for section in sections: - try: - parsed_env = acre.parse(all_env[section]) - except AttributeError: - continue - merged_env = acre.append(merged_env, parsed_env) - - return acre.compute(merged_env, cleanup=True) - - def get_pype_path_from_db(url: str) -> Union[str, None]: """Get Pype path from database. 
From e7781a454003481b59191a3093c4036532c3192c Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Wed, 31 Mar 2021 15:00:41 +0200 Subject: [PATCH 286/295] added function to load global settings from mongo --- igniter/tools.py | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/igniter/tools.py b/igniter/tools.py index 43cea8382a2..4d827836071 100644 --- a/igniter/tools.py +++ b/igniter/tools.py @@ -185,6 +185,44 @@ def validate_path_string(path: str) -> (bool, str): return True, "valid path" +def get_pype_global_settings(url: str) -> dict: + """Load global settings from Mongo database. + + We are loading data from database `pype` and collection `settings`. + There we expect document type `global_settings`. + + Returns: + dict: With settings data. Empty dictionary is returned if not found. + """ + try: + components = decompose_url(url) + except RuntimeError: + return {} + mongo_kwargs = { + "host": compose_url(**components), + "serverSelectionTimeoutMS": 2000 + } + port = components.get("port") + if port is not None: + mongo_kwargs["port"] = int(port) + + try: + # Create mongo connection + client = MongoClient(**mongo_kwargs) + # Access settings collection + col = client["pype"]["settings"] + # Query global settings + global_settings = col.find_one({"type": "global_settings"}) or {} + # Close Mongo connection + client.close() + + except Exception: + # TODO log traceback or message + return {} + + return global_settings.get("data") or {} + + def get_pype_path_from_db(url: str) -> Union[str, None]: """Get Pype path from database. 
From b98fd5d0a9c2f281821d8ade50a3ad4c4ff082c9 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Wed, 31 Mar 2021 15:02:18 +0200 Subject: [PATCH 287/295] modified `get_pype_path_from_db` to use `get_pype_global_settings` function --- igniter/tools.py | 31 ++----------------------------- 1 file changed, 2 insertions(+), 29 deletions(-) diff --git a/igniter/tools.py b/igniter/tools.py index 4d827836071..80fcd5a9e61 100644 --- a/igniter/tools.py +++ b/igniter/tools.py @@ -224,40 +224,13 @@ def get_pype_global_settings(url: str) -> dict: def get_pype_path_from_db(url: str) -> Union[str, None]: - """Get Pype path from database. - - We are loading data from database `pype` and collection `settings`. - There we expect document type `global_settings`. + """Get Pype path from global settings. Args: url (str): mongodb url. Returns: path to Pype or None if not found - """ - try: - components = decompose_url(url) - except RuntimeError: - return None - mongo_args = { - "host": compose_url(**components), - "serverSelectionTimeoutMS": 2000 - } - port = components.get("port") - if port is not None: - mongo_args["port"] = int(port) - - try: - client = MongoClient(**mongo_args) - except Exception: - return None - - db = client.pype - col = db.settings - - global_settings = col.find_one({"type": "global_settings"}, {"data": 1}) - if not global_settings: - return None - global_settings.get("data", {}) + global_settings = get_pype_global_settings(url) return global_settings.get("pype_path", {}).get(platform.system().lower()) From c51f31161eea1a664912dfe0f4f8dc384a471a5a Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Wed, 31 Mar 2021 15:07:20 +0200 Subject: [PATCH 288/295] changed how paths are checked --- igniter/tools.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/igniter/tools.py b/igniter/tools.py index 80fcd5a9e61..7f5aa8d8762 100644 --- a/igniter/tools.py +++ b/igniter/tools.py @@ -6,6 +6,7 @@ version is decided. 
""" +import os from typing import Dict, Union from urllib.parse import urlparse, parse_qs from pathlib import Path @@ -234,3 +235,17 @@ def get_pype_path_from_db(url: str) -> Union[str, None]: """ global_settings = get_pype_global_settings(url) return global_settings.get("pype_path", {}).get(platform.system().lower()) + paths = ( + global_settings + .get("pype_path", {}) + .get(platform.system().lower()) + ) or [] + # For cases when `pype_path` is a single path + if paths and isinstance(paths, str): + paths = [paths] + + # Loop over paths and return only existing + for path in paths: + if os.path.exists(path): + return path + return None From 4eab3340a16a7719d0719c4827745ab5e00f6d29 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Wed, 31 Mar 2021 15:07:39 +0200 Subject: [PATCH 289/295] fix label issue --- pype/settings/entities/enum_entity.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pype/settings/entities/enum_entity.py b/pype/settings/entities/enum_entity.py index c486de397ed..e28fb7478f7 100644 --- a/pype/settings/entities/enum_entity.py +++ b/pype/settings/entities/enum_entity.py @@ -133,12 +133,9 @@ def _get_enum_values(self): if enabled_entity and not enabled_entity.value: continue - _group_label = variant_entity["label"].value - if not _group_label: - _group_label = group_label variant_label = variant_entity["variant_label"].value - full_label = "{} {}".format(_group_label, variant_label) + full_label = "{} {}".format(group_label, variant_label) enum_items.append({variant_name: full_label}) valid_keys.add(variant_name) return enum_items, valid_keys From 75f9fd7d1c8845cffe30e5aef01a795d73b5b4cd Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Wed, 31 Mar 2021 15:26:44 +0200 Subject: [PATCH 290/295] close validation mongo connection --- igniter/tools.py | 1 + 1 file changed, 1 insertion(+) diff --git a/igniter/tools.py b/igniter/tools.py index 7f5aa8d8762..38d7fa2b269 100644 --- 
a/igniter/tools.py +++ b/igniter/tools.py @@ -131,6 +131,7 @@ def validate_mongo_connection(cnx: str) -> (bool, str): try: client = MongoClient(**mongo_args) client.server_info() + client.close() except ServerSelectionTimeoutError as e: return False, f"Cannot connect to server {cnx} - {e}" except ValueError: From 86755f97cc54dca0c4a94781f311ad13ea3a4c0c Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Wed, 31 Mar 2021 15:46:05 +0200 Subject: [PATCH 291/295] fix global settings dict hierarchy --- pype/settings/handlers.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pype/settings/handlers.py b/pype/settings/handlers.py index 0891ac39b59..cc071f9fb5b 100644 --- a/pype/settings/handlers.py +++ b/pype/settings/handlers.py @@ -403,9 +403,11 @@ def attribute_keys(self): return self._attribute_keys def _prepare_global_settings(self, data): - if "general" not in data: - return {} - return data["general"].get("pype_path") or {} + output = {} + # Add "pype_path" key to global settings if is set + if "general" in data and "pype_path" in data["general"]: + output["pype_path"] = data["general"]["pype_path"] + return output def save_studio_settings(self, data): """Save studio overrides of system settings. From fae5a6506ce294ce6b2b251bf70c9e6d6582b1cd Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Wed, 31 Mar 2021 16:16:44 +0200 Subject: [PATCH 292/295] added `url` argument back to docstring --- igniter/tools.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/igniter/tools.py b/igniter/tools.py index 38d7fa2b269..08157e8728f 100644 --- a/igniter/tools.py +++ b/igniter/tools.py @@ -193,6 +193,9 @@ def get_pype_global_settings(url: str) -> dict: We are loading data from database `pype` and collection `settings`. There we expect document type `global_settings`. + Args: + url (str): MongoDB url. + Returns: dict: With settings data. Empty dictionary is returned if not found. 
""" From 3280d1a8aadde67a10817531cb2471cf530ef0eb Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Wed, 31 Mar 2021 16:16:54 +0200 Subject: [PATCH 293/295] removed forgotten line --- igniter/tools.py | 1 - 1 file changed, 1 deletion(-) diff --git a/igniter/tools.py b/igniter/tools.py index 08157e8728f..11fe6b02f24 100644 --- a/igniter/tools.py +++ b/igniter/tools.py @@ -238,7 +238,6 @@ def get_pype_path_from_db(url: str) -> Union[str, None]: path to Pype or None if not found """ global_settings = get_pype_global_settings(url) - return global_settings.get("pype_path", {}).get(platform.system().lower()) paths = ( global_settings .get("pype_path", {}) From fc103ec5c31dcbb83b874da207b3deb9de437c05 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Wed, 31 Mar 2021 19:49:46 +0200 Subject: [PATCH 294/295] auto create new item on getitem in mutable dict --- pype/settings/entities/dict_mutable_keys_entity.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pype/settings/entities/dict_mutable_keys_entity.py b/pype/settings/entities/dict_mutable_keys_entity.py index b4651717345..d4bf2085555 100644 --- a/pype/settings/entities/dict_mutable_keys_entity.py +++ b/pype/settings/entities/dict_mutable_keys_entity.py @@ -44,6 +44,8 @@ class DictMutableKeysEntity(EndpointEntity): _miss_arg = object() def __getitem__(self, key): + if key not in self.children_by_key: + self.add_key(key) return self.children_by_key[key] def __setitem__(self, key, value): From 9bb1d87304a3a9f094c1304af23a344ccb94938f Mon Sep 17 00:00:00 2001 From: iLLiCiTiT <jakub.trllo@gmail.com> Date: Wed, 31 Mar 2021 19:50:06 +0200 Subject: [PATCH 295/295] added sequence tag to extract review profiles --- .../projects_schema/schemas/schema_global_publish.json | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json 
b/pype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json index b4d18762974..3c079a130d7 100644 --- a/pype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json +++ b/pype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json @@ -119,7 +119,10 @@ "slate-frame": "Add slate frame" }, { - "no-hnadles": "Skip handle frames" + "no-handles": "Skip handle frames" + }, + { + "sequence": "Output as image sequence" } ] },