From fd6466082b86fe3dc5eb43838b9f8fbd89326117 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Tue, 30 Mar 2021 16:38:36 +0200 Subject: [PATCH] OTIO: adding python-2 version to `pype.vendor` --- .../python_2/opentimelineio/__init__.py | 51 + .../opentimelineio/adapters/__init__.py | 213 ++ .../opentimelineio/adapters/adapter.py | 317 +++ .../builtin_adapters.plugin_manifest.json | 31 + .../opentimelineio/adapters/cmx_3600.py | 1306 +++++++++++ .../opentimelineio/adapters/fcp_xml.py | 1941 +++++++++++++++++ .../opentimelineio/adapters/otio_json.py | 48 + .../opentimelineio/algorithms/__init__.py | 44 + .../opentimelineio/algorithms/filter.py | 275 +++ .../opentimelineio/algorithms/stack_algo.py | 138 ++ .../algorithms/timeline_algo.py | 56 + .../opentimelineio/algorithms/track_algo.py | 236 ++ .../opentimelineio/console/__init__.py | 40 + .../console/autogen_serialized_datamodel.py | 302 +++ .../opentimelineio/console/console_utils.py | 72 + .../opentimelineio/console/otiocat.py | 138 ++ .../opentimelineio/console/otioconvert.py | 259 +++ .../opentimelineio/console/otiostat.py | 193 ++ .../python_2/opentimelineio/core/__init__.py | 67 + .../opentimelineio/core/composable.py | 141 ++ .../opentimelineio/core/composition.py | 718 ++++++ .../python_2/opentimelineio/core/item.py | 243 +++ .../opentimelineio/core/json_serializer.py | 218 ++ .../opentimelineio/core/media_reference.py | 102 + .../core/serializable_object.py | 219 ++ .../opentimelineio/core/type_registry.py | 152 ++ .../opentimelineio/core/unknown_schema.py | 50 + .../python_2/opentimelineio/exceptions.py | 89 + .../python/python_2/opentimelineio/hooks.py | 174 ++ .../python_2/opentimelineio/media_linker.py | 169 ++ .../python_2/opentimelineio/opentime.py | 856 ++++++++ .../opentimelineio/plugins/__init__.py | 33 + .../opentimelineio/plugins/manifest.py | 282 +++ .../opentimelineio/plugins/python_plugin.py | 128 ++ .../opentimelineio/schema/__init__.py | 75 + .../python_2/opentimelineio/schema/clip.py | 130 ++ .../python_2/opentimelineio/schema/effect.py | 130 ++ .../schema/external_reference.py | 69 + .../python_2/opentimelineio/schema/gap.py | 82 + .../schema/generator_reference.py | 76 + .../python_2/opentimelineio/schema/marker.py | 128 ++ .../schema/missing_reference.py | 43 + .../opentimelineio/schema/schemadef.py | 65 + .../schema/serializable_collection.py | 149 ++ .../python_2/opentimelineio/schema/stack.py | 120 + .../opentimelineio/schema/timeline.py | 133 ++ .../python_2/opentimelineio/schema/track.py | 242 ++ .../opentimelineio/schema/transition.py | 159 ++ .../opentimelineio/schemadef/__init__.py | 5 + .../python_2/opentimelineio/test_utils.py | 54 + .../opentimelineio_contrib/__init__.py | 37 + .../adapters/__init__.py | 0 .../adapters/aaf_adapter/__init__.py | 0 .../adapters/aaf_adapter/aaf_writer.py | 764 +++++++ .../adapters/advanced_authoring_format.py | 979 +++++++++ .../opentimelineio_contrib/adapters/ale.py | 318 +++ .../adapters/burnins.py | 93 + .../contrib_adapters.plugin_manifest.json | 61 + .../adapters/extern_maya_sequencer.py | 261 +++ .../adapters/extern_rv.py | 327 +++ .../adapters/fcpx_xml.py | 1182 ++++++++++ .../adapters/ffmpeg_burnins.py | 424 ++++ .../adapters/hls_playlist.py | 1781 +++++++++++++++ .../adapters/maya_sequencer.py | 132 ++ .../opentimelineio_contrib/adapters/rv.py | 84 + .../opentimelineio_contrib/adapters/xges.py | 819 +++++++ 66 files changed, 18223 insertions(+) create mode 100644 pype/vendor/python/python_2/opentimelineio/__init__.py create mode 100644 
pype/vendor/python/python_2/opentimelineio/adapters/__init__.py create mode 100644 pype/vendor/python/python_2/opentimelineio/adapters/adapter.py create mode 100644 pype/vendor/python/python_2/opentimelineio/adapters/builtin_adapters.plugin_manifest.json create mode 100644 pype/vendor/python/python_2/opentimelineio/adapters/cmx_3600.py create mode 100644 pype/vendor/python/python_2/opentimelineio/adapters/fcp_xml.py create mode 100644 pype/vendor/python/python_2/opentimelineio/adapters/otio_json.py create mode 100644 pype/vendor/python/python_2/opentimelineio/algorithms/__init__.py create mode 100644 pype/vendor/python/python_2/opentimelineio/algorithms/filter.py create mode 100644 pype/vendor/python/python_2/opentimelineio/algorithms/stack_algo.py create mode 100644 pype/vendor/python/python_2/opentimelineio/algorithms/timeline_algo.py create mode 100644 pype/vendor/python/python_2/opentimelineio/algorithms/track_algo.py create mode 100644 pype/vendor/python/python_2/opentimelineio/console/__init__.py create mode 100644 pype/vendor/python/python_2/opentimelineio/console/autogen_serialized_datamodel.py create mode 100644 pype/vendor/python/python_2/opentimelineio/console/console_utils.py create mode 100644 pype/vendor/python/python_2/opentimelineio/console/otiocat.py create mode 100644 pype/vendor/python/python_2/opentimelineio/console/otioconvert.py create mode 100644 pype/vendor/python/python_2/opentimelineio/console/otiostat.py create mode 100644 pype/vendor/python/python_2/opentimelineio/core/__init__.py create mode 100644 pype/vendor/python/python_2/opentimelineio/core/composable.py create mode 100644 pype/vendor/python/python_2/opentimelineio/core/composition.py create mode 100644 pype/vendor/python/python_2/opentimelineio/core/item.py create mode 100644 pype/vendor/python/python_2/opentimelineio/core/json_serializer.py create mode 100644 pype/vendor/python/python_2/opentimelineio/core/media_reference.py create mode 100644 pype/vendor/python/python_2/opentimelineio/core/serializable_object.py create mode 100644 pype/vendor/python/python_2/opentimelineio/core/type_registry.py create mode 100644 pype/vendor/python/python_2/opentimelineio/core/unknown_schema.py create mode 100644 pype/vendor/python/python_2/opentimelineio/exceptions.py create mode 100644 pype/vendor/python/python_2/opentimelineio/hooks.py create mode 100644 pype/vendor/python/python_2/opentimelineio/media_linker.py create mode 100644 pype/vendor/python/python_2/opentimelineio/opentime.py create mode 100644 pype/vendor/python/python_2/opentimelineio/plugins/__init__.py create mode 100644 pype/vendor/python/python_2/opentimelineio/plugins/manifest.py create mode 100644 pype/vendor/python/python_2/opentimelineio/plugins/python_plugin.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/__init__.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/clip.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/effect.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/external_reference.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/gap.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/generator_reference.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/marker.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/missing_reference.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/schemadef.py create mode 100644 
pype/vendor/python/python_2/opentimelineio/schema/serializable_collection.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/stack.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/timeline.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/track.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schema/transition.py create mode 100644 pype/vendor/python/python_2/opentimelineio/schemadef/__init__.py create mode 100644 pype/vendor/python/python_2/opentimelineio/test_utils.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/__init__.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/__init__.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/aaf_adapter/__init__.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/aaf_adapter/aaf_writer.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/advanced_authoring_format.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/ale.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/burnins.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/contrib_adapters.plugin_manifest.json create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/extern_maya_sequencer.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/extern_rv.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/fcpx_xml.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/ffmpeg_burnins.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/hls_playlist.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/maya_sequencer.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/rv.py create mode 100644 pype/vendor/python/python_2/opentimelineio_contrib/adapters/xges.py diff --git a/pype/vendor/python/python_2/opentimelineio/__init__.py b/pype/vendor/python/python_2/opentimelineio/__init__.py new file mode 100644 index 00000000000..a8b0a636ad4 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/__init__.py @@ -0,0 +1,51 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""An editorial interchange format and library. + +see: http://opentimeline.io + +.. 
moduleauthor:: Pixar Animation Studios +""" + +# flake8: noqa + +# in dependency hierarchy +from . import ( + opentime, + exceptions, + core, + schema, + schemadef, + plugins, + media_linker, + adapters, + hooks, + algorithms, +) + +__version__ = "0.11.0" +__author__ = "Pixar Animation Studios" +__author_email__ = "opentimelineio@pixar.com" +__license__ = "Modified Apache 2.0 License" diff --git a/pype/vendor/python/python_2/opentimelineio/adapters/__init__.py b/pype/vendor/python/python_2/opentimelineio/adapters/__init__.py new file mode 100644 index 00000000000..afbe3f8e8a5 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/adapters/__init__.py @@ -0,0 +1,213 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Expose the adapter interface to developers. + +To read from an existing representation, use the read_from_string and +read_from_file functions. To query the list of adapters, use the +available_adapter_names function. + +The otio_json adapter is provided as the canonical, lossless serialization +of the in-memory otio schema. Other adapters are to varying degrees lossy. +For more information, consult the documentation in the individual adapter +modules. +""" + +import os +import itertools + +from .. import ( + exceptions, + plugins, + media_linker +) + +from .adapter import Adapter # noqa + +# OTIO Json adapter is always available +from . import otio_json # noqa + + +def suffixes_with_defined_adapters(read=False, write=False): + """Return a set of all the suffixes that have adapters defined for them.""" + + if not read and not write: + read = True + write = True + + positive_adapters = [] + for adp in plugins.ActiveManifest().adapters: + if read and adp.has_feature("read"): + positive_adapters.append(adp) + continue + + if write and adp.has_feature("write"): + positive_adapters.append(adp) + + return set( + itertools.chain.from_iterable( + adp.suffixes for adp in positive_adapters + ) + ) + + +def available_adapter_names(): + """Return a string list of the available adapters.""" + + return [str(adp.name) for adp in plugins.ActiveManifest().adapters] + + +def _from_filepath_or_name(filepath, adapter_name): + if adapter_name is not None: + return plugins.ActiveManifest().from_name(adapter_name) + else: + return from_filepath(filepath) + + +def from_filepath(filepath): + """Guess the adapter object to use for a given filepath. + + example: + "foo.otio" returns the "otio_json" adapter.
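+ "foo.edl" would likewise return the "cmx_3600" adapter; the lookup + is driven by the suffixes lists declared in the active plugin + manifest, so the mapping depends on which manifests are installed.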
+ """ + + outext = os.path.splitext(filepath)[1][1:] + + try: + return plugins.ActiveManifest().from_filepath(outext) + except exceptions.NoKnownAdapterForExtensionError: + raise exceptions.NoKnownAdapterForExtensionError( + "No adapter for suffix '{}' on file '{}'".format( + outext, + filepath + ) + ) + + +def from_name(name): + """Fetch the adapter object by the name of the adapter directly.""" + + try: + return plugins.ActiveManifest().from_name(name) + except exceptions.NotSupportedError: + raise exceptions.NotSupportedError( + "adapter not supported: {}, available: {}".format( + name, + available_adapter_names() + ) + ) + + +def read_from_file( + filepath, + adapter_name=None, + media_linker_name=media_linker.MediaLinkingPolicy.ForceDefaultLinker, + media_linker_argument_map=None, + **adapter_argument_map +): + """Read filepath using adapter_name. + + If adapter_name is None, try and infer the adapter name from the filepath. + + For example: + timeline = read_from_file("example_trailer.otio") + timeline = read_from_file("file_with_no_extension", "cmx_3600") + """ + + adapter = _from_filepath_or_name(filepath, adapter_name) + + return adapter.read_from_file( + filepath=filepath, + media_linker_name=media_linker_name, + media_linker_argument_map=media_linker_argument_map, + **adapter_argument_map + ) + + +def read_from_string( + input_str, + adapter_name='otio_json', + media_linker_name=media_linker.MediaLinkingPolicy.ForceDefaultLinker, + media_linker_argument_map=None, + **adapter_argument_map +): + """Read a timeline from input_str using adapter_name. + + This is useful if you obtain a timeline from someplace other than the + filesystem. + + Example: + raw_text = urlopen(my_url).read() + timeline = read_from_string(raw_text, "otio_json") + """ + + adapter = plugins.ActiveManifest().from_name(adapter_name) + return adapter.read_from_string( + input_str=input_str, + media_linker_name=media_linker_name, + media_linker_argument_map=media_linker_argument_map, + **adapter_argument_map + ) + + +def write_to_file( + input_otio, + filepath, + adapter_name=None, + **adapter_argument_map +): + """Write input_otio to filepath using adapter_name. + + If adapter_name is None, infer the adapter_name to use based on the + filepath. + + Example: + otio.adapters.write_to_file(my_timeline, "output.otio") + """ + + adapter = _from_filepath_or_name(filepath, adapter_name) + + return adapter.write_to_file( + input_otio=input_otio, + filepath=filepath, + **adapter_argument_map + ) + + +def write_to_string( + input_otio, + adapter_name='otio_json', + **adapter_argument_map +): + """Return input_otio written to a string using adapter_name. + + Example: + raw_text = otio.adapters.write_to_string(my_timeline, "otio_json") + """ + + adapter = plugins.ActiveManifest().from_name(adapter_name) + return adapter.write_to_string( + input_otio=input_otio, + **adapter_argument_map + ) diff --git a/pype/vendor/python/python_2/opentimelineio/adapters/adapter.py b/pype/vendor/python/python_2/opentimelineio/adapters/adapter.py new file mode 100644 index 00000000000..82ac4050655 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/adapters/adapter.py @@ -0,0 +1,317 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. 
This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Implementation of the OTIO internal `Adapter` system. + +For information on writing adapters, please consult: + https://opentimelineio.readthedocs.io/en/latest/tutorials/write-an-adapter.html# # noqa +""" + +from .. import ( + core, + plugins, + media_linker, + hooks, +) + + +@core.register_type +class Adapter(plugins.PythonPlugin): + """Adapters convert between OTIO and other formats. + + Note that this class is not subclassed by adapters. Rather, an adapter is + a python module that implements at least one of the following functions: + + write_to_string(input_otio) + write_to_file(input_otio, filepath) (optionally inferred) + read_from_string(input_str) + read_from_file(filepath) (optionally inferred) + + ...as well as a small json file that advertises the features of the adapter + to OTIO. This class serves as the wrapper around these modules internal + to OTIO. You should not need to extend this class to create new adapters + for OTIO. + + For more information: + https://opentimelineio.readthedocs.io/en/latest/tutorials/write-an-adapter.html# # noqa + """ + _serializable_label = "Adapter.1" + + def __init__( + self, + name=None, + execution_scope=None, + filepath=None, + suffixes=None + ): + plugins.PythonPlugin.__init__( + self, + name, + execution_scope, + filepath + ) + + self.suffixes = suffixes or [] + + suffixes = core.serializable_field( + "suffixes", + type([]), + doc="File suffixes associated with this adapter." + ) + + def has_feature(self, feature_string): + """ + return true if adapter supports feature_string, which must be a key + of the _FEATURE_MAP dictionary. + + Will trigger a call to self.module(), which imports the plugin. + """ + + if feature_string.lower() not in _FEATURE_MAP: + return False + + search_strs = _FEATURE_MAP[feature_string] + + try: + return any(hasattr(self.module(), s) for s in search_strs) + except ImportError: + # @TODO: should issue a warning that the plugin was not importable? + return False + + def read_from_file( + self, + filepath, + media_linker_name=media_linker.MediaLinkingPolicy.ForceDefaultLinker, + media_linker_argument_map=None, + hook_function_argument_map={}, + **adapter_argument_map + ): + """Execute the read_from_file function on this adapter. + + If read_from_string exists, but not read_from_file, execute that with + a trivial file object wrapper. 
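+ A minimal usage sketch (keyword arguments beyond those named above + are forwarded unchanged to the adapter module; the filename and the + rate argument here are illustrative, not required): + + adapter = otio.adapters.from_name("cmx_3600") + timeline = adapter.read_from_file("cut.edl", rate=25)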
+ """ + + if media_linker_argument_map is None: + media_linker_argument_map = {} + + result = None + + if ( + not self.has_feature("read_from_file") and + self.has_feature("read_from_string") + ): + with open(filepath, 'r') as fo: + contents = fo.read() + result = self._execute_function( + "read_from_string", + input_str=contents, + **adapter_argument_map + ) + else: + result = self._execute_function( + "read_from_file", + filepath=filepath, + **adapter_argument_map + ) + + hook_function_argument_map['adapter_arguments'] = adapter_argument_map + hook_function_argument_map['media_linker_argument_map'] = \ + media_linker_argument_map + result = hooks.run("post_adapter_read", result, + extra_args=hook_function_argument_map) + + if media_linker_name and ( + media_linker_name != media_linker.MediaLinkingPolicy.DoNotLinkMedia + ): + _with_linked_media_references( + result, + media_linker_name, + media_linker_argument_map + ) + + result = hooks.run("post_media_linker", result, + extra_args=media_linker_argument_map) + + return result + + def write_to_file( + self, + input_otio, + filepath, + hook_function_argument_map={}, + **adapter_argument_map + ): + """Execute the write_to_file function on this adapter. + + If write_to_string exists, but not write_to_file, execute that with + a trivial file object wrapper. + """ + hook_function_argument_map['adapter_arguments'] = adapter_argument_map + input_otio = hooks.run("pre_adapter_write", input_otio, + extra_args=hook_function_argument_map) + + if ( + not self.has_feature("write_to_file") and + self.has_feature("write_to_string") + ): + result = self.write_to_string(input_otio, **adapter_argument_map) + with open(filepath, 'w') as fo: + fo.write(result) + return filepath + + return self._execute_function( + "write_to_file", + input_otio=input_otio, + filepath=filepath, + **adapter_argument_map + ) + + def read_from_string( + self, + input_str, + media_linker_name=media_linker.MediaLinkingPolicy.ForceDefaultLinker, + media_linker_argument_map=None, + hook_function_argument_map={}, + **adapter_argument_map + ): + """Call the read_from_string function on this adapter.""" + + result = self._execute_function( + "read_from_string", + input_str=input_str, + **adapter_argument_map + ) + hook_function_argument_map['adapter_arguments'] = adapter_argument_map + hook_function_argument_map['media_linker_argument_map'] = \ + media_linker_argument_map + + result = hooks.run("post_adapter_read", result, + extra_args=hook_function_argument_map) + + if media_linker_name and ( + media_linker_name != media_linker.MediaLinkingPolicy.DoNotLinkMedia + ): + _with_linked_media_references( + result, + media_linker_name, + media_linker_argument_map + ) + + # @TODO: Should this run *ONLY* if the media linker ran? 
+ result = hooks.run("post_media_linker", result, + extra_args=hook_function_argument_map) + + return result + + def write_to_string( + self, + input_otio, + hook_function_argument_map={}, + **adapter_argument_map + ): + """Call the write_to_string function on this adapter.""" + + hook_function_argument_map['adapter_arguments'] = adapter_argument_map + input_otio = hooks.run("pre_adapter_write", input_otio, + extra_args=hook_function_argument_map) + + return self._execute_function( + "write_to_string", + input_otio=input_otio, + **adapter_argument_map + ) + + def __str__(self): + return ( + "Adapter(" + "{}, " + "{}, " + "{}, " + "{}" + ")".format( + repr(self.name), + repr(self.execution_scope), + repr(self.filepath), + repr(self.suffixes), + ) + ) + + def __repr__(self): + return ( + "otio.adapter.Adapter(" + "name={}, " + "execution_scope={}, " + "filepath={}, " + "suffixes={}" + ")".format( + repr(self.name), + repr(self.execution_scope), + repr(self.filepath), + repr(self.suffixes), + ) + ) + + +def _with_linked_media_references( + read_otio, + media_linker_name, + media_linker_argument_map +): + """Link media references in the read_otio if possible. + + Makes changes in place and returns the read_otio structure back. + """ + + if not read_otio or not media_linker.from_name(media_linker_name): + return read_otio + + # not every object the adapter reads has an "each_clip" method, so this + # skips objects without one. + clpfn = getattr(read_otio, "each_clip", None) + if clpfn is None: + return read_otio + + for cl in read_otio.each_clip(): + new_mr = media_linker.linked_media_reference( + cl, + media_linker_name, + # @TODO: should any context get wired in at this point? + media_linker_argument_map + ) + if new_mr is not None: + cl.media_reference = new_mr + + return read_otio + + +# map of attr to look for vs feature name in the adapter plugin +_FEATURE_MAP = { + 'read_from_file': ['read_from_file'], + 'read_from_string': ['read_from_string'], + 'read': ['read_from_file', 'read_from_string'], + 'write_to_file': ['write_to_file'], + 'write_to_string': ['write_to_string'], + 'write': ['write_to_file', 'write_to_string'] +} diff --git a/pype/vendor/python/python_2/opentimelineio/adapters/builtin_adapters.plugin_manifest.json b/pype/vendor/python/python_2/opentimelineio/adapters/builtin_adapters.plugin_manifest.json new file mode 100644 index 00000000000..5e394a67d82 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/adapters/builtin_adapters.plugin_manifest.json @@ -0,0 +1,31 @@ +{ + "OTIO_SCHEMA" : "PluginManifest.1", + "adapters": [ + { + "OTIO_SCHEMA": "Adapter.1", + "name": "fcp_xml", + "execution_scope": "in process", + "filepath": "fcp_xml.py", + "suffixes": ["xml"] + }, + { + "OTIO_SCHEMA" : "Adapter.1", + "name" : "otio_json", + "execution_scope" : "in process", + "filepath" : "otio_json.py", + "suffixes" : ["otio"] + }, + { + "OTIO_SCHEMA" : "Adapter.1", + "name" : "cmx_3600", + "execution_scope" : "in process", + "filepath" : "cmx_3600.py", + "suffixes" : ["edl"] + } + ], + "hooks": { + "post_adapter_read" : [], + "post_media_linker" : [], + "pre_adapter_write" : [] + } +} diff --git a/pype/vendor/python/python_2/opentimelineio/adapters/cmx_3600.py b/pype/vendor/python/python_2/opentimelineio/adapters/cmx_3600.py new file mode 100644 index 00000000000..f3275e3929e --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/adapters/cmx_3600.py @@ -0,0 +1,1306 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the 
"Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""OpenTimelineIO CMX 3600 EDL Adapter""" + +# Note: this adapter is not an ideal model for new adapters, but it works. +# If you want to write your own adapter, please see: +# https://opentimelineio.readthedocs.io/en/latest/tutorials/write-an-adapter.html# + +# TODO: Flesh out Attribute Handler +# TODO: Add line numbers to errors and warnings +# TODO: currently tracks with linked audio/video will lose their linkage when +# read into OTIO. + +import os +import re +import math +import collections + +from .. import ( + exceptions, + schema, + opentime, +) + + +class EDLParseError(exceptions.OTIOError): + pass + + +# regex for parsing the playback speed of an M2 event +SPEED_EFFECT_RE = re.compile( + r"(?P.*?)\s*(?P[0-9\.]*)\s*(?P[0-9:]{11})$" +) + + +# these are all CMX_3600 transition codes +# the wipe is written in regex format because it is W### where the ### is +# a 'wipe code' +# @TODO: not currently read by the transition code +transition_regex_map = { + 'C': 'cut', + 'D': 'dissolve', + r'W\d{3}': 'wipe', + 'KB': 'key_background', + 'K': 'key_foreground', + 'KO': 'key_overlay' +} + +# CMX_3600 supports some shorthand for channel assignments +# We name the actual tracks V and A1,A2,A3,etc. +# This channel_map tells you which track to use for each channel shorthand. +# Channels not listed here are used as track names verbatim. +channel_map = { + 'A': ['A1'], + 'AA': ['A1', 'A2'], + 'B': ['V', 'A1'], + 'A2/V': ['V', 'A2'], + 'AA/V': ['V', 'A1', 'A2'] +} + + +# Currently, the 'style' argument determines +# the comment string for the media reference: +# 'avid': '* FROM CLIP:' (default) +# 'nucoda': '* FROM FILE:' +# When adding a new style, please be sure to add sufficient tests +# to verify both the new and existing styles. +VALID_EDL_STYLES = ['avid', 'nucoda'] + + +class EDLParser(object): + def __init__(self, edl_string, rate=24, ignore_timecode_mismatch=False): + self.timeline = schema.Timeline() + + # Start with no tracks. They will be added as we encounter them. + # This dict maps a track name (e.g "A2" or "V") to an OTIO Track. + self.tracks_by_name = {} + + self.ignore_timecode_mismatch = ignore_timecode_mismatch + + self.parse_edl(edl_string, rate=rate) + + # TODO: Sort the tracks V, then A1,A2,etc. 
+ + def add_clip(self, line, comments, rate=24): + comment_handler = CommentHandler(comments) + clip_handler = ClipHandler(line, comment_handler.handled, rate=rate) + clip = clip_handler.clip + if comment_handler.unhandled: + clip.metadata.setdefault("cmx_3600", {}) + clip.metadata['cmx_3600'].setdefault("comments", []) + clip.metadata['cmx_3600']['comments'] += ( + comment_handler.unhandled + ) + + # Add reel name to metadata + # A reel name of `AX` represents an unknown or auxiliary source + # We don't currently track these sources outside of this adapter + # So let's skip adding AX reels as metadata for now, + # as that would dirty json outputs with non-relevant information + if clip_handler.reel and clip_handler.reel != 'AX': + clip.metadata.setdefault("cmx_3600", {}) + clip.metadata['cmx_3600']['reel'] = clip_handler.reel + + # each edit point between two clips is a transition. the default is a + # cut. in the edl format the transition codes are for the transition + # into the clip + self.add_transition( + clip_handler, + clip_handler.transition_type, + clip_handler.transition_data + ) + + tracks = self.tracks_for_channel(clip_handler.channel_code) + for track in tracks: + + edl_rate = clip_handler.edl_rate + record_in = opentime.from_timecode( + clip_handler.record_tc_in, + edl_rate + ) + record_out = opentime.from_timecode( + clip_handler.record_tc_out, + edl_rate + ) + + src_duration = clip.duration() + rec_duration = record_out - record_in + if rec_duration != src_duration: + motion = comment_handler.handled.get('motion_effect') + freeze = comment_handler.handled.get('freeze_frame') + if motion is not None or freeze is not None: + # Adjust the clip to match the record duration + clip.source_range = opentime.TimeRange( + start_time=clip.source_range.start_time, + duration=rec_duration + ) + + if freeze is not None: + clip.effects.append(schema.FreezeFrame()) + # XXX remove 'FF' suffix (writing edl will add it back) + if clip.name.endswith(' FF'): + clip.name = clip.name[:-3] + elif motion is not None: + fps = float( + SPEED_EFFECT_RE.match(motion).group("speed") + ) + time_scalar = fps / rate + clip.effects.append( + schema.LinearTimeWarp(time_scalar=time_scalar) + ) + + elif self.ignore_timecode_mismatch: + # Pretend there was no problem by adjusting the record_out. + # Note that we don't actually use record_out after this + # point in the code, since all of the subsequent math uses + # the clip's source_range. Adjusting the record_out is + # just to document what the implications of ignoring the + # mismatch here entails. + record_out = record_in + src_duration + + else: + raise EDLParseError( + "Source and record duration don't match: {} != {}" + " for clip {}".format( + src_duration, + rec_duration, + clip.name + )) + + if track.source_range is None: + zero = opentime.RationalTime(0, edl_rate) + track.source_range = opentime.TimeRange( + start_time=zero - record_in, + duration=zero + ) + + track_end = track.duration() - track.source_range.start_time + if record_in < track_end: + if self.ignore_timecode_mismatch: + # shift it over + record_in = track_end + record_out = record_in + rec_duration + else: + raise EDLParseError( + "Overlapping record in value: {} for clip {}".format( + clip_handler.record_tc_in, + clip.name + )) + + # If the next clip is supposed to start beyond the end of the + # clips we've accumulated so far, then we need to add a Gap + # to fill that space. This can happen when an EDL has record + # timecodes that are sparse (e.g.
from a single track of a + # multi-track composition). + if record_in > track_end and len(track) > 0: + gap = schema.Gap() + gap.source_range = opentime.TimeRange( + start_time=opentime.RationalTime(0, edl_rate), + duration=record_in - track_end + ) + track.append(gap) + track.source_range = opentime.TimeRange( + start_time=track.source_range.start_time, + duration=track.source_range.duration + gap.duration() + ) + + track.append(clip) + track.source_range = opentime.TimeRange( + start_time=track.source_range.start_time, + duration=track.source_range.duration + clip.duration() + ) + + def guess_kind_for_track_name(self, name): + if name.startswith("V"): + return schema.TrackKind.Video + if name.startswith("A"): + return schema.TrackKind.Audio + return schema.TrackKind.Video + + def tracks_for_channel(self, channel_code): + # Expand channel shorthand into a list of track names. + if channel_code in channel_map: + track_names = channel_map[channel_code] + else: + track_names = [channel_code] + + # Create any channels we don't already have + for track_name in track_names: + if track_name not in self.tracks_by_name: + track = schema.Track( + name=track_name, + kind=self.guess_kind_for_track_name(track_name) + ) + self.tracks_by_name[track_name] = track + self.timeline.tracks.append(track) + + # Return a list of actual tracks + return [self.tracks_by_name[c] for c in track_names] + + def add_transition(self, clip_handler, transition, data): + if transition not in ['C']: + md = clip_handler.clip.metadata.setdefault("cmx_3600", {}) + md["transition"] = transition + + def parse_edl(self, edl_string, rate=24): + # edl 'events' can be composed of an indeterminate number of lines. + # we are translating 'events' to a single clip and transition, + # then we add the transition and the clip to all channels the 'event' + # channel code is mapped to. the transition given in the 'event' + # precedes the clip + + # remove all blank lines from the edl + edl_lines = [ + l for l in (l.strip() for l in edl_string.splitlines()) if l + ] + + while edl_lines: + # a basic for loop won't work cleanly since we need to look ahead + # at array elements to determine what type of 'event' we are + # looking at + line = edl_lines.pop(0) + + if line.startswith('TITLE:'): + # this is the first line of interest in an edl + # it is required to be in the header + self.timeline.name = line.replace('TITLE:', '').strip() + + elif line.startswith('FCM'): + # this can occur either in the header or before any 'event' + # in both cases we can ignore it since it is meant for tape + # timecode + pass + + elif line.startswith('SPLIT'): + # this is the only comment preceding an 'event' that we care + # about in our context. it simply means the next two clips will + # have the same comment data. it is for reading purposes only + audio_delay = None + video_delay = None + + if 'AUDIO DELAY' in line: + audio_delay = line.split()[-1].strip() + if 'VIDEO DELAY' in line: + video_delay = line.split()[-1].strip() + if audio_delay and video_delay: + raise EDLParseError( + 'both audio and video delay declared after SPLIT.' + ) + if not (audio_delay or video_delay): + raise EDLParseError( + 'either audio or video delay declared after SPLIT.'
+ ) + + line_1 = edl_lines.pop(0) + line_2 = edl_lines.pop(0) + + comments = [] + while edl_lines: + if re.match(r'^\D', edl_lines[0]): + comments.append(edl_lines.pop(0)) + else: + break + self.add_clip(line_1, comments, rate=rate) + self.add_clip(line_2, comments, rate=rate) + + elif line[0].isdigit(): + # all 'events' start with an edit decision. this is + # denoted by the line beginning with a padded integer 000-999 + comments = [] + while edl_lines: + # any non-numbered lines after an edit decision should be + # treated as 'comments' + # comments are string tags used by the reader to get extra + # information that cannot be found in the restricted edl + # format + if re.match(r'^\D', edl_lines[0]): + comments.append(edl_lines.pop(0)) + else: + break + + self.add_clip(line, comments, rate=rate) + + else: + raise EDLParseError('Unknown event type') + + for track in self.timeline.tracks: + # if the source_range is the same as the available_range + # then we don't need to set it at all. + if track.source_range == track.available_range(): + track.source_range = None + + +class ClipHandler(object): + + def __init__(self, line, comment_data, rate=24): + self.clip_num = None + self.reel = None + self.channel_code = None + self.edl_rate = rate + self.transition_id = None + self.transition_data = None + self.source_tc_in = None + self.source_tc_out = None + self.record_tc_in = None + self.record_tc_out = None + + self.parse(line) + self.clip = self.make_clip(comment_data) + + def make_clip(self, comment_data): + clip = schema.Clip() + clip.name = str(self.clip_num) + + # BLACK/BL and BARS are called out as "Special Source Identifiers" in + # the documents referenced here: + # https://github.com/PixarAnimationStudios/OpenTimelineIO#cmx3600-edl + if self.reel in ['BL', 'BLACK']: + clip.media_reference = schema.GeneratorReference() + # TODO: Replace with enum, once one exists + clip.media_reference.generator_kind = 'black' + elif self.reel == 'BARS': + clip.media_reference = schema.GeneratorReference() + # TODO: Replace with enum, once one exists + clip.media_reference.generator_kind = 'SMPTEBars' + elif 'media_reference' in comment_data: + clip.media_reference = schema.ExternalReference() + clip.media_reference.target_url = comment_data[ + 'media_reference' + ] + else: + clip.media_reference = schema.MissingReference() + + # this could currently break without a 'FROM CLIP' comment. + # Without that there is no 'media_reference'. Do we have a default + # clip name?
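+ # Name resolution order: an explicit 'FROM CLIP NAME' comment wins, + # then the basename (minus extension) of the media reference's + # target_url, otherwise the event number assigned above is kept.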
+ if 'clip_name' in comment_data: + clip.name = comment_data["clip_name"] + elif ( + clip.media_reference and + hasattr(clip.media_reference, 'target_url') and + clip.media_reference.target_url is not None + ): + clip.name = os.path.splitext( + os.path.basename(clip.media_reference.target_url) + )[0] + + asc_sop = comment_data.get('asc_sop', None) + asc_sat = comment_data.get('asc_sat', None) + if asc_sop or asc_sat: + slope = (1, 1, 1) + offset = (0, 0, 0) + power = (1, 1, 1) + sat = 1.0 + + if asc_sop: + triple = r'([-+]?[\d.]+) ([-+]?[\d.]+) ([-+]?[\d.]+)' + m = re.match( + r'\(' + + triple + + r'\)\s*\(' + + triple + r'\)\s*\(' + + triple + r'\)', + asc_sop + ) + if m: + floats = [float(g) for g in m.groups()] + slope = [floats[0], floats[1], floats[2]] + offset = [floats[3], floats[4], floats[5]] + power = [floats[6], floats[7], floats[8]] + else: + raise EDLParseError( + 'Invalid ASC_SOP found: {}'.format(asc_sop)) + + if asc_sat: + sat = float(asc_sat) + + clip.metadata['cdl'] = { + 'asc_sat': sat, + 'asc_sop': { + 'slope': slope, + 'offset': offset, + 'power': power + } + } + + if 'locator' in comment_data: + # An example EDL locator line looks like this: + # * LOC: 01:00:01:14 RED ANIM FIX NEEDED + # We get the part after "LOC: " as the comment_data entry + # Given the fixed-width nature of these, we could be more + # strict about the field widths, but there are many + # variations of EDL, so if we are lenient then maybe we + # can handle more of them? Only real-world testing will + # determine this for sure... + m = re.match( + r'(\d\d:\d\d:\d\d:\d\d)\s+(\w*)\s+(.*)', + comment_data["locator"] + ) + if m: + marker = schema.Marker() + marker.marked_range = opentime.TimeRange( + start_time=opentime.from_timecode( + m.group(1), + self.edl_rate + ), + duration=opentime.RationalTime() + ) + + # always write the source value into metadata, in case it + # is not a valid enum somehow. + color_parsed_from_file = m.group(2) + + marker.metadata = { + "cmx_3600": { + "color": color_parsed_from_file + } + } + + # @TODO: if it is a valid color enum use it, otherwise fall + # back to red + if hasattr( + schema.MarkerColor, + color_parsed_from_file.upper() + ): + marker.color = color_parsed_from_file.upper() + else: + marker.color = schema.MarkerColor.RED + + marker.name = m.group(3) + clip.markers.append(marker) + else: + # TODO: Should we report this as a warning somehow?
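+ # As-is, an unparseable LOC value is dropped outright: it was + # already consumed as a handled comment, so it never reaches + # the cmx_3600 "comments" metadata either.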
+ pass + + clip.source_range = opentime.range_from_start_end_time( + opentime.from_timecode(self.source_tc_in, self.edl_rate), + opentime.from_timecode(self.source_tc_out, self.edl_rate) + ) + + return clip + + def parse(self, line): + fields = tuple(e.strip() for e in line.split() if e.strip()) + field_count = len(fields) + + if field_count == 9: + # has transition data + # this is for edits with timing or other needed info + # transition data for D and W*** transitions is an integer that + # denotes frame count + # I haven't figured out how the key transitions (K, KB, KO) work + ( + self.clip_num, + self.reel, + self.channel_code, + self.transition_type, + self.transition_data, + self.source_tc_in, + self.source_tc_out, + self.record_tc_in, + self.record_tc_out + ) = fields + + elif field_count == 8: + # no transition data + # this is for basic cuts + ( + self.clip_num, + self.reel, + self.channel_code, + self.transition_type, + self.source_tc_in, + self.source_tc_out, + self.record_tc_in, + self.record_tc_out + ) = fields + + else: + raise EDLParseError( + 'incorrect number of fields [{0}] in form statement: {1}' + ''.format(field_count, line)) + + # Frame numbers (not just timecode) are ok + for prop in [ + 'source_tc_in', + 'source_tc_out', + 'record_tc_in', + 'record_tc_out' + ]: + if ':' not in getattr(self, prop): + setattr( + self, + prop, + opentime.to_timecode( + opentime.from_frames( + int(getattr(self, prop)), + self.edl_rate + ), + self.edl_rate + ) + ) + + +class CommentHandler(object): + # this is the form that all comment 'id' tags take + regex_template = r'\*?\s*{id}:?\s*(?P<comment_body>.*)' + + # this should be a map of all known comments that we can read + # 'FROM CLIP' or 'FROM FILE' is a required comment to link media + # An exception is raised if both 'FROM CLIP' and 'FROM FILE' are found + # needs to be ordered so that FROM CLIP NAME gets matched before FROM CLIP + comment_id_map = collections.OrderedDict([ + ('FROM CLIP NAME', 'clip_name'), + ('FROM CLIP', 'media_reference'), + ('FROM FILE', 'media_reference'), + ('LOC', 'locator'), + ('ASC_SOP', 'asc_sop'), + ('ASC_SAT', 'asc_sat'), + ('M2', 'motion_effect'), + ('\\* FREEZE FRAME', 'freeze_frame'), + ]) + + def __init__(self, comments): + self.handled = {} + self.unhandled = [] + for comment in comments: + self.parse(comment) + + def parse(self, comment): + for comment_id, comment_type in self.comment_id_map.items(): + regex = self.regex_template.format(id=comment_id) + match = re.match(regex, comment) + if match: + self.handled[comment_type] = match.group( + 'comment_body' + ).strip() + break + else: + stripped = comment.lstrip('*').strip() + if stripped: + self.unhandled.append(stripped) + + +def _expand_transitions(timeline): + """Convert clips with metadata/transition == 'D' into OTIO transitions.""" + + tracks = timeline.tracks + remove_list = [] + replace_list = [] + append_list = [] + for track in tracks: + track_iter = iter(track) + # avid inserts an extra clip for the source + prev_prev = None + prev = None + clip = next(track_iter, None) + next_clip = next(track_iter, None) + while clip is not None: + transition_type = clip.metadata.get('cmx_3600', {}).get( + 'transition', + 'C' + ) + + if transition_type == 'C': + # nothing to do, continue to the next iteration of the loop + prev_prev = prev + prev = clip + clip = next_clip + next_clip = next(track_iter, None) + continue + if transition_type not in ['D']: + raise EDLParseError( + "Transition type '{}' not supported by the CMX EDL reader "
"currently.".format(transition_type) + ) + + transition_duration = clip.duration() + + # EDL doesn't have enough data to know where the cut point was, so + # this arbitrarily puts it in the middle of the transition + pre_cut = math.floor(transition_duration.value / 2) + post_cut = transition_duration.value - pre_cut + mid_tran_cut_pre_duration = opentime.RationalTime( + pre_cut, + transition_duration.rate + ) + mid_tran_cut_post_duration = opentime.RationalTime( + post_cut, + transition_duration.rate + ) + + # expand the previous + expansion_clip = None + if prev and not prev_prev: + expansion_clip = prev + elif prev_prev: + expansion_clip = prev_prev + if prev: + remove_list.append((track, prev)) + + sr = expansion_clip.source_range + expansion_clip.source_range = opentime.TimeRange( + start_time=sr.start_time, + duration=sr.duration + mid_tran_cut_pre_duration + ) + + # rebuild the clip as a transition + new_trx = schema.Transition( + name=clip.name, + # only supported type at the moment + transition_type=schema.TransitionTypes.SMPTE_Dissolve, + metadata=clip.metadata + ) + new_trx.in_offset = mid_tran_cut_pre_duration + new_trx.out_offset = mid_tran_cut_post_duration + + # in from to + replace_list.append((track, clip, new_trx)) + + # expand the next_clip + if next_clip: + next_clip.source_range = opentime.TimeRange( + next_clip.source_range.start_time - mid_tran_cut_post_duration, + next_clip.source_range.duration + mid_tran_cut_post_duration + ) + else: + fill = schema.Gap( + source_range=opentime.TimeRange( + duration=mid_tran_cut_post_duration, + start_time=opentime.RationalTime( + 0, + transition_duration.rate + ) + ) + ) + append_list.append((track, fill)) + + prev = clip + clip = next_clip + next_clip = next(track_iter, None) + + for (track, from_clip, to_transition) in replace_list: + track[track.index(from_clip)] = to_transition + + for (track, clip_to_remove) in list(set(remove_list)): + # if clip_to_remove in track: + track.remove(clip_to_remove) + + for (track, clip) in append_list: + track.append(clip) + + return timeline + + +def read_from_string(input_str, rate=24, ignore_timecode_mismatch=False): + """Reads a CMX Edit Decision List (EDL) from a string. + Since EDLs don't contain metadata specifying the rate they are meant + for, you may need to specify the rate parameter (default is 24). + By default, read_from_string will throw an exception if it discovers + invalid timecode in the EDL. For example, if a clip's record timecode + overlaps with the previous cut. Since this is a common mistake in + many EDLs, you can specify ignore_timecode_mismatch=True, which will + supress these errors and attempt to guess at the correct record + timecode based on the source timecode and adjacent cuts. + For best results, you may wish to do something like this: + + Example: + >>> try: + ... timeline = otio.adapters.read_from_string("mymovie.edl", rate=30) + ... except EDLParseError: + ... print('Log a warning here') + ... try: + ... timeline = otio.adapters.read_from_string( + ... "mymovie.edl", + ... rate=30, + ... ignore_timecode_mismatch=True) + ... except EDLParseError: + ... print('Log an error here') + """ + parser = EDLParser( + input_str, + rate=float(rate), + ignore_timecode_mismatch=ignore_timecode_mismatch + ) + result = parser.timeline + result = _expand_transitions(result) + return result + + +def write_to_string(input_otio, rate=None, style='avid', reelname_len=8): + # TODO: We should have convenience functions in Timeline for this? 
+ # also only works for a single video track at the moment + + video_tracks = [t for t in input_otio.tracks + if t.kind == schema.TrackKind.Video] + audio_tracks = [t for t in input_otio.tracks + if t.kind == schema.TrackKind.Audio] + + if len(video_tracks) != 1: + raise exceptions.NotSupportedError( + "Only a single video track is supported, got: {}".format( + len(video_tracks) + ) + ) + + if len(audio_tracks) > 2: + raise exceptions.NotSupportedError( + "No more than 2 audio tracks are supported." + ) + # if audio_tracks: + # raise exceptions.NotSupportedError( + # "No audio tracks are currently supported." + # ) + + # TODO: We should try to detect the frame rate and output an + # appropriate "FCM: NON-DROP FRAME" etc here. + + writer = EDLWriter( + tracks=input_otio.tracks, + # Assume all rates are the same as the 1st track's + rate=rate or input_otio.tracks[0].duration().rate, + style=style, + reelname_len=reelname_len + ) + + return writer.get_content_for_track_at_index(0, title=input_otio.name) + + +class EDLWriter(object): + def __init__(self, tracks, rate, style, reelname_len=8): + self._tracks = tracks + self._rate = rate + self._style = style + self._reelname_len = reelname_len + + if style not in VALID_EDL_STYLES: + raise exceptions.NotSupportedError( + "The EDL style '{}' is not supported.".format( + style + ) + ) + + def get_content_for_track_at_index(self, idx, title): + track = self._tracks[idx] + + # Add a gap if the last child is a transition. + if isinstance(track[-1], schema.Transition): + gap = schema.Gap( + source_range=opentime.TimeRange( + start_time=track[-1].duration(), + duration=opentime.RationalTime(0.0, self._rate) + ) + ) + track.append(gap) + + # Note: Transitions in EDLs are unconventionally represented. + # + # Where a transition might normally be visualized like: + # |---57.0 Trans 43.0----| + # |------Clip1 102.0------|----------Clip2 143.0----------|Clip3 24.0| + # + # In an EDL it can be thought of more like this: + # |---0.0 Trans 100.0----| + # |Clip1 45.0|----------------Clip2 200.0-----------------|Clip3 24.0| + + # Adjust cut points to match EDL event representation. + for idx, child in enumerate(track): + if isinstance(child, schema.Transition): + if idx != 0: + # Shorten the a-side + sr = track[idx - 1].source_range + track[idx - 1].source_range = opentime.TimeRange( + start_time=sr.start_time, + duration=sr.duration - child.in_offset + ) + + # Lengthen the b-side + sr = track[idx + 1].source_range + track[idx + 1].source_range = opentime.TimeRange( + start_time=sr.start_time - child.in_offset, + duration=sr.duration + child.in_offset + ) + + # Just clean up the transition for goodness sake + in_offset = child.in_offset + child.in_offset = opentime.RationalTime(0.0, self._rate) + child.out_offset += in_offset + + # Group events into either simple clip/a-side or transition and b-side + # to match EDL edit/event representation and edit numbers. + events = [] + for idx, child in enumerate(track): + if isinstance(child, schema.Transition): + # Transition will be captured in subsequent iteration. 
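+ # (The DissolveEvent branch below folds the transition into the + # following clip's event, so skipping it here loses nothing.)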
+ continue + + prv = track[idx - 1] if idx > 0 else None + + if isinstance(prv, schema.Transition): + events.append( + DissolveEvent( + events[-1] if len(events) else None, + prv, + child, + self._tracks, + track.kind, + self._rate, + self._style, + self._reelname_len + ) + ) + elif isinstance(child, schema.Clip): + events.append( + Event( + child, + self._tracks, + track.kind, + self._rate, + self._style, + self._reelname_len + ) + ) + elif isinstance(child, schema.Gap): + # Gaps are represented as missing record timecode, no event + # needed. + pass + + content = "TITLE: {}\n\n".format(title) if title else '' + + # Convert each event/dissolve-event into plain text. + for idx, event in enumerate(events): + event.edit_number = idx + 1 + content += event.to_edl_format() + '\n' + + return content + + +def _supported_timing_effects(clip): + return [ + fx for fx in clip.effects + if isinstance(fx, schema.LinearTimeWarp) + ] + + +def _relevant_timing_effect(clip): + # check to see if there is more than one timing effect + effects = _supported_timing_effects(clip) + + if effects != clip.effects: + for thing in clip.effects: + if thing not in effects and isinstance(thing, schema.TimeEffect): + raise exceptions.NotSupportedError( + "Clip contains timing effects not supported by the EDL" + " adapter.\nClip: {}".format(str(clip))) + + timing_effect = None + if effects: + timing_effect = effects[0] + if len(effects) > 1: + raise exceptions.NotSupportedError( + "EDL Adapter only allows one timing effect / clip." + ) + + return timing_effect + + +class Event(object): + def __init__( + self, + clip, + tracks, + kind, + rate, + style, + reelname_len + ): + + line = EventLine(kind, rate, reel=_reel_from_clip(clip, reelname_len)) + line.source_in = clip.source_range.start_time + line.source_out = clip.source_range.end_time_exclusive() + + timing_effect = _relevant_timing_effect(clip) + + if timing_effect: + if timing_effect.effect_name == "FreezeFrame": + line.source_out = line.source_in + opentime.RationalTime( + 1, + line.source_in.rate + ) + elif timing_effect.effect_name == "LinearTimeWarp": + value = clip.trimmed_range().duration.value / timing_effect.time_scalar + line.source_out = ( + line.source_in + opentime.RationalTime(value, rate)) + + range_in_timeline = clip.transformed_time_range( + clip.trimmed_range(), + tracks + ) + line.record_in = range_in_timeline.start_time + line.record_out = range_in_timeline.end_time_exclusive() + self.line = line + + self.comments = _generate_comment_lines( + clip=clip, + style=style, + edl_rate=rate, + reelname_len=reelname_len, + from_or_to='FROM' + ) + + self.clip = clip + self.source_out = line.source_out + self.record_out = line.record_out + self.reel = line.reel + + def __str__(self): + return '{type}({name})'.format( + type=self.clip.schema_name(), + name=self.clip.name + ) + + def to_edl_format(self): + """ + Example output: + 002 AX V C 00:00:00:00 00:00:00:05 00:00:00:05 00:00:00:10 + * FROM CLIP NAME: test clip2 + * FROM FILE: S:\\var\\tmp\\test.exr + + """ + lines = [self.line.to_edl_format(self.edit_number)] + lines += self.comments if len(self.comments) else [] + + return "\n".join(lines) + + +class DissolveEvent(object): + + def __init__( + self, + a_side_event, + transition, + b_side_clip, + tracks, + kind, + rate, + style, + reelname_len + ): + # Note: We don't make the A-Side event line here as it is represented + # by its own event (edit number). 
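+ # The cut line below is the zero-length statement that precedes the + # dissolve line in the two-line EDL event; with no A-side event it + # falls back to 'BL' (black) with all-zero timecodes.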
+ + cut_line = EventLine(kind, rate) + + if a_side_event: + cut_line.reel = a_side_event.reel + cut_line.source_in = a_side_event.source_out + cut_line.source_out = a_side_event.source_out + cut_line.record_in = a_side_event.record_out + cut_line.record_out = a_side_event.record_out + + self.from_comments = _generate_comment_lines( + clip=a_side_event.clip, + style=style, + edl_rate=rate, + reelname_len=reelname_len, + from_or_to='FROM' + ) + else: + cut_line.reel = 'BL' + cut_line.source_in = opentime.RationalTime(0.0, rate) + cut_line.source_out = opentime.RationalTime(0.0, rate) + cut_line.record_in = opentime.RationalTime(0.0, rate) + cut_line.record_out = opentime.RationalTime(0.0, rate) + + self.cut_line = cut_line + + dslve_line = EventLine( + kind, + rate, + reel=_reel_from_clip(b_side_clip, reelname_len) + ) + dslve_line.source_in = b_side_clip.source_range.start_time + dslve_line.source_out = b_side_clip.source_range.end_time_exclusive() + range_in_timeline = b_side_clip.transformed_time_range( + b_side_clip.trimmed_range(), + tracks + ) + dslve_line.record_in = range_in_timeline.start_time + dslve_line.record_out = range_in_timeline.end_time_exclusive() + dslve_line.dissolve_length = transition.out_offset + self.dissolve_line = dslve_line + + self.to_comments = _generate_comment_lines( + clip=b_side_clip, + style=style, + edl_rate=rate, + reelname_len=reelname_len, + from_or_to='TO' + ) + + self.a_side_event = a_side_event + self.transition = transition + self.b_side_clip = b_side_clip + + # Expose so that any subsequent dissolves can borrow their values. + self.clip = b_side_clip + self.source_out = dslve_line.source_out + self.record_out = dslve_line.record_out + self.reel = dslve_line.reel + + def __str__(self): + a_side = self.a_side_event + return '{a_type}({a_name}) -> {b_type}({b_name})'.format( + a_type=a_side.clip.schema_name() if a_side else '', + a_name=a_side.clip.name if a_side else '', + b_type=self.b_side_clip.schema_name(), + b_name=self.b_side_clip.name + ) + + def to_edl_format(self): + """ + Example output: + + Cross dissolve... + 002 Clip1 V C 00:00:07:08 00:00:07:08 00:00:01:21 00:00:01:21 + 002 Clip2 V D 100 00:00:09:07 00:00:17:15 00:00:01:21 00:00:10:05 + * FROM CLIP NAME: Clip1 + * FROM CLIP: /var/tmp/clip1.001.exr + * TO CLIP NAME: Clip2 + * TO CLIP: /var/tmp/clip2.001.exr + + Fade in... + 001 BL V C 00:00:00:00 00:00:00:00 00:00:00:00 00:00:00:00 + 001 My_Clip V D 012 00:00:02:02 00:00:03:04 00:00:00:00 00:00:01:02 + * TO CLIP NAME: My Clip + * TO FILE: /var/tmp/clip.001.exr + + Fade out... 
+ 002 My_Clip V C 00:00:01:12 00:00:01:12 00:00:00:12 00:00:00:12 + 002 BL V D 012 00:00:00:00 00:00:00:12 00:00:00:12 00:00:01:00 + * FROM CLIP NAME: My Clip + * FROM FILE: /var/tmp/clip.001.exr + """ + + lines = [ + self.cut_line.to_edl_format(self.edit_number), + self.dissolve_line.to_edl_format(self.edit_number) + ] + lines += self.from_comments if hasattr(self, 'from_comments') else [] + lines += self.to_comments if len(self.to_comments) else [] + + return "\n".join(lines) + + +class EventLine(object): + def __init__(self, kind, rate, reel='AX'): + self.reel = reel + self._kind = 'V' if kind == schema.TrackKind.Video else 'A' + self._rate = rate + + self.source_in = opentime.RationalTime(0.0, rate=rate) + self.source_out = opentime.RationalTime(0.0, rate=rate) + self.record_in = opentime.RationalTime(0.0, rate=rate) + self.record_out = opentime.RationalTime(0.0, rate=rate) + + self.dissolve_length = opentime.RationalTime(0.0, rate) + + def to_edl_format(self, edit_number): + ser = { + 'edit': edit_number, + 'reel': self.reel, + 'kind': self._kind, + 'src_in': opentime.to_timecode(self.source_in, self._rate), + 'src_out': opentime.to_timecode(self.source_out, self._rate), + 'rec_in': opentime.to_timecode(self.record_in, self._rate), + 'rec_out': opentime.to_timecode(self.record_out, self._rate), + 'diss': int( + opentime.to_frames(self.dissolve_length, self._rate) + ), + } + + if self.is_dissolve(): + return "{edit:03d} {reel:8} {kind:5} D {diss:03d} " \ + "{src_in} {src_out} {rec_in} {rec_out}".format(**ser) + else: + return "{edit:03d} {reel:8} {kind:5} C " \ + "{src_in} {src_out} {rec_in} {rec_out}".format(**ser) + + def is_dissolve(self): + return self.dissolve_length.value > 0 + + +def _generate_comment_lines( + clip, + style, + edl_rate, + reelname_len, + from_or_to='FROM' +): + lines = [] + url = None + + if not clip or isinstance(clip, schema.Gap): + return [] + + suffix = '' + timing_effect = _relevant_timing_effect(clip) + if timing_effect and timing_effect.effect_name == 'FreezeFrame': + suffix = ' FF' + + if clip.media_reference: + if hasattr(clip.media_reference, 'target_url'): + url = clip.media_reference.target_url + + else: + url = clip.name + + if from_or_to not in ['FROM', 'TO']: + raise exceptions.NotSupportedError( + "The clip FROM or TO value '{}' is not supported.".format( + from_or_to + ) + ) + + if timing_effect and isinstance(timing_effect, schema.LinearTimeWarp): + lines.append( + 'M2 {}\t\t{}\t\t\t{}'.format( + clip.name, + timing_effect.time_scalar * edl_rate, + opentime.to_timecode( + clip.trimmed_range().start_time, + edl_rate + ) + ) + ) + + if clip.name: + # Avid Media Composer outputs two spaces before the + # clip name so we match that. 
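+        # For example (illustrative values), the emitted comment line is:
+        #   * FROM CLIP NAME: test clip2
+        # with " FF" appended when a freeze-frame timing effect is present.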
+ lines.append( + "* {from_or_to} CLIP NAME: {name}{suffix}".format( + from_or_to=from_or_to, + name=clip.name, + suffix=suffix + ) + ) + if timing_effect and timing_effect.effect_name == "FreezeFrame": + lines.append('* * FREEZE FRAME') + if url and style == 'avid': + lines.append("* {from_or_to} CLIP: {url}".format( + from_or_to=from_or_to, + url=url + )) + if url and style == 'nucoda': + lines.append("* {from_or_to} FILE: {url}".format( + from_or_to=from_or_to, + url=url + )) + + if reelname_len and not clip.metadata.get('cmx_3600', {}).get('reel'): + lines.append("* OTIO TRUNCATED REEL NAME FROM: {url}".format( + url=os.path.basename(_flip_windows_slashes(url or clip.name)) + )) + + cdl = clip.metadata.get('cdl') + if cdl: + asc_sop = cdl.get('asc_sop') + asc_sat = cdl.get('asc_sat') + if asc_sop: + lines.append( + "*ASC_SOP ({} {} {}) ({} {} {}) ({} {} {})".format( + asc_sop['slope'][0], + asc_sop['slope'][1], + asc_sop['slope'][2], + asc_sop['offset'][0], + asc_sop['offset'][1], + asc_sop['offset'][2], + asc_sop['power'][0], + asc_sop['power'][1], + asc_sop['power'][2] + )) + if asc_sat: + lines.append("*ASC_SAT {}".format( + asc_sat + )) + + # Output any markers on this clip + for marker in clip.markers: + timecode = opentime.to_timecode( + marker.marked_range.start_time, + edl_rate + ) + + color = marker.color + meta = marker.metadata.get("cmx_3600") + if not color and meta and meta.get("color"): + color = meta.get("color").upper() + comment = (marker.name or '').upper() + lines.append("* LOC: {} {:7} {}".format(timecode, color, comment)) + + # If we are carrying any unhandled CMX 3600 comments on this clip + # then output them blindly. + extra_comments = clip.metadata.get('cmx_3600', {}).get('comments', []) + for comment in extra_comments: + lines.append("* {}".format(comment)) + + return lines + + +def _flip_windows_slashes(path): + return re.sub(r'\\', '/', path) + + +def _reel_from_clip(clip, reelname_len): + if isinstance(clip, schema.Gap): + return 'BL' + + elif clip.metadata.get('cmx_3600', {}).get('reel'): + return clip.metadata.get('cmx_3600').get('reel') + + _reel = clip.name or 'AX' + + if isinstance(clip.media_reference, schema.ExternalReference): + _reel = clip.media_reference.name or os.path.basename( + clip.media_reference.target_url + ) + + # Flip Windows slashes + _reel = os.path.basename(_flip_windows_slashes(_reel)) + + # Strip extension + reel = re.sub(r'([.][a-zA-Z]+)$', '', _reel) + + if reelname_len: + # Remove non valid characters + reel = re.sub(r'[^ a-zA-Z0-9]+', '', reel) + + if len(reel) > reelname_len: + reel = reel[:reelname_len] + + elif len(reel) < reelname_len: + reel += ' ' * (reelname_len - len(reel)) + + return reel diff --git a/pype/vendor/python/python_2/opentimelineio/adapters/fcp_xml.py b/pype/vendor/python/python_2/opentimelineio/adapters/fcp_xml.py new file mode 100644 index 00000000000..48f684cc360 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/adapters/fcp_xml.py @@ -0,0 +1,1941 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. 
This License does not grant permission to use the trade
+# names, trademarks, service marks, or product names of the Licensor
+# and its affiliates, except as required to comply with Section 4(c) of
+# the License and to reproduce the content of the NOTICE file.
+#
+# You may obtain a copy of the Apache License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the Apache License with the above modification is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the Apache License for the specific
+# language governing permissions and limitations under the Apache License.
+#
+
+"""OpenTimelineIO Final Cut Pro 7 XML Adapter."""
+
+import collections
+import functools
+import itertools
+import math
+import os
+import re
+from xml.etree import cElementTree
+from xml.dom import minidom
+
+# urlparse's name changes in Python 3
+try:
+    # Python 2.7
+    import urlparse as urllib_parse
+except ImportError:
+    # Python 3
+    import urllib.parse as urllib_parse
+
+# Same with the ABC classes from collections
+try:
+    # Python 3
+    from collections.abc import Mapping
+except ImportError:
+    # Python 2.7
+    from collections import Mapping
+
+from opentimelineio import (
+    core,
+    opentime,
+    schema,
+)
+
+# namespace to use for metadata
+META_NAMESPACE = 'fcp_xml'
+
+# Regex to match identifiers like clipitem-22
+ID_RE = re.compile(r"^(?P<tag>[a-zA-Z]*)-(?P<id>\d*)$")
+
+
+# ---------
+# utilities
+# ---------
+
+
+class _Context(Mapping):
+    """
+    An inherited value context.
+
+    In FCP XML there is a concept of inheritance down the element hierarchy.
+    For instance, a ``clip`` element may not specify the ``rate`` locally, but
+    instead inherit it from the parent ``track`` element.
+
+    This object models that as a stack of elements. When a value needs to be
+    queried from the context, it will be gathered by walking from the top of
+    the stack until the value is found.
+
+    For example, to find the ``rate`` element as an immediate child most
+    appropriate to the current context, you would do something like::
+        ``my_current_context["./rate"]``
+
+    This object can be thought of as immutable. You get a new context when you
+    push an element. This prevents inadvertent tampering with parent contexts
+    that may be used at levels above.
+
+    This DOES NOT support ``id`` attribute dereferencing; please make sure to
+    do that prior to using this structure.
+
+    .. seealso:: https://developer.apple.com/library/archive/documentation\
+        /AppleApplications/Reference/FinalCutPro_XML/Basics/Basics.html#\
+        //apple_ref/doc/uid/TP30001154-TPXREF102
+    """
+
+    def __init__(self, element=None, parent_elements=None):
+        if parent_elements is not None:
+            self.elements = parent_elements[:]
+        else:
+            self.elements = []
+
+        if element is not None:
+            self.elements.append(element)
+
+    def _all_keys(self):
+        """
+        Returns a set of all the keys available in the context stack.
+        """
+        return set(
+            itertools.chain.from_iterable(e.keys() for e in self.elements)
+        )
+
+    def __getitem__(self, key):
+        # Walk down the contexts until the item is found
+        for element in reversed(self.elements):
+            found_element = element.find(key)
+            if found_element is not None:
+                return found_element
+
+        raise KeyError(key)
+
+    def __iter__(self):
+        # This is unlikely to be used, so we'll do it the expensive way
+        return iter(self._all_keys())
+
+    def __len__(self):
+        # This is unlikely to be used, so we'll do it the expensive way
+        return len(self._all_keys())
+
+    def context_pushing_element(self, element):
+        """
+        Pushes an element to the top of the stack.
+
+        :param element: Element to push to the stack.
+        :return: The new context with the provided element pushed to the top
+            of the stack.
+        :raises: :class:`ValueError` if the element is already in the stack.
+        """
+        for context_element in self.elements:
+            if context_element == element:
+                raise ValueError(
+                    "element {} already in context".format(element)
+                )
+
+        return _Context(element, self.elements)
+
+
+def _url_to_path(url):
+    parsed = urllib_parse.urlparse(url)
+    return parsed.path
+
+
+def _bool_value(element):
+    """
+    Given an xml element, returns the tag text converted to a bool.
+
+    :param element: The element to fetch the value from.
+
+    :return: A boolean.
+    """
+    return (element.text.lower() == "true")
+
+
+def _element_identification_string(element):
+    """
+    Gets a string that will hopefully help in identifying an element when
+    there is an error.
+    """
+    info_string = "tag: {}".format(element.tag)
+    try:
+        elem_id = element.attrib["id"]
+        info_string += " id: {}".format(elem_id)
+    except KeyError:
+        pass
+
+    return info_string
+
+
+def _name_from_element(element):
+    """
+    Fetches the name from the ``name`` element child of the provided element.
+    If no element exists, returns ``None``.
+
+    :param element: The element to find the name for.
+
+    :return: The name string or ``None``.
+    """
+    name_elem = element.find("./name")
+    if name_elem is not None:
+        return name_elem.text
+
+    return None
+
+
+def _rate_for_element(element):
+    """
+    Takes an FCP rate element and returns a rate to use with otio.
+
+    :param element: An FCP rate element.
+
+    :return: The float rate.
+    """
+    # rate is encoded as a timebase (int) which can be drop-frame
+    base = float(element.find("./timebase").text)
+    if _bool_value(element.find("./ntsc")):
+        base *= 1000.0 / 1001
+
+    return base
+
+
+def _rate_from_context(context):
+    """
+    Given the context object, gets the appropriate rate.
+
+    :param context: The :class:`_Context` instance to find the rate in.
+
+    :return: The rate value or ``None`` if no rate is available in the context.
+    """
+    try:
+        rate_element = context["./rate"]
+    except KeyError:
+        return None
+
+    return _rate_for_element(rate_element)
+
+
+def _time_from_timecode_element(tc_element, context=None):
+    """
+    Given a timecode xml element, returns the time it represents.
+
+    .. todo:: Non Drop-Frame timecode is not yet supported by OTIO.
+
+    :param tc_element: The ``timecode`` element.
+    :param context: The context under which this timecode is being read.
+
+    :return: The :class:`opentime.RationalTime` representation of the
+        timecode.
+    """
+    if context is not None:
+        local_context = context.context_pushing_element(tc_element)
+    else:
+        local_context = _Context(tc_element)
+
+    # Resolve the rate
+    rate = _rate_from_context(local_context)
+
+    # Try using the display format and frame number
+    frame = tc_element.find("./frame")
+
+    # Use frame number, if available
+    if frame is not None:
+        frame_num = int(frame.text)
+        return opentime.RationalTime(frame_num, rate)
+
+    # If a TC string is provided, parse rate from it
+    tc_string_element = tc_element.find("./string")
+    if tc_string_element is None:
+        raise ValueError("Timecode element missing required elements")
+
+    tc_string = tc_string_element.text
+
+    return opentime.from_timecode(tc_string, rate)
+
+
+def _track_kind_from_element(media_element):
+    """
+    Given an FCP XML media sub-element, returns an appropriate
+    :class:`schema.TrackKind` value corresponding to that media type.
+
+    :param media_element: An XML element that is a child of the ``media`` tag.
+
+    :return: The corresponding :class:`schema.TrackKind` value.
+    :raises: :class:`ValueError` when the media type is unsupported.
+    """
+    element_tag = media_element.tag.lower()
+    if element_tag == "audio":
+        return schema.TrackKind.Audio
+    elif element_tag == "video":
+        return schema.TrackKind.Video
+
+    raise ValueError("Unsupported media kind: {}".format(media_element.tag))
+
+
+def _is_primary_audio_channel(track):
+    """
+    Determines whether or not this is the "primary" audio track.
+
+    Audio may be structured in stereo where each channel occupies a separate
+    track. This importer keeps stereo pairs ganged together as a single track.
+
+    :param track: An XML track element.
+
+    :return: A boolean ``True`` if this is the first track.
+    """
+    exploded_index = track.attrib.get('currentExplodedTrackIndex', '0')
+    exploded_count = track.attrib.get('totalExplodedTrackCount', '1')
+
+    return (exploded_index == '0' or exploded_count == '1')
+
+
+def _transition_cut_point(transition_item, context):
+    """
+    Returns the end time at which the transition progresses from one clip to
+    the next.
+
+    :param transition_item: The XML element for the transition.
+    :param context: The context dictionary applying to this transition.
+
+    :return: The :class:`opentime.RationalTime` the transition cuts at.
+    """
+    alignment = transition_item.find('./alignment').text
+    start = int(transition_item.find('./start').text)
+    end = int(transition_item.find('./end').text)
+
+    # start/end time is in the parent context's rate
+    local_context = context.context_pushing_element(transition_item)
+    rate = _rate_from_context(local_context)
+
+    if alignment in ('end', 'end-black'):
+        value = end
+    elif alignment in ('start', 'start-black'):
+        value = start
+    elif alignment in ('center',):
+        value = int((start + end) / 2)
+    else:
+        # Unknown alignments fall back to the center behavior
+        value = int((start + end) / 2)
+
+    return opentime.RationalTime(value, rate)
+
+
+def _xml_tree_to_dict(node, ignore_tags=None, omit_timing=True):
+    """
+    Translates the tree under a provided node mapping to a dictionary/list
+    representation. XML tag attributes are placed in the dictionary with an
+    ``@`` prefix.
+
+    .. note:: In addition to the provided ignore tags, this filters a subset
+        of timing metadata such as ``frame`` and ``string`` elements within
+        timecode elements.
+
+    .. warning:: This scheme does not allow for leaf elements to have
+        attributes. For the moment this doesn't seem to be an issue.
+
+    :param node: The root xml element to express children of in the
+        dictionary.
+    :param ignore_tags: A collection of tag names to skip when converting.
+    :param omit_timing: If ``True``, omits timing-specific tags.
+
+    :return: The dictionary representation.
+    """
+    if node.tag == "timecode":
+        additional_ignore_tags = {"frame", "string"}
+    else:
+        additional_ignore_tags = tuple()
+
+    out_dict = collections.OrderedDict()
+
+    # Handle the attributes
+    out_dict.update(
+        collections.OrderedDict(
+            ("@{}".format(k), v) for k, v in node.attrib.items()
+        )
+    )
+
+    # Now traverse the child tags
+    encountered_tags = set()
+    list_tags = set()
+    for info_node in node:
+        # Skip tags we were asked to omit
+        node_tag = info_node.tag
+        if ignore_tags and node_tag in ignore_tags:
+            continue
+
+        # Skip some special case tags related to timing information
+        if node_tag in additional_ignore_tags:
+            continue
+
+        # If there are children, make this a sub-dictionary by recursing
+        if len(info_node):
+            node_value = _xml_tree_to_dict(info_node)
+        else:
+            node_value = info_node.text
+
+        # If we've seen this node before, then treat it as a list
+        if node_tag in list_tags:
+            # We've established that this tag is a list, append to that
+            out_dict[node_tag].append(node_value)
+        elif node_tag in encountered_tags:
+            # This appears to be a list we didn't know about, convert
+            out_dict[node_tag] = [
+                out_dict[node_tag], node_value
+            ]
+            list_tags.add(node_tag)
+        else:
+            # Store the value
+            out_dict[node_tag] = node_value
+            encountered_tags.add(node_tag)
+
+    return out_dict
+
+
+def _dict_to_xml_tree(data_dict, tag):
+    """
+    Given a dictionary, returns an element tree storing the data. This is the
+    inverse of :func:`_xml_tree_to_dict`.
+
+    Any key/value pairs in the dictionary hierarchy where the key is prefixed
+    with ``@`` will be treated as attributes on the containing element.
+
+    .. note:: This will automatically omit some kinds of metadata that should
+        be up to the xml building functions to manage (such as timecode and
+        id).
+
+    :param data_dict: The dictionary to turn into an XML tree.
+    :param tag: The tag name to use for the top-level element.
+
+    :return: The top element for the dictionary.
+    """
+    top_attributes = collections.OrderedDict(
+        (k[1:], v) for k, v in data_dict.items()
+        if k != "@id" and k.startswith("@")
+    )
+    top_element = cElementTree.Element(tag, **top_attributes)
+
+    def elements_for_value(python_value, element_tag):
+        """ Creates a list of appropriate XML elements given a value. """
+        if isinstance(python_value, dict):
+            element = _dict_to_xml_tree(python_value, element_tag)
+            return [element]
+        elif isinstance(python_value, list):
+            return itertools.chain.from_iterable(
+                elements_for_value(item, element_tag) for item in python_value
+            )
+        else:
+            element = cElementTree.Element(element_tag)
+            if python_value is not None:
+                element.text = str(python_value)
+            return [element]
+
+    # Drop timecode, rate, and link elements from roundtripping because they
+    # may become stale with timeline updates.
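+    # Editor's sketch (hypothetical values): under the ``@`` convention used
+    # by these helpers, a dict such as
+    #     {"@premultiplied": "true", "name": "clip A"}
+    # round-trips to
+    #     <clipitem premultiplied="true"><name>clip A</name></clipitem>
+    # ("@id" is excluded above because ids are reassigned by the
+    # backreference machinery on write).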
+ default_ignore_keys = {"timecode", "rate", "link"} + specific_ignore_keys = {"samplecharacteristics": {"timecode"}} + ignore_keys = specific_ignore_keys.get(tag, default_ignore_keys) + + # push the elements into the tree + for key, value in data_dict.items(): + if key in ignore_keys: + continue + + # We already handled the attributes + if key.startswith("@"): + continue + + elements = elements_for_value(value, key) + top_element.extend(elements) + + return top_element + + +def _element_with_item_metadata(tag, item): + """ + Given a tag name, gets the FCP XML metadata dict and creates a tree of XML + with that metadata under a top element with the provided tag. + + :param tag: The XML tag for the root element. + :param item: An otio object with a metadata dict. + """ + item_meta = item.metadata.get(META_NAMESPACE) + if item_meta: + return _dict_to_xml_tree(item_meta, tag) + + return cElementTree.Element(tag) + + +def _get_or_create_subelement(parent_element, tag): + """ + Given an element and tag name, either gets the direct child of parent with + that tag name or creates a new subelement with that tag and returns it. + + :param parent_element: The element to get or create the subelement from. + :param tag: The tag for the subelement. + """ + sub_element = parent_element.find(tag) + if sub_element is None: + sub_element = cElementTree.SubElement(parent_element, tag) + + return sub_element + + +def _make_pretty_string(tree_e): + # most of the parsing in this adapter is done with cElementTree because it + # is simpler and faster. However, the string representation it returns is + # far from elegant. Therefor we feed it through minidom to provide an xml + # with indentations. + string = cElementTree.tostring(tree_e, encoding="UTF-8", method="xml") + dom = minidom.parseString(string) + return dom.toprettyxml(indent=' ') + + +def marker_for_element(marker_element, rate): + """ + Creates an :class:`schema.Marker` for the provided element. + + :param marker_element: The XML element for the marker. + :param rate: The rate for the object the marker is attached to. + + :return: The :class:`schema.Marker` instance. + """ + # TODO: The spec doc indicates that in and out are required, but doesn't + # say they have to be locally specified, so is it possible they + # could be inherited? + marker_in = opentime.RationalTime( + float(marker_element.find("./in").text), rate + ) + marker_out_value = float(marker_element.find("./out").text) + if marker_out_value > 0: + marker_out = opentime.RationalTime( + marker_out_value, rate + ) + marker_duration = (marker_out - marker_in) + else: + marker_duration = opentime.RationalTime(rate=rate) + + marker_range = opentime.TimeRange(marker_in, marker_duration) + + md_dict = _xml_tree_to_dict(marker_element, {"in", "out", "name"}) + metadata = {META_NAMESPACE: md_dict} if md_dict else None + + return schema.Marker( + name=_name_from_element(marker_element), + marked_range=marker_range, + metadata=metadata + ) + + +def markers_from_element(element, context=None): + """ + Given an element, returns the list of markers attached to it. + + :param element: An element with one or more ``marker`` child elements. + :param context: The context for this element. + + :return: A :class:`list` of :class:`schema.Marker` instances attached + to the provided element. 
+    """
+    if context is not None:
+        local_context = context.context_pushing_element(element)
+    else:
+        local_context = _Context(element)
+    rate = _rate_from_context(local_context)
+
+    return [marker_for_element(e, rate) for e in element.iterfind("./marker")]
+
+
+class FCP7XMLParser:
+    """
+    Implements parsing of an FCP XML file into an OTIO timeline.
+
+    Parsing FCP XML elements involves two concepts that require carrying state:
+        1. Inheritance
+        2. The id Attribute
+
+    .. seealso:: https://developer.apple.com/library/archive/documentation/\
+        AppleApplications/Reference/FinalCutPro_XML/Basics/Basics.html\
+        #//apple_ref/doc/uid/TP30001154-TPXREF102
+
+    Inheritance is implemented using a _Context object that is pushed down
+    through layers of parsing. A given parsing method is passed the element to
+    parse into an otio object along with the context that element exists under
+    (e.g. a track element parsing method is given the track element and the
+    sequence context for that track).
+
+    The id attribute dereferencing is handled through a lookup table stored on
+    parser instances; the ``_derefed_`` methods take an element and resolve it
+    to its dereferenced version.
+    """
+
+    _etree = None
+    """ The root etree for the FCP XML. """
+
+    _id_map = None
+    """ A mapping of id to the first element encountered with that id. """
+
+    def __init__(self, element_tree):
+        """
+        Constructor; must be initialized with an xml etree.
+        """
+        self._etree = element_tree
+
+        self._id_map = {}
+
+    def _derefed_element(self, element):
+        """
+        Given an element, dereferences it by its id attribute if needed. If
+        the element has an id attribute and it's our first time encountering
+        it, store the id.
+        """
+        if element is None:
+            return element
+
+        try:
+            elem_id = element.attrib["id"]
+        except KeyError:
+            return element
+
+        return self._id_map.setdefault(elem_id, element)
+
+    def _derefed_iterfind(self, element, path):
+        """
+        Given an element, finds elements with the provided path below and
+        returns an iterator of the dereferenced versions of those.
+
+        :param element: The XML etree element.
+        :param path: The path to find subelements.
+
+        :return: iterator of subelements dereferenced by id.
+        """
+        return (
+            self._derefed_element(e) for e in element.iterfind(path)
+        )
+
+    def top_level_sequences(self):
+        """
+        Returns a list of timelines for the top-level sequences in the file.
+        """
+        context = _Context()
+
+        # If the tree has just sequences at the top level, this will catch them
+        top_iter = self._derefed_iterfind(self._etree, "./sequence")
+
+        # If there is a project or bin at the top level, this should catch them
+        project_and_bin_iter = self._derefed_iterfind(
+            self._etree, ".//children/sequence"
+        )
+
+        # Make an iterator that will exhaust both the above
+        sequence_iter = itertools.chain(top_iter, project_and_bin_iter)
+
+        return [self.timeline_for_sequence(s, context) for s in sequence_iter]
+
+    def timeline_for_sequence(self, sequence_element, context):
+        """
+        Returns a :class:`schema.Timeline` parsed from a sequence element.
+
+        :param sequence_element: The sequence element.
+        :param context: The context dictionary.
+
+        :return: The appropriate OTIO object for the element.
+ """ + local_context = context.context_pushing_element(sequence_element) + + name = _name_from_element(sequence_element) + parsed_tags = {"name", "media", "marker", "duration"} + md_dict = _xml_tree_to_dict(sequence_element, parsed_tags) + + sequence_timecode = self._derefed_element( + sequence_element.find("./timecode") + ) + if sequence_timecode is not None: + seq_start_time = _time_from_timecode_element( + sequence_timecode, local_context + ) + else: + seq_start_time = None + + media_element = self._derefed_element(sequence_element.find("./media")) + if media_element is None: + tracks = None + else: + # Reach down into the media block and escalate metadata to the + # sequence + for media_type in media_element: + media_info_dict = _xml_tree_to_dict(media_type, {"track"}) + if media_info_dict: + media_dict = md_dict.setdefault( + "media", collections.OrderedDict() + ) + media_dict[media_type.tag] = media_info_dict + + tracks = self.stack_for_element(media_element, local_context) + tracks.name = name + + # TODO: Should we be parsing the duration tag and pad out a track with + # gap to match? + + timeline = schema.Timeline( + name=name, + global_start_time=seq_start_time, + metadata={META_NAMESPACE: md_dict} if md_dict else {}, + ) + timeline.tracks = tracks + + # Push the sequence markers onto the top stack + markers = markers_from_element(sequence_element, context) + timeline.tracks.markers.extend(markers) + + return timeline + + def stack_for_element(self, element, context): + """ + Given an element, parses out track information as a stack. + + :param element: The element under which to find the tracks (typically + a ``media`` element. + :param context: The current parser context. + + :return: A :class:`schema.Stack` of the tracks. + """ + # Determine the context + local_context = context.context_pushing_element(element) + + tracks = [] + media_type_elements = self._derefed_iterfind(element, "./") + for media_type_element in media_type_elements: + try: + track_kind = _track_kind_from_element(media_type_element) + except ValueError: + # Unexpected element + continue + + is_audio = (track_kind == schema.TrackKind.Audio) + track_elements = self._derefed_iterfind( + media_type_element, "./track" + ) + for track_element in track_elements: + if is_audio and not _is_primary_audio_channel(track_element): + continue + + tracks.append( + self.track_for_element( + track_element, track_kind, local_context + ) + ) + + markers = markers_from_element(element, context) + + stack = schema.Stack( + children=tracks, + markers=markers, + name=_name_from_element(element), + ) + + return stack + + def track_for_element(self, track_element, track_kind, context): + """ + Given a track element, constructs the OTIO track. + + :param track_element: The track XML element. + :param track_kind: The :class:`schema.TrackKind` for the track. + :param context: The context dict for this track. 
+ """ + local_context = context.context_pushing_element(track_element) + name_element = track_element.find("./name") + track_name = (name_element.text if name_element is not None else None) + + timeline_item_tags = {"clipitem", "generatoritem", "transitionitem"} + + md_dict = _xml_tree_to_dict(track_element, timeline_item_tags) + track_metadata = {META_NAMESPACE: md_dict} if md_dict else None + + track = schema.Track( + name=track_name, + kind=track_kind, + metadata=track_metadata, + ) + + # Iterate through and parse track items + track_rate = _rate_from_context(local_context) + current_timeline_time = opentime.RationalTime(0, track_rate) + head_transition_element = None + for i, item_element in enumerate(track_element): + if item_element.tag not in timeline_item_tags: + continue + + item_element = self._derefed_element(item_element) + + # Do a lookahead to try and find the tail transition item + try: + tail_transition_element = track_element[i + 1] + if tail_transition_element.tag != "transitionitem": + tail_transition_element = None + else: + tail_transition_element = self._derefed_element( + tail_transition_element + ) + except IndexError: + tail_transition_element = None + + track_item, item_range = self.item_and_timing_for_element( + item_element, + head_transition_element, + tail_transition_element, + local_context, + ) + + # Insert gap between timeline cursor and the new item if needed. + if current_timeline_time < item_range.start_time: + gap_duration = (item_range.start_time - current_timeline_time) + gap_range = opentime.TimeRange( + duration=gap_duration.rescaled_to(track_rate) + ) + track.append(schema.Gap(source_range=gap_range)) + + # Add the item and advance the timeline cursor + track.append(track_item) + current_timeline_time = item_range.end_time_exclusive() + + # Stash the element for the next iteration if it's a transition + if item_element.tag == "transitionitem": + head_transition_element = item_element + + return track + + def media_reference_for_file_element(self, file_element, context): + """ + Given a file XML element, returns the + :class`schema.ExternalReference`. + + :param file_element: The file xml element. + :param context: The parent context dictionary. + + :return: An :class:`schema.ExternalReference`. 
+        """
+        local_context = context.context_pushing_element(file_element)
+        media_ref_rate = _rate_from_context(local_context)
+
+        name = _name_from_element(file_element)
+
+        # Get the full metadata
+        metadata_ignore_keys = {"duration", "name", "pathurl"}
+        md_dict = _xml_tree_to_dict(file_element, metadata_ignore_keys)
+        metadata_dict = {META_NAMESPACE: md_dict} if md_dict else None
+
+        # Determine the file path
+        path_element = file_element.find("./pathurl")
+        if path_element is not None:
+            path = path_element.text
+        else:
+            path = None
+
+        # Find the timing
+        timecode_element = file_element.find("./timecode")
+        if timecode_element is not None:
+            start_time = _time_from_timecode_element(timecode_element)
+            start_time = start_time.rescaled_to(media_ref_rate)
+        else:
+            start_time = opentime.RationalTime(0, media_ref_rate)
+
+        duration_element = file_element.find("./duration")
+        if duration_element is not None:
+            duration = opentime.RationalTime(
+                float(duration_element.text), media_ref_rate
+            )
+            available_range = opentime.TimeRange(start_time, duration)
+        elif timecode_element is not None:
+            available_range = opentime.TimeRange(
+                start_time,
+                opentime.RationalTime(0, media_ref_rate),
+            )
+        else:
+            available_range = None
+
+        if path is None:
+            media_reference = schema.MissingReference(
+                name=name,
+                available_range=available_range,
+                metadata=metadata_dict,
+            )
+        else:
+            media_reference = schema.ExternalReference(
+                target_url=path,
+                available_range=available_range,
+                metadata=metadata_dict,
+            )
+            media_reference.name = name
+
+        return media_reference
+
+    def media_reference_for_effect_element(self, effect_element):
+        """
+        Given an effect element, returns a generator reference.
+
+        :param effect_element: The effect for the generator.
+
+        :return: An :class:`schema.GeneratorReference` instance.
+        """
+        name = _name_from_element(effect_element)
+        md_dict = _xml_tree_to_dict(effect_element, {"name"})
+
+        return schema.GeneratorReference(
+            name=name,
+            metadata=({META_NAMESPACE: md_dict} if md_dict else None)
+        )
+
+    def item_and_timing_for_element(
+        self, item_element, head_transition, tail_transition, context
+    ):
+        """
+        Given a track item, returns a tuple with the appropriate
+        OpenTimelineIO schema item as the first element and an
+        :class:`opentime.TimeRange` of the resolved timeline range the clip
+        occupies.
+
+        :param item_element: The track item XML node.
+        :param head_transition: The xml element for the transition immediately
+            before or ``None``.
+        :param tail_transition: The xml element for the transition immediately
+            after or ``None``.
+        :param context: The context dictionary.
+
+        :return: An :class:`core.Item` subclass instance and
+            :class:`opentime.TimeRange` for the item.
+        """
+        parent_rate = _rate_from_context(context)
+
+        # Establish the start/end time in the timeline
+        start_value = int(item_element.find("./start").text)
+        end_value = int(item_element.find("./end").text)
+
+        if start_value == -1:
+            # determine based on the cut point of the head transition
+            start = _transition_cut_point(head_transition, context)
+
+            # This offset is needed to determine how much to advance from the
+            # clip media's in time. Duration accounts for this offset for the
+            # out time.
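+            # Editor's note (illustrative numbers): for a centered transition
+            # whose <start> is 100 and <end> is 112, the cut point computed by
+            # _transition_cut_point is frame 106, so start_offset below comes
+            # out to 6 frames and the media in point is advanced accordingly.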
+ transition_rate = _rate_from_context( + context.context_pushing_element(head_transition) + ) + start_offset = start - opentime.RationalTime( + int(head_transition.find('./start').text), transition_rate + ) + else: + start = opentime.RationalTime(start_value, parent_rate) + start_offset = opentime.RationalTime() + + if end_value == -1: + # determine based on the cut point of the tail transition + end = _transition_cut_point(tail_transition, context) + else: + end = opentime.RationalTime(end_value, parent_rate) + + item_range = opentime.TimeRange(start, (end - start)) + + # Get the metadata dictionary for the item + item_metadata_ignore_keys = { + "name", + "start", + "end", + "in", + "out", + "duration", + "file", + "marker", + "effect", + "rate", + "sequence", + } + metadata_dict = _xml_tree_to_dict( + item_element, item_metadata_ignore_keys + ) + + # deserialize the item + if item_element.tag in {"clipitem", "generatoritem"}: + item = self.clip_for_element( + item_element, item_range, start_offset, context + ) + elif item_element.tag == "transitionitem": + item = self.transition_for_element(item_element, context) + else: + name = "unknown-{}".format(item_element.tag) + item = core.Item(name=name, source_range=item_range) + + if metadata_dict: + item.metadata.setdefault(META_NAMESPACE, {}).update(metadata_dict) + + return (item, item_range) + + def clip_for_element( + self, clipitem_element, item_range, start_offset, context + ): + """ + Given a clipitem xml element, returns an :class:`schema.Clip`. + + :param clipitem_element: The element to create a clip for. + :param item_range: The time range in the timeline the clip occupies. + :param start_offset: The amount by which the ``in`` time of the clip + source should be advanced (usually due to a transition). + :param context: The parent context for the clip. + + :return: The :class:`schema.Clip` instance. + """ + local_context = context.context_pushing_element(clipitem_element) + + name = _name_from_element(clipitem_element) + + file_element = self._derefed_element(clipitem_element.find("./file")) + sequence_element = self._derefed_element( + clipitem_element.find("./sequence") + ) + if clipitem_element.tag == "generatoritem": + generator_effect_element = clipitem_element.find( + "./effect[effecttype='generator']" + ) + else: + generator_effect_element = None + + media_start_time = opentime.RationalTime() + if sequence_element is not None: + item = self.stack_for_element(sequence_element, local_context) + # TODO: is there an applicable media start time we should be + # using from nested sequences? 
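+            # Editor's note: media_start_time stays at 0 for nested
+            # sequences; only the file-backed branch below reads a start
+            # offset from the file's <timecode> element.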
+        elif file_element is not None or generator_effect_element is not None:
+            if file_element is not None:
+                media_reference = self.media_reference_for_file_element(
+                    file_element, local_context
+                )
+                # See if there is a start offset
+                timecode_element = file_element.find("./timecode")
+                if timecode_element is not None:
+                    media_start_time = _time_from_timecode_element(
+                        timecode_element
+                    )
+            elif generator_effect_element is not None:
+                media_reference = self.media_reference_for_effect_element(
+                    generator_effect_element
+                )
+
+            item = schema.Clip(
+                name=name,
+                media_reference=media_reference,
+            )
+        else:
+            raise TypeError(
+                'Type of clip item is not supported: {}'.format(
+                    _element_identification_string(clipitem_element)
+                )
+            )
+
+        # Add the markers
+        markers = markers_from_element(clipitem_element, context)
+        item.markers.extend(markers)
+
+        # Find the in time (source time relative to media start)
+        clip_rate = _rate_from_context(local_context)
+        in_value = float(clipitem_element.find('./in').text)
+        in_time = opentime.RationalTime(in_value, clip_rate)
+
+        # Offset the "in" time by the start offset of the media
+        source_start_time = in_time + media_start_time + start_offset
+        duration = item_range.duration
+
+        # Source Range is the item range expressed in the clip's rate (for now)
+        source_range = opentime.TimeRange(
+            source_start_time.rescaled_to(clip_rate),
+            duration.rescaled_to(clip_rate),
+        )
+
+        item.source_range = source_range
+
+        # Parse the filters
+        filter_iter = self._derefed_iterfind(clipitem_element, "./filter")
+        for filter_element in filter_iter:
+            item.effects.append(
+                self.effect_from_filter_element(filter_element)
+            )
+
+        return item
+
+    def effect_from_filter_element(self, filter_element):
+        """
+        Given a filter element, creates an :class:`schema.Effect`.
+
+        :param filter_element: The ``filter`` element containing the effect.
+
+        :return: The effect instance.
+        """
+        effect_element = filter_element.find("./effect")
+
+        if effect_element is None:
+            raise ValueError(
+                "could not find effect in filter: {}".format(filter_element)
+            )
+
+        name = effect_element.find("./name").text
+
+        effect_metadata = _xml_tree_to_dict(effect_element, {"name"})
+
+        return schema.Effect(
+            name,
+            metadata={META_NAMESPACE: effect_metadata},
+        )
+
+    def transition_for_element(self, item_element, context):
+        """
+        Creates an OTIO transition for the provided transition element.
+
+        :param item_element: The element to create a transition for.
+        :param context: The parent context for the element.
+
+        :return: The :class:`schema.Transition` instance.
+        """
+        # start and end times are in the parent's rate
+        rate = _rate_from_context(context)
+        start = opentime.RationalTime(
+            int(item_element.find('./start').text),
+            rate
+        )
+        end = opentime.RationalTime(
+            int(item_element.find('./end').text),
+            rate
+        )
+        cut_point = _transition_cut_point(item_element, context)
+
+        transition = schema.Transition(
+            name=item_element.find('./effect/name').text,
+            transition_type=schema.TransitionTypes.SMPTE_Dissolve,
+            in_offset=cut_point - start,
+            out_offset=end - cut_point,
+        )
+
+        return transition
+
+
+# ------------------------
+# building single track
+# ------------------------
+
+
+def _backreference_for_item(item, tag, br_map):
+    """
+    Given an item, determines what the id in the backreference map should be.
+    If the item is already tracked in the map, its existing id will be
+    returned; otherwise a new id will be minted.
+
+    .. note:: ``br_map`` may be mutated by this function.
``br_map`` is + intended to be an opaque data structure and only accessed through this + function, the structure of data in br_map may change. + + :param item: The :class:`core.SerializableObject` to create an id for. + :param tag: The tag name that will be used for object in xml. + :param br_map: The dictionary containing backreference information + generated so far. + + :return: A 2-tuple of (id_string, is_new_id) where the ``id_string`` is + the value for the xml id attribute and ``is_new_id`` is ``True`` when + this is the first time that id was encountered. + """ + # br_map is structured as a dictionary with tags as keys, and dictionaries + # of hash to id int as values. + + def id_string(id_int): + return "{}-{}".format(tag, id_int) + + # Determine how to uniquely identify the referenced item + if isinstance(item, schema.ExternalReference): + item_hash = hash(str(item.target_url)) + else: + # TODO: This may become a performance issue. It means that every + # non-ref object is serialized to json and hashed each time it's + # encountered. + item_hash = hash( + core.json_serializer.serialize_json_to_string(item) + ) + + is_new_id = False + item_id = br_map.get(tag, {}).get(item_hash) + if item_id is not None: + return (id_string(item_id), is_new_id) + + # This is a new id, figure out what it should be. + is_new_id = True + + # Attempt to preserve the ID from the input metadata. + preferred_id = None + orig_id_string = item.metadata.get(META_NAMESPACE, {}).get("@id") + if orig_id_string is not None: + orig_id_match = ID_RE.match(orig_id_string) + if orig_id_match is not None: + match_groups = orig_id_match.groupdict() + orig_tagname = match_groups["tag"] + if orig_tagname == tag: + preferred_id = int(match_groups["id"]) + + # Generate an id by finding the lowest value in a contiguous range not + # colliding with an existing value + tag_id_map = br_map.setdefault(tag, {}) + existing_ids = set(tag_id_map.values()) + if preferred_id is not None and preferred_id not in existing_ids: + item_id = preferred_id + else: + # Make a range from 1 including the ID after the largest assigned + # (hence the +2 since range is non-inclusive on the upper bound) + max_assigned_id = max(existing_ids) if existing_ids else 0 + max_possible_id = (max_assigned_id + 2) + possible_ids = set(range(1, max_possible_id)) + + # Select the lowest unassigned ID + item_id = min(possible_ids.difference(existing_ids)) + + # Store the created id + tag_id_map[item_hash] = item_id + + return (id_string(item_id), is_new_id) + + +def _backreference_build(tag): + """ + A decorator for functions creating XML elements to implement the id system + described in FCP XML. + + This wrapper determines if the otio item is equivalent to one encountered + before with the provided tag name. If the item hasn't been encountered then + the wrapped function will be invoked and the XML element from that function + will have the ``id`` attribute set and be stored in br_map. + If the item is equivalent to a previously provided item, the wrapped + function won't be invoked and a simple tag with the previous instance's id + will be returned instead. + + The wrapped function must: + - Have the otio item as the first positional argument. + - Have br_map (backreference map, a dictionary) as the last positional + arg. br_map stores the state for encountered items. + + :param tag: The xml tag of the element the wrapped function generates. + """ + # We can also encode these back-references if an item is accessed multiple + # times. 
To do this we store an id attribute on the element. For back- + # references we then only need to return an empty element of that type with + # the id we logged before + + def singleton_decorator(func): + @functools.wraps(func) + def wrapper(item, *args, **kwargs): + br_map = args[-1] + + item_id, id_is_new = _backreference_for_item(item, tag, br_map) + + # if the item exists in the map already, we should use the + # abbreviated XML element referring to the original + if not id_is_new: + return cElementTree.Element(tag, id=item_id) + + # This is the first time for this unique item, it needs it's full + # XML. Get the element generated by the wrapped function and add + # the id attribute. + elem = func(item, *args, **kwargs) + elem.attrib["id"] = item_id + + return elem + + return wrapper + + return singleton_decorator + + +def _append_new_sub_element(parent, tag, attrib=None, text=None): + """ + Creates a sub-element with the provided tag, attributes, and text. + + This is a convenience because the :class:`SubElement` constructor does not + provide the ability to set ``text``. + + :param parent: The parent element. + :param tag: The tag string for the element. + :param attrib: An optional dictionary of attributes for the element. + :param text: Optional text value for the element. + + :return: The new XML element. + """ + elem = cElementTree.SubElement(parent, tag, **attrib or {}) + if text is not None: + elem.text = text + + return elem + + +def _build_rate(fps): + """ + Given a framerate, makes a ``rate`` xml tree. + + :param fps: The framerate. + :return: The fcp xml ``rate`` tree. + """ + rate = math.ceil(fps) + + rate_e = cElementTree.Element('rate') + _append_new_sub_element(rate_e, 'timebase', text=str(int(rate))) + _append_new_sub_element( + rate_e, + 'ntsc', + text='FALSE' if rate == fps else 'TRUE' + ) + return rate_e + + +def _build_timecode(time, fps, drop_frame=False, additional_metadata=None): + """ + Makes a timecode xml element tree. + + .. warning:: The drop_frame parameter is currently ignored and + auto-determined by rate. This is because the underlying otio timecode + conversion assumes DFTC based on rate. + + :param time: The :class:`opentime.RationalTime` for the timecode. + :param fps: The framerate for the timecode. + :param drop_frame: If True, generates drop-frame timecode. + :param additional_metadata: A dictionary with other metadata items like + ``field``, ``reel``, ``source``, and ``format``. It is assumed this + dictionary is of the form generated by :func:`_xml_tree_to_dict` when + the file was read originally. + + :return: The ``timecode`` element. 
+ """ + if additional_metadata: + # Only allow legal child items for the timecode element + filtered = { + k: v for k, v in additional_metadata.items() + if k in {"field", "reel", "source", "format"} + } + tc_element = _dict_to_xml_tree(filtered, "timecode") + else: + tc_element = cElementTree.Element("timecode") + + tc_element.append(_build_rate(fps)) + rate_is_not_ntsc = (tc_element.find('./rate/ntsc').text == "FALSE") + if drop_frame and rate_is_not_ntsc: + tc_fps = fps * (1000 / 1001.0) + else: + tc_fps = fps + + # Get the time values + tc_time = opentime.RationalTime(time.value_rescaled_to(fps), tc_fps) + tc_string = opentime.to_timecode(tc_time, tc_fps, drop_frame) + + _append_new_sub_element(tc_element, "string", text=tc_string) + + frame_number = int(round(time.value)) + _append_new_sub_element( + tc_element, "frame", text="{:.0f}".format(frame_number) + ) + + drop_frame = (";" in tc_string) + display_format = "DF" if drop_frame else "NDF" + _append_new_sub_element(tc_element, "displayformat", text=display_format) + + return tc_element + + +def _build_item_timings( + item_e, + item, + timeline_range, + transition_offsets, + timecode +): + # source_start is absolute time taking into account the timecode of the + # media. But xml regards the source in point from the start of the media. + # So we subtract the media timecode. + item_rate = item.source_range.start_time.rate + source_start = (item.source_range.start_time - timecode) + source_start = source_start.rescaled_to(item_rate) + + source_end = (item.source_range.end_time_exclusive() - timecode) + source_end = source_end.rescaled_to(item_rate) + + start = '{:.0f}'.format(timeline_range.start_time.value) + end = '{:.0f}'.format(timeline_range.end_time_exclusive().value) + + item_e.append(_build_rate(item_rate)) + + if transition_offsets[0] is not None: + start = '-1' + source_start -= transition_offsets[0] + if transition_offsets[1] is not None: + end = '-1' + source_end += transition_offsets[1] + + _append_new_sub_element( + item_e, 'duration', + text='{:.0f}'.format(item.source_range.duration.value) + ) + _append_new_sub_element(item_e, 'start', text=start) + _append_new_sub_element(item_e, 'end', text=end) + _append_new_sub_element( + item_e, + 'in', + text='{:.0f}'.format(source_start.value) + ) + _append_new_sub_element( + item_e, + 'out', + text='{:.0f}'.format(source_end.value) + ) + + +@_backreference_build('file') +def _build_empty_file(media_ref, parent_range, br_map): + file_e = _element_with_item_metadata("file", media_ref) + _append_new_sub_element(file_e, "name", text=media_ref.name) + + if media_ref.available_range is not None: + available_range = media_ref.available_range + else: + available_range = opentime.TimeRange( + opentime.RationalTime(0, parent_range.start_time.rate), + parent_range.duration, + ) + + ref_rate = available_range.start_time.rate + file_e.append(_build_rate(ref_rate)) + + # Only provide a duration if one came from the media, don't invent one. + # For example, Slugs have no duration specified. 
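+    # Editor's sketch (hypothetical output): a MissingReference named "Slug"
+    # with no available_range yields roughly
+    #     <file id="file-2"><name>Slug</name><rate>...</rate>
+    #     <media><video/></media></file>
+    # with no <duration> child written.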
+ if media_ref.available_range: + duration = available_range.duration.rescaled_to(ref_rate) + _append_new_sub_element( + file_e, + 'duration', + text='{:.0f}'.format(duration.value), + ) + + # timecode + ref_tc_metadata = media_ref.metadata.get(META_NAMESPACE, {}).get( + "timecode" + ) + tc_element = _build_timecode_from_metadata( + available_range.start_time, ref_tc_metadata + ) + file_e.append(tc_element) + + file_media_e = _get_or_create_subelement(file_e, "media") + if file_media_e.find("video") is None: + _append_new_sub_element(file_media_e, "video") + + return file_e + + +@_backreference_build('file') +def _build_file(media_reference, br_map): + file_e = _element_with_item_metadata("file", media_reference) + + available_range = media_reference.available_range + url_path = _url_to_path(media_reference.target_url) + + file_name = ( + media_reference.name if media_reference.name + else os.path.basename(url_path) + ) + _append_new_sub_element(file_e, 'name', text=file_name) + _append_new_sub_element(file_e, 'pathurl', text=media_reference.target_url) + + # timing info + file_e.append(_build_rate(available_range.start_time.rate)) + _append_new_sub_element( + file_e, 'duration', + text='{:.0f}'.format(available_range.duration.value) + ) + + # timecode + ref_tc_metadata = media_reference.metadata.get(META_NAMESPACE, {}).get( + "timecode" + ) + tc_element = _build_timecode_from_metadata( + available_range.start_time, ref_tc_metadata + ) + file_e.append(tc_element) + + # we need to flag the file reference with the content types, otherwise it + # will not get recognized + # TODO: We should use a better method for this. Perhaps pre-walk the + # timeline and find all the track kinds this media is present in? + if not file_e.find("media"): + file_media_e = _get_or_create_subelement(file_e, "media") + + audio_exts = {'.wav', '.aac', '.mp3', '.aif', '.aiff', '.m4a'} + has_video = (os.path.splitext(url_path)[1].lower() not in audio_exts) + if has_video and file_media_e.find("video") is None: + _append_new_sub_element(file_media_e, "video") + + # TODO: This is assuming all files have an audio track. Not sure what + # the implications of that are. 
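+        # Editor's note (illustrative): a "shot.mov" reference therefore gets
+        # both <video/> and <audio/> children, while "mix.wav" gets only
+        # <audio/>.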
+ if file_media_e.find("audio") is None: + _append_new_sub_element(file_media_e, "audio") + + return file_e + + +def _build_transition_item( + transition_item, + timeline_range, + transition_offsets, + br_map, +): + transition_e = _element_with_item_metadata( + "transitionitem", transition_item + ) + _append_new_sub_element( + transition_e, + 'start', + text='{:.0f}'.format(timeline_range.start_time.value) + ) + _append_new_sub_element( + transition_e, + 'end', + text='{:.0f}'.format(timeline_range.end_time_exclusive().value) + ) + + # Only add an alignment if it didn't already come in from the metadata dict + if transition_e.find("alignment") is None: + # default center aligned + alignment = "center" + if not transition_item.in_offset.value: + alignment = 'start-black' + elif not transition_item.out_offset.value: + alignment = 'end-black' + + _append_new_sub_element(transition_e, 'alignment', text=alignment) + # todo support 'start' and 'end' alignment + + transition_e.append(_build_rate(timeline_range.start_time.rate)) + + # Only add an effect if it didn't already come in from the metadata dict + if not transition_e.find("./effect"): + try: + effectid = transition_item.metadata[META_NAMESPACE]["effectid"] + except KeyError: + effectid = "Cross Dissolve" + + effect_e = _append_new_sub_element(transition_e, 'effect') + _append_new_sub_element(effect_e, 'name', text=transition_item.name) + _append_new_sub_element(effect_e, 'effectid', text=effectid) + _append_new_sub_element(effect_e, 'effecttype', text='transition') + _append_new_sub_element(effect_e, 'mediatype', text='video') + + return transition_e + + +@_backreference_build("clipitem") +def _build_clip_item_without_media( + clip_item, + timeline_range, + transition_offsets, + br_map, +): + # TODO: Does this need to be a separate function or could it be unified + # with _build_clip_item? 
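+    # Editor's note: the practical difference from _build_clip_item is that
+    # this path emits a stub <file> element via _build_empty_file, since a
+    # MissingReference has no pathurl to point at.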
+ clip_item_e = _element_with_item_metadata("clipitem", clip_item) + if "frameBlend" not in clip_item_e.attrib: + clip_item_e.attrib["frameBlend"] = "FALSE" + + if clip_item.media_reference.available_range: + media_start_time = clip_item.media_reference.available_range.start_time + else: + media_start_time = opentime.RationalTime( + 0, timeline_range.start_time.rate + ) + + _append_new_sub_element(clip_item_e, 'name', text=clip_item.name) + clip_item_e.append( + _build_empty_file( + clip_item.media_reference, timeline_range, br_map + ) + ) + clip_item_e.extend([_build_marker(m) for m in clip_item.markers]) + + _build_item_timings( + clip_item_e, + clip_item, + timeline_range, + transition_offsets, + media_start_time, + ) + + return clip_item_e + + +@_backreference_build("clipitem") +def _build_clip_item(clip_item, timeline_range, transition_offsets, br_map): + is_generator = isinstance( + clip_item.media_reference, schema.GeneratorReference + ) + + tagname = "generatoritem" if is_generator else "clipitem" + clip_item_e = _element_with_item_metadata(tagname, clip_item) + if "frameBlend" not in clip_item_e.attrib: + clip_item_e.attrib["frameBlend"] = "FALSE" + + if is_generator: + clip_item_e.append(_build_generator_effect(clip_item, br_map)) + else: + clip_item_e.append(_build_file(clip_item.media_reference, br_map)) + + # set the clip name from the media reference if not defined on the clip + if clip_item.name is not None: + name = clip_item.name + elif is_generator: + name = clip_item.media_reference.name + else: + url_path = _url_to_path(clip_item.media_reference.target_url) + name = os.path.basename(url_path) + + _append_new_sub_element(clip_item_e, 'name', text=name) + + if clip_item.media_reference.available_range: + clip_item_e.append( + _build_rate(clip_item.source_range.start_time.rate) + ) + clip_item_e.extend(_build_marker(m) for m in clip_item.markers) + + if clip_item.media_reference.available_range: + timecode = clip_item.media_reference.available_range.start_time + else: + timecode = opentime.RationalTime( + 0, clip_item.source_range.start_time.rate + ) + + _build_item_timings( + clip_item_e, + clip_item, + timeline_range, + transition_offsets, + timecode + ) + + return clip_item_e + + +def _build_generator_effect(clip_item, br_map): + """ + Builds an effect element for the generator ref on the provided clip item. + + :param clip_item: a clip with a :class:`schema.GeneratorReference` as + its ``media_reference``. + :param br_map: The backreference map. 
+ """ + # Since we don't support effects in a standard way, just try and build + # based on the metadata provided at deserialization so we can roundtrip + generator_ref = clip_item.media_reference + try: + fcp_xml_effect_info = generator_ref.metadata[META_NAMESPACE] + except KeyError: + return _build_empty_file( + generator_ref, + clip_item.source_range, + br_map, + ) + + # Get the XML Tree built from the metadata + effect_element = _dict_to_xml_tree(fcp_xml_effect_info, "effect") + + # Validate the metadata and make sure it contains the required elements + for required in ("effectid", "effecttype", "mediatype", "effectcategory"): + if effect_element.find(required) is None: + return _build_empty_file( + generator_ref, + clip_item.source_range, + br_map, + ) + + # Add the name + _append_new_sub_element(effect_element, "name", text=generator_ref.name) + + return effect_element + + +@_backreference_build("clipitem") +def _build_track_item(track, timeline_range, transition_offsets, br_map): + clip_item_e = _element_with_item_metadata("clipitem", track) + if "frameBlend" not in clip_item_e.attrib: + clip_item_e.attrib["frameBlend"] = "FALSE" + + _append_new_sub_element( + clip_item_e, + 'name', + text=os.path.basename(track.name) + ) + + track_e = _build_sequence_for_stack(track, timeline_range, br_map) + + clip_item_e.append(_build_rate(track.source_range.start_time.rate)) + clip_item_e.extend([_build_marker(m) for m in track.markers]) + clip_item_e.append(track_e) + timecode = opentime.RationalTime(0, timeline_range.start_time.rate) + + _build_item_timings( + clip_item_e, + track, + timeline_range, + transition_offsets, + timecode + ) + + return clip_item_e + + +def _build_item(item, timeline_range, transition_offsets, br_map): + if isinstance(item, schema.Transition): + return _build_transition_item( + item, + timeline_range, + transition_offsets, + br_map + ) + elif isinstance(item, schema.Clip): + if isinstance( + item.media_reference, + schema.MissingReference + ): + return _build_clip_item_without_media( + item, + timeline_range, + transition_offsets, + br_map + ) + else: + return _build_clip_item( + item, + timeline_range, + transition_offsets, + br_map + ) + elif isinstance(item, schema.Stack): + return _build_track_item( + item, + timeline_range, + transition_offsets, + br_map + ) + else: + raise ValueError('Unsupported item: ' + str(item)) + + +def _build_top_level_track(track, track_rate, br_map): + track_e = _element_with_item_metadata("track", track) + + for n, item in enumerate(track): + if isinstance(item, schema.Gap): + continue + + transition_offsets = [None, None] + previous_item = track[n - 1] if n > 0 else None + next_item = track[n + 1] if n + 1 < len(track) else None + if not isinstance(item, schema.Transition): + # find out if this item has any neighboring transition + if isinstance(previous_item, schema.Transition): + if previous_item.out_offset.value: + transition_offsets[0] = previous_item.in_offset + else: + transition_offsets[0] = None + if isinstance(next_item, schema.Transition): + if next_item.in_offset.value: + transition_offsets[1] = next_item.out_offset + else: + transition_offsets[1] = None + + timeline_range = track.range_of_child_at_index(n) + timeline_range = opentime.TimeRange( + timeline_range.start_time.rescaled_to(track_rate), + timeline_range.duration.rescaled_to(track_rate) + ) + track_e.append( + _build_item(item, timeline_range, transition_offsets, br_map) + ) + + return track_e + + +def _build_marker(marker): + marker_e = 
_element_with_item_metadata("marker", marker) + + marked_range = marker.marked_range + + _append_new_sub_element(marker_e, 'name', text=marker.name) + _append_new_sub_element( + marker_e, 'in', + text='{:.0f}'.format(marked_range.start_time.value) + ) + _append_new_sub_element(marker_e, 'out', text='-1') + + return marker_e + + +def _build_timecode_from_metadata(time, tc_metadata=None): + """ + Makes a timecode element with the given time and (if available) + ```timecode`` metadata stashed on input. + + :param time: The :class:`opentime.RationalTime` to encode. + :param tc_metadata: The xml dict for the ``timecode`` element populated + on read. + + :return: A timecode element. + """ + if tc_metadata is None: + tc_metadata = {} + + try: + # Parse the rate in the preserved metadata, if available + tc_rate = _rate_for_element( + _dict_to_xml_tree(tc_metadata["rate"], "rate") + ) + except KeyError: + # Default to the rate in the start time + tc_rate = time.rate + + drop_frame = (tc_metadata.get("displayformat", "NDF") == "DF") + + return _build_timecode( + time, + tc_rate, + drop_frame, + additional_metadata=tc_metadata, + ) + + +@_backreference_build('sequence') +def _build_sequence_for_timeline(timeline, timeline_range, br_map): + sequence_e = _element_with_item_metadata("sequence", timeline) + + _add_stack_elements_to_sequence( + timeline.tracks, sequence_e, timeline_range, br_map + ) + + # In the case of timelines, use the timeline name rather than the stack + # name. + if timeline.name: + sequence_e.find('./name').text = timeline.name + + # Add the sequence global start + if timeline.global_start_time is not None: + seq_tc_metadata = timeline.metadata.get(META_NAMESPACE, {}).get( + "timecode" + ) + tc_element = _build_timecode_from_metadata( + timeline.global_start_time, seq_tc_metadata + ) + sequence_e.append(tc_element) + + return sequence_e + + +@_backreference_build('sequence') +def _build_sequence_for_stack(stack, timeline_range, br_map): + sequence_e = _element_with_item_metadata("sequence", stack) + + _add_stack_elements_to_sequence(stack, sequence_e, timeline_range, br_map) + + return sequence_e + + +def _add_stack_elements_to_sequence(stack, sequence_e, timeline_range, br_map): + _append_new_sub_element(sequence_e, 'name', text=stack.name) + _append_new_sub_element( + sequence_e, 'duration', + text='{:.0f}'.format(timeline_range.duration.value) + ) + sequence_e.append(_build_rate(timeline_range.start_time.rate)) + track_rate = timeline_range.start_time.rate + + media_e = _get_or_create_subelement(sequence_e, "media") + video_e = _get_or_create_subelement(media_e, 'video') + audio_e = _get_or_create_subelement(media_e, 'audio') + + for track in stack: + track_elements = _build_top_level_track(track, track_rate, br_map) + if track.kind == schema.TrackKind.Video: + video_e.append(track_elements) + elif track.kind == schema.TrackKind.Audio: + audio_e.append(track_elements) + + for marker in stack.markers: + sequence_e.append(_build_marker(marker)) + + +def _build_collection(collection, br_map): + tracks = [] + for item in collection: + if not isinstance(item, schema.Timeline): + continue + + timeline_range = opentime.TimeRange( + start_time=item.global_start_time, + duration=item.duration() + ) + tracks.append( + _build_sequence_for_timeline(item, timeline_range, br_map) + ) + + return tracks + + +# -------------------- +# adapter requirements +# -------------------- + +def read_from_string(input_str): + tree = cElementTree.fromstring(input_str) + + parser = FCP7XMLParser(tree) + 
sequences = parser.top_level_sequences() + + if len(sequences) == 1: + return sequences[0] + elif len(sequences) > 1: + return schema.SerializableCollection( + name="Sequences", + children=sequences, + ) + else: + raise ValueError('No top-level sequences found') + + +def write_to_string(input_otio): + tree_e = cElementTree.Element('xmeml', version="4") + project_e = _append_new_sub_element(tree_e, 'project') + _append_new_sub_element(project_e, 'name', text=input_otio.name) + children_e = _append_new_sub_element(project_e, 'children') + + br_map = collections.defaultdict(dict) + + if isinstance(input_otio, schema.Timeline): + timeline_range = opentime.TimeRange( + start_time=input_otio.global_start_time, + duration=input_otio.duration() + ) + children_e.append( + _build_sequence_for_timeline( + input_otio, timeline_range, br_map + ) + ) + elif isinstance(input_otio, schema.SerializableCollection): + children_e.extend( + _build_collection(input_otio, br_map) + ) + + return _make_pretty_string(tree_e) diff --git a/pype/vendor/python/python_2/opentimelineio/adapters/otio_json.py b/pype/vendor/python/python_2/opentimelineio/adapters/otio_json.py new file mode 100644 index 00000000000..66b8db29044 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/adapters/otio_json.py @@ -0,0 +1,48 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""This adapter lets you read and write native .otio files""" + +from .. import ( + core +) + + +# @TODO: Implement out of process plugins that hand around JSON + + +def read_from_file(filepath): + return core.deserialize_json_from_file(filepath) + + +def read_from_string(input_str): + return core.deserialize_json_from_string(input_str) + + +def write_to_string(input_otio): + return core.serialize_json_to_string(input_otio) + + +def write_to_file(input_otio, filepath): + return core.serialize_json_to_file(input_otio, filepath) diff --git a/pype/vendor/python/python_2/opentimelineio/algorithms/__init__.py b/pype/vendor/python/python_2/opentimelineio/algorithms/__init__.py new file mode 100644 index 00000000000..e211598bb37 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/algorithms/__init__.py @@ -0,0 +1,44 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. 
is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Algorithms for OTIO objects.""" + +# flake8: noqa +from .track_algo import ( + track_trimmed_to_range, + track_with_expanded_transitions +) + +from .stack_algo import ( + flatten_stack, + top_clip_at_time, +) + +from .filter import ( + filtered_composition, + filtered_with_sequence_context +) +from .timeline_algo import ( + timeline_trimmed_to_range +) diff --git a/pype/vendor/python/python_2/opentimelineio/algorithms/filter.py b/pype/vendor/python/python_2/opentimelineio/algorithms/filter.py new file mode 100644 index 00000000000..8f9e2ed41bb --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/algorithms/filter.py @@ -0,0 +1,275 @@ +#!/usr/bin/env python +# +# Copyright 2018 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Algorithms for filtering OTIO files. """ + +import copy + +from .. import ( + schema +) + + +def _is_in(thing, container): + return any(thing is item for item in container) + + +def _isinstance_in(child, typelist): + return any(isinstance(child, t) for t in typelist) + + +def filtered_composition( + root, + unary_filter_fn, + types_to_prune=None, +): + """Filter a deep copy of root (and children) with unary_filter_fn. + + types_to_prune:: tuple of types, example: (otio.schema.Gap,...) + + 1. Make a deep copy of root + 2. Starting with root, perform a depth first traversal + 3. For each item (including root): + a. if types_to_prune is not None and item is an instance of a type + in types_to_prune, prune it from the copy, continue. + b. Otherwise, pass the copy to unary_filter_fn. If unary_filter_fn: + I. returns an object: add it to the copy, replacing original + II. returns a tuple: insert it into the list, replacing original + III. returns None: prune it + 4. 
If an item is pruned, do not traverse its children + 5. Return the new deep copy. + + EXAMPLE 1 (filter): + If your unary function is: + def fn(thing): + if thing.name == B: + return thing' # some transformation of B + else: + return thing + + If you have a track: [A,B,C] + + filtered_composition(track, fn) => [A,B',C] + + EXAMPLE 2 (prune): + If your unary function is: + def fn(thing): + if thing.name == B: + return None + else: + return thing + + filtered_composition(track, fn) => [A,C] + + EXAMPLE 3 (expand): + If your unary function is: + def fn(thing): + if thing.name == B: + return tuple(B_1,B_2,B_3) + else: + return thing + + filtered_composition(track, fn) => [A,B_1,B_2,B_3,C] + + EXAMPLE 4 (prune gaps): + track :: [Gap, A, Gap] + filtered_composition( + track, lambda _:_, types_to_prune=(otio.schema.Gap,)) => [A] + """ + + # deep copy everything + mutable_object = copy.deepcopy(root) + + prune_list = set() + + header_list = [mutable_object] + + if isinstance(mutable_object, schema.Timeline): + header_list.append(mutable_object.tracks) + + iter_list = header_list + list(mutable_object.each_child()) + + for child in iter_list: + if _safe_parent(child) is not None and _is_in(child.parent(), prune_list): + prune_list.add(child) + continue + + parent = None + child_index = None + if _safe_parent(child) is not None: + child_index = child.parent().index(child) + parent = child.parent() + del child.parent()[child_index] + + # first try to prune + if (types_to_prune and _isinstance_in(child, types_to_prune)): + result = None + # finally call the user function + else: + result = unary_filter_fn(child) + + if child is mutable_object: + mutable_object = result + + if result is None: + prune_list.add(child) + continue + + if type(result) is not tuple: + result = [result] + + if parent is not None: + parent[child_index:child_index] = result + + return mutable_object + + +def _safe_parent(child): + if hasattr(child, 'parent'): + return child.parent() + return None + + +def filtered_with_sequence_context( + root, + reduce_fn, + types_to_prune=None, +): + """Filter a deep copy of root (and children) with reduce_fn. + + reduce_fn::function(previous_item, current, next_item) (see below) + types_to_prune:: tuple of types, example: (otio.schema.Gap,...) + + 1. Make a deep copy of root + 2. Starting with root, perform a depth first traversal + 3. For each item (including root): + a. if types_to_prune is not None and item is an instance of a type + in types_to_prune, prune it from the copy, continue. + b. Otherwise, pass (prev, copy, and next) to reduce_fn. If reduce_fn: + I. returns an object: add it to the copy, replacing original + II. returns a tuple: insert it into the list, replacing original + III. returns None: prune it + + ** note that reduce_fn is always passed objects from the original + deep copy, not what prior calls return. See below for examples + 4. If an item is pruned, do not traverse its children + 5. Return the new deep copy. + + EXAMPLE 1 (filter): + >>> track = [A,B,C] + >>> def fn(prev_item, thing, next_item): + ... if prev_item.name == A: + ... return D # some new clip + ... else: + ... return thing + >>> filtered_with_sequence_context(track, fn) => [A,D,C] + + order of calls to fn: + fn(None, A, B) => A + fn(A, B, C) => D + fn(B, C, D) => C # !! note that it was passed B instead of D. + + EXAMPLE 2 (prune): + >>> track = [A,B,C] + >>> def fn(prev_item, thing, next_item): + ... if prev_item.name == A: + ... return None # prune the clip + ... else: + ... 
return thing + >>> filtered_with_sequence_context(track, fn) => [A,C] + + order of calls to fn: + fn(None, A, B) => A + fn(A, B, C) => None + fn(B, C, D) => C # !! note that it was passed B instead of D. + + EXAMPLE 3 (expand): + >>> def fn(prev_item, thing, next_item): + ... if prev_item.name == A: + ... return (D, E) # tuple of new clips + ... else: + ... return thing + >>> filtered_with_sequence_context(track, fn) => [A, D, E, C] + + the order of calls to fn will be: + fn(None, A, B) => A + fn(A, B, C) => (D, E) + fn(B, C, D) => C # !! note that it was passed B instead of D. + """ + + # deep copy everything + mutable_object = copy.deepcopy(root) + + prune_list = set() + + header_list = [mutable_object] + + if isinstance(mutable_object, schema.Timeline): + header_list.append(mutable_object.tracks) + + iter_list = header_list + list(mutable_object.each_child()) + + # expand to include prev, next when appropriate + expanded_iter_list = [] + for child in iter_list: + if _safe_parent(child) and isinstance(child.parent(), schema.Track): + prev_item, next_item = child.parent().neighbors_of(child) + expanded_iter_list.append((prev_item, child, next_item)) + else: + expanded_iter_list.append((None, child, None)) + + for prev_item, child, next_item in expanded_iter_list: + if _safe_parent(child) is not None and _is_in(child.parent(), prune_list): + prune_list.add(child) + continue + + parent = None + child_index = None + if _safe_parent(child) is not None: + child_index = child.parent().index(child) + parent = child.parent() + del child.parent()[child_index] + + # first try to prune + if types_to_prune and _isinstance_in(child, types_to_prune): + result = None + # finally call the user function + else: + result = reduce_fn(prev_item, child, next_item) + + if child is mutable_object: + mutable_object = result + + if result is None: + prune_list.add(child) + continue + + if type(result) is not tuple: + result = [result] + + if parent is not None: + parent[child_index:child_index] = result + + return mutable_object diff --git a/pype/vendor/python/python_2/opentimelineio/algorithms/stack_algo.py b/pype/vendor/python/python_2/opentimelineio/algorithms/stack_algo.py new file mode 100644 index 00000000000..cdb6424b462 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/algorithms/stack_algo.py @@ -0,0 +1,138 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +__doc__ = """ Algorithms for stack objects. """ + +import copy + +from .. 
import (
+    schema,
+    opentime,
+)
+from . import (
+    track_algo
+)
+
+
+def top_clip_at_time(in_stack, t):
+    """Return the topmost visible child that overlaps with time t.
+
+    Example:
+    tr1: G1, A, G2
+    tr2: [B------]
+    G1 and G2 are gaps, A and B are clips.
+
+    If t is within A, A will be returned. If t is within G1 or G2, B will be
+    returned.
+    """
+
+    # ensure that it only runs on stacks
+    if not isinstance(in_stack, schema.Stack):
+        raise ValueError(
+            "Argument in_stack must be of type otio.schema.Stack, "
+            "not: '{}'".format(
+                type(in_stack)
+            )
+        )
+
+    # build a range to use the `each_child` method.
+    search_range = opentime.TimeRange(
+        start_time=t,
+        # 0 duration so we are just sampling a point in time.
+        # XXX Should this duration be equal to the length of one sample?
+        # opentime.RationalTime(1, rate)?
+        duration=opentime.RationalTime(0, t.rate)
+    )
+
+    # walk through the children of the stack in reverse order.
+    for track in reversed(in_stack):
+        valid_results = []
+        if hasattr(track, "each_child"):
+            valid_results = list(
+                c for c in track.each_clip(search_range, shallow_search=True)
+                if c.visible()
+            )
+
+        # XXX doesn't handle nested tracks/stacks at the moment
+
+        for result in valid_results:
+            return result
+
+    return None
+
+
+def flatten_stack(in_stack):
+    """Flatten a Stack, or a list of Tracks, into a single Track.
+    Note that the 1st Track is the bottom one, and the last is the top.
+    """
+
+    flat_track = schema.Track()
+    flat_track.name = "Flattened"
+
+    # map of track to track.range_of_all_children
+    range_track_map = {}
+
+    def _get_next_item(
+        in_stack,
+        track_index=None,
+        trim_range=None
+    ):
+        if track_index is None:
+            # start with the top-most track
+            track_index = len(in_stack) - 1
+        if track_index < 0:
+            # if you get to the bottom, you're done
+            return
+
+        track = in_stack[track_index]
+        if trim_range is not None:
+            track = track_algo.track_trimmed_to_range(track, trim_range)
+
+        track_map = range_track_map.get(track)
+        if track_map is None:
+            track_map = track.range_of_all_children()
+            range_track_map[track] = track_map
+
+        for item in track:
+            if (
+                item.visible()
+                or track_index == 0
+                or isinstance(item, schema.Transition)
+            ):
+                yield item
+            else:
+                trim = track_map[item]
+                if trim_range is not None:
+                    trim = opentime.TimeRange(
+                        start_time=trim.start_time + trim_range.start_time,
+                        duration=trim.duration
+                    )
+                    track_map[item] = trim
+                for more in _get_next_item(in_stack, track_index - 1, trim):
+                    yield more
+
+    for item in _get_next_item(in_stack):
+        flat_track.append(copy.deepcopy(item))
+
+    return flat_track
diff --git a/pype/vendor/python/python_2/opentimelineio/algorithms/timeline_algo.py b/pype/vendor/python/python_2/opentimelineio/algorithms/timeline_algo.py
new file mode 100644
index 00000000000..bbb0ae62755
--- /dev/null
+++ b/pype/vendor/python/python_2/opentimelineio/algorithms/timeline_algo.py
@@ -0,0 +1,56 @@
+#
+# Copyright 2019 Pixar Animation Studios
+#
+# Licensed under the Apache License, Version 2.0 (the "Apache License")
+# with the following modification; you may not use this file except in
+# compliance with the Apache License and the following modification to it:
+# Section 6. Trademarks. is deleted and replaced with:
+#
+# 6. Trademarks. This License does not grant permission to use the trade
+#    names, trademarks, service marks, or product names of the Licensor
+#    and its affiliates, except as required to comply with Section 4(c) of
+#    the License and to reproduce the content of the NOTICE file.
+# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Algorithms for timeline objects.""" + +import copy + +from . import ( + track_algo +) + + +def timeline_trimmed_to_range(in_timeline, trim_range): + """Returns a new timeline that is a copy of the in_timeline, but with items + outside the trim_range removed and items on the ends trimmed to the + trim_range. Note that the timeline is never expanded, only shortened. + Please note that you could do nearly the same thing non-destructively by + just setting the Track's source_range but sometimes you want to really cut + away the stuff outside and that's what this function is meant for.""" + new_timeline = copy.deepcopy(in_timeline) + + for track_num, child_track in enumerate(in_timeline.tracks): + # @TODO: put the trim_range into the space of the tracks + # new_range = new_timeline.tracks.transformed_time_range( + # trim_range, + # child_track + # ) + + # trim the track and assign it to the new stack. + new_timeline.tracks[track_num] = track_algo.track_trimmed_to_range( + child_track, + trim_range + ) + + return new_timeline diff --git a/pype/vendor/python/python_2/opentimelineio/algorithms/track_algo.py b/pype/vendor/python/python_2/opentimelineio/algorithms/track_algo.py new file mode 100644 index 00000000000..8ac406f1d6d --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/algorithms/track_algo.py @@ -0,0 +1,236 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Algorithms for track objects.""" + +import copy + +from .. import ( + schema, + exceptions, + opentime, +) + + +def track_trimmed_to_range(in_track, trim_range): + """Returns a new track that is a copy of the in_track, but with items + outside the trim_range removed and items on the ends trimmed to the + trim_range. Note that the track is never expanded, only shortened. 
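+    For example, trimming a track of two 10-frame clips [A, B] to a range
+    starting at frame 5 with a duration of 10 frames keeps the last 5 frames
+    of A and the first 5 frames of B.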
+ Please note that you could do nearly the same thing non-destructively by + just setting the Track's source_range but sometimes you want to really cut + away the stuff outside and that's what this function is meant for.""" + new_track = copy.deepcopy(in_track) + + track_map = new_track.range_of_all_children() + + # iterate backwards so we can delete items + for c, child in reversed(list(enumerate(new_track))): + child_range = track_map[child] + if not trim_range.overlaps(child_range): + # completely outside the trim range, so we discard it + del new_track[c] + elif trim_range.contains(child_range): + # completely contained, keep the whole thing + pass + else: + if isinstance(child, schema.Transition): + raise exceptions.CannotTrimTransitionsError( + "Cannot trim in the middle of a Transition." + ) + + # we need to clip the end(s) + child_source_range = child.trimmed_range() + + # should we trim the start? + if trim_range.start_time > child_range.start_time: + trim_amount = trim_range.start_time - child_range.start_time + child_source_range = opentime.TimeRange( + start_time=child_source_range.start_time + trim_amount, + duration=child_source_range.duration - trim_amount + + ) + + # should we trim the end? + trim_end = trim_range.end_time_exclusive() + child_end = child_range.end_time_exclusive() + if trim_end < child_end: + trim_amount = child_end - trim_end + child_source_range = opentime.TimeRange( + start_time=child_source_range.start_time, + duration=child_source_range.duration - trim_amount + + ) + + # set the new child's trims + child.source_range = child_source_range + + return new_track + + +def track_with_expanded_transitions(in_track): + """Expands transitions such that neighboring clips are trimmed into + regions of overlap. + + For example, if your track is: + Clip1, T, Clip2 + + will return: + Clip1', Clip1_t, T, Clip2_t, Clip2' + + Where Clip1' is the part of Clip1 not in the transition, Clip1_t is the + part inside the transition and so on. + """ + + result_track = [] + + seq_iter = iter(in_track) + prev_thing = None + thing = next(seq_iter, None) + next_thing = next(seq_iter, None) + + while thing is not None: + if isinstance(thing, schema.Transition): + result_track.append(_expand_transition(thing, in_track)) + else: + # not a transition, but might be trimmed by one before or after + # in the track + pre_transition = None + next_transition = None + + if isinstance(prev_thing, schema.Transition): + pre_transition = prev_thing + + if isinstance(next_thing, schema.Transition): + next_transition = next_thing + + result_track.append( + _trim_from_transitions( + thing, + pre=pre_transition, + post=next_transition + ) + ) + + # loop + prev_thing = thing + thing = next_thing + next_thing = next(seq_iter, None) + + return result_track + + +def _expand_transition(target_transition, from_track): + """ Expand transitions into the portions of pre-and-post clips that + overlap with the transition. 
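+
+    Returns a tuple of (pre, target_transition, post), where ``pre`` and
+    ``post`` are trimmed copies of the neighboring clips covering only the
+    region that overlaps the transition.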
+ """ + + result = from_track.neighbors_of( + target_transition, + schema.NeighborGapPolicy.around_transitions + ) + + trx_duration = target_transition.in_offset + target_transition.out_offset + + # make copies of the before and after, and modify their in/out points + pre = copy.deepcopy(result.previous) + + if isinstance(pre, schema.Transition): + raise exceptions.TransitionFollowingATransitionError( + "cannot put two transitions next to each other in a track: " + "{}, {}".format( + pre, + target_transition + ) + ) + if target_transition.in_offset is None: + raise RuntimeError( + "in_offset is None on: {}".format(target_transition) + ) + + if target_transition.out_offset is None: + raise RuntimeError( + "out_offset is None on: {}".format(target_transition) + ) + + pre.name = (pre.name or "") + "_transition_pre" + + # ensure that pre.source_range is set, because it will get manipulated + tr = pre.trimmed_range() + + pre.source_range = opentime.TimeRange( + start_time=( + tr.end_time_exclusive() - target_transition.in_offset + ), + duration=trx_duration.rescaled_to( + tr.start_time + ) + ) + + post = copy.deepcopy(result.next) + if isinstance(post, schema.Transition): + raise exceptions.TransitionFollowingATransitionError( + "cannot put two transitions next to each other in a track: " + "{}, {}".format( + target_transition, + post + ) + ) + + post.name = (post.name or "") + "_transition_post" + + # ensure that post.source_range is set, because it will get manipulated + tr = post.trimmed_range() + + post.source_range = opentime.TimeRange( + start_time=( + tr.start_time - target_transition.in_offset + ).rescaled_to(tr.start_time), + duration=trx_duration.rescaled_to(tr.start_time) + ) + + return pre, target_transition, post + + +def _trim_from_transitions(thing, pre=None, post=None): + """ Trim clips next to transitions. """ + + result = copy.deepcopy(thing) + + # We might not have a source_range yet, + # We can trim to the computed trimmed_range to + # ensure we have something. + new_range = result.trimmed_range() + start_time = new_range.start_time + duration = new_range.duration + + if pre: + start_time += pre.out_offset + duration -= pre.out_offset + + if post: + duration -= post.in_offset + + result.source_range = opentime.TimeRange(start_time, duration) + + return result diff --git a/pype/vendor/python/python_2/opentimelineio/console/__init__.py b/pype/vendor/python/python_2/opentimelineio/console/__init__.py new file mode 100644 index 00000000000..e5f6e869880 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/console/__init__.py @@ -0,0 +1,40 @@ +# +# Copyright 2018 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. 
+# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Console scripts for OpenTimelineIO + +.. moduleauthor:: Pixar Animation Studios +""" + +# flake8: noqa + +# in dependency hierarchy +from . import ( + otioconvert, + otiocat, + otiostat, + console_utils, + autogen_serialized_datamodel, +) + diff --git a/pype/vendor/python/python_2/opentimelineio/console/autogen_serialized_datamodel.py b/pype/vendor/python/python_2/opentimelineio/console/autogen_serialized_datamodel.py new file mode 100644 index 00000000000..046e8cbd1c8 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/console/autogen_serialized_datamodel.py @@ -0,0 +1,302 @@ +#!/usr/bin/env python +# +# Copyright 2019 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + + +"""Generates documentation of the serialized data model for OpenTimelineIO.""" + +import argparse +import inspect +import json +import tempfile +import sys + +try: + # python2 + import StringIO as io +except ImportError: + # python3 + import io + +import opentimelineio as otio + + +DOCUMENT_HEADER = """# OpenTimelineIO Serialized Data Documentation + +This document is a list of all the OpenTimelineIO classes that serialize to and +from JSON, omitting SchemaDef plugins. + +This document is automatically generated by running + docs/autogen_serialized_datamodel.py, or by running `make doc-model`. It is + part of the unit tests suite and should be updated whenever the schema changes. + If it needs to be updated, run: `make doc-model-update` and this file should be + regenerated. + +# Classes + +""" + +FIELDS_ONLY_HEADER = """# OpenTimelineIO Serialized Data Documentation + +This document is a list of all the OpenTimelineIO classes that serialize to and +from JSON, omitting plugins classes and docstrings. + +This document is automatically generated by running + docs/autogen_serialized_datamodel.py, or by running `make doc-model`. It is + part of the unit tests suite and should be updated whenever the schema changes. + If it needs to be updated, run: `make doc-model-update` and this file should be + regenerated. 
+ +# Classes + +""" + +CLASS_HEADER_WITH_DOCS = """ +### {classname} + +*full module path*: `{modpath}` + +*documentation*: + +``` +{docstring} +``` + +parameters: +""" + +CLASS_HEADER_ONLY_FIELDS = """ +### {classname} + +parameters: +""" + +MODULE_HEADER = """ +## Module: {modname} +""" + +PROP_HEADER = """- *{propkey}*: {prophelp} +""" + +# @TODO: having type information here would be awesome +PROP_HEADER_NO_HELP = """- *{propkey}* +""" + +# three ways to try and get the property + docstring +PROP_FETCHERS = ( + lambda cl, k: inspect.getdoc(getattr(cl, k)), + lambda cl, k: inspect.getdoc(getattr(cl, "_" + k)), + lambda cl, k: inspect.getdoc(getattr(cl(), k)) and "" or "", +) + + +def _parsed_args(): + """ parse commandline arguments with argparse """ + + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + group = parser.add_mutually_exclusive_group() + group.add_argument( + "-d", + "--dryrun", + action="store_true", + default=False, + help="Dryrun mode - print out instead of perform actions" + ) + group.add_argument( + "-o", + "--output", + type=str, + default=None, + help="Update the baseline with the current version" + ) + + return parser.parse_args() + + +# things to skip +SKIP_CLASSES = [otio.core.SerializableObject, otio.core.UnknownSchema] +SKIP_KEYS = ["OTIO_SCHEMA"] # not data, just for the backing format +SKIP_MODULES = ["opentimelineio.schemadef"] # because these are plugins + + +def _generate_model_for_module(mod, classes, modules): + modules.add(mod) + + # fetch the classes from this module + serializeable_classes = [ + thing for thing in mod.__dict__.values() + if ( + inspect.isclass(thing) + and thing not in classes + and issubclass(thing, otio.core.SerializableObject) + or thing in ( + otio.opentime.RationalTime, + otio.opentime.TimeRange, + otio.opentime.TimeTransform, + ) + ) + ] + + # serialize/deserialize the classes to capture their serialized parameters + model = {} + for cl in serializeable_classes: + if cl in SKIP_CLASSES: + continue + + model[cl] = {} + field_dict = json.loads(otio.adapters.otio_json.write_to_string(cl())) + for k in field_dict.keys(): + if k in SKIP_KEYS: + continue + + for fetcher in PROP_FETCHERS: + try: + model[cl][k] = fetcher(cl, k) + break + except AttributeError: + pass + else: + sys.stderr.write("ERROR: could not fetch property: {}".format(k)) + + # Stashing the OTIO_SCHEMA back into the dictionary since the + # documentation uses this information in its header. 
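+        # (Schema labels have the form "SchemaName.version", e.g.
+        # "Timeline.1".)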
+ model[cl]["OTIO_SCHEMA"] = field_dict["OTIO_SCHEMA"] + + classes.update(model) + + # find new modules to recurse into + new_mods = sorted( + ( + thing for thing in mod.__dict__.values() + if ( + inspect.ismodule(thing) + and thing not in modules + and all(not thing.__name__.startswith(t) for t in SKIP_MODULES) + ) + ), + key=lambda mod: str(mod) + ) + + # recurse into the new modules and update the classes and modules values + [_generate_model_for_module(m, classes, modules) for m in new_mods] + + +def _generate_model(): + classes = {} + modules = set() + _generate_model_for_module(otio, classes, modules) + return classes + + +def _write_documentation(model): + md_with_helpstrings = io.StringIO() + md_only_fields = io.StringIO() + + md_with_helpstrings.write(DOCUMENT_HEADER) + md_only_fields.write(FIELDS_ONLY_HEADER) + + modules = {} + for cl in model: + modules.setdefault(cl.__module__, []).append(cl) + + CURRENT_MODULE = None + for module_list in sorted(modules): + this_mod = ".".join(module_list.split('.')[:2]) + if this_mod != CURRENT_MODULE: + CURRENT_MODULE = this_mod + md_with_helpstrings.write(MODULE_HEADER.format(modname=this_mod)) + md_only_fields.write(MODULE_HEADER.format(modname=this_mod)) + + # because these are classes, they need to sort on their stringified + # names + for cl in sorted(modules[module_list], key=lambda cl: str(cl)): + modname = inspect.getmodule(cl).__name__ + label = model[cl]["OTIO_SCHEMA"] + md_with_helpstrings.write( + CLASS_HEADER_WITH_DOCS.format( + classname=label, + modpath=modname + "." + cl.__name__, + docstring=cl.__doc__ + ) + ) + md_only_fields.write( + CLASS_HEADER_ONLY_FIELDS.format( + classname=label, + ) + ) + + for key, helpstr in sorted(model[cl].items()): + if key in SKIP_KEYS: + continue + md_with_helpstrings.write( + PROP_HEADER.format(propkey=key, prophelp=helpstr) + ) + md_only_fields.write( + PROP_HEADER_NO_HELP.format(propkey=key) + ) + + return md_with_helpstrings.getvalue(), md_only_fields.getvalue() + + +def main(): + """ main entry point """ + args = _parsed_args() + with_docs, without_docs = generate_and_write_documentation() + + # print it out somewhere + if args.dryrun: + print(with_docs) + return + + output = args.output + if not output: + output = tempfile.NamedTemporaryFile( + 'w', + suffix="otio_serialized_schema.md", + delete=False + ).name + + with open(output, 'w') as fo: + fo.write(with_docs) + + # write version without docstrings + prefix, suffix = output.rsplit('.', 1) + output_only_fields = prefix + "-only-fields." + suffix + + with open(output_only_fields, 'w') as fo: + fo.write(without_docs) + + print("wrote documentation to {} and {}".format(output, output_only_fields)) + + +def generate_and_write_documentation(): + model = _generate_model() + return _write_documentation(model) + + +if __name__ == '__main__': + main() diff --git a/pype/vendor/python/python_2/opentimelineio/console/console_utils.py b/pype/vendor/python/python_2/opentimelineio/console/console_utils.py new file mode 100644 index 00000000000..9c659433e39 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/console/console_utils.py @@ -0,0 +1,72 @@ +# +# Copyright 2019 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. 
This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +import ast + +from .. import ( + media_linker, +) + +"""Utilities for OpenTimelineIO commandline modules.""" + + +def arg_list_to_map(arg_list, label): + """ + Convert an argument of the form -A foo=bar from the parsed result to a map. + """ + + argument_map = {} + for pair in arg_list: + if '=' not in pair: + raise ValueError( + "error: {} arguments must be in the form key=value" + " got: {}".format(label, pair) + ) + + key, val = pair.split('=', 1) # only split on the 1st '=' + try: + # Sometimes we need to pass a bool, int, list, etc. + parsed_value = ast.literal_eval(val) + except (ValueError, SyntaxError): + # Fall back to a simple string + parsed_value = val + argument_map[key] = parsed_value + + return argument_map + + +def media_linker_name(ml_name_arg): + """ + Parse commandline arguments for the media linker, which can be not set + (fall back to default), "" or "none" (don't link media) or the name of a + media linker to use. + """ + if ml_name_arg.lower() == 'default': + media_linker_name = media_linker.MediaLinkingPolicy.ForceDefaultLinker + elif ml_name_arg.lower() in ['none', '']: + media_linker_name = media_linker.MediaLinkingPolicy.DoNotLinkMedia + else: + media_linker_name = ml_name_arg + + return media_linker_name diff --git a/pype/vendor/python/python_2/opentimelineio/console/otiocat.py b/pype/vendor/python/python_2/opentimelineio/console/otiocat.py new file mode 100644 index 00000000000..95131445120 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/console/otiocat.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. 
+# + +"""Print the contents of an OTIO file to stdout.""" + +import argparse +import sys + +import opentimelineio as otio + + +def _parsed_args(): + """ parse commandline arguments with argparse """ + + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + 'filepath', + type=str, + nargs='+', + help='files to print the contents of' + ) + parser.add_argument( + '-a', + '--adapter-arg', + type=str, + default=[], + action='append', + help='Extra arguments to be passed to input adapter in the form of ' + 'key=value. Values are strings, numbers or Python literals: True, ' + 'False, etc. Can be used multiple times: -a burrito="bar" -a taco=12.' + ) + parser.add_argument( + '-m', + '--media-linker', + type=str, + default="Default", + help=( + "Specify a media linker. 'Default' means use the " + "$OTIO_DEFAULT_MEDIA_LINKER if set, 'None' or '' means explicitly " + "disable the linker, and anything else is interpreted as the name" + " of the media linker to use." + ) + ) + parser.add_argument( + '-M', + '--media-linker-arg', + type=str, + default=[], + action='append', + help='Extra arguments to be passed to the media linker in the form of ' + 'key=value. Values are strings, numbers or Python literals: True, ' + 'False, etc. Can be used multiple times: -M burrito="bar" -M taco=12.' + ) + + return parser.parse_args() + + +def _otio_compatible_file_to_json_string( + fpath, + media_linker_name, + media_linker_argument_map, + adapter_argument_map +): + """Read the file at fpath with the default otio adapter and return the json + as a string. + """ + + adapter = otio.adapters.from_name("otio_json") + return adapter.write_to_string( + otio.adapters.read_from_file( + fpath, + media_linker_name=media_linker_name, + media_linker_argument_map=media_linker_argument_map, + **adapter_argument_map + ) + ) + + +def main(): + """Parse arguments and call _otio_compatible_file_to_json_string.""" + + args = _parsed_args() + + media_linker_name = otio.console.console_utils.media_linker_name( + args.media_linker + ) + + try: + read_adapter_arg_map = otio.console.console_utils.arg_list_to_map( + args.adapter_arg, + "adapter" + ) + media_linker_argument_map = otio.console.console_utils.arg_list_to_map( + args.media_linker_arg, + "media linker" + ) + except ValueError as exc: + sys.stderr.write("\n" + str(exc) + "\n") + sys.exit(1) + + for fpath in args.filepath: + print( + _otio_compatible_file_to_json_string( + fpath, + media_linker_name, + media_linker_argument_map, + read_adapter_arg_map + ) + ) + + +if __name__ == '__main__': + main() diff --git a/pype/vendor/python/python_2/opentimelineio/console/otioconvert.py b/pype/vendor/python/python_2/opentimelineio/console/otioconvert.py new file mode 100644 index 00000000000..9d45a0fcf49 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/console/otioconvert.py @@ -0,0 +1,259 @@ +#!/usr/bin/env python +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. 
This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +import argparse +import sys +import copy + +import opentimelineio as otio + +__doc__ = """ Python wrapper around OTIO to convert timeline files between \ +formats. + +Available adapters: {} +""".format(otio.adapters.available_adapter_names()) + + +def _parsed_args(): + """ parse commandline arguments with argparse """ + + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '-i', + '--input', + type=str, + required=True, + help='path to input file', + ) + parser.add_argument( + '-o', + '--output', + type=str, + required=True, + help='path to output file', + ) + parser.add_argument( + '-I', + '--input-adapter', + type=str, + default=None, + help="Explicitly use this adapter for reading the input file", + ) + parser.add_argument( + '-O', + '--output-adapter', + type=str, + default=None, + help="Explicitly use this adapter for writing the output file", + ) + parser.add_argument( + '-T', + '--tracks', + type=str, + default=None, + help="Pick one or more tracks, by 0-based index, separated by commas.", + ) + parser.add_argument( + '-m', + '--media-linker', + type=str, + default="Default", + help=( + "Specify a media linker. 'Default' means use the " + "$OTIO_DEFAULT_MEDIA_LINKER if set, 'None' or '' means explicitly " + "disable the linker, and anything else is interpreted as the name" + " of the media linker to use." + ) + ) + parser.add_argument( + '-M', + '--media-linker-arg', + type=str, + default=[], + action='append', + help='Extra arguments to be passed to the media linker in the form of ' + 'key=value. Values are strings, numbers or Python literals: True, ' + 'False, etc. Can be used multiple times: -M burrito="bar" -M taco=12.' + ) + parser.add_argument( + '-a', + '--adapter-arg', + type=str, + default=[], + action='append', + help='Extra arguments to be passed to input adapter in the form of ' + 'key=value. Values are strings, numbers or Python literals: True, ' + 'False, etc. Can be used multiple times: -a burrito="bar" -a taco=12.' + ) + parser.add_argument( + '-A', + '--output-adapter-arg', + type=str, + default=[], + action='append', + help='Extra arguments to be passed to output adapter in the form of ' + 'key=value. Values are strings, numbers or Python literals: True, ' + 'False, etc. Can be used multiple times: -A burrito="bar" -A taco=12.' + ) + trim_args = parser.add_argument_group( + title="Trim Arguments", + description="Arguments that allow you to trim the OTIO file." + ) + trim_args.add_argument( + '--begin', + type=str, + default=None, + help=( + "Trim out everything in the timeline before this time, in the " + "global time frame of the timeline. Argument should be in the form" + ' "VALUE,RATE", eg: --begin "10,24". Requires --end argument.' 
+        ),
+    )
+    trim_args.add_argument(
+        '--end',
+        type=str,
+        default=None,
+        help=(
+            "Trim out everything in the timeline after this time, in the "
+            "global time frame of the timeline. Argument should be in the form"
+            ' "VALUE,RATE", eg: --end "10,24". Requires --begin argument.'
+        ),
+    )
+
+    result = parser.parse_args()
+
+    if result.begin is not None and result.end is None:
+        parser.error("--begin requires --end.")
+    if result.end is not None and result.begin is None:
+        parser.error("--end requires --begin.")
+
+    if result.begin is not None:
+        try:
+            value, rate = result.begin.split(",")
+            result.begin = otio.opentime.RationalTime(float(value), float(rate))
+        except ValueError:
+            parser.error(
+                "--begin argument needs to be of the form: VALUE,RATE where "
+                "VALUE is the (float) time value of the resulting RationalTime "
+                "and RATE is the (float) time rate of the resulting RationalTime,"
+                " not '{}'".format(result.begin)
+            )
+
+    if result.end is not None:
+        try:
+            value, rate = result.end.split(",")
+            result.end = otio.opentime.RationalTime(float(value), float(rate))
+        except ValueError:
+            parser.error(
+                "--end argument needs to be of the form: VALUE,RATE where "
+                "VALUE is the (float) time value of the resulting RationalTime "
+                "and RATE is the (float) time rate of the resulting RationalTime,"
+                " not '{}'".format(result.end)
+            )
+
+    return result
+
+
+def main():
+    """Parse arguments and convert the files."""
+
+    args = _parsed_args()
+
+    in_adapter = args.input_adapter
+    if in_adapter is None:
+        in_adapter = otio.adapters.from_filepath(args.input).name
+
+    out_adapter = args.output_adapter
+    if out_adapter is None:
+        out_adapter = otio.adapters.from_filepath(args.output).name
+
+    media_linker_name = otio.console.console_utils.media_linker_name(
+        args.media_linker
+    )
+
+    try:
+        read_adapter_arg_map = otio.console.console_utils.arg_list_to_map(
+            args.adapter_arg,
+            "input adapter"
+        )
+        ml_args = otio.console.console_utils.arg_list_to_map(
+            args.media_linker_arg,
+            "media linker"
+        )
+    except ValueError as exc:
+        sys.stderr.write("\n" + str(exc) + "\n")
+        sys.exit(1)
+
+    result_tl = otio.adapters.read_from_file(
+        args.input,
+        in_adapter,
+        media_linker_name=media_linker_name,
+        media_linker_argument_map=ml_args,
+        **read_adapter_arg_map
+    )
+
+    if args.tracks:
+        result_tracks = copy.deepcopy(otio.schema.Stack())
+        del result_tracks[:]
+        for track in args.tracks.split(","):
+            tr = result_tl.tracks[int(track)]
+            del result_tl.tracks[int(track)]
+            print("track {0} is of kind: '{1}'".format(track, tr.kind))
+            result_tracks.append(tr)
+        result_tl.tracks = result_tracks
+
+    # handle trim arguments
+    if args.begin is not None and args.end is not None:
+        result_tl = otio.algorithms.timeline_trimmed_to_range(
+            result_tl,
+            otio.opentime.range_from_start_end_time(args.begin, args.end)
+        )
+
+    try:
+        write_adapter_arg_map = otio.console.console_utils.arg_list_to_map(
+            args.output_adapter_arg,
+            "output adapter"
+        )
+    except ValueError as exc:
+        sys.stderr.write("\n" + str(exc) + "\n")
+        sys.exit(1)
+
+    otio.adapters.write_to_file(
+        result_tl,
+        args.output,
+        out_adapter,
+        **write_adapter_arg_map
+    )
+
+
+if __name__ == '__main__':
+    try:
+        main()
+    except otio.exceptions.OTIOError as err:
+        sys.stderr.write("ERROR: " + str(err) + "\n")
+        sys.exit(1)
diff --git a/pype/vendor/python/python_2/opentimelineio/console/otiostat.py b/pype/vendor/python/python_2/opentimelineio/console/otiostat.py
new file mode 100644
index 00000000000..9cd554727ab
--- /dev/null
+++ 
b/pype/vendor/python/python_2/opentimelineio/console/otiostat.py @@ -0,0 +1,193 @@ +#!/usr/bin/env python +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Print statistics about the otio file, including validation information.""" + +import argparse +import sys + +import opentimelineio as otio + + +def _parsed_args(): + """ parse commandline arguments with argparse """ + + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + 'filepath', + type=str, + nargs='+', + help='files to operate on' + ) + + return parser.parse_args() + + +TESTS = [] + + +def stat_check(name): + def real_stat_check(fn): + TESTS.append((name, fn)) + return fn + return real_stat_check + + +@stat_check("parsed") +def _did_parse(input): + return input and True or False + + +@stat_check("top level object") +def _top_level_object(input): + return input._serializable_label + + +@stat_check("number of tracks") +def _num_tracks(input): + try: + return len(input.tracks) + except AttributeError: + return 0 + + +@stat_check("Tracks are the same length") +def _equal_length_tracks(tl): + if not tl.tracks: + return True + for i, track in enumerate(tl.tracks): + if track.duration() != tl.tracks[0].duration(): + raise RuntimeError( + "track {} is not the same duration as the other tracks." 
+ " Track {} duration, vs: {}".format( + i, + track.duration(), + tl.tracks[0].duration() + ) + ) + return True + + +@stat_check("deepest nesting") +def _deepest_nesting(input): + def depth(parent): + if not isinstance(parent, otio.core.Composition): + return 1 + d = 0 + for child in parent: + d = max(d, depth(child) + 1) + return d + if isinstance(input, otio.schema.Timeline): + return depth(input.tracks) + 1 + else: + return depth(input) + + +@stat_check("number of clips") +def _num_clips(input): + return len(list(input.each_clip())) + + +@stat_check("total duration") +def _total_duration(input): + try: + return input.tracks.duration() + except AttributeError: + return "n/a" + + +@stat_check("total duration in timecode") +def _total_duration_timecode(input): + try: + d = input.tracks.duration() + return otio.opentime.to_timecode(d, d.rate) + except AttributeError: + return "n/a" + + +@stat_check("top level rate") +def _top_level_rate(input): + try: + return input.tracks.duration().rate + except AttributeError: + return "n/a" + + +@stat_check("clips with cdl data") +def _clips_with_cdl_data(input): + return len(list(c for c in input.each_clip() if 'cdl' in c.metadata)) + + +@stat_check("Tracks with non standard types") +def _sequences_with_non_standard_types(input): + return len( + list( + c + for c in input.each_child(descended_from_type=otio.schema.Track) + if c.kind not in (otio.schema.TrackKind.__dict__) + ) + ) + + +def _stat_otio(input_otio): + for (test, testfunc) in TESTS: + try: + print("{}: {}".format(test, testfunc(input_otio))) + except (otio.exceptions.OTIOError) as e: + sys.stderr.write( + "There was an OTIO Error: " + " {}\n".format(e), + ) + continue + except (Exception) as e: + sys.stderr.write("There was a system error: {}\n".format(e)) + continue + + +def main(): + """ main entry point """ + args = _parsed_args() + + for fp in args.filepath: + try: + parsed_otio = otio.adapters.read_from_file(fp) + except (otio.exceptions.OTIOError) as e: + sys.stderr.write( + "The file did not successfully parse, with error:" + " {}\n".format(e), + ) + continue + except (Exception) as e: + sys.stderr.write("There was a system error: {}\n".format(e)) + continue + + _stat_otio(parsed_otio) + + +if __name__ == '__main__': + main() diff --git a/pype/vendor/python/python_2/opentimelineio/core/__init__.py b/pype/vendor/python/python_2/opentimelineio/core/__init__.py new file mode 100644 index 00000000000..ac5c0bbcc00 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/core/__init__.py @@ -0,0 +1,67 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Internal implementation details of OpenTimelineIO.""" + +# flake8: noqa + +from . import ( + serializable_object +) +from .serializable_object import ( + SerializableObject, + serializable_field, + deprecated_field, +) +from .composable import ( + Composable +) +from .item import ( + Item +) +from . import composition +from .composition import ( + Composition, +) +from . import type_registry +from .type_registry import ( + register_type, + upgrade_function_for, + schema_name_from_label, + schema_version_from_label, + instance_from_schema, +) +from .json_serializer import ( + serialize_json_to_string, + serialize_json_to_file, + deserialize_json_from_string, + deserialize_json_from_file, +) +from .media_reference import ( + MediaReference, +) +from . import unknown_schema +from .unknown_schema import ( + UnknownSchema +) diff --git a/pype/vendor/python/python_2/opentimelineio/core/composable.py b/pype/vendor/python/python_2/opentimelineio/core/composable.py new file mode 100644 index 00000000000..78c7fba3492 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/core/composable.py @@ -0,0 +1,141 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Composable class definition. + +An object that can be composed by tracks. +""" + +import weakref + +from . import serializable_object +from . import type_registry + +import copy + + +@type_registry.register_type +class Composable(serializable_object.SerializableObject): + """An object that can be composed by tracks. + + Base class of: + Item + Transition + """ + + name = serializable_object.serializable_field( + "name", + doc="Composable name." + ) + metadata = serializable_object.serializable_field( + "metadata", + doc="Metadata dictionary for this Composable." + ) + + _serializable_label = "Composable.1" + _class_path = "core.Composable" + + def __init__(self, name=None, metadata=None): + super(Composable, self).__init__() + self._parent = None + + # initialize the serializable fields + self.name = name + self.metadata = copy.deepcopy(metadata) if metadata else {} + + @staticmethod + def visible(): + """Return the visibility of the Composable. By default True.""" + + return False + + @staticmethod + def overlapping(): + """Return whether an Item is overlapping. 
By default False.""" + + return False + + # @{ functions to express the composable hierarchy + def _root_parent(self): + return ([self] + self._ancestors())[-1] + + def _ancestors(self): + ancestors = [] + seqi = self + while seqi.parent() is not None: + seqi = seqi.parent() + ancestors.append(seqi) + return ancestors + + def parent(self): + """Return the parent Composable, or None if self has no parent.""" + + return self._parent() if self._parent is not None else None + + def _set_parent(self, new_parent): + if new_parent is not None and self.parent() is not None: + raise ValueError( + "Composable named '{}' is already in a composition named '{}'," + " remove from previous parent before adding to new one." + " Composable: {}, Composition: {}".format( + self.name, + self.parent() is not None and self.parent().name or None, + self, + self.parent() + ) + ) + self._parent = weakref.ref(new_parent) if new_parent is not None else None + + def is_parent_of(self, other): + """Returns true if self is a parent or ancestor of other.""" + + visited = set([]) + while other.parent() is not None and other.parent() not in visited: + if other.parent() is self: + return True + visited.add(other) + other = other.parent() + + return False + + # @} + + def __repr__(self): + return ( + "otio.{}(" + "name={}, " + "metadata={}" + ")".format( + self._class_path, + repr(self.name), + repr(self.metadata) + ) + ) + + def __str__(self): + return "{}({}, {})".format( + self._class_path.split('.')[-1], + self.name, + str(self.metadata) + ) diff --git a/pype/vendor/python/python_2/opentimelineio/core/composition.py b/pype/vendor/python/python_2/opentimelineio/core/composition.py new file mode 100644 index 00000000000..4da5a4b0915 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/core/composition.py @@ -0,0 +1,718 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Composition base class. An object that contains `Items`.""" + +import collections + +from . import ( + serializable_object, + type_registry, + item, + composable, +) + +from .. import ( + opentime, + exceptions +) + + +def _bisect_right( + seq, + tgt, + key_func, + lower_search_bound=0, + upper_search_bound=None +): + """Return the index of the last item in seq such that all e in seq[:index] + have key_func(e) <= tgt, and all e in seq[index:] have key_func(e) > tgt. + + Thus, seq.insert(index, value) will insert value after the rightmost item + such that meets the above condition. 
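+
+    For example (illustrative): with key values [10, 20, 40, 40, 50] and
+    tgt 40, the returned index is 4, since every key in seq[:4] is <= 40
+    and every key in seq[4:] is > 40.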
+ + lower_search_bound and upper_search_bound bound the slice to be searched. + + Assumes that seq is already sorted. + """ + + if lower_search_bound < 0: + raise ValueError('lower_search_bound must be non-negative') + + if upper_search_bound is None: + upper_search_bound = len(seq) + + while lower_search_bound < upper_search_bound: + midpoint_index = (lower_search_bound + upper_search_bound) // 2 + + if tgt < key_func(seq[midpoint_index]): + upper_search_bound = midpoint_index + else: + lower_search_bound = midpoint_index + 1 + + return lower_search_bound + + +def _bisect_left( + seq, + tgt, + key_func, + lower_search_bound=0, + upper_search_bound=None +): + """Return the index of the last item in seq such that all e in seq[:index] + have key_func(e) < tgt, and all e in seq[index:] have key_func(e) >= tgt. + + Thus, seq.insert(index, value) will insert value before the leftmost item + such that meets the above condition. + + lower_search_bound and upper_search_bound bound the slice to be searched. + + Assumes that seq is already sorted. + """ + + if lower_search_bound < 0: + raise ValueError('lower_search_bound must be non-negative') + + if upper_search_bound is None: + upper_search_bound = len(seq) + + while lower_search_bound < upper_search_bound: + midpoint_index = (lower_search_bound + upper_search_bound) // 2 + + if key_func(seq[midpoint_index]) < tgt: + lower_search_bound = midpoint_index + 1 + else: + upper_search_bound = midpoint_index + + return lower_search_bound + + +@type_registry.register_type +class Composition(item.Item, collections.MutableSequence): + """Base class for an OTIO Item that contains other Items. + + Should be subclassed (for example by Track and Stack), not used + directly. + """ + + _serializable_label = "Composition.1" + _composition_kind = "Composition" + _modname = "core" + _composable_base_class = composable.Composable + + def __init__( + self, + name=None, + children=None, + source_range=None, + markers=None, + effects=None, + metadata=None + ): + item.Item.__init__( + self, + name=name, + source_range=source_range, + markers=markers, + effects=effects, + metadata=metadata + ) + collections.MutableSequence.__init__(self) + + # Because we know that all children are unique, we store a set + # of all the children as well to speed up __contain__ checks. + self._child_lookup = set() + + self._children = [] + if children: + # cannot simply set ._children to children since __setitem__ runs + # extra logic (assigning ._parent pointers) and populates the + # internal membership set _child_lookup. + self.extend(children) + + _children = serializable_object.serializable_field( + "children", + list, + "Items contained by this composition." + ) + + @property + def composition_kind(self): + """Returns a label specifying the kind of composition.""" + + return self._composition_kind + + def __str__(self): + return "{}({}, {}, {}, {})".format( + self._composition_kind, + str(self.name), + str(self._children), + str(self.source_range), + str(self.metadata) + ) + + def __repr__(self): + return ( + "otio.{}.{}(" + "name={}, " + "children={}, " + "source_range={}, " + "metadata={}" + ")".format( + self._modname, + self._composition_kind, + repr(self.name), + repr(self._children), + repr(self.source_range), + repr(self.metadata) + ) + ) + + transform = serializable_object.deprecated_field() + + def child_at_time( + self, + search_time, + shallow_search=False, + ): + """Return the child that overlaps with time search_time. + + search_time is in the space of self. 
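+
+        Example (illustrative): for a track holding two 10-frame clips at
+        24fps, child_at_time(opentime.RationalTime(12, 24)) returns the
+        second clip, since frame 12 lies past the first clip's
+        end_time_exclusive of frame 10.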
+ + If shallow_search is false, will recurse into compositions. + """ + + range_map = self.range_of_all_children() + + # find the first item whose end_time_exclusive is after the + first_inside_range = _bisect_left( + seq=self._children, + tgt=search_time, + key_func=lambda child: range_map[child].end_time_exclusive(), + ) + + # find the last item whose start_time is before the + last_in_range = _bisect_right( + seq=self._children, + tgt=search_time, + key_func=lambda child: range_map[child].start_time, + lower_search_bound=first_inside_range, + ) + + # limit the search to children who are in the search_range + possible_matches = self._children[first_inside_range:last_in_range] + + result = None + for thing in possible_matches: + if range_map[thing].overlaps(search_time): + result = thing + break + + # if the search cannot or should not continue + if ( + result is None + or shallow_search + or not hasattr(result, "child_at_time") + ): + return result + + # before you recurse, you have to transform the time into the + # space of the child + child_search_time = self.transformed_time(search_time, result) + + return result.child_at_time(child_search_time, shallow_search) + + def each_child( + self, + search_range=None, + descended_from_type=composable.Composable, + shallow_search=False, + ): + """ Generator that returns each child contained in the composition in + the order in which it is found. + + Arguments: + search_range: if specified, only children whose range overlaps with + the search range will be yielded. + descended_from_type: if specified, only children who are a + descendent of the descended_from_type will be yielded. + shallow_search: if True, will only search children of self, not + and not recurse into children of children. + """ + if search_range: + range_map = self.range_of_all_children() + + # find the first item whose end_time_inclusive is after the + # start_time of the search range + first_inside_range = _bisect_left( + seq=self._children, + tgt=search_range.start_time, + key_func=lambda child: range_map[child].end_time_inclusive(), + ) + + # find the last item whose start_time is before the + # end_time_inclusive of the search_range + last_in_range = _bisect_right( + seq=self._children, + tgt=search_range.end_time_inclusive(), + key_func=lambda child: range_map[child].start_time, + lower_search_bound=first_inside_range, + ) + + # limit the search to children who are in the search_range + children = self._children[first_inside_range:last_in_range] + else: + # otherwise search all the children + children = self._children + + for child in children: + # filter out children who are not descended from the specified type + # shortcut the isinstance if descended_from_type is composable + # (since all objects in compositions are already composables) + is_descendant = descended_from_type == composable.Composable + if is_descendant or isinstance(child, descended_from_type): + yield child + + # if not a shallow_search, for children that are compositions, + # recurse into their children + if not shallow_search and hasattr(child, "each_child"): + + if search_range is not None: + search_range = self.transformed_time_range(search_range, child) + + for valid_child in child.each_child( + search_range, + descended_from_type, + shallow_search + ): + yield valid_child + + def range_of_child_at_index(self, index): + """Return the range of a child item in the time range of this + composition. 
+ + For example, with a track: + [ClipA][ClipB][ClipC] + + The self.range_of_child_at_index(2) will return: + TimeRange(ClipA.duration + ClipB.duration, ClipC.duration) + + To be implemented by subclass of Composition. + """ + + raise NotImplementedError + + def trimmed_range_of_child_at_index(self, index): + """Return the trimmed range of the child item at index in the time + range of this composition. + + For example, with a track: + + [ ] + + [ClipA][ClipB][ClipC] + + The range of index 2 (ClipC) will be just like + range_of_child_at_index() but trimmed based on this Composition's + source_range. + + To be implemented by child. + """ + + raise NotImplementedError + + def range_of_all_children(self): + """Return a dict mapping children to their range in this object.""" + + raise NotImplementedError + + def __copy__(self): + result = super(Composition, self).__copy__() + + # Children are *not* copied with a shallow copy since the meaning is + # ambiguous - they have a parent pointer which would need to be flipped + # or they would need to be copied, which implies a deepcopy(). + # + # This follows from the python documentation on copy/deepcopy: + # https://docs.python.org/2/library/copy.html + # + # """ + # - A shallow copy constructs a new compound object and then (to the + # extent possible) inserts references into it to the objects found in + # the original. + # - A deep copy constructs a new compound object and then, recursively, + # inserts copies into it of the objects found in the original. + # """ + result._children = [] + + return result + + def __deepcopy__(self, md): + result = super(Composition, self).__deepcopy__(md) + + # deepcopy should have already copied the children, so only parent + # pointers need to be updated. + [c._set_parent(result) for c in result._children] + + # we also need to reconstruct the membership set of _child_lookup. + result._child_lookup.update(result._children) + + return result + + def _path_to_child(self, child): + if not isinstance(child, composable.Composable): + raise TypeError( + "An object child of 'Composable' is required," + " not type '{}'".format( + type(child) + ) + ) + + current = child + parents = [] + + while(current is not self): + try: + current = current.parent() + except AttributeError: + raise exceptions.NotAChildError( + "Item '{}' is not a child of '{}'.".format(child, self) + ) + + parents.append(current) + + return parents + + def range_of_child(self, child, reference_space=None): + """The range of the child in relation to another item + (reference_space), not trimmed based on this + composition's source_range. + + Note that reference_space must be in the same timeline as self. + + For example: + + | [-----] | seq + + [-----------------] Clip A + + If ClipA has duration 17, and seq has source_range: 5, duration 15, + seq.range_of_child(Clip A) will return (0, 17) + ignoring the source range of seq. + + To get the range of the child with the source_range applied, use the + trimmed_range_of_child() method. 
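+
+        In doctest form (illustrative; clip_a is a hypothetical child):
+
+        >>> r = seq.range_of_child(clip_a)
+        >>> (r.start_time.value, r.duration.value)
+        (0, 17)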
+ """ + + if not reference_space: + reference_space = self + + parents = self._path_to_child(child) + + current = child + result_range = None + + for parent in parents: + index = parent.index(current) + parent_range = parent.range_of_child_at_index(index) + + if not result_range: + result_range = parent_range + current = parent + continue + + result_range = opentime.TimeRange( + start_time=result_range.start_time + parent_range.start_time, + duration=result_range.duration + ) + current = parent + + if reference_space is not self: + result_range = self.transformed_time_range( + result_range, + reference_space + ) + + return result_range + + def handles_of_child(self, child): + """If media beyond the ends of this child are visible due to adjacent + Transitions (only applicable in a Track) then this will return the + head and tail offsets as a tuple of RationalTime objects. If no handles + are present on either side, then None is returned instead of a + RationalTime. + + Example usage: + >>> head, tail = track.handles_of_child(clip) + >>> if head: + ... print('Do something') + >>> if tail: + ... print('Do something else') + """ + return (None, None) + + def trimmed_range_of_child(self, child, reference_space=None): + """Get range of the child in reference_space coordinates, after the + self.source_range is applied. + + Example + | [-----] | seq + [-----------------] Clip A + + If ClipA has duration 17, and seq has source_range: 5, duration 10, + seq.trimmed_range_of_child(Clip A) will return (5, 10) + Which is trimming the range according to the source_range of seq. + + To get the range of the child without the source_range applied, use the + range_of_child() method. + + Another example + | [-----] | seq source range starts on frame 4 and goes to frame 8 + [ClipA][ClipB] (each 6 frames long) + + >>> seq.range_of_child(CLipA) + 0, duration 6 + >>> seq.trimmed_range_of_child(ClipA): + 4, duration 2 + """ + + if not reference_space: + reference_space = self + + if not reference_space == self: + raise NotImplementedError + + parents = self._path_to_child(child) + + current = child + result_range = None + + for parent in parents: + index = parent.index(current) + parent_range = parent.trimmed_range_of_child_at_index(index) + + if not result_range: + result_range = parent_range + current = parent + continue + + result_range.start_time += parent_range.start_time + current = parent + + if not self.source_range or not result_range: + return result_range + + new_start_time = max( + self.source_range.start_time, + result_range.start_time + ) + + # trimmed out + if new_start_time >= result_range.end_time_exclusive(): + return None + + # compute duration + new_duration = min( + result_range.end_time_exclusive(), + self.source_range.end_time_exclusive() + ) - new_start_time + + if new_duration.value < 0: + return None + + return opentime.TimeRange(new_start_time, new_duration) + + def trim_child_range(self, child_range): + if not self.source_range: + return child_range + + # cropped out entirely + past_end_time = self.source_range.start_time >= child_range.end_time_exclusive() + before_start_time = \ + self.source_range.end_time_exclusive() <= child_range.start_time + + if past_end_time or before_start_time: + return None + + if child_range.start_time < self.source_range.start_time: + child_range = opentime.range_from_start_end_time( + self.source_range.start_time, + child_range.end_time_exclusive() + ) + + if ( + child_range.end_time_exclusive() > + self.source_range.end_time_exclusive() + ): + 
child_range = opentime.range_from_start_end_time( + child_range.start_time, + self.source_range.end_time_exclusive() + ) + + return child_range + + # @{ SerializableObject override. + def _update(self, d): + """Like the dictionary .update() method. + + Update the data dictionary of this SerializableObject with the .data + of d if d is a SerializableObject or if d is a dictionary, d itself. + """ + + # use the parent update function + super(Composition, self)._update(d) + + # ...except for the 'children' field, which needs to run through the + # insert method so that _parent pointers are correctly set on children. + self._children = [] + self.extend(d.get('children', [])) + # @} + + # @{ collections.MutableSequence implementation + def __getitem__(self, item): + return self._children[item] + + def _setitem_slice(self, key, value): + set_value = set(value) + + # check if any members in the new slice are repeated + if len(set_value) != len(value): + raise ValueError( + "Instancing not allowed in Compositions, {} contains repeated" + " items.".format(value) + ) + + old = self._children[key] + if old: + set_old = set(old) + set_outside_old = set(self._children).difference(set_old) + + isect = set_outside_old.intersection(set_value) + if isect: + raise ValueError( + "Attempting to insert duplicates of items {} already " + "present in container, instancing not allowed in " + "Compositions".format(isect) + ) + + # update old parent + for val in old: + val._set_parent(None) + self._child_lookup.remove(val) + + # insert into _children + self._children[key] = value + + # update new parent + if value: + for val in value: + val._set_parent(self) + self._child_lookup.add(val) + + def __setitem__(self, key, value): + # fetch the current thing at that index/slice + old = self._children[key] + + # in the case of key being a slice, old and value are both sequences + if old is value: + return + + if isinstance(key, slice): + return self._setitem_slice(key, value) + + if value in self: + raise ValueError( + "Composable {} already present in this container, instancing" + " not allowed in otio compositions.".format(value) + ) + + # unset the old child's parent and delete the membership entry. + if old is not None: + old._set_parent(None) + self._child_lookup.remove(old) + + # put it into our list of children + self._children[key] = value + + # set the new parent + if value is not None: + value._set_parent(self) + + # put it into our membership tracking set + self._child_lookup.add(value) + + def insert(self, index, item): + """Insert an item into the composition at location `index`.""" + + if not isinstance(item, self._composable_base_class): + raise TypeError( + "Not allowed to insert an object of type {0} into a {1}, only" + " objects descending from {2}. Tried to insert: {3}".format( + type(item), + type(self), + self._composable_base_class, + str(item) + ) + ) + + if item in self: + raise ValueError( + "Composable {} already present in this container, instancing" + " not allowed in otio compositions.".format(item) + ) + + # set the item's parent and add it to our membership tracking and list + # of children + item._set_parent(self) + self._child_lookup.add(item) + self._children.insert(index, item) + + def __contains__(self, item): + """Use our internal membership tracking set to speed up searches.""" + return item in self._child_lookup + + def __len__(self): + """The len() of a Composition is the # of children in it. 
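+
+        For example (illustrative), len(track) for a track holding three
+        clips is 3; only the immediate children are counted.
+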
+ Note that this also means that a Composition with no children + is considered False, so take care to test for "if foo is not None" + versus just "if foo" when the difference matters.""" + return len(self._children) + + def __delitem__(self, key): + # grab the old value + old = self._children[key] + + # remove it from the membership tracking set and clear parent + if old is not None: + if isinstance(key, slice): + for val in old: + self._child_lookup.remove(val) + val._set_parent(None) + else: + self._child_lookup.remove(old) + old._set_parent(None) + + # remove it from our list of children + del self._children[key] diff --git a/pype/vendor/python/python_2/opentimelineio/core/item.py b/pype/vendor/python/python_2/opentimelineio/core/item.py new file mode 100644 index 00000000000..7e035a3a9ec --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/core/item.py @@ -0,0 +1,243 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Implementation of the Item base class. OTIO Objects that contain media.""" + +import copy + +from .. import ( + opentime, + exceptions, +) + +from . import ( + serializable_object, + composable, +) + + +class Item(composable.Composable): + """An Item is a Composable that can be part of a Composition or Timeline. + + More specifically, it is a Composable that has meaningful duration. + + Can also hold effects and markers. + + Base class of: + - Composition (and children) + - Clip + - Gap + """ + + _serializable_label = "Item.1" + _class_path = "core.Item" + + def __init__( + self, + name=None, + source_range=None, + effects=None, + markers=None, + metadata=None, + ): + super(Item, self).__init__(name=name, metadata=metadata) + + self.source_range = copy.deepcopy(source_range) + self.effects = copy.deepcopy(effects) if effects else [] + self.markers = copy.deepcopy(markers) if markers else [] + + name = serializable_object.serializable_field("name", doc="Item name.") + source_range = serializable_object.serializable_field( + "source_range", + opentime.TimeRange, + doc="Range of source to trim to. Can be None or a TimeRange." + ) + + @staticmethod + def visible(): + """Return the visibility of the Item. 
By default True.""" + + return True + + def duration(self): + """Convience wrapper for the trimmed_range.duration of the item.""" + + return self.trimmed_range().duration + + def available_range(self): + """Implemented by child classes, available range of media.""" + + raise NotImplementedError + + def trimmed_range(self): + """The range after applying the source range.""" + if self.source_range is not None: + return copy.copy(self.source_range) + + return self.available_range() + + def visible_range(self): + """The range of this item's media visible to its parent. + Includes handles revealed by adjacent transitions (if any). + This will always be larger or equal to trimmed_range().""" + result = self.trimmed_range() + if self.parent(): + head, tail = self.parent().handles_of_child(self) + if head: + result = opentime.TimeRange( + start_time=result.start_time - head, + duration=result.duration + head + ) + if tail: + result = opentime.TimeRange( + start_time=result.start_time, + duration=result.duration + tail + ) + return result + + def trimmed_range_in_parent(self): + """Find and return the trimmed range of this item in the parent.""" + if not self.parent(): + raise exceptions.NotAChildError( + "No parent of {}, cannot compute range in parent.".format(self) + ) + + return self.parent().trimmed_range_of_child(self) + + def range_in_parent(self): + """Find and return the untrimmed range of this item in the parent.""" + if not self.parent(): + raise exceptions.NotAChildError( + "No parent of {}, cannot compute range in parent.".format(self) + ) + + return self.parent().range_of_child(self) + + def transformed_time(self, t, to_item): + """Converts time t in the coordinate system of self to coordinate + system of to_item. + + Note that self and to_item must be part of the same timeline (they must + have a common ancestor). + + Example: + + 0 20 + [------t----D----------] + [--A-][t----B---][--C--] + 100 101 110 + 101 in B = 6 in D + + t = t argument + """ + + if not isinstance(t, opentime.RationalTime): + raise ValueError( + "transformed_time only operates on RationalTime, not {}".format( + type(t) + ) + ) + + # does not operate in place + result = copy.copy(t) + + if to_item is None: + return result + + root = self._root_parent() + + # transform t to root parent's coordinate system + item = self + while item != root and item != to_item: + + parent = item.parent() + result -= item.trimmed_range().start_time + result += parent.range_of_child(item).start_time + + item = parent + + ancestor = item + + # transform from root parent's coordinate system to to_item + item = to_item + while item != root and item != ancestor: + + parent = item.parent() + result += item.trimmed_range().start_time + result -= parent.range_of_child(item).start_time + + item = parent + + assert(item is ancestor) + + return result + + def transformed_time_range(self, tr, to_item): + """Transforms the timerange tr to the range of child or self to_item.""" + + return opentime.TimeRange( + self.transformed_time(tr.start_time, to_item), + tr.duration + ) + + markers = serializable_object.serializable_field( + "markers", + doc="List of markers on this item." + ) + effects = serializable_object.serializable_field( + "effects", + doc="List of effects on this item." + ) + metadata = serializable_object.serializable_field( + "metadata", + doc="Metadata dictionary for this item." 
+ ) + + def __repr__(self): + return ( + "otio.{}(" + "name={}, " + "source_range={}, " + "effects={}, " + "markers={}, " + "metadata={}" + ")".format( + self._class_path, + repr(self.name), + repr(self.source_range), + repr(self.effects), + repr(self.markers), + repr(self.metadata) + ) + ) + + def __str__(self): + return "{}({}, {}, {}, {}, {})".format( + self._class_path.split('.')[-1], + self.name, + str(self.source_range), + str(self.effects), + str(self.markers), + str(self.metadata) + ) diff --git a/pype/vendor/python/python_2/opentimelineio/core/json_serializer.py b/pype/vendor/python/python_2/opentimelineio/core/json_serializer.py new file mode 100644 index 00000000000..fee82421439 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/core/json_serializer.py @@ -0,0 +1,218 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Serializer for SerializableObjects to JSON + +Used for the otio_json adapter as well as for plugins and manifests. +""" + +import json + +from . import ( + SerializableObject, + type_registry, +) + +from .unknown_schema import UnknownSchema + +from .. import ( + exceptions, + opentime, +) + + +# @TODO: Handle file version drifting + + +class _SerializableObjectEncoder(json.JSONEncoder): + + """ Encoder for the SerializableObject OTIO Class and its descendents. """ + + def default(self, obj): + for typename, encfn in _ENCODER_LIST: + if isinstance(obj, typename): + return encfn(obj) + + return json.JSONEncoder.default(self, obj) + + +def serialize_json_to_string(root, indent=4): + """Serialize a tree of SerializableObject to JSON. + + Returns a JSON string. + """ + + return _SerializableObjectEncoder( + sort_keys=True, + indent=indent + ).encode(root) + + +def serialize_json_to_file(root, to_file): + """ + Serialize a tree of SerializableObject to JSON. + + Writes the result to the given file path. 
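+
+    Example usage (illustrative; the timeline and path are hypothetical):
+
+    >>> serialize_json_to_file(timeline, "/tmp/example.otio")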
+ """ + + content = serialize_json_to_string(root) + + with open(to_file, 'w') as file_contents: + file_contents.write(content) + +# @{ Encoders + + +def _encoded_serializable_object(input_otio): + if not input_otio._serializable_label: + raise exceptions.InvalidSerializableLabelError( + input_otio._serializable_label + ) + result = { + "OTIO_SCHEMA": input_otio._serializable_label, + } + result.update(input_otio._data) + return result + + +def _encoded_unknown_schema_object(input_otio): + orig_label = input_otio.data.get(UnknownSchema._original_label) + if not orig_label: + raise exceptions.InvalidSerializableLabelError( + orig_label + ) + # result is just a dict, not a SerializableObject + result = {} + result.update(input_otio.data) + result["OTIO_SCHEMA"] = orig_label # override the UnknownSchema label + del result[UnknownSchema._original_label] + return result + + +def _encoded_time(input_otio): + return { + "OTIO_SCHEMA": "RationalTime.1", + 'value': input_otio.value, + 'rate': input_otio.rate + } + + +def _encoded_time_range(input_otio): + return { + "OTIO_SCHEMA": "TimeRange.1", + 'start_time': _encoded_time(input_otio.start_time), + 'duration': _encoded_time(input_otio.duration) + } + + +def _encoded_transform(input_otio): + return { + "OTIO_SCHEMA": "TimeTransform.1", + 'offset': _encoded_time(input_otio.offset), + 'scale': input_otio.scale, + 'rate': input_otio.rate + } +# @} + + +# Ordered list of functions for encoding OTIO objects to JSON. +# More particular cases should precede more general cases. +_ENCODER_LIST = [ + (opentime.RationalTime, _encoded_time), + (opentime.TimeRange, _encoded_time_range), + (opentime.TimeTransform, _encoded_transform), + (UnknownSchema, _encoded_unknown_schema_object), + (SerializableObject, _encoded_serializable_object) +] + +# @{ Decoders + + +def _decoded_time(input_otio): + return opentime.RationalTime( + input_otio['value'], + input_otio['rate'] + ) + + +def _decoded_time_range(input_otio): + return opentime.TimeRange( + input_otio['start_time'], + input_otio['duration'] + ) + + +def _decoded_transform(input_otio): + return opentime.TimeTransform( + input_otio['offset'], + input_otio['scale'] + ) +# @} + + +# Map of explicit decoder functions to schema labels (for opentime) +# because opentime is implemented with no knowledge of OTIO, it doesn't use the +# same pattern as SerializableObject. +_DECODER_FUNCTION_MAP = { + 'RationalTime.1': _decoded_time, + 'TimeRange.1': _decoded_time_range, + 'TimeTransform.1': _decoded_transform, +} + + +def _as_otio(dct): + """ Specialized JSON decoder for OTIO base Objects. """ + + if "OTIO_SCHEMA" in dct: + schema_label = dct["OTIO_SCHEMA"] + + if schema_label in _DECODER_FUNCTION_MAP: + return _DECODER_FUNCTION_MAP[schema_label](dct) + + schema_name = type_registry.schema_name_from_label(schema_label) + schema_version = type_registry.schema_version_from_label(schema_label) + del dct["OTIO_SCHEMA"] + + return type_registry.instance_from_schema( + schema_name, + schema_version, + dct + ) + + return dct + + +def deserialize_json_from_string(otio_string): + """ Deserialize a string containing JSON to OTIO objects. """ + + return json.loads(otio_string, object_hook=_as_otio) + + +def deserialize_json_from_file(otio_filepath): + """ Deserialize the file at otio_filepath containing JSON to OTIO. 
""" + + with open(otio_filepath, 'r') as file_contents: + result = deserialize_json_from_string(file_contents.read()) + result._json_path = otio_filepath + return result diff --git a/pype/vendor/python/python_2/opentimelineio/core/media_reference.py b/pype/vendor/python/python_2/opentimelineio/core/media_reference.py new file mode 100644 index 00000000000..ac348526131 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/core/media_reference.py @@ -0,0 +1,102 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Media Reference Classes and Functions.""" + +from .. import ( + opentime, +) +from . import ( + type_registry, + serializable_object, +) + +import copy + + +@type_registry.register_type +class MediaReference(serializable_object.SerializableObject): + """Base Media Reference Class. + + Currently handles string printing the child classes, which expose interface + into its data dictionary. + + The requirement is that the schema is named so that external systems can + fetch the required information correctly. + """ + _serializable_label = "MediaReference.1" + _name = "MediaReference" + + def __init__( + self, + name=None, + available_range=None, + metadata=None + ): + super(MediaReference, self).__init__() + + self.name = name + self.available_range = copy.deepcopy(available_range) + self.metadata = copy.deepcopy(metadata) or {} + + name = serializable_object.serializable_field( + "name", + doc="Name of this media reference." + ) + available_range = serializable_object.serializable_field( + "available_range", + opentime.TimeRange, + doc="Available range of media in this media reference." + ) + metadata = serializable_object.serializable_field( + "metadata", + dict, + doc="Metadata dictionary." 
+ ) + + @property + def is_missing_reference(self): + return False + + def __str__(self): + return "{}({}, {}, {})".format( + self._name, + repr(self.name), + repr(self.available_range), + repr(self.metadata) + ) + + def __repr__(self): + return ( + "otio.schema.{}(" + "name={}," + " available_range={}," + " metadata={}" + ")" + ).format( + self._name, + repr(self.name), + repr(self.available_range), + repr(self.metadata) + ) diff --git a/pype/vendor/python/python_2/opentimelineio/core/serializable_object.py b/pype/vendor/python/python_2/opentimelineio/core/serializable_object.py new file mode 100644 index 00000000000..27032569b0d --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/core/serializable_object.py @@ -0,0 +1,219 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Implements the otio.core.SerializableObject""" + +import copy + +from . import ( + type_registry, +) + + +class SerializableObject(object): + """Base object for things that can be [de]serialized to/from .otio files. + + To define a new child class of this, you inherit from it and also use the + register_type decorator. Then you use the serializable_field function + above to create attributes that can be serialized/deserialized. + + You can use the upgrade_function_for decorator to upgrade older schemas + to newer ones. + + Finally, if you're in the process of upgrading schemas and you want to + catch code that refers to old attribute names, you can use the + deprecated_field function. This raises an exception if code attempts to + read or write to that attribute. After testing and before pushing, please + remove references to deprecated_field. + + For example + + >>> import opentimelineio as otio + + >>> @otio.core.register_type + ... class ExampleChild(otio.core.SerializableObject): + ... _serializable_label = "ExampleChild.7" + ... child_data = otio.core.serializable_field("child_data", int) + + # @TODO: delete once testing shows nothing is referencing this. + >>> old_child_data_name = otio.core.deprecated_field() + + >>> @otio.core.upgrade_function_for(ExampleChild, 3) + ... def upgrade_child_to_three(_data): + ... return {"child_data" : _data["old_child_data_name"]} + """ + + # Every child must define a _serializable_label attribute. + # This attribute is a string in the form of: "SchemaName.VersionNumber" + # Where VersionNumber is an integer. + # You can use the classmethods .schema_name() and .schema_version() to + # query these fields. 
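+    # For example (illustrative), a label of "ExampleChild.7" gives
+    # schema_name() == "ExampleChild" and schema_version() == 7.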
+ _serializable_label = None + _class_path = "core.SerializableObject" + + def __init__(self): + self._data = {} + + # @{ "Reference Type" semantics for SerializableObject + # We think of the SerializableObject as a reference type - by default + # comparison is pointer comparison, but you can use 'is_equivalent_to' to + # check if the contents of the SerializableObject are the same as some + # other SerializableObject's contents. + # + # Implicitly: + # def __eq__(self, other): + # return self is other + + def is_equivalent_to(self, other): + """Returns true if the contents of self and other match.""" + + try: + if self._data == other._data: + return True + + # XXX: Gross hack takes OTIO->JSON String->Python Dictionaries + # + # using the serializer ensures that we only compare fields that are + # serializable, which is how we define equivalence. + # + # we use json.loads() to turn the string back into dictionaries + # so we can use python's equivalence for things like floating + # point numbers (ie 5.0 == 5) without having to do string + # processing. + + from . import json_serializer + import json + + lhs_str = json_serializer.serialize_json_to_string(self) + lhs = json.loads(lhs_str) + + rhs_str = json_serializer.serialize_json_to_string(other) + rhs = json.loads(rhs_str) + + return (lhs == rhs) + except AttributeError: + return False + # @} + + def _update(self, d): + """Like the dictionary .update() method. + + Update the _data dictionary of this SerializableObject with the ._data + of d if d is a SerializableObject or if d is a dictionary, d itself. + """ + + if isinstance(d, SerializableObject): + self._data.update(d._data) + else: + self._data.update(d) + + @classmethod + def schema_name(cls): + return type_registry.schema_name_from_label( + cls._serializable_label + ) + + @classmethod + def schema_version(cls): + return type_registry.schema_version_from_label( + cls._serializable_label + ) + + @property + def is_unknown_schema(self): + # in general, SerializableObject will have a known schema + # but UnknownSchema subclass will redefine this property to be True + return False + + def __copy__(self): + raise NotImplementedError( + "Shallow copying is not permitted. Use a deep copy." + ) + + def __deepcopy__(self, md): + result = type(self)() + result._data = copy.deepcopy(self._data, md) + + return result + + def deepcopy(self): + return self.__deepcopy__({}) + + +def serializable_field(name, required_type=None, doc=None): + """Create a serializable_field for child classes of SerializableObject. + + Convienence function for adding attributes to child classes of + SerializableObject in such a way that they will be serialized/deserialized + automatically. + + Use it like this: + class foo(SerializableObject): + bar = serializable_field("bar", required_type=int, doc="example") + + This would indicate that class "foo" has a serializable field "bar". So: + f = foo() + f.bar = "stuff" + + # serialize & deserialize + otio_json = otio.adapters.from_name("otio") + f2 = otio_json.read_from_string(otio_json.write_to_string(f)) + + # fields should be equal + f.bar == f2.bar + + Additionally, the "doc" field will become the documentation for the + property. 
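+
+    Note that None is always accepted regardless of required_type; only
+    non-None values are type checked by the generated setter.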
+ """ + + def getter(self): + return self._data[name] + + def setter(self, val): + # always allow None values regardless of value of required_type + if required_type is not None and val is not None: + if not isinstance(val, required_type): + raise TypeError( + "attribute '{}' must be an instance of '{}', not: {}".format( + name, + required_type, + type(val) + ) + ) + + self._data[name] = val + + return property(getter, setter, doc=doc) + + +def deprecated_field(): + """ For marking attributes on a SerializableObject deprecated. """ + + def getter(self): + raise DeprecationWarning + + def setter(self, val): + raise DeprecationWarning + + return property(getter, setter, doc="Deprecated field, do not use.") diff --git a/pype/vendor/python/python_2/opentimelineio/core/type_registry.py b/pype/vendor/python/python_2/opentimelineio/core/type_registry.py new file mode 100644 index 00000000000..de4824c42dc --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/core/type_registry.py @@ -0,0 +1,152 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Core type registry system for registering OTIO types for serialization.""" + +from .. import ( + exceptions +) + + +# Types decorate use register_type() to insert themselves into this map +_OTIO_TYPES = {} + +# maps types to a map of versions to upgrade functions +_UPGRADE_FUNCTIONS = {} + + +def schema_name_from_label(label): + """Return the schema name from the label name.""" + + return label.split(".")[0] + + +def schema_version_from_label(label): + """Return the schema version from the label name.""" + + return int(label.split(".")[1]) + + +def schema_label_from_name_version(schema_name, schema_version): + """Return the serializeable object schema label given the name and version.""" + + return "{}.{}".format(schema_name, schema_version) + + +def register_type(classobj, schemaname=None): + """ Register a class to a Schema Label. + + Normally this is used as a decorator. However, in special cases where a + type has been renamed, you might need to register the new type to multiple + schema names. To do this: + + >>> @core.register_type + ... class MyNewClass(...): + ... pass + + >>> core.register_type(MyNewClass, "MyOldName") + + This will parse the old schema name into the new class type. You may also + need to write an upgrade function if the schema itself has changed. 
+ """ + + if schemaname is None: + schemaname = schema_name_from_label(classobj._serializable_label) + + _OTIO_TYPES[schemaname] = classobj + + return classobj + + +def upgrade_function_for(cls, version_to_upgrade_to): + """Decorator for identifying schema class upgrade functions. + + Example + >>> @upgrade_function_for(MyClass, 5) + ... def upgrade_to_version_five(data): + ... pass + + This will get called to upgrade a schema of MyClass to version 5. My class + must be a class deriving from otio.core.SerializableObject. + + The upgrade function should take a single argument - the dictionary to + upgrade, and return a dictionary with the fields upgraded. + + Remember that you don't need to provide an upgrade function for upgrades + that add or remove fields, only for schema versions that change the field + names. + """ + + def decorator_func(func): + """ Decorator for marking upgrade functions """ + + _UPGRADE_FUNCTIONS.setdefault(cls, {})[version_to_upgrade_to] = func + + return func + + return decorator_func + + +def instance_from_schema(schema_name, schema_version, data_dict): + """Return an instance, of the schema from data in the data_dict.""" + + if schema_name not in _OTIO_TYPES: + from .unknown_schema import UnknownSchema + + # create an object of UnknownSchema type to represent the data + schema_label = schema_label_from_name_version(schema_name, schema_version) + data_dict[UnknownSchema._original_label] = schema_label + unknown_label = UnknownSchema._serializable_label + schema_name = schema_name_from_label(unknown_label) + schema_version = schema_version_from_label(unknown_label) + + cls = _OTIO_TYPES[schema_name] + + schema_version = int(schema_version) + if cls.schema_version() < schema_version: + raise exceptions.UnsupportedSchemaError( + "Schema '{}' has highest version available '{}', which is lower " + "than requested schema version '{}'".format( + schema_name, + cls.schema_version(), + schema_version + ) + ) + + if cls.schema_version() != schema_version: + # since the keys are the versions to upgrade to, sorting the keys + # before iterating through them should ensure that upgrade functions + # are called in order. + for version, upgrade_func in sorted( + _UPGRADE_FUNCTIONS[cls].items() + ): + if version < schema_version: + continue + + data_dict = upgrade_func(data_dict) + + obj = cls() + obj._update(data_dict) + + return obj diff --git a/pype/vendor/python/python_2/opentimelineio/core/unknown_schema.py b/pype/vendor/python/python_2/opentimelineio/core/unknown_schema.py new file mode 100644 index 00000000000..94c187710ed --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/core/unknown_schema.py @@ -0,0 +1,50 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. 
+# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +""" +Implementation of the UnknownSchema schema. +""" + +from .serializable_object import SerializableObject +from .type_registry import register_type + + +@register_type +class UnknownSchema(SerializableObject): + """Represents an object whose schema is unknown to us.""" + + _serializable_label = "UnknownSchema.1" + _name = "UnknownSchema" + _original_label = "UnknownSchemaOriginalLabel" + + @property + def is_unknown_schema(self): + return True + + @property + def data(self): + """Exposes the data dictionary of the underlying SerializableObject + directly. + """ + return self._data diff --git a/pype/vendor/python/python_2/opentimelineio/exceptions.py b/pype/vendor/python/python_2/opentimelineio/exceptions.py new file mode 100644 index 00000000000..7726f2ef719 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/exceptions.py @@ -0,0 +1,89 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. 
+# + +"""Exception classes for OpenTimelineIO""" + + +class OTIOError(Exception): + pass + + +class CouldNotReadFileError(OTIOError): + pass + + +class NoKnownAdapterForExtensionError(OTIOError): + pass + + +class ReadingNotSupportedError(OTIOError): + pass + + +class WritingNotSupportedError(OTIOError): + pass + + +class NotSupportedError(OTIOError): + pass + + +class InvalidSerializableLabelError(OTIOError): + pass + + +class CannotComputeAvailableRangeError(OTIOError): + pass + + +class AdapterDoesntSupportFunctionError(OTIOError): + pass + + +class UnsupportedSchemaError(OTIOError): + pass + + +class NotAChildError(OTIOError): + pass + + +class InstancingNotAllowedError(OTIOError): + pass + + +class TransitionFollowingATransitionError(OTIOError): + pass + + +class MisconfiguredPluginError(OTIOError): + pass + + +class CannotTrimTransitionsError(OTIOError): + pass + + +class NoDefaultMediaLinkerError(OTIOError): + pass diff --git a/pype/vendor/python/python_2/opentimelineio/hooks.py b/pype/vendor/python/python_2/opentimelineio/hooks.py new file mode 100644 index 00000000000..311154553d9 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/hooks.py @@ -0,0 +1,174 @@ +# +# Copyright 2018 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +from . import ( + plugins, + core, +) + +__doc__ = """ +HookScripts are plugins that run at defined points ("Hooks"). + +They expose a hook_function with signature: +hook_function :: otio.schema.Timeline, Dict -> otio.schema.Timeline + +Both hook scripts and the hooks they attach to are defined in the plugin +manifest. + +You can attach multiple hook scripts to a hook. They will be executed in list +order, first to last. + +They are defined by the manifests HookScripts and hooks areas. + +>>> +{ + "OTIO_SCHEMA" : "PluginManifest.1", + "hook_scripts" : [ + { + "OTIO_SCHEMA" : "HookScript.1", + "name" : "example hook", + "execution_scope" : "in process", + "filepath" : "example.py" + } + ], + "hooks" : { + "pre_adapter_write" : ["example hook"], + "post_adapter_read" : [] + } +} + +The 'hook_scripts' area loads the python modules with the 'hook_function's to +call in them. The 'hooks' area defines the hooks (and any associated +scripts). You can further query and modify these from python. + +>>> import opentimelineio as otio +... hook_list = otio.hooks.scripts_attached_to("some_hook") # -> ['a','b','c'] +... +... # to run the hook scripts: +... 
otio.hooks.run("some_hook", some_timeline, optional_argument_dict)

+This will pass (some_timeline, optional_argument_dict) to 'a', which will
+return a new timeline that will get passed into 'b' with
+optional_argument_dict, etc.
+
+To edit the order, change the order in the list:
+
+>>> hook_list[0], hook_list[2] = hook_list[2], hook_list[0]
+... print hook_list # ['c','b','a']
+
+Now c will run, then b, then a.
+
+To delete a function from the list:
+
+>>> del hook_list[1]
+"""
+
+
+@core.register_type
+class HookScript(plugins.PythonPlugin):
+    _serializable_label = "HookScript.1"
+
+    def __init__(
+        self,
+        name=None,
+        execution_scope=None,
+        filepath=None,
+    ):
+        """HookScript plugin constructor."""
+
+        super(HookScript, self).__init__(name, execution_scope, filepath)
+
+    def run(self, in_timeline, argument_map={}):
+        """Run the hook_function associated with this plugin."""
+
+        # @TODO: should in_timeline be passed in place? or should a copy be
+        # made?
+        return self._execute_function(
+            "hook_function",
+            in_timeline=in_timeline,
+            argument_map=argument_map
+        )
+
+    def __str__(self):
+        return "HookScript({}, {}, {})".format(
+            repr(self.name),
+            repr(self.execution_scope),
+            repr(self.filepath)
+        )
+
+    def __repr__(self):
+        return (
+            "otio.hooks.HookScript("
+            "name={}, "
+            "execution_scope={}, "
+            "filepath={}"
+            ")".format(
+                repr(self.name),
+                repr(self.execution_scope),
+                repr(self.filepath)
+            )
+        )
+
+
+def names():
+    """Return a list of all the registered hooks."""
+
+    return plugins.ActiveManifest().hooks.keys()
+
+
+def available_hookscript_names():
+    """Return the names of HookScripts that have been registered."""
+
+    return [hs.name for hs in plugins.ActiveManifest().hook_scripts]
+
+
+def available_hookscripts():
+    """Return the HookScript objects that have been registered."""
+    return plugins.ActiveManifest().hook_scripts
+
+
+def scripts_attached_to(hook):
+    """Return an editable list of all the hook scripts that are attached to
+    the specified hook, in execution order. Changing this list will change the
+    order that scripts run in, and deleting a script will remove it from
+    execution.
+    """
+
+    # @TODO: Should this return a copy?
+    return plugins.ActiveManifest().hooks[hook]
+
+
+def run(hook, tl, extra_args=None):
+    """Run all the scripts associated with hook, passing in tl and extra_args.
+
+    Will return the return value of the last hook script.
+
+    If no hook scripts are defined, returns tl.
+    """
+
+    hook_scripts = plugins.ActiveManifest().hooks[hook]
+    for name in hook_scripts:
+        hs = plugins.ActiveManifest().from_name(name, "hook_scripts")
+        tl = hs.run(tl, extra_args)
+    return tl
diff --git a/pype/vendor/python/python_2/opentimelineio/media_linker.py b/pype/vendor/python/python_2/opentimelineio/media_linker.py
new file mode 100644
index 00000000000..25473ac1d59
--- /dev/null
+++ b/pype/vendor/python/python_2/opentimelineio/media_linker.py
@@ -0,0 +1,169 @@
+#
+# Copyright 2017 Pixar Animation Studios
+#
+# Licensed under the Apache License, Version 2.0 (the "Apache License")
+# with the following modification; you may not use this file except in
+# compliance with the Apache License and the following modification to it:
+# Section 6. Trademarks. is deleted and replaced with:
+#
+# 6. Trademarks. This License does not grant permission to use the trade
+# names, trademarks, service marks, or product names of the Licensor
+# and its affiliates, except as required to comply with Section 4(c) of
+# the License and to reproduce the content of the NOTICE file.
+# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +""" MediaLinker plugins fire after an adapter has read a file in order to +produce MediaReferences that point at valid, site specific media. + +They expose a "link_media_reference" function with the signature: +link_media_reference :: otio.schema.Clip -> otio.core.MediaReference + +or: + def linked_media_reference(from_clip): + result = otio.core.MediaReference() # whichever subclass + # do stuff + return result + +To get context information, they can inspect the metadata on the clip and on +the media reference. The .parent() method can be used to find the containing +track if metadata is stored there. + +Please raise an instance (or child instance) of +otio.exceptions.CannotLinkMediaError() if there is a problem linking the media. + +For example: + for clip in timeline.each_clip(): + try: + new_mr = otio.media_linker.linked_media_reference(clip) + clip.media_reference = new_mr + except otio.exceptions.CannotLinkMediaError: + # or report the error + pass +""" + +import os + +from . import ( + exceptions, + plugins, + core, +) + + +# Enum describing different media linker policies +class MediaLinkingPolicy: + DoNotLinkMedia = "__do_not_link_media" + ForceDefaultLinker = "__default" + + +# @TODO: wrap this up in the plugin system somehow? automatically generate? +def available_media_linker_names(): + """Return a string list of the available media linker plugins.""" + + return [str(adp.name) for adp in plugins.ActiveManifest().media_linkers] + + +def from_name(name): + """Fetch the media linker object by the name of the adapter directly.""" + + if name == MediaLinkingPolicy.ForceDefaultLinker or not name: + name = os.environ.get("OTIO_DEFAULT_MEDIA_LINKER", None) + + if not name: + return None + + # @TODO: make this handle the enums + try: + return plugins.ActiveManifest().from_name( + name, + kind_list="media_linkers" + ) + except exceptions.NotSupportedError: + raise exceptions.NotSupportedError( + "media linker not supported: {}, available: {}".format( + name, + available_media_linker_names() + ) + ) + + +def default_media_linker(): + try: + return os.environ['OTIO_DEFAULT_MEDIA_LINKER'] + except KeyError: + raise exceptions.NoDefaultMediaLinkerError( + "No default Media Linker set in $OTIO_DEFAULT_MEDIA_LINKER" + ) + + +def linked_media_reference( + target_clip, + media_linker_name=MediaLinkingPolicy.ForceDefaultLinker, + media_linker_argument_map=None +): + media_linker = from_name(media_linker_name) + + if not media_linker: + return target_clip + + # @TODO: connect this argument map up to the function call through to the + # real linker + if not media_linker_argument_map: + media_linker_argument_map = {} + + return media_linker.link_media_reference( + target_clip, + media_linker_argument_map + ) + + +@core.register_type +class MediaLinker(plugins.PythonPlugin): + _serializable_label = "MediaLinker.1" + + def __init__( + self, + name=None, + execution_scope=None, + filepath=None, + ): + super(MediaLinker, self).__init__(name, execution_scope, filepath) + + def link_media_reference(self, in_clip, 
media_linker_argument_map=None): + media_linker_argument_map = media_linker_argument_map or {} + + return self._execute_function( + "link_media_reference", + in_clip=in_clip, + media_linker_argument_map=media_linker_argument_map + ) + + def __str__(self): + return "MediaLinker({}, {}, {})".format( + repr(self.name), + repr(self.execution_scope), + repr(self.filepath) + ) + + def __repr__(self): + return ( + "otio.media_linker.MediaLinker(" + "name={}, " + "execution_scope={}, " + "filepath={}" + ")".format( + repr(self.name), + repr(self.execution_scope), + repr(self.filepath) + ) + ) diff --git a/pype/vendor/python/python_2/opentimelineio/opentime.py b/pype/vendor/python/python_2/opentimelineio/opentime.py new file mode 100644 index 00000000000..e7e58b9475a --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/opentime.py @@ -0,0 +1,856 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Library for expressing and transforming time. + +NOTE: This module is written specifically with a future port to C in mind. +When ported to C, Time will be a struct and these functions should be very +simple. +""" + +import math +import copy + + +VALID_NON_DROPFRAME_TIMECODE_RATES = ( + 1, + 12, + 23.976, + 23.98, + (24000 / 1001.0), + 24, + 25, + 30, + 29.97, + (30000 / 1001.0), + 48, + 50, + 59.94, + (60000 / 1001.0), + 60, +) + +VALID_DROPFRAME_TIMECODE_RATES = ( + 29.97, + (30000 / 1001.0), + 59.94, + (60000 / 1001.0), +) + +VALID_TIMECODE_RATES = ( + VALID_NON_DROPFRAME_TIMECODE_RATES + VALID_DROPFRAME_TIMECODE_RATES) + +_fn_cache = object.__setattr__ + + +class RationalTime(object): + """ Represents an instantaneous point in time, value * (1/rate) seconds + from time 0seconds. 
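+
+    For example, RationalTime(24, 24) and RationalTime(48, 48) both
+    represent the instant 1 second after time 0.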
+    """
+
+    # Locks RationalTime instances to only these attributes
+    __slots__ = ['value', 'rate']
+
+    def __init__(self, value=0.0, rate=1.0):
+        _fn_cache(self, "value", value)
+        _fn_cache(self, "rate", rate)
+
+    def __setattr__(self, key, val):
+        """Enforces immutability """
+        raise AttributeError("RationalTime is Immutable.")
+
+    def __copy__(self, memodict=None):
+        return RationalTime(self.value, self.rate)
+
+    # Always deepcopy, since we want this class to behave like a value type
+    __deepcopy__ = __copy__
+
+    def rescaled_to(self, new_rate):
+        """Returns the time for this time converted to new_rate"""
+
+        try:
+            new_rate = new_rate.rate
+        except AttributeError:
+            pass
+
+        if self.rate == new_rate:
+            return copy.copy(self)
+
+        return RationalTime(
+            self.value_rescaled_to(new_rate),
+            new_rate
+        )
+
+    def value_rescaled_to(self, new_rate):
+        """Returns the time value for self converted to new_rate"""
+
+        try:
+            new_rate = new_rate.rate
+        except AttributeError:
+            pass
+
+        if new_rate == self.rate:
+            return self.value
+
+        # TODO: This math probably needs some overrun protection
+        try:
+            return float(self.value) * float(new_rate) / float(self.rate)
+        except (AttributeError, TypeError, ValueError):
+            raise TypeError(
+                "Sorry, RationalTime cannot be rescaled to a value of type "
+                "'{}', only RationalTime and numbers are supported.".format(
+                    type(new_rate)
+                )
+            )
+
+    def almost_equal(self, other, delta=0.0):
+        try:
+            rescaled_value = self.value_rescaled_to(other.rate)
+            return abs(rescaled_value - other.value) <= delta
+
+        except AttributeError:
+            return False
+
+    def __add__(self, other):
+        """Returns a RationalTime object that is the sum of self and other.
+
+        If self and other have differing time rates, the result will have the
+        rate of the faster time.
+        """
+
+        try:
+            if self.rate == other.rate:
+                return RationalTime(self.value + other.value, self.rate)
+        except AttributeError:
+            if not isinstance(other, RationalTime):
+                raise TypeError(
+                    "RationalTime may only be added to other objects of type "
+                    "RationalTime, not {}.".format(type(other))
+                )
+            raise
+
+        if self.rate > other.rate:
+            scale = self.rate
+            value = self.value + other.value_rescaled_to(scale)
+        else:
+            scale = other.rate
+            value = self.value_rescaled_to(scale) + other.value
+
+        return RationalTime(value, scale)
+
+    # because RationalTime is immutable, += is sugar around +
+    __iadd__ = __add__
+
+    def __sub__(self, other):
+        """Returns a RationalTime object that is self - other.
+
+        If self and other have differing time rates, the result will have the
+        rate of the faster time.
+        """
+
+        try:
+            if self.rate == other.rate:
+                return RationalTime(self.value - other.value, self.rate)
+        except AttributeError:
+            if not isinstance(other, RationalTime):
+                raise TypeError(
+                    "Only objects of type RationalTime may be subtracted "
+                    "from RationalTime, not {}.".format(type(other))
+                )
+            raise
+
+        if self.rate > other.rate:
+            scale = self.rate
+            value = self.value - other.value_rescaled_to(scale)
+        else:
+            scale = other.rate
+            value = self.value_rescaled_to(scale) - other.value
+
+        return RationalTime(value=value, rate=scale)
+
+    def _comparable_floats(self, other):
+        """Returns a tuple of two floats, (self, other), which are suitable
+        for comparison.
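+        (For example, RationalTime(12, 24) and RationalTime(24, 48) both
+        compare as 0.5 seconds.)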
+
+        If other is not of a type that can be compared, TypeError is raised.
+        """
+        try:
+            return (
+                float(self.value) / self.rate,
+                float(other.value) / other.rate
+            )
+        except AttributeError:
+            if not isinstance(other, RationalTime):
+                raise TypeError(
+                    "RationalTime can only be compared to other objects of type "
+                    "RationalTime, not {}".format(type(other))
+                )
+            raise
+
+    def __gt__(self, other):
+        f_self, f_other = self._comparable_floats(other)
+        return f_self > f_other
+
+    def __lt__(self, other):
+        f_self, f_other = self._comparable_floats(other)
+        return f_self < f_other
+
+    def __le__(self, other):
+        f_self, f_other = self._comparable_floats(other)
+        return f_self <= f_other
+
+    def __ge__(self, other):
+        f_self, f_other = self._comparable_floats(other)
+        return f_self >= f_other
+
+    def __repr__(self):
+        return (
+            "otio.opentime.RationalTime(value={value},"
+            " rate={rate})".format(
+                value=repr(self.value),
+                rate=repr(self.rate),
+            )
+        )
+
+    def __str__(self):
+        return "RationalTime({}, {})".format(
+            str(self.value),
+            str(self.rate)
+        )
+
+    def __eq__(self, other):
+        try:
+            return self.value_rescaled_to(other.rate) == other.value
+        except AttributeError:
+            return False
+
+    def __ne__(self, other):
+        return not (self == other)
+
+    def __hash__(self):
+        return hash((self.value, self.rate))
+
+
+class TimeTransform(object):
+    """1D Transform for RationalTime. Has offset and scale."""
+
+    def __init__(self, offset=RationalTime(), scale=1.0, rate=None):
+        self.offset = copy.copy(offset)
+        self.scale = float(scale)
+        self.rate = float(rate) if rate else None
+
+    def applied_to(self, other):
+        if isinstance(other, TimeRange):
+            return range_from_start_end_time(
+                start_time=self.applied_to(other.start_time),
+                end_time_exclusive=self.applied_to(other.end_time_exclusive())
+            )
+
+        target_rate = self.rate if self.rate is not None else other.rate
+        if isinstance(other, TimeTransform):
+            return TimeTransform(
+                offset=self.offset + other.offset,
+                scale=self.scale * other.scale,
+                rate=target_rate
+            )
+        elif isinstance(other, RationalTime):
+            value = other.value * self.scale
+            result = RationalTime(value, other.rate) + self.offset
+            if target_rate is not None:
+                result = result.rescaled_to(target_rate)
+
+            return result
+        else:
+            raise TypeError(
+                "TimeTransform can only be applied to a TimeTransform or "
+                "RationalTime, not a {}".format(type(other))
+            )
+
+    def __repr__(self):
+        return (
+            "otio.opentime.TimeTransform(offset={}, scale={}, rate={})".format(
+                repr(self.offset),
+                repr(self.scale),
+                repr(self.rate)
+            )
+        )
+
+    def __str__(self):
+        return (
+            "TimeTransform({}, {}, {})".format(
+                str(self.offset),
+                str(self.scale),
+                str(self.rate)
+            )
+        )
+
+    def __eq__(self, other):
+        try:
+            return (
+                (self.offset, self.scale, self.rate) ==
+                (other.offset, other.scale, other.rate)
+            )
+        except AttributeError:
+            return False
+
+    def __ne__(self, other):
+        return not (self == other)
+
+    def __hash__(self):
+        return hash((self.offset, self.scale, self.rate))
+
+
+class BoundStrategy(object):
+    """Different bounding strategies for TimeRange """
+
+    Free = 1
+    Clamp = 2
+
+
+class TimeRange(object):
+    """Contains a range of time, starting (and including) start_time and
+    lasting duration.value * (1/duration.rate) seconds.
+
+    A 0 duration TimeRange is the same as a RationalTime, and contains only the
+    start_time of the TimeRange.
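+
+    For example, TimeRange(RationalTime(0, 24), RationalTime(10, 24))
+    covers frames 0 through 9 at 24 fps; frame 10 is the first sample
+    outside the range.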
+    """
+
+    __slots__ = ['start_time', 'duration']
+
+    def __init__(self, start_time=None, duration=None):
+        if not isinstance(start_time, RationalTime) and start_time is not None:
+            raise TypeError(
+                "start_time must be a RationalTime, not "
+                "'{}'".format(start_time)
+            )
+        if (
+            duration is not None and (
+                not isinstance(duration, RationalTime)
+                or duration.value < 0.0
+            )
+        ):
+            raise TypeError(
+                "duration must be a RationalTime with value >= 0, not "
+                "'{}'".format(duration)
+            )
+
+        # if the start time has not been passed in
+        if not start_time:
+            if duration:
+                # ...get the rate from the duration
+                start_time = RationalTime(rate=duration.rate)
+            else:
+                # otherwise use the default
+                start_time = RationalTime()
+        _fn_cache(self, "start_time", copy.copy(start_time))
+
+        if not duration:
+            # ...get the rate from the start_time
+            duration = RationalTime(rate=start_time.rate)
+        _fn_cache(self, "duration", copy.copy(duration))
+
+    def __setattr__(self, key, val):
+        raise AttributeError("TimeRange is Immutable.")
+
+    def __copy__(self, memodict=None):
+        # Construct a new one directly to avoid the overhead of deepcopy
+        return TimeRange(
+            copy.copy(self.start_time),
+            copy.copy(self.duration)
+        )
+
+    # Always deepcopy, since we want this class to behave like a value type
+    __deepcopy__ = __copy__
+
+    def end_time_inclusive(self):
+        """The time of the last sample that contains data in the TimeRange.
+
+        If the TimeRange goes from (0, 24) w/ duration (10, 24), this will be
+        (9, 24)
+
+        If the TimeRange goes from (0, 24) w/ duration (10.5, 24):
+        (10, 24)
+
+        In other words, the last frame with data (however fractional).
+        """
+
+        if (
+            self.end_time_exclusive() - self.start_time.rescaled_to(self.duration)
+        ).value > 1:
+
+            result = (
+                self.end_time_exclusive() - RationalTime(1, self.start_time.rate)
+            )
+
+            # if the duration's value has a fractional component
+            if self.duration.value != math.floor(self.duration.value):
+                result = RationalTime(
+                    math.floor(self.end_time_exclusive().value),
+                    result.rate
+                )
+
+            return result
+        else:
+            return copy.deepcopy(self.start_time)
+
+    def end_time_exclusive(self):
+        """Time of the first sample outside the time range.
+
+        If Start Frame is 10 and duration is 5, then end_time_exclusive is 15,
+        even though the last time with data in this range is 14.
+
+        If Start Frame is 10 and duration is 5.5, then end_time_exclusive is
+        15.5, even though the last time with data in this range is 15.
+        """
+
+        return self.duration + self.start_time.rescaled_to(self.duration)
+
+    def extended_by(self, other):
+        """Construct a new TimeRange that is this one extended by another."""
+
+        if not isinstance(other, TimeRange):
+            raise TypeError(
+                "extended_by requires other to be a TimeRange, not a '{}'".format(
+                    type(other)
+                )
+            )
+
+        start_time = min(self.start_time, other.start_time)
+        new_end_time = max(
+            self.end_time_exclusive(),
+            other.end_time_exclusive()
+        )
+        duration = duration_from_start_end_time(start_time, new_end_time)
+        return TimeRange(start_time, duration)
+
+    # @TODO: remove?
+    def clamped(
+        self,
+        other,
+        start_bound=BoundStrategy.Free,
+        end_bound=BoundStrategy.Free
+    ):
+        """Clamp 'other' (either a RationalTime or a TimeRange), according to
+        self.start_time/end_time_exclusive and the bound arguments.
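+
+        For example, for a range starting at RationalTime(0, 24), clamping
+        RationalTime(-1, 24) with start_bound=BoundStrategy.Clamp yields
+        RationalTime(0, 24).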
+        """
+
+        if isinstance(other, RationalTime):
+            if start_bound == BoundStrategy.Clamp:
+                other = max(other, self.start_time)
+            if end_bound == BoundStrategy.Clamp:
+                # @TODO: this should probably be the end_time_inclusive,
+                # not exclusive
+                other = min(other, self.end_time_exclusive())
+            return other
+        elif isinstance(other, TimeRange):
+            start_time = other.start_time
+            end = other.end_time_exclusive()
+            if start_bound == BoundStrategy.Clamp:
+                start_time = max(other.start_time, self.start_time)
+            if end_bound == BoundStrategy.Clamp:
+                end = min(self.end_time_exclusive(), end)
+            duration = duration_from_start_end_time(start_time, end)
+            return TimeRange(start_time, duration)
+        else:
+            raise TypeError(
+                "clamped only accepts a RationalTime or a TimeRange, not "
+                "{}".format(type(other))
+            )
+        return self
+
+    def contains(self, other):
+        """Return true if self completely contains other.
+
+        (RationalTime or TimeRange)
+        """
+
+        if isinstance(other, RationalTime):
+            return (
+                self.start_time <= other and other < self.end_time_exclusive())
+        elif isinstance(other, TimeRange):
+            return (
+                self.start_time <= other.start_time and
+                self.end_time_exclusive() >= other.end_time_exclusive()
+            )
+        raise TypeError(
+            "contains only accepts an otio.opentime.RationalTime or "
+            "otio.opentime.TimeRange, not {}".format(type(other))
+        )
+
+    def overlaps(self, other):
+        """Return true if self overlaps any part of other.
+
+        (RationalTime or TimeRange)
+        """
+
+        if isinstance(other, RationalTime):
+            return self.contains(other)
+        elif isinstance(other, TimeRange):
+            return (
+                (
+                    self.start_time < other.end_time_exclusive() and
+                    other.start_time < self.end_time_exclusive()
+                )
+            )
+        raise TypeError(
+            "overlaps only accepts an otio.opentime.RationalTime or "
+            "otio.opentime.TimeRange, not {}".format(type(other))
+        )
+
+    def __hash__(self):
+        return hash((self.start_time, self.duration))
+
+    def __eq__(self, rhs):
+        try:
+            return (
+                (self.start_time, self.duration) ==
+                (rhs.start_time, rhs.duration)
+            )
+        except AttributeError:
+            return False
+
+    def __ne__(self, rhs):
+        return not (self == rhs)
+
+    def __repr__(self):
+        return (
+            "otio.opentime.TimeRange(start_time={}, duration={})".format(
+                repr(self.start_time),
+                repr(self.duration),
+            )
+        )
+
+    def __str__(self):
+        return (
+            "TimeRange({}, {})".format(
+                str(self.start_time),
+                str(self.duration),
+            )
+        )
+
+
+def from_frames(frame, fps):
+    """Turn a frame number and fps into a time object.
+    :param frame: (:class:`int`) Frame number.
+    :param fps: (:class:`float`) Frame-rate for the (:class:`RationalTime`) instance.
+
+    :return: (:class:`RationalTime`) Instance for the frame and fps provided.
+    """
+
+    return RationalTime(int(frame), fps)
+
+
+def to_frames(time_obj, fps=None):
+    """Turn a RationalTime into a frame number."""
+
+    if not fps or time_obj.rate == fps:
+        return int(time_obj.value)
+
+    return int(time_obj.value_rescaled_to(fps))
+
+
+def validate_timecode_rate(rate):
+    """Check if rate is of valid type and value.
+    Raises (:class:`TypeError`) for wrong type of rate.
+    Raises (:class:`ValueError`) for invalid rate value.
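+    (For example, 24 and 29.97 are valid rates; 31 raises ValueError.)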
+
+    :param rate: (:class:`int`) or (:class:`float`) The frame rate in question
+    """
+    if not isinstance(rate, (int, float)):
+        raise TypeError(
+            "rate must be an int or a float, not {t}".format(t=type(rate)))
+
+    if rate not in VALID_TIMECODE_RATES:
+        raise ValueError(
+            '{rate} is not a valid frame rate. '
+            'Please use one of these: {valid}'.format(
+                rate=rate,
+                valid=VALID_TIMECODE_RATES))
+
+
+def from_timecode(timecode_str, rate):
+    """Convert a timecode string into a RationalTime.
+
+    :param timecode_str: (:class:`str`) A colon-delimited timecode.
+    :param rate: (:class:`float`) The frame-rate to calculate timecode in
+        terms of.
+
+    :return: (:class:`RationalTime`) Instance for the timecode provided.
+    """
+    # Validate rate
+    validate_timecode_rate(rate)
+
+    # Check if rate is drop frame
+    rate_is_dropframe = rate in VALID_DROPFRAME_TIMECODE_RATES
+
+    # Make sure only DF timecodes are treated as such
+    treat_as_df = rate_is_dropframe and ';' in timecode_str
+
+    # Check if timecode indicates drop frame
+    if ';' in timecode_str:
+        if not rate_is_dropframe:
+            raise ValueError(
+                'Timecode "{}" indicates drop-frame rate '
+                'due to the ";" frame divider. '
+                'Passed rate ({}) is of non-drop-frame rate. '
+                'Valid drop-frame rates are: {}'.format(
+                    timecode_str,
+                    rate,
+                    VALID_DROPFRAME_TIMECODE_RATES))
+        else:
+            timecode_str = timecode_str.replace(';', ':')
+
+    hours, minutes, seconds, frames = timecode_str.split(":")
+
+    # Timecode is declared in terms of nominal fps
+    nominal_fps = int(math.ceil(rate))
+
+    if int(frames) >= nominal_fps:
+        raise ValueError(
+            'Frame rate mismatch. Timecode "{}" has frames beyond {}.'.format(
+                timecode_str, nominal_fps - 1))
+
+    dropframes = 0
+    if treat_as_df:
+        if rate == 29.97:
+            dropframes = 2
+
+        elif rate == 59.94:
+            dropframes = 4
+
+    # To use for drop frame compensation
+    total_minutes = int(hours) * 60 + int(minutes)
+
+    # convert to frames
+    value = (
+        ((total_minutes * 60) + int(seconds)) * nominal_fps + int(frames)) - \
+        (dropframes * (total_minutes - (total_minutes // 10)))
+
+    return RationalTime(value, rate)
+
+
+def to_timecode(time_obj, rate=None, drop_frame=None):
+    """Convert a RationalTime into a timecode string.
+
+    :param time_obj: (:class:`RationalTime`) instance to express as timecode.
+    :param rate: (:class:`float`) The frame-rate to calculate timecode in
+        terms of. (Default time_obj.rate)
+    :param drop_frame: (:class:`bool`) ``True`` to make drop-frame timecode,
+        ``False`` for non-drop. If left ``None``, a format will be guessed
+        based on rate.
+
+    :return: (:class:`str`) The timecode.
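+
+    For example, to_timecode(RationalTime(24, 24)) returns "00:00:01:00";
+    with rate 29.97 and drop_frame left as None, a drop-frame timecode
+    using the ";" divider is produced.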
+ """ + if time_obj is None: + return None + + rate = rate or time_obj.rate + + # Validate rate + validate_timecode_rate(rate) + + # Check if rate is drop frame + rate_is_dropframe = rate in VALID_DROPFRAME_TIMECODE_RATES + if drop_frame and not rate_is_dropframe: + raise ValueError( + "Invalid rate for drop-frame timecode {}".format(time_obj.rate) + ) + + # if in auto-detect for DFTC, use the rate to decide + if drop_frame is None: + drop_frame = rate_is_dropframe + + dropframes = 0 + if drop_frame: + if rate in (29.97, (30000 / 1001.0)): + dropframes = 2 + + elif rate == 59.94: + dropframes = 4 + + # For non-dftc, use the integral frame rate + if not drop_frame: + rate = round(rate) + + # Number of frames in an hour + frames_per_hour = int(round(rate * 60 * 60)) + # Number of frames in a day - timecode rolls over after 24 hours + frames_per_24_hours = frames_per_hour * 24 + # Number of frames per ten minutes + frames_per_10_minutes = int(round(rate * 60 * 10)) + # Number of frames per minute is the round of the framerate * 60 minus + # the number of dropped frames + frames_per_minute = int(round(rate) * 60) - dropframes + + value = time_obj.value + + if value < 0: + raise ValueError( + "Negative values are not supported for converting to timecode.") + + # If frame_number is greater than 24 hrs, next operation will rollover + # clock + value %= frames_per_24_hours + + if drop_frame: + d = value // frames_per_10_minutes + m = value % frames_per_10_minutes + if m > dropframes: + value += (dropframes * 9 * d) + \ + dropframes * ((m - dropframes) // frames_per_minute) + else: + value += dropframes * 9 * d + + nominal_fps = int(math.ceil(rate)) + + frames = value % nominal_fps + seconds = (value // nominal_fps) % 60 + minutes = ((value // nominal_fps) // 60) % 60 + hours = (((value // nominal_fps) // 60) // 60) + + tc = "{HH:02d}:{MM:02d}:{SS:02d}{div}{FF:02d}" + + return tc.format( + HH=int(hours), + MM=int(minutes), + SS=int(seconds), + div=drop_frame and ";" or ":", + FF=int(frames)) + + +def from_time_string(time_str, rate): + """Convert a time with microseconds string into a RationalTime. + + :param time_str: (:class:`str`) A HH:MM:ss.ms time. + :param rate: (:class:`float`) The frame-rate to calculate timecode in + terms of. + + :return: (:class:`RationalTime`) Instance for the timecode provided. + """ + + if ';' in time_str: + raise ValueError('Drop-Frame timecodes not supported.') + + hours, minutes, seconds = time_str.split(":") + microseconds = "0" + if '.' in seconds: + seconds, microseconds = str(seconds).split('.') + microseconds = microseconds[0:6] + seconds = '.'.join([seconds, microseconds]) + time_obj = from_seconds( + float(seconds) + + (int(minutes) * 60) + + (int(hours) * 60 * 60) + ) + return time_obj.rescaled_to(rate) + + +def to_time_string(time_obj): + """ + Convert this timecode to time with microsecond, as formated in FFMPEG + + :return: Number formated string of time + """ + if time_obj is None: + return None + # convert time object to seconds + seconds = to_seconds(time_obj) + + # reformat in time string + time_units_per_minute = 60 + time_units_per_hour = time_units_per_minute * 60 + time_units_per_day = time_units_per_hour * 24 + + days, hour_units = divmod(seconds, time_units_per_day) + hours, minute_units = divmod(hour_units, time_units_per_hour) + minutes, seconds = divmod(minute_units, time_units_per_minute) + microseconds = "0" + seconds = str(seconds) + if '.' 
in seconds: + seconds, microseconds = str(seconds).split('.') + + # TODO: There are some rollover policy issues for days and hours, + # We need to research these + + return "{hours}:{minutes}:{seconds}.{microseconds}".format( + hours="{n:0{width}d}".format(n=int(hours), width=2), + minutes="{n:0{width}d}".format(n=int(minutes), width=2), + seconds="{n:0{width}d}".format(n=int(seconds), width=2), + microseconds=microseconds[0:6] + ) + + +def from_seconds(seconds): + """Convert a number of seconds into RationalTime""" + + # Note: in the future we may consider adding a preferred rate arg + time_obj = RationalTime(value=seconds, rate=1) + + return time_obj + + +def to_seconds(time_obj): + """ Convert a RationalTime into float seconds """ + return time_obj.value_rescaled_to(1) + + +def from_footage(footage): + raise NotImplementedError + + +def to_footage(time_obj): + raise NotImplementedError + + +def duration_from_start_end_time(start_time, end_time_exclusive): + """Compute duration of samples from first to last. This is not the same as + distance. For example, the duration of a clip from frame 10 to frame 15 + is 6 frames. Result in the rate of start_time. + """ + + # @TODO: what to do when start_time > end_time_exclusive? + + if start_time.rate == end_time_exclusive.rate: + return RationalTime( + end_time_exclusive.value - start_time.value, + start_time.rate + ) + else: + return RationalTime( + ( + end_time_exclusive.value_rescaled_to(start_time) + - start_time.value + ), + start_time.rate + ) + + +# @TODO: create range from start/end [in,ex]clusive +def range_from_start_end_time(start_time, end_time_exclusive): + """Create a TimeRange from start and end RationalTimes.""" + + return TimeRange( + start_time, + duration=duration_from_start_end_time(start_time, end_time_exclusive) + ) diff --git a/pype/vendor/python/python_2/opentimelineio/plugins/__init__.py b/pype/vendor/python/python_2/opentimelineio/plugins/__init__.py new file mode 100644 index 00000000000..dedb3da37e2 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/plugins/__init__.py @@ -0,0 +1,33 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. 
+# + +"""Plugin system for OTIO""" + +# flake8: noqa + +from .python_plugin import PythonPlugin +from .manifest import ( + manifest_from_file, + ActiveManifest, +) diff --git a/pype/vendor/python/python_2/opentimelineio/plugins/manifest.py b/pype/vendor/python/python_2/opentimelineio/plugins/manifest.py new file mode 100644 index 00000000000..2a769effec3 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/plugins/manifest.py @@ -0,0 +1,282 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Implementation of an adapter registry system for OTIO.""" + +import inspect +import logging +import os + +# on some python interpreters, pkg_resources is not available +try: + import pkg_resources +except ImportError: + pkg_resources = None + +from .. import ( + core, + exceptions, +) + + +def manifest_from_file(filepath): + """Read the .json file at filepath into a Manifest object.""" + + result = core.deserialize_json_from_file(filepath) + result.source_files.append(filepath) + result._update_plugin_source(filepath) + return result + + +def manifest_from_string(input_string): + """Deserialize the json string into a manifest object.""" + + result = core.deserialize_json_from_string(input_string) + + # try and get the caller's name + name = "unknown" + stack = inspect.stack() + if len(stack) > 1 and len(stack[1]) > 3: + # filename function name + name = "{}:{}".format(stack[1][1], stack[1][3]) + + # set the value in the manifest + src_string = "call to manifest_from_string() in " + name + result.source_files.append(src_string) + result._update_plugin_source(src_string) + + return result + + +@core.register_type +class Manifest(core.SerializableObject): + """Defines an OTIO plugin Manifest. + + This is an internal OTIO implementation detail. A manifest tracks a + collection of adapters and allows finding specific adapters by suffix + + For writing your own adapters, consult: + https://opentimelineio.readthedocs.io/en/latest/tutorials/write-an-adapter.html# + """ + _serializable_label = "PluginManifest.1" + + def __init__(self): + super(Manifest, self).__init__() + self.adapters = [] + self.schemadefs = [] + self.media_linkers = [] + self.source_files = [] + + # hook system stuff + self.hooks = {} + self.hook_scripts = [] + + adapters = core.serializable_field( + "adapters", + type([]), + "Adapters this manifest describes." + ) + schemadefs = core.serializable_field( + "schemadefs", + type([]), + "Schemadefs this manifest describes." 
+ ) + media_linkers = core.serializable_field( + "media_linkers", + type([]), + "Media Linkers this manifest describes." + ) + hooks = core.serializable_field( + "hooks", + type({}), + "Hooks that hooks scripts can be attached to." + ) + hook_scripts = core.serializable_field( + "hook_scripts", + type([]), + "Scripts that can be attached to hooks." + ) + + def extend(self, another_manifest): + """ + Extend the adapters, schemadefs, and media_linkers lists of this manifest + by appending the contents of the corresponding lists of another_manifest. + """ + if another_manifest: + self.adapters.extend(another_manifest.adapters) + self.schemadefs.extend(another_manifest.schemadefs) + self.media_linkers.extend(another_manifest.media_linkers) + self.hook_scripts.extend(another_manifest.hook_scripts) + + for trigger_name, hooks in another_manifest.hooks.items(): + if trigger_name in self.hooks: + self.hooks[trigger_name].extend(hooks) + + def _update_plugin_source(self, path): + """Track the source .json for a given adapter.""" + + for thing in (self.adapters + self.schemadefs + + self.media_linkers + self.hook_scripts): + thing._json_path = path + + def from_filepath(self, suffix): + """Return the adapter object associated with a given file suffix.""" + + for adapter in self.adapters: + if suffix.lower() in adapter.suffixes: + return adapter + raise exceptions.NoKnownAdapterForExtensionError(suffix) + + def adapter_module_from_suffix(self, suffix): + """Return the adapter module associated with a given file suffix.""" + + adp = self.from_filepath(suffix) + return adp.module() + + def from_name(self, name, kind_list="adapters"): + """Return the adapter object associated with a given adapter name.""" + + for thing in getattr(self, kind_list): + if name == thing.name: + return thing + + raise exceptions.NotSupportedError( + "Could not find plugin: '{}' in kind_list: '{}'." 
+ " options: {}".format( + name, + kind_list, + getattr(self, kind_list) + ) + ) + + def adapter_module_from_name(self, name): + """Return the adapter module associated with a given adapter name.""" + + adp = self.from_name(name) + return adp.module() + + def schemadef_module_from_name(self, name): + """Return the schemadef module associated with a given schemadef name.""" + + adp = self.from_name(name, kind_list="schemadefs") + return adp.module() + + +_MANIFEST = None + + +def load_manifest(): + # build the manifest of adapters, starting with builtin adapters + result = manifest_from_file( + os.path.join( + os.path.dirname(os.path.dirname(inspect.getsourcefile(core))), + "adapters", + "builtin_adapters.plugin_manifest.json" + ) + ) + + # layer contrib plugins after built in ones + try: + import opentimelineio_contrib as otio_c + + contrib_manifest = manifest_from_file( + os.path.join( + os.path.dirname(inspect.getsourcefile(otio_c)), + "adapters", + "contrib_adapters.plugin_manifest.json" + ) + ) + result.extend(contrib_manifest) + except ImportError: + pass + + # Discover setuptools-based plugins + if pkg_resources: + for plugin in pkg_resources.iter_entry_points( + "opentimelineio.plugins" + ): + plugin_name = plugin.name + try: + plugin_entry_point = plugin.load() + try: + plugin_manifest = plugin_entry_point.plugin_manifest() + except AttributeError: + if not pkg_resources.resource_exists( + plugin.module_name, + 'plugin_manifest.json' + ): + raise + manifest_stream = pkg_resources.resource_stream( + plugin.module_name, + 'plugin_manifest.json' + ) + plugin_manifest = core.deserialize_json_from_string( + manifest_stream.read().decode('utf-8') + ) + manifest_stream.close() + filepath = pkg_resources.resource_filename( + plugin.module_name, + 'plugin_manifest.json' + ) + plugin_manifest._update_plugin_source(filepath) + + except Exception: + logging.exception( + "could not load plugin: {}".format(plugin_name) + ) + continue + + result.extend(plugin_manifest) + else: + # XXX: Should we print some kind of warning that pkg_resources isn't + # available? + pass + + # read local adapter manifests, if they exist + _local_manifest_path = os.environ.get("OTIO_PLUGIN_MANIFEST_PATH", None) + if _local_manifest_path is not None: + for json_path in _local_manifest_path.split(":"): + if not os.path.exists(json_path): + # XXX: In case error reporting is requested + # print( + # "Warning: OpenTimelineIO cannot access path '{}' from " + # "$OTIO_PLUGIN_MANIFEST_PATH".format(json_path) + # ) + continue + + LOCAL_MANIFEST = manifest_from_file(json_path) + result.extend(LOCAL_MANIFEST) + + # force the schemadefs to load and add to schemadef module namespace + for s in result.schemadefs: + s.module() + return result + + +def ActiveManifest(force_reload=False): + global _MANIFEST + if not _MANIFEST or force_reload: + _MANIFEST = load_manifest() + + return _MANIFEST diff --git a/pype/vendor/python/python_2/opentimelineio/plugins/python_plugin.py b/pype/vendor/python/python_2/opentimelineio/plugins/python_plugin.py new file mode 100644 index 00000000000..c749bd5f9d6 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/plugins/python_plugin.py @@ -0,0 +1,128 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. 
Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Base class for OTIO plugins that are exposed by manifests.""" + +import os +import imp + +from .. import ( + core, + exceptions, +) + + +class PythonPlugin(core.SerializableObject): + """A class of plugin that is encoded in a python module, exposed via a + manifest. + """ + + _serializable_label = "PythonPlugin.1" + + def __init__( + self, + name=None, + execution_scope=None, + filepath=None, + ): + super(PythonPlugin, self).__init__() + self.name = name + self.execution_scope = execution_scope + self.filepath = filepath + self._json_path = None + self._module = None + + name = core.serializable_field("name", doc="Adapter name.") + execution_scope = core.serializable_field( + "execution_scope", + str, + doc=( + "Describes whether this adapter is executed in the current python" + " process or in a subshell. Options are: " + "['in process', 'out of process']." + ) + ) + filepath = core.serializable_field( + "filepath", + str, + doc=( + "Absolute path or relative path to adapter module from location of" + " json." + ) + ) + + def module_abs_path(self): + """Return an absolute path to the module implementing this adapter.""" + + filepath = self.filepath + if not os.path.isabs(filepath): + if not self._json_path: + raise exceptions.MisconfiguredPluginError( + "{} plugin is misconfigured, missing json path. " + "plugin: {}".format( + self.name, + repr(self) + ) + ) + + filepath = os.path.join(os.path.dirname(self._json_path), filepath) + + return filepath + + def _imported_module(self, namespace): + """Load the module this plugin points at.""" + + pyname = os.path.splitext(os.path.basename(self.module_abs_path()))[0] + pydir = os.path.dirname(self.module_abs_path()) + + (file_obj, pathname, description) = imp.find_module(pyname, [pydir]) + + with file_obj: + # this will reload the module if it has already been loaded. + mod = imp.load_module( + "opentimelineio.{}.{}".format(namespace, self.name), + file_obj, + pathname, + description + ) + + return mod + + def module(self): + """Return the module object for this adapter. """ + + if not self._module: + self._module = self._imported_module("adapters") + + return self._module + + def _execute_function(self, func_name, **kwargs): + """Execute func_name on this adapter with error checking.""" + + # collects the error handling into a common place. 
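+        # For example, an adapter's read_from_file(filepath=...) call is
+        # dispatched through here: the named function is looked up on the
+        # plugin's module and invoked with the given keyword arguments.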
+ if not hasattr(self.module(), func_name): + raise exceptions.AdapterDoesntSupportFunctionError( + "Sorry, {} doesn't support {}.".format(self.name, func_name) + ) + return (getattr(self.module(), func_name)(**kwargs)) diff --git a/pype/vendor/python/python_2/opentimelineio/schema/__init__.py b/pype/vendor/python/python_2/opentimelineio/schema/__init__.py new file mode 100644 index 00000000000..419f337bf64 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/__init__.py @@ -0,0 +1,75 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +# flake8: noqa + +"""User facing classes.""" + +from .missing_reference import ( + MissingReference +) +from .external_reference import ( + ExternalReference +) +from .clip import ( + Clip, +) +from .track import ( + Track, + TrackKind, + NeighborGapPolicy, +) +from .stack import ( + Stack, +) +from .timeline import ( + Timeline, + timeline_from_clips, +) +from .marker import ( + Marker, + MarkerColor, +) +from .gap import ( + Gap, +) +from .effect import ( + Effect, + TimeEffect, + LinearTimeWarp, + FreezeFrame, +) +from .transition import ( + Transition, + TransitionTypes, +) +from .serializable_collection import ( + SerializableCollection +) +from .generator_reference import ( + GeneratorReference +) +from .schemadef import ( + SchemaDef +) diff --git a/pype/vendor/python/python_2/opentimelineio/schema/clip.py b/pype/vendor/python/python_2/opentimelineio/schema/clip.py new file mode 100644 index 00000000000..44d38dfcf11 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/clip.py @@ -0,0 +1,130 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. 
+# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Implementation of the Clip class, for pointing at media.""" + +import copy + +from .. import ( + core, + exceptions, +) +from . import ( + missing_reference +) + + +@core.register_type +class Clip(core.Item): + """The base editable object in OTIO. + + Contains a media reference and a trim on that media reference. + """ + + _serializable_label = "Clip.1" + + def __init__( + self, + name=None, + media_reference=None, + source_range=None, + markers=[], + effects=[], + metadata=None, + ): + core.Item.__init__( + self, + name=name, + source_range=source_range, + markers=markers, + effects=effects, + metadata=metadata + ) + + if not media_reference: + media_reference = missing_reference.MissingReference() + self._media_reference = copy.deepcopy(media_reference) + + name = core.serializable_field("name", doc="Name of this clip.") + transform = core.deprecated_field() + _media_reference = core.serializable_field( + "media_reference", + core.MediaReference, + "Media reference to the media this clip represents." + ) + + @property + def media_reference(self): + if self._media_reference is None: + self._media_reference = missing_reference.MissingReference() + return self._media_reference + + @media_reference.setter + def media_reference(self, val): + if val is None: + val = missing_reference.MissingReference() + self._media_reference = val + + def available_range(self): + if not self.media_reference: + raise exceptions.CannotComputeAvailableRangeError( + "No media reference set on clip: {}".format(self) + ) + + if not self.media_reference.available_range: + raise exceptions.CannotComputeAvailableRangeError( + "No available_range set on media reference on clip: {}".format( + self + ) + ) + + return copy.copy(self.media_reference.available_range) + + def __str__(self): + return 'Clip("{}", {}, {}, {})'.format( + self.name, + self.media_reference, + self.source_range, + self.metadata + ) + + def __repr__(self): + return ( + 'otio.schema.Clip(' + 'name={}, ' + 'media_reference={}, ' + 'source_range={}, ' + 'metadata={}' + ')'.format( + repr(self.name), + repr(self.media_reference), + repr(self.source_range), + repr(self.metadata), + ) + ) + + def each_clip(self, search_range=None): + """Yields self.""" + + yield self diff --git a/pype/vendor/python/python_2/opentimelineio/schema/effect.py b/pype/vendor/python/python_2/opentimelineio/schema/effect.py new file mode 100644 index 00000000000..61eb4204faa --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/effect.py @@ -0,0 +1,130 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. 
This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Implementation of Effect OTIO class.""" + +from .. import ( + core +) + +import copy + + +@core.register_type +class Effect(core.SerializableObject): + _serializable_label = "Effect.1" + + def __init__( + self, + name=None, + effect_name=None, + metadata=None + ): + super(Effect, self).__init__() + self.name = name + self.effect_name = effect_name + self.metadata = copy.deepcopy(metadata) if metadata else {} + + name = core.serializable_field( + "name", + doc="Name of this effect object. Example: 'BlurByHalfEffect'." + ) + effect_name = core.serializable_field( + "effect_name", + doc="Name of the kind of effect (example: 'Blur', 'Crop', 'Flip')." + ) + metadata = core.serializable_field( + "metadata", + dict, + doc="Metadata dictionary." + ) + + def __str__(self): + return ( + "Effect(" + "{}, " + "{}, " + "{}" + ")".format( + str(self.name), + str(self.effect_name), + str(self.metadata), + ) + ) + + def __repr__(self): + return ( + "otio.schema.Effect(" + "name={}, " + "effect_name={}, " + "metadata={}" + ")".format( + repr(self.name), + repr(self.effect_name), + repr(self.metadata), + ) + ) + + +@core.register_type +class TimeEffect(Effect): + "Base Time Effect Class" + _serializable_label = "TimeEffect.1" + pass + + +@core.register_type +class LinearTimeWarp(TimeEffect): + "A time warp that applies a linear scale across the entire clip" + _serializable_label = "LinearTimeWarp.1" + + def __init__(self, name=None, time_scalar=1, metadata=None): + Effect.__init__( + self, + name=name, + effect_name="LinearTimeWarp", + metadata=metadata + ) + self.time_scalar = time_scalar + + time_scalar = core.serializable_field( + "time_scalar", + doc="Linear time scalar applied to clip. " + "2.0 = double speed, 0.5 = half speed." + ) + + +@core.register_type +class FreezeFrame(LinearTimeWarp): + "Hold the first frame of the clip for the duration of the clip." + _serializable_label = "FreezeFrame.1" + + def __init__(self, name=None, metadata=None): + LinearTimeWarp.__init__( + self, + name=name, + time_scalar=0, + metadata=metadata + ) + self.effect_name = "FreezeFrame" diff --git a/pype/vendor/python/python_2/opentimelineio/schema/external_reference.py b/pype/vendor/python/python_2/opentimelineio/schema/external_reference.py new file mode 100644 index 00000000000..87db4d46525 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/external_reference.py @@ -0,0 +1,69 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. 
This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +""" +Implementation of the ExternalReference media reference schema. +""" + +from .. import ( + core, +) + + +@core.register_type +class ExternalReference(core.MediaReference): + """Reference to media via a url, for example "file:///var/tmp/foo.mov" """ + + _serializable_label = "ExternalReference.1" + _name = "ExternalReference" + + def __init__( + self, + target_url=None, + available_range=None, + metadata=None, + ): + core.MediaReference.__init__( + self, + available_range=available_range, + metadata=metadata + ) + + self.target_url = target_url + + target_url = core.serializable_field( + "target_url", + doc=( + "URL at which this media lives. For local references, use the " + "'file://' format." + ) + ) + + def __str__(self): + return 'ExternalReference("{}")'.format(self.target_url) + + def __repr__(self): + return 'otio.schema.ExternalReference(target_url={})'.format( + repr(self.target_url) + ) diff --git a/pype/vendor/python/python_2/opentimelineio/schema/gap.py b/pype/vendor/python/python_2/opentimelineio/schema/gap.py new file mode 100644 index 00000000000..4c8165db8ff --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/gap.py @@ -0,0 +1,82 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +from .. import ( + core, + opentime, +) + +"""Gap Item - represents a transparent gap in content.""" + + +@core.register_type +class Gap(core.Item): + _serializable_label = "Gap.1" + _class_path = "schema.Gap" + + def __init__( + self, + name=None, + # note - only one of the following two arguments is accepted + # if neither is provided, source_range will be set to an empty + # TimeRange + # Duration is provided as a convienence for creating a gap of a certain + # length. 
IE: Gap(duration=otio.opentime.RationalTime(300, 24)) + duration=None, + source_range=None, + effects=None, + markers=None, + metadata=None, + ): + if duration and source_range: + raise RuntimeError( + "Cannot instantiate with both a source range and a duration." + ) + + if duration: + source_range = opentime.TimeRange( + opentime.RationalTime(0, duration.rate), + duration + ) + elif source_range is None: + # if neither is provided, seed TimeRange as an empty Source Range. + source_range = opentime.TimeRange() + + core.Item.__init__( + self, + name=name, + source_range=source_range, + effects=effects, + markers=markers, + metadata=metadata + ) + + @staticmethod + def visible(): + return False + + +# the original name for "gap" was "filler" - this will turn "Filler" found in +# OTIO files into Gap automatically. +core.register_type(Gap, "Filler") diff --git a/pype/vendor/python/python_2/opentimelineio/schema/generator_reference.py b/pype/vendor/python/python_2/opentimelineio/schema/generator_reference.py new file mode 100644 index 00000000000..ef1dde836e2 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/generator_reference.py @@ -0,0 +1,76 @@ +""" +Generators are media references that _produce_ media rather than refer to it. +""" + +from .. import ( + core, +) + + +@core.register_type +class GeneratorReference(core.MediaReference): + """ + Base class for Generators. + + Generators are media references that become "generators" in editorial + systems. For example, color bars or a solid color. + """ + + _serializable_label = "GeneratorReference.1" + _name = "GeneratorReference" + + def __init__( + self, + name=None, + generator_kind=None, + available_range=None, + parameters=None, + metadata=None + ): + super(GeneratorReference, self).__init__( + name, + available_range, + metadata + ) + + if parameters is None: + parameters = {} + self.parameters = parameters + self.generator_kind = generator_kind + + parameters = core.serializable_field( + "parameters", + dict, + doc="Dictionary of parameters for generator." + ) + generator_kind = core.serializable_field( + "generator_kind", + required_type=type(""), + # @TODO: need to clarify if this also has an enum of supported types + # / generic + doc="Kind of generator reference, as defined by the " + "schema.generator_reference.GeneratorReferenceTypes enum." + ) + + def __str__(self): + return 'GeneratorReference("{}", "{}", {}, {})'.format( + self.name, + self.generator_kind, + self.parameters, + self.metadata + ) + + def __repr__(self): + return ( + 'otio.schema.GeneratorReference(' + 'name={}, ' + 'generator_kind={}, ' + 'parameters={}, ' + 'metadata={}' + ')'.format( + repr(self.name), + repr(self.generator_kind), + repr(self.parameters), + repr(self.metadata), + ) + ) diff --git a/pype/vendor/python/python_2/opentimelineio/schema/marker.py b/pype/vendor/python/python_2/opentimelineio/schema/marker.py new file mode 100644 index 00000000000..d8b6f1c2720 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/marker.py @@ -0,0 +1,128 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. 
This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Marker class. Holds metadata over regions of time.""" + +from .. import ( + core, + opentime, +) + + +class MarkerColor: + """ Enum encoding colors of markers as strings. """ + + PINK = "PINK" + RED = "RED" + ORANGE = "ORANGE" + YELLOW = "YELLOW" + GREEN = "GREEN" + CYAN = "CYAN" + BLUE = "BLUE" + PURPLE = "PURPLE" + MAGENTA = "MAGENTA" + BLACK = "BLACK" + WHITE = "WHITE" + + +@core.register_type +class Marker(core.SerializableObject): + + """ Holds metadata over time on a timeline """ + + _serializable_label = "Marker.2" + _class_path = "marker.Marker" + + def __init__( + self, + name=None, + marked_range=None, + color=MarkerColor.RED, + metadata=None, + ): + core.SerializableObject.__init__( + self, + ) + self.name = name + self.marked_range = marked_range + self.color = color + self.metadata = metadata or {} + + name = core.serializable_field("name", doc="Name of this marker.") + + marked_range = core.serializable_field( + "marked_range", + opentime.TimeRange, + "Range this marker applies to, relative to the Item this marker is " + "attached to (e.g. the Clip or Track that owns this marker)." + ) + + color = core.serializable_field( + "color", + required_type=type(MarkerColor.RED), + doc="Color string for this marker (for example: 'RED'), based on the " + "otio.schema.marker.MarkerColor enum." + ) + + # old name + range = core.deprecated_field() + + metadata = core.serializable_field( + "metadata", + dict, + "Metadata dictionary." + ) + + def __repr__(self): + return ( + "otio.schema.Marker(" + "name={}, " + "marked_range={}, " + "metadata={}" + ")".format( + repr(self.name), + repr(self.marked_range), + repr(self.metadata), + ) + ) + + def __str__(self): + return ( + "Marker(" + "{}, " + "{}, " + "{}" + ")".format( + str(self.name), + str(self.marked_range), + str(self.metadata), + ) + ) + + +@core.upgrade_function_for(Marker, 2) +def _version_one_to_two(data): + data["marked_range"] = data["range"] + del data["range"] + return data diff --git a/pype/vendor/python/python_2/opentimelineio/schema/missing_reference.py b/pype/vendor/python/python_2/opentimelineio/schema/missing_reference.py new file mode 100644 index 00000000000..88bc1862fc7 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/missing_reference.py @@ -0,0 +1,43 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. 
This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +""" +Implementation of the MissingReference media reference schema. +""" + +from .. import ( + core, +) + + +@core.register_type +class MissingReference(core.MediaReference): + """Represents media for which a concrete reference is missing.""" + + _serializable_label = "MissingReference.1" + _name = "MissingReference" + + @property + def is_missing_reference(self): + return True diff --git a/pype/vendor/python/python_2/opentimelineio/schema/schemadef.py b/pype/vendor/python/python_2/opentimelineio/schema/schemadef.py new file mode 100644 index 00000000000..5fb4e05abd9 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/schemadef.py @@ -0,0 +1,65 @@ + +from .. import ( + core, + exceptions, + plugins, + schemadef +) + + +@core.register_type +class SchemaDef(plugins.PythonPlugin): + _serializable_label = "SchemaDef.1" + + def __init__( + self, + name=None, + execution_scope=None, + filepath=None, + ): + super(SchemaDef, self).__init__(name, execution_scope, filepath) + + def module(self): + """ + Return the module object for this schemadef plugin. + If the module hasn't already been imported, it is imported and + injected into the otio.schemadefs namespace as a side-effect. + (redefines PythonPlugin.module()) + """ + + if not self._module: + self._module = self._imported_module("schemadef") + if self.name: + schemadef._add_schemadef_module(self.name, self._module) + + return self._module + + +def available_schemadef_names(): + """Return a string list of the available schemadefs.""" + + return [str(sd.name) for sd in plugins.ActiveManifest().schemadefs] + + +def from_name(name): + """Fetch the schemadef plugin object by the name of the schema directly.""" + + try: + return plugins.ActiveManifest().from_name(name, kind_list="schemadefs") + except exceptions.NotSupportedError: + raise exceptions.NotSupportedError( + "schemadef not supported: {}, available: {}".format( + name, + available_schemadef_names() + ) + ) + + +def module_from_name(name): + """Fetch the plugin's module by the name of the schemadef. + + Will load the plugin if it has not already been loaded. Reading a file that + contains the schemadef will also trigger a load of the plugin. 
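+ + A usage sketch; "my_schema" is a hypothetical plugin name registered + in the active manifest: + + >>> mod = module_from_name("my_schema")  # doctest: +SKIP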
+ """ + plugin = from_name(name) + return plugin.module() diff --git a/pype/vendor/python/python_2/opentimelineio/schema/serializable_collection.py b/pype/vendor/python/python_2/opentimelineio/schema/serializable_collection.py new file mode 100644 index 00000000000..523ea77ddbb --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/serializable_collection.py @@ -0,0 +1,149 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""A serializable collection of SerializableObjects.""" + +import collections +import copy + +from .. import ( + core +) + +from . import ( + clip +) + + +@core.register_type +class SerializableCollection( + core.SerializableObject, + collections.MutableSequence +): + """A kind of composition which can hold any serializable object. + + This composition approximates the concept of a `bin` - a collection of + SerializableObjects that do not have any compositing meaning, but can + serialize to/from OTIO correctly, with metadata and a named collection. + """ + + _serializable_label = "SerializableCollection.1" + _class_path = "schema.SerializableCollection" + + def __init__( + self, + name=None, + children=None, + metadata=None, + ): + super(SerializableCollection, self).__init__() + + self.name = name + self._children = children or [] + self.metadata = copy.deepcopy(metadata) if metadata else {} + + name = core.serializable_field( + "name", + doc="SerializableCollection name." + ) + _children = core.serializable_field( + "children", + list, + "SerializableObject contained by this container." + ) + metadata = core.serializable_field( + "metadata", + dict, + doc="Metadata dictionary for this SerializableCollection." 
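+ # A usage sketch (the clip variables are hypothetical); the collection + # behaves like a mutable sequence, so standard list operations apply: + # + #   bin = SerializableCollection(name="dailies", children=[clip_a]) + #   bin.append(clip_b) + #   names = [c.name for c in bin.each_clip()]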
+ ) + + # @{ Stringification + def __str__(self): + return "SerializableCollection({}, {}, {})".format( + str(self.name), + str(self._children), + str(self.metadata) + ) + + def __repr__(self): + return ( + "otio.{}(" + "name={}, " + "children={}, " + "metadata={}" + ")".format( + self._class_path, + repr(self.name), + repr(self._children), + repr(self.metadata) + ) + ) + # @} + + # @{ collections.MutableSequence implementation + def __getitem__(self, item): + return self._children[item] + + def __setitem__(self, key, value): + self._children[key] = value + + def insert(self, index, item): + self._children.insert(index, item) + + def __len__(self): + return len(self._children) + + def __delitem__(self, item): + del self._children[item] + # @} + + def each_child( + self, + search_range=None, + descended_from_type=core.composable.Composable + ): + for i, child in enumerate(self._children): + # filter out children that are not descended from the specified type + is_descendant = descended_from_type == core.composable.Composable + if is_descendant or isinstance(child, descended_from_type): + yield child + + # for children that are compositions, recurse into their children + if hasattr(child, "each_child"): + for valid_child in ( + c for c in child.each_child( + search_range, + descended_from_type + ) + ): + yield valid_child + + def each_clip(self, search_range=None): + return self.each_child(search_range, clip.Clip) + + +# the original name for "SerializableCollection" was "SerializeableCollection" +# this maps the misspelling found in older OTIO files to the correct class +# automatically. +core.register_type(SerializableCollection, 'SerializeableCollection') diff --git a/pype/vendor/python/python_2/opentimelineio/schema/stack.py b/pype/vendor/python/python_2/opentimelineio/schema/stack.py new file mode 100644 index 00000000000..bf67158dc08 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/stack.py @@ -0,0 +1,120 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""A stack represents a series of composable.Composables that are arranged such +that their start times are at the same point. + +Most commonly, this would be a series of schema.Track objects that then +contain clips. The 0 time of those tracks would coincide with the 0-time of +the stack. + +Stacks are in compositing order, with later children obscuring earlier +children. In other words, from bottom to top. If a stack has three children, +[A, B, C], C is above B which is above A.
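+ +A small construction sketch (the track variables are hypothetical): + +    stack = Stack(name="master", children=[track_a, track_b, track_c]) +    # track_c is composited over track_b, which is over track_a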
+ +A stack is the length of its longest child. If a child ends before the other +children, then an earlier index child would be visible before it. +""" + +from .. import ( + core, + opentime, + exceptions +) + +from . import ( + clip +) + + +@core.register_type +class Stack(core.Composition): + _serializable_label = "Stack.1" + _composition_kind = "Stack" + _modname = "schema" + + def __init__( + self, + name=None, + children=None, + source_range=None, + markers=None, + effects=None, + metadata=None + ): + core.Composition.__init__( + self, + name=name, + children=children, + source_range=source_range, + markers=markers, + effects=effects, + metadata=metadata + ) + + def range_of_child_at_index(self, index): + try: + child = self[index] + except IndexError: + raise exceptions.NoSuchChildAtIndex(index) + + dur = child.duration() + + return opentime.TimeRange( + start_time=opentime.RationalTime(0, dur.rate), + duration=dur + ) + + def each_clip(self, search_range=None): + return self.each_child(search_range, clip.Clip) + + def available_range(self): + if len(self) == 0: + return opentime.TimeRange() + + duration = max(child.duration() for child in self) + + return opentime.TimeRange( + opentime.RationalTime(0, duration.rate), + duration=duration + ) + + def range_of_all_children(self): + child_map = {} + for i, c in enumerate(self._children): + child_map[c] = self.range_of_child_at_index(i) + return child_map + + def trimmed_range_of_child_at_index(self, index, reference_space=None): + range = self.range_of_child_at_index(index) + + if not self.source_range: + return range + + range = opentime.TimeRange( + start_time=self.source_range.start_time, + duration=min(range.duration, self.source_range.duration) + ) + + return range diff --git a/pype/vendor/python/python_2/opentimelineio/schema/timeline.py b/pype/vendor/python/python_2/opentimelineio/schema/timeline.py new file mode 100644 index 00000000000..fe7d6952ab2 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/timeline.py @@ -0,0 +1,133 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Implementation of the OTIO built in schema, Timeline object.""" + +import copy + +from .. import ( + core, + opentime, +) + +from . 
import stack, track + + +@core.register_type +class Timeline(core.SerializableObject): + _serializable_label = "Timeline.1" + + def __init__( + self, + name=None, + tracks=None, + global_start_time=None, + metadata=None, + ): + super(Timeline, self).__init__() + self.name = name + self.global_start_time = copy.deepcopy(global_start_time) + + if tracks is None: + tracks = [] + self.tracks = stack.Stack(name="tracks", children=tracks) + + self.metadata = copy.deepcopy(metadata) if metadata else {} + + name = core.serializable_field("name", doc="Name of this timeline.") + tracks = core.serializable_field( + "tracks", + core.Composition, + doc="Stack of tracks containing items." + ) + metadata = core.serializable_field( + "metadata", + dict, + "Metadata dictionary." + ) + global_start_time = core.serializable_field( + "global_start_time", + opentime.RationalTime, + doc="Global starting time value and rate of the timeline." + ) + + def __str__(self): + return 'Timeline("{}", {})'.format(str(self.name), str(self.tracks)) + + def __repr__(self): + return ( + "otio.schema.Timeline(name={}, tracks={})".format( + repr(self.name), + repr(self.tracks) + ) + ) + + def each_child(self, search_range=None, descended_from_type=core.Composable): + return self.tracks.each_child(search_range, descended_from_type) + + def each_clip(self, search_range=None): + """Return a flat list of each clip, limited to the search_range.""" + + return self.tracks.each_clip(search_range) + + def duration(self): + """Duration of this timeline.""" + + return self.tracks.duration() + + def range_of_child(self, child): + """Range of the child object contained in this timeline.""" + + return self.tracks.range_of_child(child) + + def video_tracks(self): + """ + This convenience method returns a list of the top-level video tracks in + this timeline. + """ + return [ + trck for trck + in self.tracks + if (isinstance(trck, track.Track) and + trck.kind == track.TrackKind.Video) + ] + + def audio_tracks(self): + """ + This convenience method returns a list of the top-level audio tracks in + this timeline. + """ + return [ + trck for trck + in self.tracks + if (isinstance(trck, track.Track) and + trck.kind == track.TrackKind.Audio) + ] + + +def timeline_from_clips(clips): + """Convenience for making a single track timeline from a list of clips.""" + + trck = track.Track(children=clips) + return Timeline(tracks=[trck]) diff --git a/pype/vendor/python/python_2/opentimelineio/schema/track.py b/pype/vendor/python/python_2/opentimelineio/schema/track.py new file mode 100644 index 00000000000..29b0e7f1aeb --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/track.py @@ -0,0 +1,242 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. 
+# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Implement the Track subclass of Composition.""" + +import collections + +from .. import ( + core, + opentime, +) + +from . import ( + gap, + transition, + clip, +) + + +class TrackKind: + Video = "Video" + Audio = "Audio" + + +class NeighborGapPolicy: + """ enum for deciding how to add gaps when asking for neighbors """ + never = 0 + around_transitions = 1 + + +@core.register_type +class Track(core.Composition): + _serializable_label = "Track.1" + _composition_kind = "Track" + _modname = "schema" + + def __init__( + self, + name=None, + children=None, + kind=TrackKind.Video, + source_range=None, + markers=None, + effects=None, + metadata=None, + ): + core.Composition.__init__( + self, + name=name, + children=children, + source_range=source_range, + markers=markers, + effects=effects, + metadata=metadata + ) + self.kind = kind + + kind = core.serializable_field( + "kind", + doc="Kind of this Track (TrackKind.Video or TrackKind.Audio)." + ) + + def range_of_child_at_index(self, index): + child = self[index] + + # sum the durations of all the children leading up to the chosen one + start_time = sum( + ( + o_c.duration() + for o_c in (c for c in self[:index] if not c.overlapping()) + ), + opentime.RationalTime(value=0, rate=child.duration().rate) + ) + if isinstance(child, transition.Transition): + start_time -= child.in_offset + + return opentime.TimeRange(start_time, child.duration()) + + def trimmed_range_of_child_at_index(self, index, reference_space=None): + child_range = self.range_of_child_at_index(index) + + return self.trim_child_range(child_range) + + def handles_of_child(self, child): + """If media beyond the ends of this child are visible due to adjacent + Transitions (only applicable in a Track) then this will return the + head and tail offsets as a tuple of RationalTime objects. If no handles + are present on either side, then None is returned instead of a + RationalTime. + + Example usage: + + >>> head, tail = track.handles_of_child(clip) + >>> if head: + ... print('do something') + >>> if tail: + ...
print('do something else') + """ + head, tail = None, None + before, after = self.neighbors_of(child) + if isinstance(before, transition.Transition): + head = before.in_offset + if isinstance(after, transition.Transition): + tail = after.out_offset + + return head, tail + + def available_range(self): + # Sum up our child items' durations + duration = sum( + (c.duration() for c in self if isinstance(c, core.Item)), + opentime.RationalTime() + ) + + # Add the implicit gap when a Transition is at the start/end + if self and isinstance(self[0], transition.Transition): + duration += self[0].in_offset + if self and isinstance(self[-1], transition.Transition): + duration += self[-1].out_offset + + result = opentime.TimeRange( + start_time=opentime.RationalTime(0, duration.rate), + duration=duration + ) + + return result + + def each_clip(self, search_range=None, shallow_search=False): + return self.each_child(search_range, clip.Clip, shallow_search) + + def neighbors_of(self, item, insert_gap=NeighborGapPolicy.never): + """Returns the neighbors of the item as a namedtuple, (previous, next). + + Can optionally fill in gaps when transitions have no gaps next to them. + + with insert_gap == NeighborGapPolicy.never: + [A, B, C] :: neighbors_of(B) -> (A, C) + [A, B, C] :: neighbors_of(A) -> (None, B) + [A, B, C] :: neighbors_of(C) -> (B, None) + [A] :: neighbors_of(A) -> (None, None) + + with insert_gap == NeighborGapPolicy.around_transitions: + (assuming A and C are transitions) + [A, B, C] :: neighbors_of(B) -> (A, C) + [A, B, C] :: neighbors_of(A) -> (Gap, B) + [A, B, C] :: neighbors_of(C) -> (B, Gap) + [A] :: neighbors_of(A) -> (Gap, Gap) + """ + + try: + index = self.index(item) + except ValueError: + raise ValueError( + "item: {} is not in composition: {}".format( + item, + self + ) + ) + + previous, next_item = None, None + + # look before index + if index == 0: + if insert_gap == NeighborGapPolicy.around_transitions: + if isinstance(item, transition.Transition): + previous = gap.Gap( + source_range=opentime.TimeRange(duration=item.in_offset)) + elif index > 0: + previous = self[index - 1] + + if index == len(self) - 1: + if insert_gap == NeighborGapPolicy.around_transitions: + if isinstance(item, transition.Transition): + next_item = gap.Gap( + source_range=opentime.TimeRange(duration=item.out_offset)) + elif index < len(self) - 1: + next_item = self[index + 1] + + return collections.namedtuple('neighbors', ('previous', 'next'))( + previous, + next_item + ) + + def range_of_all_children(self): + """Return a dict mapping children to their range in this track.""" + + if not self._children: + return {} + + result_map = {} + + # Heuristic to guess what the rate should be set to based on the first + # thing in the track. + first_thing = self._children[0] + if isinstance(first_thing, transition.Transition): + rate = first_thing.in_offset.rate + else: + rate = first_thing.trimmed_range().duration.rate + + last_end_time = opentime.RationalTime(0, rate) + + for thing in self._children: + if isinstance(thing, transition.Transition): + result_map[thing] = opentime.TimeRange( + last_end_time - thing.in_offset, + thing.out_offset + thing.in_offset, + ) + else: + last_range = opentime.TimeRange( + last_end_time, + thing.trimmed_range().duration + ) + result_map[thing] = last_range + last_end_time = last_range.end_time_exclusive() + + return result_map + + +# the original name for "track" was "sequence" - this will turn "Sequence" +# found in OTIO files into Track automatically. 
+core.register_type(Track, "Sequence") diff --git a/pype/vendor/python/python_2/opentimelineio/schema/transition.py b/pype/vendor/python/python_2/opentimelineio/schema/transition.py new file mode 100644 index 00000000000..93b54ab1ab7 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schema/transition.py @@ -0,0 +1,159 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Transition base class""" + +from .. import ( + opentime, + core, + exceptions, +) + +import copy + + +class TransitionTypes: + """Enum encoding types of transitions. + + This is for representing "Dissolves" and "Wipes" defined by the + multi-source effect as defined by SMPTE 258M-2004 7.6.3.2 + + Other effects are handled by the `schema.Effect` class. + """ + + # @{ SMPTE transitions. + SMPTE_Dissolve = "SMPTE_Dissolve" + # SMPTE_Wipe = "SMPTE_Wipe" -- @TODO + # @} + + # Non SMPTE transitions. + Custom = "Custom_Transition" + + +@core.register_type +class Transition(core.Composable): + """Represents a transition between two items.""" + + _serializable_label = "Transition.1" + + def __init__( + self, + name=None, + transition_type=None, + # @TODO: parameters will be added later as needed (SMPTE_Wipe will + # probably require it) + # parameters=None, + in_offset=None, + out_offset=None, + metadata=None + ): + core.Composable.__init__( + self, + name=name, + metadata=metadata + ) + + # init everything as None first, so that we will catch uninitialized + # values via exceptions + # if parameters is None: + # parameters = {} + # self.parameters = parameters + self.transition_type = transition_type + self.in_offset = copy.deepcopy(in_offset) + self.out_offset = copy.deepcopy(out_offset) + + transition_type = core.serializable_field( + "transition_type", + required_type=type(TransitionTypes.SMPTE_Dissolve), + doc="Kind of transition, as defined by the " + "schema.transition.TransitionTypes enum." + ) + # parameters = core.serializable_field( + # "parameters", + # doc="Parameters of the transition." + # ) + in_offset = core.serializable_field( + "in_offset", + required_type=opentime.RationalTime, + doc="Amount of the previous clip this transition overlaps, exclusive." + ) + out_offset = core.serializable_field( + "out_offset", + required_type=opentime.RationalTime, + doc="Amount of the next clip this transition overlaps, exclusive." 
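+ # An illustrative sketch (values hypothetical): a 24-frame centered + # cross-dissolve at 24 fps could be expressed as + #   Transition( + #       transition_type=TransitionTypes.SMPTE_Dissolve, + #       in_offset=opentime.RationalTime(12, 24), + #       out_offset=opentime.RationalTime(12, 24), + #   ) + # so duration() == in_offset + out_offset == RationalTime(24, 24).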
+ ) + + def __str__(self): + return 'Transition("{}", "{}", {}, {}, {})'.format( + self.name, + self.transition_type, + self.in_offset, + self.out_offset, + # self.parameters, + self.metadata + ) + + def __repr__(self): + return ( + 'otio.schema.Transition(' + 'name={}, ' + 'transition_type={}, ' + 'in_offset={}, ' + 'out_offset={}, ' + # 'parameters={}, ' + 'metadata={}' + ')'.format( + repr(self.name), + repr(self.transition_type), + repr(self.in_offset), + repr(self.out_offset), + # repr(self.parameters), + repr(self.metadata), + ) + ) + + @staticmethod + def overlapping(): + return True + + def duration(self): + return self.in_offset + self.out_offset + + def range_in_parent(self): + """Find and return the range of this item in the parent.""" + if not self.parent(): + raise exceptions.NotAChildError( + "No parent of {}, cannot compute range in parent.".format(self) + ) + + return self.parent().range_of_child(self) + + def trimmed_range_in_parent(self): + """Find and return the trimmed range of this item in the parent.""" + if not self.parent(): + raise exceptions.NotAChildError( + "No parent of {}, cannot compute range in parent.".format(self) + ) + + return self.parent().trimmed_range_of_child(self) diff --git a/pype/vendor/python/python_2/opentimelineio/schemadef/__init__.py b/pype/vendor/python/python_2/opentimelineio/schemadef/__init__.py new file mode 100644 index 00000000000..568b3eaaa7a --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/schemadef/__init__.py @@ -0,0 +1,5 @@ + +def _add_schemadef_module(name, mod): + """Insert a new module name and module object into schemadef namespace.""" + ns = globals() # the namespace dict of the schemadef package + ns[name] = mod diff --git a/pype/vendor/python/python_2/opentimelineio/test_utils.py b/pype/vendor/python/python_2/opentimelineio/test_utils.py new file mode 100644 index 00000000000..e173275ff58 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio/test_utils.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +# +# Copyright 2018 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Utility assertions for OTIO Unit tests.""" + +import re + +from .
import ( + adapters +) + + +class OTIOAssertions(object): + def assertJsonEqual(self, known, test_result): + """Convert to json and compare that (more readable).""" + self.maxDiff = None + + known_str = adapters.write_to_string(known, 'otio_json') + test_str = adapters.write_to_string(test_result, 'otio_json') + + def strip_trailing_decimal_zero(s): + return re.sub(r'"(value|rate)": (\d+)\.0', r'"\1": \2', s) + + self.assertMultiLineEqual( + strip_trailing_decimal_zero(known_str), + strip_trailing_decimal_zero(test_str) + ) + + def assertIsOTIOEquivalentTo(self, known, test_result): + """Test using the 'is equivalent to' method on SerializableObject""" + + self.assertTrue(known.is_equivalent_to(test_result)) diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/__init__.py b/pype/vendor/python/python_2/opentimelineio_contrib/__init__.py new file mode 100644 index 00000000000..7f7a82f46a2 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio_contrib/__init__.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python +# +# Copyright 2018 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""Unsupported contrib code for OpenTimelineIO.""" + +# flake8: noqa + +from . import ( + adapters +) + +__version__ = "0.11.0" +__author__ = "Pixar Animation Studios" +__author_email__ = "opentimelineio@pixar.com" +__license__ = "Modified Apache 2.0 License" diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/__init__.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/aaf_adapter/__init__.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/aaf_adapter/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/aaf_adapter/aaf_writer.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/aaf_adapter/aaf_writer.py new file mode 100644 index 00000000000..9e283d37472 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/aaf_adapter/aaf_writer.py @@ -0,0 +1,764 @@ +# +# Copyright 2019 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. 
This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""AAF Adapter Transcriber + +Specifies how to transcribe an OpenTimelineIO file into an AAF file. +""" + +import aaf2 +import abc +import uuid +import opentimelineio as otio +import os +import copy +import re + + +AAF_PARAMETERDEF_PAN = aaf2.auid.AUID("e4962322-2267-11d3-8a4c-0050040ef7d2") +AAF_OPERATIONDEF_MONOAUDIOPAN = aaf2.auid.AUID("9d2ea893-0968-11d3-8a38-0050040ef7d2") +AAF_PARAMETERDEF_AVIDPARAMETERBYTEORDER = uuid.UUID( + "c0038672-a8cf-11d3-a05b-006094eb75cb") +AAF_PARAMETERDEF_AVIDEFFECTID = uuid.UUID( + "93994bd6-a81d-11d3-a05b-006094eb75cb") +AAF_PARAMETERDEF_AFX_FG_KEY_OPACITY_U = uuid.UUID( + "8d56813d-847e-11d5-935a-50f857c10000") +AAF_PARAMETERDEF_LEVEL = uuid.UUID("e4962320-2267-11d3-8a4c-0050040ef7d2") +AAF_VVAL_EXTRAPOLATION_ID = uuid.UUID("0e24dd54-66cd-4f1a-b0a0-670ac3a7a0b3") +AAF_OPERATIONDEF_SUBMASTER = uuid.UUID("f1db0f3d-8d64-11d3-80df-006008143e6f") + + +class AAFAdapterError(otio.exceptions.OTIOError): + pass + + +class AAFValidationError(AAFAdapterError): + pass + + +class AAFFileTranscriber(object): + """ + AAFFileTranscriber + + AAFFileTranscriber manages the file-level knowledge during a conversion from + otio to aaf. This includes keeping track of unique tapemobs and mastermobs. + """ + + def __init__(self, input_otio, aaf_file, **kwargs): + """ + AAFFileTranscriber requires an input timeline and an output pyaaf2 file handle. + + Args: + input_otio: an input OpenTimelineIO timeline + aaf_file: a pyaaf2 file handle to an output file + """ + self.aaf_file = aaf_file + self.compositionmob = self.aaf_file.create.CompositionMob() + self.compositionmob.name = input_otio.name + self.compositionmob.usage = "Usage_TopLevel" + self.aaf_file.content.mobs.append(self.compositionmob) + self._unique_mastermobs = {} + self._unique_tapemobs = {} + self._clip_mob_ids_map = _gather_clip_mob_ids(input_otio, **kwargs) + + def _unique_mastermob(self, otio_clip): + """Get a unique mastermob, identified by clip metadata mob id.""" + mob_id = self._clip_mob_ids_map.get(otio_clip) + mastermob = self._unique_mastermobs.get(mob_id) + if not mastermob: + mastermob = self.aaf_file.create.MasterMob() + mastermob.name = otio_clip.name + mastermob.mob_id = aaf2.mobid.MobID(mob_id) + self.aaf_file.content.mobs.append(mastermob) + self._unique_mastermobs[mob_id] = mastermob + return mastermob + + def _unique_tapemob(self, otio_clip): + """Get a unique tapemob, identified by clip metadata mob id.""" + mob_id = self._clip_mob_ids_map.get(otio_clip) + tapemob = self._unique_tapemobs.get(mob_id) + if not tapemob: + tapemob = self.aaf_file.create.SourceMob() + tapemob.name = otio_clip.name + tapemob.descriptor = self.aaf_file.create.ImportDescriptor() + # If the edit_rate is not an integer, we need + # to use drop frame with a nominal integer fps. 
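+ # For example, an edit_rate of 29.97 rounds to a timecode_fps of 30 + # and therefore sets drop_frame=True below, while an integral rate + # such as 24 stays non-drop.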
+ edit_rate = otio_clip.visible_range().duration.rate + timecode_fps = round(edit_rate) + tape_timecode_slot = tapemob.create_timecode_slot( + edit_rate=edit_rate, + timecode_fps=timecode_fps, + drop_frame=(edit_rate != timecode_fps) + ) + timecode_start = ( + otio_clip.media_reference.available_range.start_time.value) + timecode_length = ( + otio_clip.media_reference.available_range.duration.value) + + tape_timecode_slot.segment.start = timecode_start + tape_timecode_slot.segment.length = timecode_length + self.aaf_file.content.mobs.append(tapemob) + self._unique_tapemobs[mob_id] = tapemob + return tapemob + + def track_transcriber(self, otio_track): + """Return an appropriate _TrackTranscriber given an otio track.""" + if otio_track.kind == otio.schema.TrackKind.Video: + transcriber = VideoTrackTranscriber(self, otio_track) + elif otio_track.kind == otio.schema.TrackKind.Audio: + transcriber = AudioTrackTranscriber(self, otio_track) + else: + raise otio.exceptions.NotSupportedError( + "Unsupported track kind: {}".format(otio_track.kind)) + return transcriber + + +def validate_metadata(timeline): + """Print a check of necessary metadata requirements for an otio timeline.""" + + all_checks = [__check(timeline, "duration().rate")] + edit_rate = __check(timeline, "duration().rate").value + + for child in timeline.each_child(): + checks = [] + if isinstance(child, otio.schema.Gap): + checks = [ + __check(child, "duration().rate").equals(edit_rate) + ] + if isinstance(child, otio.schema.Clip): + checks = [ + __check(child, "duration().rate").equals(edit_rate), + __check(child, "media_reference.available_range.duration.rate" + ).equals(edit_rate), + __check(child, "media_reference.available_range.start_time.rate" + ).equals(edit_rate) + ] + if isinstance(child, otio.schema.Transition): + checks = [ + __check(child, "duration().rate").equals(edit_rate), + __check(child, "metadata['AAF']['PointList']"), + __check(child, "metadata['AAF']['OperationGroup']['Operation']" + "['DataDefinition']['Name']"), + __check(child, "metadata['AAF']['OperationGroup']['Operation']" + "['Description']"), + __check(child, "metadata['AAF']['OperationGroup']['Operation']" + "['Name']"), + __check(child, "metadata['AAF']['CutPoint']") + ] + all_checks.extend(checks) + + if any(check.errors for check in all_checks): + raise AAFValidationError("\n" + "\n".join( + sum([check.errors for check in all_checks], []))) + + +def _gather_clip_mob_ids(input_otio, + prefer_file_mob_id=False, + use_empty_mob_ids=False, + **kwargs): + """ + Create dictionary of otio clips with their corresponding mob ids. 
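+ + Strategies are tried per clip in this order: clip metadata, then media + reference metadata, then the referenced AAF file itself; the first one + that yields a mob ID wins. prefer_file_mob_id reorders that list and + use_empty_mob_ids appends a generated-MobID fallback.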
+ """ + + def _from_clip_metadata(clip): + """Get the MobID from the clip.metadata.""" + return clip.metadata.get("AAF", {}).get("SourceID") + + def _from_media_reference_metadata(clip): + """Get the MobID from the media_reference.metadata.""" + return (clip.media_reference.metadata.get("AAF", {}).get("MobID") or + clip.media_reference.metadata.get("AAF", {}).get("SourceID")) + + def _from_aaf_file(clip): + """Get the MobID from the AAF file itself.""" + mob_id = None + target_url = clip.media_reference.target_url + if os.path.isfile(target_url) and target_url.endswith("aaf"): + with aaf2.open(clip.media_reference.target_url) as aaf_file: + mastermobs = list(aaf_file.content.mastermobs()) + if len(mastermobs) == 1: + mob_id = mastermobs[0].mob_id + return mob_id + + def _generate_empty_mobid(clip): + """Generate a meaningless MobID.""" + return aaf2.mobid.MobID.new() + + strategies = [ + _from_clip_metadata, + _from_media_reference_metadata, + _from_aaf_file + ] + + if prefer_file_mob_id: + strategies.remove(_from_aaf_file) + strategies.insert(0, _from_aaf_file) + + if use_empty_mob_ids: + strategies.append(_generate_empty_mobid) + + clip_mob_ids = {} + + for otio_clip in input_otio.each_clip(): + for strategy in strategies: + mob_id = strategy(otio_clip) + if mob_id: + clip_mob_ids[otio_clip] = mob_id + break + else: + raise AAFAdapterError("Cannot find mob ID for clip {}".format(otio_clip)) + + return clip_mob_ids + + +def _stackify_nested_groups(timeline): + """ + Ensure that all nesting in a given timeline is in a stack container. + This conforms to how AAF thinks about nesting: there needs + to be an outer container, even if it's just one object. + """ + copied = copy.deepcopy(timeline) + for track in copied.tracks: + for i, child in enumerate(track.each_child()): + is_nested = isinstance(child, otio.schema.Track) + is_parent_in_stack = isinstance(child.parent(), otio.schema.Stack) + if is_nested and not is_parent_in_stack: + stack = otio.schema.Stack() + track.remove(child) + stack.append(child) + track.insert(i, stack) + return copied + + +class _TrackTranscriber(object): + """ + _TrackTranscriber is the base class for the conversion of a given otio track. + + _TrackTranscriber is not meant to be used by itself. It provides the common + functionality to inherit from. We need an abstract base class because Audio and + Video are handled differently.
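+ + Concrete subclasses must provide media_kind, _master_mob_slot_id, + _create_timeline_mobslot(), default_descriptor() and + _transition_parameters(); see VideoTrackTranscriber and + AudioTrackTranscriber below.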
+ """ + __metaclass__ = abc.ABCMeta + + def __init__(self, root_file_transcriber, otio_track): + """ + _TrackTranscriber + + Args: + root_file_transcriber: the corresponding 'parent' AAFFileTranscriber object + otio_track: the given otio_track to convert + """ + self.root_file_transcriber = root_file_transcriber + self.compositionmob = root_file_transcriber.compositionmob + self.aaf_file = root_file_transcriber.aaf_file + self.otio_track = otio_track + self.edit_rate = next(self.otio_track.each_child()).duration().rate + self.timeline_mobslot, self.sequence = self._create_timeline_mobslot() + self.timeline_mobslot.name = self.otio_track.name + + def transcribe(self, otio_child): + """Transcribe otio child to corresponding AAF object""" + if isinstance(otio_child, otio.schema.Gap): + filler = self.aaf_filler(otio_child) + return filler + elif isinstance(otio_child, otio.schema.Transition): + transition = self.aaf_transition(otio_child) + return transition + elif isinstance(otio_child, otio.schema.Clip): + source_clip = self.aaf_sourceclip(otio_child) + return source_clip + elif isinstance(otio_child, otio.schema.Track): + sequence = self.aaf_sequence(otio_child) + return sequence + elif isinstance(otio_child, otio.schema.Stack): + operation_group = self.aaf_operation_group(otio_child) + return operation_group + else: + raise otio.exceptions.NotSupportedError( + "Unsupported otio child type: {}".format(type(otio_child))) + + @property + @abc.abstractmethod + def media_kind(self): + """Return the string for what kind of track this is.""" + pass + + @property + @abc.abstractmethod + def _master_mob_slot_id(self): + """ + Return the MasterMob Slot ID for the corresponding track media kind + """ + # MasterMob's and MasterMob slots have to be unique. We handle unique + # MasterMob's with _unique_mastermob(). We also need to protect against + # duplicate MasterMob slots. As of now, we mandate all picture clips to + # be created in MasterMob slot 1 and all sound clips to be created in + # MasterMob slot 2. While this is a little inadequate, it works for now + pass + + @abc.abstractmethod + def _create_timeline_mobslot(self): + """ + Return a timeline_mobslot and sequence for this track. + + In AAF, a TimelineMobSlot is a container for the Sequence. A Sequence is + analogous to an otio track. + + Returns: + Returns a tuple of (TimelineMobSlot, Sequence) + """ + pass + + @abc.abstractmethod + def default_descriptor(self, otio_clip): + pass + + @abc.abstractmethod + def _transition_parameters(self): + pass + + def aaf_filler(self, otio_gap): + """Convert an otio Gap into an aaf Filler""" + length = otio_gap.visible_range().duration.value + filler = self.aaf_file.create.Filler(self.media_kind, length) + return filler + + def aaf_sourceclip(self, otio_clip): + """Convert an otio Clip into an aaf SourceClip""" + tapemob, tapemob_slot = self._create_tapemob(otio_clip) + filemob, filemob_slot = self._create_filemob(otio_clip, tapemob, tapemob_slot) + mastermob, mastermob_slot = self._create_mastermob(otio_clip, + filemob, + filemob_slot) + + # We need both `start_time` and `duration` + # Here `start` is the offset between `first` and `in` values. 
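+        # For example (numbers hypothetical): media available from frame 100 + # with the clip cut in at frame 110 gives offset == 10, so the AAF + # SourceClip starts 10 frames into the master mob's slot.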
+ + offset = (otio_clip.visible_range().start_time - + otio_clip.available_range().start_time) + start = offset.value + length = otio_clip.visible_range().duration.value + + compmob_clip = self.compositionmob.create_source_clip( + slot_id=self.timeline_mobslot.slot_id, + start=start, + length=length, + media_kind=self.media_kind) + compmob_clip.mob = mastermob + compmob_clip.slot = mastermob_slot + compmob_clip.slot_id = mastermob_slot.slot_id + return compmob_clip + + def aaf_transition(self, otio_transition): + """Convert an otio Transition into an aaf Transition""" + if (otio_transition.transition_type != + otio.schema.transition.TransitionTypes.SMPTE_Dissolve): + print( + "Unsupported transition type: {}".format( + otio_transition.transition_type)) + return None + + transition_params, varying_value = self._transition_parameters() + + interpolation_def = self.aaf_file.create.InterpolationDef( + aaf2.misc.LinearInterp, "LinearInterp", "Linear keyframe interpolation") + self.aaf_file.dictionary.register_def(interpolation_def) + varying_value["Interpolation"].value = ( + self.aaf_file.dictionary.lookup_interperlationdef("LinearInterp")) + + pointlist = otio_transition.metadata["AAF"]["PointList"] + + c1 = self.aaf_file.create.ControlPoint() + c1["EditHint"].value = "Proportional" + c1.value = pointlist[0]["Value"] + c1.time = pointlist[0]["Time"] + + c2 = self.aaf_file.create.ControlPoint() + c2["EditHint"].value = "Proportional" + c2.value = pointlist[1]["Value"] + c2.time = pointlist[1]["Time"] + + varying_value["PointList"].extend([c1, c2]) + + op_group_metadata = otio_transition.metadata["AAF"]["OperationGroup"] + effect_id = op_group_metadata["Operation"].get("Identification") + is_time_warp = op_group_metadata["Operation"].get("IsTimeWarp") + by_pass = op_group_metadata["Operation"].get("Bypass") + number_inputs = op_group_metadata["Operation"].get("NumberInputs") + operation_category = op_group_metadata["Operation"].get("OperationCategory") + data_def_name = op_group_metadata["Operation"]["DataDefinition"]["Name"] + data_def = self.aaf_file.dictionary.lookup_datadef(str(data_def_name)) + description = op_group_metadata["Operation"]["Description"] + op_def_name = otio_transition.metadata["AAF"][ + "OperationGroup" + ]["Operation"]["Name"] + + # Create OperationDefinition + op_def = self.aaf_file.create.OperationDef(uuid.UUID(effect_id), op_def_name) + self.aaf_file.dictionary.register_def(op_def) + op_def.media_kind = self.media_kind + datadef = self.aaf_file.dictionary.lookup_datadef(self.media_kind) + op_def["IsTimeWarp"].value = is_time_warp + op_def["Bypass"].value = by_pass + op_def["NumberInputs"].value = number_inputs + op_def["OperationCategory"].value = str(operation_category) + op_def["ParametersDefined"].extend(transition_params) + op_def["DataDefinition"].value = data_def + op_def["Description"].value = str(description) + + # Create OperationGroup + length = otio_transition.duration().value + operation_group = self.aaf_file.create.OperationGroup(op_def, length) + operation_group["DataDefinition"].value = datadef + operation_group["Parameters"].append(varying_value) + + # Create Transition + transition = self.aaf_file.create.Transition(self.media_kind, length) + transition["OperationGroup"].value = operation_group + transition["CutPoint"].value = otio_transition.metadata["AAF"]["CutPoint"] + transition["DataDefinition"].value = datadef + return transition + + def aaf_sequence(self, otio_track): + """Convert an otio Track into an aaf Sequence""" + sequence = 
self.aaf_file.create.Sequence(media_kind=self.media_kind) + length = 0 + for nested_otio_child in otio_track: + result = self.transcribe(nested_otio_child) + length += result.length + sequence.components.append(result) + sequence.length = length + return sequence + + def aaf_operation_group(self, otio_stack): + """ + Create and return an OperationGroup which will contain other AAF objects + to support OTIO nesting + """ + # Create OperationDefinition + op_def = self.aaf_file.create.OperationDef(AAF_OPERATIONDEF_SUBMASTER, + "Submaster") + self.aaf_file.dictionary.register_def(op_def) + op_def.media_kind = self.media_kind + datadef = self.aaf_file.dictionary.lookup_datadef(self.media_kind) + + # These values are necessary for pyaaf2 OperationDefinitions + op_def["IsTimeWarp"].value = False + op_def["Bypass"].value = 0 + op_def["NumberInputs"].value = -1 + op_def["OperationCategory"].value = "OperationCategory_Effect" + op_def["DataDefinition"].value = datadef + + # Create OperationGroup + operation_group = self.aaf_file.create.OperationGroup(op_def) + operation_group.media_kind = self.media_kind + operation_group["DataDefinition"].value = datadef + + length = 0 + for nested_otio_child in otio_stack: + result = self.transcribe(nested_otio_child) + length += result.length + operation_group.segments.append(result) + operation_group.length = length + return operation_group + + def _create_tapemob(self, otio_clip): + """ + Return a physical sourcemob for an otio Clip based on the MobID. + + Returns: + Returns a tuple of (TapeMob, TapeMobSlot) + """ + tapemob = self.root_file_transcriber._unique_tapemob(otio_clip) + tapemob_slot = tapemob.create_empty_slot(self.edit_rate, self.media_kind) + tapemob_slot.segment.length = ( + otio_clip.media_reference.available_range.duration.value) + return tapemob, tapemob_slot + + def _create_filemob(self, otio_clip, tapemob, tapemob_slot): + """ + Return a file sourcemob for an otio Clip. Needs a tapemob and tapemob slot. + + Returns: + Returns a tuple of (FileMob, FileMobSlot) + """ + filemob = self.aaf_file.create.SourceMob() + self.aaf_file.content.mobs.append(filemob) + + filemob.descriptor = self.default_descriptor(otio_clip) + filemob_slot = filemob.create_timeline_slot(self.edit_rate) + filemob_clip = filemob.create_source_clip( + slot_id=filemob_slot.slot_id, + length=tapemob_slot.segment.length, + media_kind=tapemob_slot.segment.media_kind) + filemob_clip.mob = tapemob + filemob_clip.slot = tapemob_slot + filemob_clip.slot_id = tapemob_slot.slot_id + filemob_slot.segment = filemob_clip + return filemob, filemob_slot + + def _create_mastermob(self, otio_clip, filemob, filemob_slot): + """ + Return a mastermob for an otio Clip. Needs a filemob and filemob slot. 
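+ +        The MasterMob is shared between clips that resolve to the same mob + ID; the slot is looked up first and only created on IndexError, so + picture clips land in MasterMob slot 1 and sound clips in slot 2 (see + _master_mob_slot_id).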
+ + Returns: + Returns a tuple of (MasterMob, MasterMobSlot) + """ + mastermob = self.root_file_transcriber._unique_mastermob(otio_clip) + timecode_length = otio_clip.media_reference.available_range.duration.value + + try: + mastermob_slot = mastermob.slot_at(self._master_mob_slot_id) + except IndexError: + mastermob_slot = ( + mastermob.create_timeline_slot(edit_rate=self.edit_rate, + slot_id=self._master_mob_slot_id)) + mastermob_clip = mastermob.create_source_clip( + slot_id=mastermob_slot.slot_id, + length=timecode_length, + media_kind=self.media_kind) + mastermob_clip.mob = filemob + mastermob_clip.slot = filemob_slot + mastermob_clip.slot_id = filemob_slot.slot_id + mastermob_slot.segment = mastermob_clip + return mastermob, mastermob_slot + + +class VideoTrackTranscriber(_TrackTranscriber): + """Video track kind specialization of TrackTranscriber.""" + + @property + def media_kind(self): + return "picture" + + @property + def _master_mob_slot_id(self): + return 1 + + def _create_timeline_mobslot(self): + """ + Create a Sequence container (TimelineMobSlot) and Sequence. + + TimelineMobSlot --> Sequence + """ + timeline_mobslot = self.compositionmob.create_timeline_slot( + edit_rate=self.edit_rate) + sequence = self.aaf_file.create.Sequence(media_kind=self.media_kind) + timeline_mobslot.segment = sequence + return timeline_mobslot, sequence + + def default_descriptor(self, otio_clip): + # TODO: Determine whether these values are correct; if so, maybe + # they should come from the AAF metadata + descriptor = self.aaf_file.create.CDCIDescriptor() + descriptor["ComponentWidth"].value = 8 + descriptor["HorizontalSubsampling"].value = 2 + descriptor["ImageAspectRatio"].value = "16/9" + descriptor["StoredWidth"].value = 1920 + descriptor["StoredHeight"].value = 1080 + descriptor["FrameLayout"].value = "FullFrame" + descriptor["VideoLineMap"].value = [42, 0] + descriptor["SampleRate"].value = 24 + descriptor["Length"].value = 1 + return descriptor + + def _transition_parameters(self): + """ + Return video transition parameters + """ + # Create ParameterDef for AvidParameterByteOrder + byteorder_typedef = self.aaf_file.dictionary.lookup_typedef("aafUInt16") + param_byteorder = self.aaf_file.create.ParameterDef( + AAF_PARAMETERDEF_AVIDPARAMETERBYTEORDER, + "AvidParameterByteOrder", + "", + byteorder_typedef) + self.aaf_file.dictionary.register_def(param_byteorder) + + # Create ParameterDef for AvidEffectID + avid_effect_typedef = self.aaf_file.dictionary.lookup_typedef("AvidBagOfBits") + param_effect_id = self.aaf_file.create.ParameterDef( + AAF_PARAMETERDEF_AVIDEFFECTID, + "AvidEffectID", + "", + avid_effect_typedef) + self.aaf_file.dictionary.register_def(param_effect_id) + + # Create ParameterDef for AFX_FG_KEY_OPACITY_U + opacity_param_def = self.aaf_file.dictionary.lookup_typedef("Rational") + opacity_param = self.aaf_file.create.ParameterDef( + AAF_PARAMETERDEF_AFX_FG_KEY_OPACITY_U, + "AFX_FG_KEY_OPACITY_U", + "", + opacity_param_def) + self.aaf_file.dictionary.register_def(opacity_param) + + # Create VaryingValue + opacity_u = self.aaf_file.create.VaryingValue() + opacity_u.parameterdef = self.aaf_file.dictionary.lookup_parameterdef( + "AFX_FG_KEY_OPACITY_U") + opacity_u["VVal_Extrapolation"].value = AAF_VVAL_EXTRAPOLATION_ID + opacity_u["VVal_FieldCount"].value = 1 + + return [param_byteorder, param_effect_id], opacity_u + + +class AudioTrackTranscriber(_TrackTranscriber): + """Audio track kind specialization of TrackTranscriber.""" + + @property + def media_kind(self): + return "sound" +
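+    # Note: aaf_sourceclip() below also attaches a constant Pan automation + # curve (two control points with value 0) to the pan OperationGroup that + # _create_timeline_mobslot() wraps around this track's Sequence.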
+ @property + def _master_mob_slot_id(self): + return 2 + + def aaf_sourceclip(self, otio_clip): + # Parameter Definition + typedef = self.aaf_file.dictionary.lookup_typedef("Rational") + param_def = self.aaf_file.create.ParameterDef(AAF_PARAMETERDEF_PAN, + "Pan", + "Pan", + typedef) + self.aaf_file.dictionary.register_def(param_def) + interp_def = self.aaf_file.create.InterpolationDef(aaf2.misc.LinearInterp, + "LinearInterp", + "LinearInterp") + self.aaf_file.dictionary.register_def(interp_def) + # PointList + length = otio_clip.duration().value + c1 = self.aaf_file.create.ControlPoint() + c1["ControlPointSource"].value = 2 + c1["Time"].value = aaf2.rational.AAFRational("0/{}".format(length)) + c1["Value"].value = 0 + c2 = self.aaf_file.create.ControlPoint() + c2["ControlPointSource"].value = 2 + c2["Time"].value = aaf2.rational.AAFRational("{}/{}".format(length - 1, length)) + c2["Value"].value = 0 + varying_value = self.aaf_file.create.VaryingValue() + varying_value.parameterdef = param_def + varying_value["Interpolation"].value = interp_def + varying_value["PointList"].extend([c1, c2]) + opgroup = self.timeline_mobslot.segment + opgroup.parameters.append(varying_value) + + return super(AudioTrackTranscriber, self).aaf_sourceclip(otio_clip) + + def _create_timeline_mobslot(self): + """ + Create a Sequence container (TimelineMobSlot) and Sequence. + Sequence needs to be in an OperationGroup. + + TimelineMobSlot --> OperationGroup --> Sequence + """ + # TimelineMobSlot + timeline_mobslot = self.compositionmob.create_sound_slot( + edit_rate=self.edit_rate) + # OperationDefinition + opdef = self.aaf_file.create.OperationDef(AAF_OPERATIONDEF_MONOAUDIOPAN, + "Audio Pan") + opdef.media_kind = self.media_kind + opdef["NumberInputs"].value = 1 + self.aaf_file.dictionary.register_def(opdef) + # OperationGroup + total_length = sum([t.duration().value for t in self.otio_track]) + opgroup = self.aaf_file.create.OperationGroup(opdef) + opgroup.media_kind = self.media_kind + opgroup.length = total_length + timeline_mobslot.segment = opgroup + # Sequence + sequence = self.aaf_file.create.Sequence(media_kind=self.media_kind) + sequence.length = total_length + opgroup.segments.append(sequence) + return timeline_mobslot, sequence + + def default_descriptor(self, otio_clip): + descriptor = self.aaf_file.create.PCMDescriptor() + descriptor["AverageBPS"].value = 96000 + descriptor["BlockAlign"].value = 2 + descriptor["QuantizationBits"].value = 16 + descriptor["AudioSamplingRate"].value = 48000 + descriptor["Channels"].value = 1 + descriptor["SampleRate"].value = 48000 + descriptor["Length"].value = ( + otio_clip.media_reference.available_range.duration.value) + return descriptor + + def _transition_parameters(self): + """ + Return audio transition parameters + """ + # Create ParameterDef for ParameterDef_Level + def_level_typedef = self.aaf_file.dictionary.lookup_typedef("Rational") + param_def_level = self.aaf_file.create.ParameterDef(AAF_PARAMETERDEF_LEVEL, + "ParameterDef_Level", + "", + def_level_typedef) + self.aaf_file.dictionary.register_def(param_def_level) + + # Create VaryingValue + level = self.aaf_file.create.VaryingValue() + level.parameterdef = ( + self.aaf_file.dictionary.lookup_parameterdef("ParameterDef_Level")) + + return [param_def_level], level + + +class __check(object): + """ + __check is a private helper class that safely gets values given to check + for existence and equality + """ + + def __init__(self, obj, tokenpath): + self.orig = obj + self.value = obj + self.errors = [] + 
self.tokenpath = tokenpath + try: + for token in re.split(r"[\.\[]", tokenpath): + if token.endswith("()"): + self.value = getattr(self.value, token.replace("()", ""))() + elif "]" in token: + self.value = self.value[token.strip("[]'\"")] + else: + self.value = getattr(self.value, token) + except Exception as e: + self.value = None + self.errors.append("{}{} {}.{} does not exist, {}".format( + self.orig.name if hasattr(self.orig, "name") else "", + type(self.orig), + type(self.orig).__name__, + self.tokenpath, e)) + + def equals(self, val): + """Check if the retrieved value is equal to a given value.""" + if self.value is not None and self.value != val: + self.errors.append( + "{}{} {}.{} not equal to {} (expected) != {} (actual)".format( + self.orig.name if hasattr(self.orig, "name") else "", + type(self.orig), + type(self.orig).__name__, self.tokenpath, val, self.value)) + return self diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/advanced_authoring_format.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/advanced_authoring_format.py new file mode 100644 index 00000000000..6c21ea3e55d --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/advanced_authoring_format.py @@ -0,0 +1,979 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""OpenTimelineIO Advanced Authoring Format (AAF) Adapter + +Depending on if/where PyAAF is installed, you may need to set this env var: + OTIO_AAF_PYTHON_LIB - should point at the PyAAF module. +""" + +import os +import sys +import numbers +import copy +from collections import Iterable +import opentimelineio as otio + +lib_path = os.environ.get("OTIO_AAF_PYTHON_LIB") +if lib_path and lib_path not in sys.path: + sys.path.insert(0, lib_path) + +import aaf2 # noqa: E402 +import aaf2.content # noqa: E402 +import aaf2.mobs # noqa: E402 +import aaf2.components # noqa: E402 +import aaf2.core # noqa: E402 +from opentimelineio_contrib.adapters.aaf_adapter import aaf_writer # noqa: E402 + + +debug = False +__names = set() + + +def _get_parameter(item, parameter_name): + values = dict((value.name, value) for value in item.parameters.value) + return values.get(parameter_name) + + +def _get_name(item): + if isinstance(item, aaf2.components.SourceClip): + try: + return item.mob.name or "Untitled SourceClip" + except AttributeError: + # Some AAFs produce this error: + # RuntimeError: failed with [-2146303738]: mob not found + return "SourceClip Missing Mob?" 
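+    # Otherwise prefer the item's own name and fall back to its class name
+    # (e.g. an unnamed Sequence is transcribed as "Sequence").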
+ if hasattr(item, 'name'): + name = item.name + if name: + return name + return _get_class_name(item) + + +def _get_class_name(item): + if hasattr(item, "class_name"): + return item.class_name + else: + return item.__class__.__name__ + + +def _transcribe_property(prop): + # XXX: The unicode type doesn't exist in Python 3 (all strings are unicode) + # so we have to use type(u"") which works in both Python 2 and 3. + if isinstance(prop, (str, type(u""), numbers.Integral, float)): + return prop + + elif isinstance(prop, list): + result = {} + for child in prop: + if hasattr(child, "name") and hasattr(child, "value"): + result[child.name] = _transcribe_property(child.value) + else: + # @TODO: There may be more properties that we might want also. + # If you want to see what is being skipped, turn on debug. + if debug: + debug_message = \ + "Skipping unrecognized property: {} of parent {}" + print(debug_message.format(child, prop)) + return result + elif hasattr(prop, "properties"): + result = {} + for child in prop.properties(): + result[child.name] = _transcribe_property(child.value) + return result + else: + return str(prop) + + +def _find_timecode_mobs(item): + mobs = [item.mob] + + for c in item.walk(): + if isinstance(c, aaf2.components.SourceClip): + mob = c.mob + if mob: + mobs.append(mob) + else: + continue + else: + # This could be 'EssenceGroup', 'Pulldown' or other segment + # subclasses + # See also: https://jira.pixar.com/browse/SE-3457 + # For example: + # An EssenceGroup is a Segment that has one or more + # alternate choices, each of which represent different variations + # of one actual piece of content. + # According to the AAF Object Specification and Edit Protocol + # documents: + # "Typically the different representations vary in essence format, + # compression, or frame size. The application is responsible for + # choosing the appropriate implementation of the essence." + # It also says they should all have the same length, but + # there might be nested Sequences inside which we're not attempting + # to handle here (yet). We'll need a concrete example to ensure + # we're doing the right thing. + # TODO: Is the Timecode for an EssenceGroup correct? + # TODO: Try CountChoices() and ChoiceAt(i) + # For now, lets just skip it. + continue + + return mobs + + +def _extract_timecode_info(mob): + """Given a mob with a single timecode slot, return the timecode and length + in that slot as a tuple + """ + timecodes = [slot.segment for slot in mob.slots + if isinstance(slot.segment, aaf2.components.Timecode)] + + if len(timecodes) == 1: + timecode = timecodes[0] + timecode_start = timecode.getvalue('Start') + timecode_length = timecode.getvalue('Length') + + if timecode_start is None or timecode_length is None: + raise otio.exceptions.NotSupportedError( + "Unexpected timecode value(s) in mob named: `{}`." + " `Start`: {}, `Length`: {}".format(mob.name, + timecode_start, + timecode_length) + ) + + return timecode_start, timecode_length + elif len(timecodes) > 1: + raise otio.exceptions.NotSupportedError( + "Error: mob has more than one timecode slots, this is not" + " currently supported by the AAF adapter. found: {} slots, " + " mob name is: '{}'".format(len(timecodes), mob.name) + ) + else: + return None + + +def _add_child(parent, child, source): + if child is None: + if debug: + print("Adding null child? 
{}".format(source)) + elif isinstance(child, otio.schema.Marker): + parent.markers.append(child) + else: + parent.append(child) + + +def _transcribe(item, parents, editRate, masterMobs): + result = None + metadata = {} + + # First lets grab some standard properties that are present on + # many types of AAF objects... + metadata["Name"] = _get_name(item) + metadata["ClassName"] = _get_class_name(item) + + # Some AAF objects (like TimelineMobSlot) have an edit rate + # which should be used for all of the object's children. + # We will pass it on to any recursive calls to _transcribe() + if hasattr(item, "edit_rate"): + editRate = float(item.edit_rate) + + if isinstance(item, aaf2.components.Component): + metadata["Length"] = item.length + + if isinstance(item, aaf2.core.AAFObject): + for prop in item.properties(): + if hasattr(prop, 'name') and hasattr(prop, 'value'): + key = str(prop.name) + value = prop.value + metadata[key] = _transcribe_property(value) + + # Now we will use the item's class to determine which OTIO type + # to transcribe into. Note that the order of this if/elif/... chain + # is important, because the class hierarchy of AAF objects is more + # complex than OTIO. + + if isinstance(item, aaf2.content.ContentStorage): + result = otio.schema.SerializableCollection() + + # Gather all the Master Mobs, so we can find them later by MobID + # when we parse the SourceClips in the composition + if masterMobs is None: + masterMobs = {} + for mob in item.mastermobs(): + child = _transcribe(mob, parents + [item], editRate, masterMobs) + if child is not None: + mobID = child.metadata.get("AAF", {}).get("MobID") + masterMobs[mobID] = child + + for mob in item.compositionmobs(): + child = _transcribe(mob, parents + [item], editRate, masterMobs) + _add_child(result, child, mob) + + elif isinstance(item, aaf2.mobs.Mob): + result = otio.schema.Timeline() + + for slot in item.slots: + track = _transcribe(slot, parents + [item], editRate, masterMobs) + _add_child(result.tracks, track, slot) + + # Use a heuristic to find the starting timecode from + # this track and use it for the Timeline's global_start_time + start_time = _find_timecode_track_start(track) + if start_time: + result.global_start_time = start_time + + elif isinstance(item, aaf2.components.SourceClip): + result = otio.schema.Clip() + + # Evidently the last mob is the one with the timecode + mobs = _find_timecode_mobs(item) + # Get the Timecode start and length values + last_mob = mobs[-1] if mobs else None + timecode_info = _extract_timecode_info(last_mob) if last_mob else None + + source_start = int(metadata.get("StartTime", "0")) + source_length = item.length + media_start = source_start + media_length = item.length + + if timecode_info: + media_start, media_length = timecode_info + source_start += media_start + + # The goal here is to find a source range. Actual editorial opinions are found on SourceClips in the + # CompositionMobs. To figure out whether this clip is directly in the CompositionMob, we detect if our + # parent mobs are only CompositionMobs. If they were anything else - a MasterMob, a SourceMob, we would + # know that this is in some indirect relationship. 
+
+        parent_mobs = filter(lambda parent: isinstance(parent, aaf2.mobs.Mob), parents)
+        is_directly_in_composition = all(isinstance(mob, aaf2.mobs.CompositionMob) for mob in parent_mobs)
+        if is_directly_in_composition:
+            result.source_range = otio.opentime.TimeRange(
+                otio.opentime.RationalTime(source_start, editRate),
+                otio.opentime.RationalTime(source_length, editRate)
+            )
+
+        # The goal here is to find an available range. Media ranges are stored in the related MasterMob, and there
+        # should only be one - hence the name "Master" mob. Somewhere down our chain (either a child or our parents)
+        # is a MasterMob. For SourceClips in the CompositionMob, it is our child. For everything else, it is a
+        # previously encountered parent. Find the MasterMob in our chain, and then extract the information from that.
+        child_mastermob = item.mob if isinstance(item.mob, aaf2.mobs.MasterMob) else None
+        parent_mastermobs = [parent for parent in parents if isinstance(parent, aaf2.mobs.MasterMob)]
+        # Use the first parent MasterMob, if one or more were found
+        parent_mastermob = parent_mastermobs[0] if parent_mastermobs else None
+        mastermob = child_mastermob or parent_mastermob or None
+
+        if mastermob:
+            media = otio.schema.MissingReference()
+            media.available_range = otio.opentime.TimeRange(
+                otio.opentime.RationalTime(media_start, editRate),
+                otio.opentime.RationalTime(media_length, editRate)
+            )
+            # copy the metadata from the master into the media_reference
+            mastermob_child = masterMobs.get(str(mastermob.mob_id))
+            media.metadata["AAF"] = mastermob_child.metadata.get("AAF", {})
+            result.media_reference = media
+
+    elif isinstance(item, aaf2.components.Transition):
+        result = otio.schema.Transition()
+
+        # Does AAF support anything else?
+        result.transition_type = otio.schema.TransitionTypes.SMPTE_Dissolve
+
+        # Extract value and time attributes of both ControlPoints used for
+        # creating AAF Transition objects
+        varying_value = None
+        for param in item.getvalue('OperationGroup').parameters:
+            if isinstance(param, aaf2.misc.VaryingValue):
+                varying_value = param
+                break
+
+        if varying_value is not None:
+            for control_point in varying_value.getvalue('PointList'):
+                value = control_point.value
+                time = control_point.time
+                metadata.setdefault('PointList', []).append({'Value': value,
+                                                             'Time': time})
+
+        in_offset = int(metadata.get("CutPoint", "0"))
+        out_offset = item.length - in_offset
+        result.in_offset = otio.opentime.RationalTime(in_offset, editRate)
+        result.out_offset = otio.opentime.RationalTime(out_offset, editRate)
+
+    elif isinstance(item, aaf2.components.Filler):
+        result = otio.schema.Gap()
+
+        length = item.length
+        result.source_range = otio.opentime.TimeRange(
+            otio.opentime.RationalTime(0, editRate),
+            otio.opentime.RationalTime(length, editRate)
+        )
+
+    elif isinstance(item, aaf2.components.NestedScope):
+        # TODO: Is this the right class?
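+        # (In AAF a NestedScope holds parallel slots whose last slot is the
+        # scope's output, which maps most closely onto an OTIO Stack.)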
+        result = otio.schema.Stack()
+
+        for slot in item.slots:
+            child = _transcribe(slot, parents + [item], editRate, masterMobs)
+            _add_child(result, child, slot)
+
+    elif isinstance(item, aaf2.components.Sequence):
+        result = otio.schema.Track()
+
+        for component in item.components:
+            child = _transcribe(component, parents + [item], editRate, masterMobs)
+            _add_child(result, child, component)
+
+    elif isinstance(item, aaf2.components.OperationGroup):
+        result = _transcribe_operation_group(
+            item, parents, metadata, editRate, masterMobs
+        )
+
+    elif isinstance(item, aaf2.mobslots.TimelineMobSlot):
+        result = otio.schema.Track()
+
+        child = _transcribe(item.segment, parents + [item], editRate, masterMobs)
+        _add_child(result, child, item.segment)
+
+    elif isinstance(item, aaf2.mobslots.MobSlot):
+        result = otio.schema.Track()
+
+        child = _transcribe(item.segment, parents + [item], editRate, masterMobs)
+        _add_child(result, child, item.segment)
+
+    elif isinstance(item, aaf2.components.Timecode):
+        pass
+
+    elif isinstance(item, aaf2.components.Pulldown):
+        pass
+
+    elif isinstance(item, aaf2.components.EdgeCode):
+        pass
+
+    elif isinstance(item, aaf2.components.ScopeReference):
+        # TODO: is this like FILLER?
+
+        result = otio.schema.Gap()
+
+        length = item.length
+        result.source_range = otio.opentime.TimeRange(
+            otio.opentime.RationalTime(0, editRate),
+            otio.opentime.RationalTime(length, editRate)
+        )
+
+    elif isinstance(item, aaf2.components.DescriptiveMarker):
+
+        # Markers come in on their own separate Track.
+        # TODO: We should consolidate them onto the same track(s) as the clips
+        # result = otio.schema.Marker()
+        pass
+
+    elif isinstance(item, aaf2.components.Selector):
+        # If you mute a clip in media composer, it becomes one of these in the
+        # AAF.
+        result = _transcribe(
+            item.getvalue("Selected"),
+            parents + [item],
+            editRate,
+            masterMobs
+        )
+
+        alternates = [
+            _transcribe(alt, parents + [item], editRate, masterMobs)
+            for alt in item.getvalue("Alternates")
+        ]
+
+        # muted case -- if there is only one item it's muted, otherwise it's
+        # a multi cam thing
+        if alternates and len(alternates) == 1:
+            metadata['muted_clip'] = True
+            result.name = str(alternates[0].name) + "_MUTED"
+
+        metadata['alternates'] = alternates
+
+    # @TODO: There are a bunch of other AAF object types that we will
+    # likely need to add support for. I'm leaving this code here to help
+    # future efforts to extract the useful information out of these.
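+    # (Note that the sketches below target the older PyAAF 1.x API
+    # ("aaf.*" / "pyaaf.Ax*"), not the aaf2 module imported above, so they
+    # are reference material only and will not run as-is.)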
+ + # elif isinstance(item, aaf.storage.File): + # self.extendChildItems([item.header]) + + # elif isinstance(item, aaf.storage.Header): + # self.extendChildItems([item.storage()]) + # self.extendChildItems([item.dictionary()]) + + # elif isinstance(item, aaf.dictionary.Dictionary): + # l = [] + # l.append(DummyItem(list(item.class_defs()), 'ClassDefs')) + # l.append(DummyItem(list(item.codec_defs()), 'CodecDefs')) + # l.append(DummyItem(list(item.container_defs()), 'ContainerDefs')) + # l.append(DummyItem(list(item.data_defs()), 'DataDefs')) + # l.append(DummyItem(list(item.interpolation_defs()), + # 'InterpolationDefs')) + # l.append(DummyItem(list(item.klvdata_defs()), 'KLVDataDefs')) + # l.append(DummyItem(list(item.operation_defs()), 'OperationDefs')) + # l.append(DummyItem(list(item.parameter_defs()), 'ParameterDefs')) + # l.append(DummyItem(list(item.plugin_defs()), 'PluginDefs')) + # l.append(DummyItem(list(item.taggedvalue_defs()), 'TaggedValueDefs')) + # l.append(DummyItem(list(item.type_defs()), 'TypeDefs')) + # self.extendChildItems(l) + # + # elif isinstance(item, pyaaf.AxSelector): + # self.extendChildItems(list(item.EnumAlternateSegments())) + # + # elif isinstance(item, pyaaf.AxScopeReference): + # #print item, item.GetRelativeScope(),item.GetRelativeSlot() + # pass + # + # elif isinstance(item, pyaaf.AxEssenceGroup): + # segments = [] + # + # for i in xrange(item.CountChoices()): + # choice = item.GetChoiceAt(i) + # segments.append(choice) + # self.extendChildItems(segments) + # + # elif isinstance(item, pyaaf.AxProperty): + # self.properties['Value'] = str(item.GetValue()) + + elif isinstance(item, Iterable): + result = otio.schema.SerializableCollection() + for child in item: + result.append( + _transcribe( + child, + parents + [item], + editRate, + masterMobs + ) + ) + else: + # For everything else, we just ignore it. + # To see what is being ignored, turn on the debug flag + if debug: + print("SKIPPING: {}: {} -- {}".format(type(item), item, result)) + + # Did we get anything? If not, we're done + if result is None: + return None + + # Okay, now we've turned the AAF thing into an OTIO result + # There's a bit more we can do before we're ready to return the result. + + # If we didn't get a name yet, use the one we have in metadata + if result.name is None: + result.name = metadata["Name"] + + # Attach the AAF metadata + if not result.metadata: + result.metadata = {} + result.metadata["AAF"] = metadata + + # Double check that we got the length we expected + if isinstance(result, otio.core.Item): + length = metadata.get("Length") + if ( + length + and result.source_range is not None + and result.source_range.duration.value != length + ): + raise otio.exceptions.OTIOError( + "Wrong duration? {} should be {} in {}".format( + result.source_range.duration.value, + length, + result + ) + ) + + # Did we find a Track? + if isinstance(result, otio.schema.Track): + # Try to figure out the kind of Track it is + if hasattr(item, 'media_kind'): + media_kind = str(item.media_kind) + result.metadata["AAF"]["MediaKind"] = media_kind + if media_kind == "Picture": + result.kind = otio.schema.TrackKind.Video + elif media_kind in ("SoundMasterTrack", "Sound"): + result.kind = otio.schema.TrackKind.Audio + else: + # Timecode, Edgecode, others? + result.kind = None + + # Done! + return result + + +def _find_timecode_track_start(track): + # See if we can find a starting timecode in here... + aaf_metadata = track.metadata.get("AAF", {}) + + # Is this a Timecode track? 
if aaf_metadata.get("MediaKind") == "Timecode":
+        edit_rate = aaf_metadata.get("EditRate", "0")
+        fps = aaf_metadata.get("Segment", {}).get("FPS", 0)
+        start = aaf_metadata.get("Segment", {}).get("Start", "0")
+
+        # Often there are several timecode tracks, so
+        # we use a heuristic to only pay attention to Timecode
+        # tracks with an FPS that matches the edit rate.
+        if edit_rate == str(fps):
+            return otio.opentime.RationalTime(
+                value=int(start),
+                rate=float(edit_rate)
+            )
+
+    # We didn't find anything useful
+    return None
+
+
+def _transcribe_linear_timewarp(item, parameters):
+    # this is a linear time warp
+    effect = otio.schema.LinearTimeWarp()
+
+    offset_map = _get_parameter(item, 'PARAM_SPEED_OFFSET_MAP_U')
+
+    # If we have a LinearInterp with just 2 control points, then
+    # we can compute the time_scalar. Note that the SpeedRatio is
+    # NOT correct in many AAFs - we aren't sure why, but luckily we
+    # can compute the correct value this way.
+    points = offset_map.get("PointList")
+    if len(points) > 2:
+        # This is something complicated... try the fancy version
+        return _transcribe_fancy_timewarp(item, parameters)
+    elif (
+        len(points) == 2
+        and float(points[0].time) == 0
+        and float(points[0].value) == 0
+    ):
+        # With just two points, we can compute the slope
+        effect.time_scalar = float(points[1].value) / float(points[1].time)
+    else:
+        # Fall back to the SpeedRatio if we didn't understand the points
+        ratio = parameters.get("SpeedRatio")
+        if ratio == str(item.length):
+            # If the SpeedRatio == the length, this is a freeze frame
+            effect.time_scalar = 0
+        elif '/' in ratio:
+            numerator, denominator = map(float, ratio.split('/'))
+            # OTIO time_scalar is 1/x from AAF's SpeedRatio
+            # (e.g. a SpeedRatio of "50/100" becomes a time_scalar of 2.0)
+            effect.time_scalar = denominator / numerator
+        else:
+            effect.time_scalar = 1.0 / float(ratio)
+
+    # Is this a freeze frame?
+    if effect.time_scalar == 0:
+        # Note: we might end up here if any of the code paths above
+        # produced a 0 time_scalar.
+        # Use the FreezeFrame class instead of LinearTimeWarp
+        effect = otio.schema.FreezeFrame()
+
+    return effect
+
+
+def _transcribe_fancy_timewarp(item, parameters):
+
+    # For now, this is an unsupported time effect...
+    effect = otio.schema.TimeEffect()
+    effect.effect_name = None  # Unsupported
+    effect.name = item.get("Name")
+
+    return effect
+
+    # TODO: Here is some sample code that pulls out the full
+    # details of a non-linear speed map.
+
+    # speed_map = item.parameter['PARAM_SPEED_MAP_U']
+    # offset_map = item.parameter['PARAM_SPEED_OFFSET_MAP_U']
+    # Also? PARAM_OFFSET_MAP_U (without the word "SPEED" in it?)
+ # print(speed_map['PointList'].value) + # print(speed_map.count()) + # print(speed_map.interpolation_def().name) + # + # for p in speed_map.points(): + # print(" ", float(p.time), float(p.value), p.edit_hint) + # for prop in p.point_properties(): + # print(" ", prop.name, prop.value, float(prop.value)) + # + # print(offset_map.interpolation_def().name) + # for p in offset_map.points(): + # edit_hint = p.edit_hint + # time = p.time + # value = p.value + # + # pass + # # print " ", float(p.time), float(p.value) + # + # for i in range(100): + # float(offset_map.value_at("%i/100" % i)) + # + # # Test file PARAM_SPEED_MAP_U is AvidBezierInterpolator + # # currently no implement for value_at + # try: + # speed_map.value_at(.25) + # except NotImplementedError: + # pass + # else: + # raise + + +def _transcribe_operation_group(item, parents, metadata, editRate, masterMobs): + result = otio.schema.Stack() + + operation = metadata.get("Operation", {}) + parameters = metadata.get("Parameters", {}) + result.name = operation.get("Name") + + # Trust the length that is specified in the AAF + length = metadata.get("Length") + result.source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, editRate), + otio.opentime.RationalTime(length, editRate) + ) + + # Look for speed effects... + effect = None + if operation.get("IsTimeWarp"): + if operation.get("Name") == "Motion Control": + + offset_map = _get_parameter(item, 'PARAM_SPEED_OFFSET_MAP_U') + # TODO: We should also check the PARAM_OFFSET_MAP_U which has + # an interpolation_def().name as well. + if offset_map is not None: + interpolation = offset_map.interpolation.name + else: + interpolation = None + + if interpolation == "LinearInterp": + effect = _transcribe_linear_timewarp(item, parameters) + else: + effect = _transcribe_fancy_timewarp(item, parameters) + + else: + # Unsupported time effect + effect = otio.schema.TimeEffect() + effect.effect_name = None # Unsupported + effect.name = operation.get("Name") + else: + # Unsupported effect + effect = otio.schema.Effect() + effect.effect_name = None # Unsupported + effect.name = operation.get("Name") + + if effect is not None: + result.effects.append(effect) + effect.metadata = { + "AAF": { + "Operation": operation, + "Parameters": parameters + } + } + + for segment in item.getvalue("InputSegments"): + child = _transcribe(segment, parents + [item], editRate, masterMobs) + if child: + _add_child(result, child, segment) + + return result + + +def _fix_transitions(thing): + if isinstance(thing, otio.schema.Timeline): + _fix_transitions(thing.tracks) + elif ( + isinstance(thing, otio.core.Composition) + or isinstance(thing, otio.schema.SerializableCollection) + ): + if isinstance(thing, otio.schema.Track): + for c, child in enumerate(thing): + + # Don't touch the Transitions themselves, + # only the Clips & Gaps next to them. + if not isinstance(child, otio.core.Item): + continue + + # Was the item before us a Transition? + if c > 0 and isinstance( + thing[c - 1], + otio.schema.Transition + ): + pre_trans = thing[c - 1] + + if child.source_range is None: + child.source_range = child.trimmed_range() + csr = child.source_range + child.source_range = otio.opentime.TimeRange( + start_time=csr.start_time + pre_trans.in_offset, + duration=csr.duration - pre_trans.in_offset + ) + + # Is the item after us a Transition? 
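+                # (For example, a following dissolve with an out_offset of 6
+                # frames trims 6 frames off the tail of this clip, mirroring
+                # the in_offset handling above.)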
+ if c < len(thing) - 1 and isinstance( + thing[c + 1], + otio.schema.Transition + ): + post_trans = thing[c + 1] + + if child.source_range is None: + child.source_range = child.trimmed_range() + csr = child.source_range + child.source_range = otio.opentime.TimeRange( + start_time=csr.start_time, + duration=csr.duration - post_trans.out_offset + ) + + for child in thing: + _fix_transitions(child) + + +def _simplify(thing): + if isinstance(thing, otio.schema.SerializableCollection): + if len(thing) == 1: + return _simplify(thing[0]) + else: + for c, child in enumerate(thing): + thing[c] = _simplify(child) + return thing + + elif isinstance(thing, otio.schema.Timeline): + result = _simplify(thing.tracks) + + # Only replace the Timeline's stack if the simplified result + # was also a Stack. Otherwise leave it (the contents will have + # been simplified in place). + if isinstance(result, otio.schema.Stack): + thing.tracks = result + + return thing + + elif isinstance(thing, otio.core.Composition): + # simplify our children + for c, child in enumerate(thing): + thing[c] = _simplify(child) + + # remove empty children of Stacks + if isinstance(thing, otio.schema.Stack): + for c in reversed(range(len(thing))): + child = thing[c] + if not _contains_something_valuable(child): + # TODO: We're discarding metadata... should we retain it? + del thing[c] + + # Look for Stacks within Stacks + c = len(thing) - 1 + while c >= 0: + child = thing[c] + # Is my child a Stack also? (with no effects) + if ( + not _has_effects(child) + and + ( + isinstance(child, otio.schema.Stack) + or ( + isinstance(child, otio.schema.Track) + and len(child) == 1 + and isinstance(child[0], otio.schema.Stack) + and child[0] + and isinstance(child[0][0], otio.schema.Track) + ) + ) + ): + if isinstance(child, otio.schema.Track): + child = child[0] + + # Pull the child's children into the parent + num = len(child) + children_of_child = child[:] + # clear out the ownership of 'child' + del child[:] + thing[c:c + 1] = children_of_child + + # TODO: We may be discarding metadata, should we merge it? + # TODO: Do we need to offset the markers in time? + thing.markers.extend(child.markers) + # Note: we don't merge effects, because we already made + # sure the child had no effects in the if statement above. + + c = c + num + c = c - 1 + + # skip redundant containers + if _is_redundant_container(thing): + # TODO: We may be discarding metadata here, should we merge it? + result = thing[0].deepcopy() + # TODO: Do we need to offset the markers in time? + result.markers.extend(thing.markers) + # TODO: The order of the effects is probably important... + # should they be added to the end or the front? + # Intuitively it seems like the child's effects should come before + # the parent's effects. This will need to be solidified when we + # add more effects support. 
+ result.effects.extend(thing.effects) + # Keep the parent's length, if it has one + if thing.source_range: + # make sure it has a source_range first + if not result.source_range: + try: + result.source_range = result.trimmed_range() + except otio.exceptions.CannotComputeAvailableRangeError: + result.source_range = copy.copy(thing.source_range) + # modify the duration, but leave the start_time as is + result.source_range = otio.opentime.TimeRange( + result.source_range.start_time, + thing.source_range.duration + ) + return result + + # if thing is the top level stack, all of its children must be in tracks + if isinstance(thing, otio.schema.Stack) and thing.parent() is None: + children_needing_tracks = [] + for child in thing: + if isinstance(child, otio.schema.Track): + continue + children_needing_tracks.append(child) + + for child in children_needing_tracks: + orig_index = thing.index(child) + del thing[orig_index] + new_track = otio.schema.Track() + new_track.append(child) + thing.insert(orig_index, new_track) + + return thing + + +def _has_effects(thing): + if isinstance(thing, otio.core.Item): + if len(thing.effects) > 0: + return True + + +def _is_redundant_container(thing): + + is_composition = isinstance(thing, otio.core.Composition) + if not is_composition: + return False + + has_one_child = len(thing) == 1 + if not has_one_child: + return False + + am_top_level_track = ( + type(thing) is otio.schema.Track + and type(thing.parent()) is otio.schema.Stack + and thing.parent().parent() is None + ) + + return ( + not am_top_level_track + # am a top level track but my only child is a track + or ( + type(thing) is otio.schema.Track + and type(thing[0]) is otio.schema.Track + ) + ) + + +def _contains_something_valuable(thing): + if isinstance(thing, otio.core.Item): + if len(thing.effects) > 0 or len(thing.markers) > 0: + return True + + if isinstance(thing, otio.core.Composition): + + if len(thing) == 0: + # NOT valuable because it is empty + return False + + for child in thing: + if _contains_something_valuable(child): + # valuable because this child is valuable + return True + + # none of the children were valuable, so thing is NOT valuable + return False + + if isinstance(thing, otio.schema.Gap): + # TODO: Are there other valuable things we should look for on a Gap? + return False + + # anything else is presumed to be valuable + return True + + +def read_from_file(filepath, simplify=True): + + with aaf2.open(filepath) as aaf_file: + + storage = aaf_file.content + + # Note: We're skipping: f.header + # Is there something valuable in there? + + __names.clear() + masterMobs = {} + + result = _transcribe(storage, parents=list(), editRate=None, masterMobs=masterMobs) + top = storage.toplevel() + if top: + # re-transcribe just the top-level mobs + # but use all the master mobs we found in the 1st pass + __names.clear() # reset the names back to 0 + result = _transcribe(top, parents=list(), editRate=None, masterMobs=masterMobs) + + # AAF is typically more deeply nested than OTIO. + # Lets try to simplify the structure by collapsing or removing + # unnecessary stuff. + if simplify: + result = _simplify(result) + + # OTIO represents transitions a bit different than AAF, so + # we need to iterate over them and modify the items on either side. + # Note that we do this *after* simplifying, since the structure + # may change during simplification. 
_fix_transitions(result)
+
+    return result
+
+
+def write_to_file(input_otio, filepath, **kwargs):
+    with aaf2.open(filepath, "w") as f:
+
+        timeline = aaf_writer._stackify_nested_groups(input_otio)
+
+        aaf_writer.validate_metadata(timeline)
+
+        otio2aaf = aaf_writer.AAFFileTranscriber(timeline, f, **kwargs)
+
+        if not isinstance(timeline, otio.schema.Timeline):
+            raise otio.exceptions.NotSupportedError(
+                "Currently only supporting top level Timeline")
+
+        for otio_track in timeline.tracks:
+            # Skip empty tracks, since the edit_rate is derived from their
+            # clips
+            if len(otio_track) == 0:
+                continue
+
+            transcriber = otio2aaf.track_transcriber(otio_track)
+
+            for otio_child in otio_track:
+                result = transcriber.transcribe(otio_child)
+                if result:
+                    transcriber.sequence.components.append(result)
diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/ale.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/ale.py
new file mode 100644
index 00000000000..150ed6d93d5
--- /dev/null
+++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/ale.py
@@ -0,0 +1,318 @@
+#
+# Copyright 2017 Pixar Animation Studios
+#
+# Licensed under the Apache License, Version 2.0 (the "Apache License")
+# with the following modification; you may not use this file except in
+# compliance with the Apache License and the following modification to it:
+# Section 6. Trademarks. is deleted and replaced with:
+#
+# 6. Trademarks. This License does not grant permission to use the trade
+#    names, trademarks, service marks, or product names of the Licensor
+#    and its affiliates, except as required to comply with Section 4(c) of
+#    the License and to reproduce the content of the NOTICE file.
+#
+# You may obtain a copy of the Apache License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the Apache License with the above modification is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the Apache License for the specific
+# language governing permissions and limitations under the Apache License.
+#
+
+"""OpenTimelineIO Avid Log Exchange (ALE) Adapter"""
+import re
+import opentimelineio as otio
+
+DEFAULT_VIDEO_FORMAT = '1080'
+
+
+def AVID_VIDEO_FORMAT_FROM_WIDTH_HEIGHT(width, height):
+    """Utility function to map a width and height to an Avid Project Format"""
+
+    format_map = {
+        '1080': "1080",
+        '720': "720",
+        '576': "PAL",
+        '486': "NTSC",
+    }
+    mapped = format_map.get(str(height), "CUSTOM")
+    # check for the 2K DCI 1080 format
+    if mapped == '1080' and width > 1920:
+        mapped = "CUSTOM"
+    return mapped
+
+
+class ALEParseError(otio.exceptions.OTIOError):
+    pass
+
+
+def _parse_data_line(line, columns, fps):
+    row = line.split("\t")
+
+    if len(row) < len(columns):
+        # Fill in blanks for any missing fields in this row
+        row.extend([""] * (len(columns) - len(row)))
+
+    if len(row) > len(columns):
+        raise ALEParseError("Too many values on row: " + line)
+
+    try:
+
+        # Gather all the columns into a dictionary
+        # For expected columns, like Name, Start, etc. we will pop (remove)
+        # those from metadata, leaving the rest alone.
+        metadata = dict(zip(columns, row))
+
+        clip = otio.schema.Clip()
+        clip.name = metadata.pop("Name", None)
+
+        # When looking for Start, Duration and End, they might be missing
+        # or blank. Treat None and "" as the same via: get(k,"")!=""
+        # To have a valid source range, you need Start and either Duration
+        # or End. If all three are provided, we check to make sure they match.
+        if metadata.get("Start", "") != "":
+            value = metadata.pop("Start")
+            try:
+                start = otio.opentime.from_timecode(value, fps)
+            except (ValueError, TypeError):
+                raise ALEParseError("Invalid Start timecode: {}".format(value))
+            duration = None
+            end = None
+            if metadata.get("Duration", "") != "":
+                value = metadata.pop("Duration")
+                try:
+                    duration = otio.opentime.from_timecode(value, fps)
+                except (ValueError, TypeError):
+                    raise ALEParseError("Invalid Duration timecode: {}".format(
+                        value
+                    ))
+            if metadata.get("End", "") != "":
+                value = metadata.pop("End")
+                try:
+                    end = otio.opentime.from_timecode(value, fps)
+                except (ValueError, TypeError):
+                    raise ALEParseError("Invalid End timecode: {}".format(
+                        value
+                    ))
+            if duration is None:
+                duration = end - start
+            if end is None:
+                end = start + duration
+            if end != start + duration:
+                raise ALEParseError(
+                    "Inconsistent Start, End, Duration: " + line
+                )
+            clip.source_range = otio.opentime.TimeRange(
+                start,
+                duration
+            )
+
+        if metadata.get("Source File"):
+            source = metadata.pop("Source File")
+            clip.media_reference = otio.schema.ExternalReference(
+                target_url=source
+            )
+
+        # We've pulled out the key/value pairs that we treat specially.
+        # Put the remaining key/values into clip.metadata["ALE"]
+        clip.metadata["ALE"] = metadata
+
+        return clip
+    except Exception as ex:
+        raise ALEParseError("Error parsing line: {}\n{}".format(
+            line, repr(ex)
+        ))
+
+
+def _video_format_from_metadata(clips):
+    # Look for clips with Image Size metadata set
+    max_height = 0
+    max_width = 0
+    for clip in clips:
+        fields = clip.metadata.get("ALE", {})
+        res = fields.get("Image Size", "")
+        m = re.search(r'([0-9]{1,})\s*[xX]\s*([0-9]{1,})', res)
+        if m and len(m.groups()) >= 2:
+            width = int(m.group(1))
+            height = int(m.group(2))
+            if height > max_height:
+                max_height = height
+            if width > max_width:
+                max_width = width
+
+    # We don't have any image size information, use the default
+    if max_height == 0:
+        return DEFAULT_VIDEO_FORMAT
+    else:
+        return AVID_VIDEO_FORMAT_FROM_WIDTH_HEIGHT(max_width, max_height)
+
+
+def read_from_string(input_str, fps=24):
+
+    collection = otio.schema.SerializableCollection()
+    header = {}
+    columns = []
+
+    def nextline(lines):
+        return lines.pop(0)
+
+    lines = input_str.splitlines()
+    while len(lines):
+        line = nextline(lines)
+
+        # skip blank lines
+        if line.strip() == "":
+            continue
+
+        if line.strip() == "Heading":
+            while len(lines):
+                line = nextline(lines)
+
+                if line.strip() == "":
+                    break
+
+                if "\t" not in line:
+                    raise ALEParseError("Invalid Heading line: " + line)
+
+                segments = line.split("\t")
+                while len(segments) >= 2:
+                    key, val = segments.pop(0), segments.pop(0)
+                    header[key] = val
+                if len(segments) != 0:
+                    raise ALEParseError("Invalid Heading line: " + line)
+
+        if "FPS" in header:
+            fps = float(header["FPS"])
+
+        if line.strip() == "Column":
+            if len(lines) == 0:
+                raise ALEParseError("Unexpected end of file after: " + line)
+
+            line = nextline(lines)
+            columns = line.split("\t")
+
+        if line.strip() == "Data":
+            while len(lines):
+                line = nextline(lines)
+
+                if line.strip() == "":
+                    continue
+
+                clip = _parse_data_line(line, columns, fps)
+
+                collection.append(clip)
+
+    collection.metadata["ALE"] = {
+        "header": header,
+        "columns": columns
+    }
+
+    return collection
+
+
+def write_to_string(input_otio, columns=None, fps=None, video_format=None):
+
+    # Get all the clips we're going to export
+    clips = list(input_otio.each_clip())
+
+    result = ""
+
+    result += "Heading\n"
+    header = dict(input_otio.metadata.get("ALE", {}).get("header", {}))
+
+    # Force this, since we've hard-coded tab delimiters
+    header["FIELD_DELIM"] = "TABS"
+
+    if fps is None:
+        # If we weren't given an FPS, is there one in the header metadata?
+        if "FPS" in header:
+            fps = float(header["FPS"])
+        else:
+            # Would it be better to infer this by inspecting the input clips?
+            fps = 24
+            header["FPS"] = str(fps)
+    else:
+        # Put the value we were given into the header
+        header["FPS"] = str(fps)
+
+    # Check if we have been supplied a VIDEO_FORMAT, if not let's set one
+    if video_format is None:
+        # Do we already have it in the header? If so, let's leave that as is
+        if "VIDEO_FORMAT" not in header:
+            header["VIDEO_FORMAT"] = _video_format_from_metadata(clips)
+    else:
+        header["VIDEO_FORMAT"] = str(video_format)
+
+    headers = list(header.items())
+    headers.sort()  # make the output predictable
+    for key, val in headers:
+        result += "{}\t{}\n".format(key, val)
+
+    # If the caller passed in a list of columns, use that, otherwise
+    # we need to discover the columns that should be output.
+    if columns is None:
+        # Is there a hint about the columns we want (and column ordering)
+        # at the top level?
+        columns = input_otio.metadata.get("ALE", {}).get("columns", [])
+
+        # Scan all the clips for any extra columns
+        for clip in clips:
+            fields = clip.metadata.get("ALE", {})
+            for key in fields.keys():
+                if key not in columns:
+                    columns.append(key)
+
+        # Always output these
+        for c in ["Duration", "End", "Start", "Name", "Source File"]:
+            if c not in columns:
+                columns.insert(0, c)
+
+    result += "\nColumn\n{}\n".format("\t".join(columns))
+
+    result += "\nData\n"
+
+    def val_for_column(column, clip):
+        if column == "Name":
+            return clip.name
+        elif column == "Source File":
+            if (
+                clip.media_reference and
+                hasattr(clip.media_reference, 'target_url') and
+                clip.media_reference.target_url
+            ):
+                return clip.media_reference.target_url
+            else:
+                return ""
+        elif column == "Start":
+            if not clip.source_range:
+                return ""
+            return otio.opentime.to_timecode(
+                clip.source_range.start_time, fps
+            )
+        elif column == "Duration":
+            if not clip.source_range:
+                return ""
+            return otio.opentime.to_timecode(
+                clip.source_range.duration, fps
+            )
+        elif column == "End":
+            if not clip.source_range:
+                return ""
+            return otio.opentime.to_timecode(
+                clip.source_range.end_time_exclusive(), fps
+            )
+        else:
+            return clip.metadata.get("ALE", {}).get(column)
+
+    for clip in clips:
+        row = []
+        for column in columns:
+            val = str(val_for_column(column, clip) or "")
+            val = val.replace("\t", " ")  # don't allow tabs inside a value
+            row.append(val)
+        result += "\t".join(row) + "\n"
+
+    return result
diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/burnins.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/burnins.py
new file mode 100644
index 00000000000..93741bbb146
--- /dev/null
+++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/burnins.py
@@ -0,0 +1,93 @@
+#
+# Copyright 2017 Pixar Animation Studios
+#
+# Licensed under the Apache License, Version 2.0 (the "Apache License")
+# with the following modification; you may not use this file except in
+# compliance with the Apache License and the following modification to it:
+# Section 6. Trademarks. is deleted and replaced with:
+#
+# 6. Trademarks. 
This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# +"""FFMPEG Burnins Adapter""" +import os +import sys + + +def build_burnins(input_otio): + """ + Generates the burnin objects for each clip within the otio container + + :param input_otio: OTIO container + :rtype: [ffmpeg_burnins.Burnins(), ...] + """ + + if os.path.dirname(__file__) not in sys.path: + sys.path.append(os.path.dirname(__file__)) + + import ffmpeg_burnins + key = 'burnins' + + burnins = [] + for clip in input_otio.each_clip(): + + # per clip burnin data + burnin_data = clip.media_reference.metadata.get(key) + if not burnin_data: + # otherwise default to global burnin + burnin_data = input_otio.metadata.get(key) + + if not burnin_data: + continue + + media = clip.media_reference.target_url + if media.startswith('file://'): + media = media[7:] + streams = burnin_data.get('streams') + burnins.append(ffmpeg_burnins.Burnins(media, + streams=streams)) + burnins[-1].otio_media = media + burnins[-1].otio_overwrite = burnin_data.get('overwrite') + burnins[-1].otio_args = burnin_data.get('args') + + for burnin in burnin_data.get('burnins', []): + align = burnin.pop('align') + function = burnin.pop('function') + if function == 'text': + text = burnin.pop('text') + options = ffmpeg_burnins.TextOptions() + options.update(burnin) + burnins[-1].add_text(text, align, options=options) + elif function == 'frame_number': + options = ffmpeg_burnins.FrameNumberOptions() + options.update(burnin) + burnins[-1].add_frame_numbers(align, options=options) + elif function == 'timecode': + options = ffmpeg_burnins.TimeCodeOptions() + options.update(burnin) + burnins[-1].add_timecode(align, options=options) + else: + raise RuntimeError("Unknown function '%s'" % function) + + return burnins + + +def write_to_file(input_otio, filepath): + """required OTIO function hook""" + + for burnin in build_burnins(input_otio): + burnin.render(os.path.join(filepath, burnin.otio_media), + args=burnin.otio_args, + overwrite=burnin.otio_overwrite) diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/contrib_adapters.plugin_manifest.json b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/contrib_adapters.plugin_manifest.json new file mode 100644 index 00000000000..ceaf0a3067f --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/contrib_adapters.plugin_manifest.json @@ -0,0 +1,61 @@ +{ + "OTIO_SCHEMA" : "PluginManifest.1", + "adapters": [ + { + "OTIO_SCHEMA": "Adapter.1", + "name": "fcpx_xml", + "execution_scope": "in process", + "filepath": "fcpx_xml.py", + "suffixes": ["fcpxml"] + }, + { + "OTIO_SCHEMA": "Adapter.1", + "name": "hls_playlist", + "execution_scope": "in process", + "filepath": "hls_playlist.py", + "suffixes": ["m3u8"] + }, + { + "OTIO_SCHEMA" : "Adapter.1", + "name" : "rv_session", + "execution_scope" : "in process", + 
"filepath" : "rv.py", + "suffixes" : ["rv"] + }, + { + "OTIO_SCHEMA" : "Adapter.1", + "name" : "maya_sequencer", + "execution_scope" : "in process", + "filepath" : "maya_sequencer.py", + "suffixes" : ["ma","mb"] + }, + { + "OTIO_SCHEMA" : "Adapter.1", + "name" : "ale", + "execution_scope" : "in process", + "filepath" : "ale.py", + "suffixes" : ["ale"] + }, + { + "OTIO_SCHEMA" : "Adapter.1", + "name" : "burnins", + "execution_scope" : "in process", + "filepath" : "burnins.py", + "suffixes" : [] + }, + { + "OTIO_SCHEMA" : "Adapter.1", + "name" : "AAF", + "execution_scope" : "in process", + "filepath" : "advanced_authoring_format.py", + "suffixes" : ["aaf"] + }, + { + "OTIO_SCHEMA": "Adapter.1", + "name": "xges", + "execution_scope": "in process", + "filepath": "xges.py", + "suffixes": ["xges"] + } + ] +} diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/extern_maya_sequencer.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/extern_maya_sequencer.py new file mode 100644 index 00000000000..45d77976cf5 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/extern_maya_sequencer.py @@ -0,0 +1,261 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +import os +import sys + +# deal with renaming of default library from python 2 / 3 +try: + import urlparse as urllib_parse +except ImportError: + import urllib.parse as urllib_parse + +# import maya and handle standalone mode +from maya import cmds + +try: + cmds.ls +except AttributeError: + from maya import standalone + standalone.initialize(name='python') + +import opentimelineio as otio + +# Mapping of Maya FPS Enum to rate. 
+FPS = { + 'game': 15, + 'film': 24, + 'pal': 25, + 'ntsc': 30, + 'show': 48, + 'palf': 50, + 'ntscf': 60 +} + + +def _url_to_path(url): + if url is None: + return None + + return urllib_parse.urlparse(url).path + + +def _video_url_for_shot(shot): + current_file = os.path.normpath(cmds.file(q=True, sn=True)) + return os.path.join( + os.path.dirname(current_file), + 'playblasts', + '{base_name}_{shot_name}.mov'.format( + base_name=os.path.basename(os.path.splitext(current_file)[0]), + shot_name=cmds.shot(shot, q=True, shotName=True) + ) + ) + + +def _match_existing_shot(item, existing_shots): + if existing_shots is None: + return None + + if item.media_reference.is_missing_reference: + return None + + url_path = _url_to_path(item.media_reference.target_url) + return next( + ( + shot for shot in existing_shots + if _video_url_for_shot(shot) == url_path + ), + None + ) + + +# ------------------------ +# building single track +# ------------------------ + +def _build_shot(item, track_no, track_range, existing_shot=None): + camera = None + if existing_shot is None: + camera = cmds.camera(name=item.name.split('.')[0] + '_cam')[0] + cmds.shot( + existing_shot or item.name.split('.')[0], + e=existing_shot is not None, + shotName=item.name, + track=track_no, + currentCamera=camera, + startTime=item.trimmed_range().start_time.value, + endTime=item.trimmed_range().end_time_inclusive().value, + sequenceStartTime=track_range.start_time.value, + sequenceEndTime=track_range.end_time_inclusive().value + ) + + +def _build_track(track, track_no, existing_shots=None): + for n, item in enumerate(track): + if not isinstance(item, otio.schema.Clip): + continue + + track_range = track.range_of_child_at_index(n) + if existing_shots is not None: + existing_shot = _match_existing_shot(item, existing_shots) + else: + existing_shot = None + + _build_shot(item, track_no, track_range, existing_shot) + + +def build_sequence(timeline, clean=False): + existing_shots = cmds.ls(type='shot') or [] + if clean: + cmds.delete(existing_shots) + existing_shots = [] + + tracks = [ + track for track in timeline.tracks + if track.kind == otio.schema.TrackKind.Video + ] + + for track_no, track in enumerate(reversed(tracks)): + _build_track(track, track_no, existing_shots=existing_shots) + + +def read_from_file(path, clean=True): + timeline = otio.adapters.read_from_file(path) + build_sequence(timeline, clean=clean) + + +# ----------------------- +# parsing single track +# ----------------------- + +def _get_gap(duration): + rate = FPS.get(cmds.currentUnit(q=True, time=True), 25) + gap_range = otio.opentime.TimeRange( + duration=otio.opentime.RationalTime(duration, rate) + ) + return otio.schema.Gap(source_range=gap_range) + + +def _read_shot(shot): + rate = FPS.get(cmds.currentUnit(q=True, time=True), 25) + start = int(cmds.shot(shot, q=True, startTime=True)) + end = int(cmds.shot(shot, q=True, endTime=True)) + 1 + + video_reference = otio.schema.ExternalReference( + target_url=_video_url_for_shot(shot), + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(value=start, rate=rate), + otio.opentime.RationalTime(value=end - start, rate=rate) + ) + ) + + return otio.schema.Clip( + name=cmds.shot(shot, q=True, shotName=True), + media_reference=video_reference, + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(value=start, rate=rate), + otio.opentime.RationalTime(value=end - start, rate=rate) + ) + ) + + +def _read_track(shots): + v = otio.schema.Track(kind=otio.schema.track.TrackKind.Video) + + 
last_clip_end = 0 + for shot in shots: + seq_start = int(cmds.shot(shot, q=True, sequenceStartTime=True)) + seq_end = int(cmds.shot(shot, q=True, sequenceEndTime=True)) + + # add gap if necessary + fill_time = seq_start - last_clip_end + last_clip_end = seq_end + 1 + if fill_time: + v.append(_get_gap(fill_time)) + + # add clip + v.append(_read_shot(shot)) + + return v + + +def read_sequence(): + rate = FPS.get(cmds.currentUnit(q=True, time=True), 25) + shots = cmds.ls(type='shot') or [] + per_track = {} + + for shot in shots: + track_no = cmds.shot(shot, q=True, track=True) + if track_no not in per_track: + per_track[track_no] = [] + per_track[track_no].append(shot) + + timeline = otio.schema.Timeline() + timeline.global_start_time = otio.opentime.RationalTime(0, rate) + + for track_no in reversed(sorted(per_track.keys())): + track_shots = per_track[track_no] + timeline.tracks.append(_read_track(track_shots)) + + return timeline + + +def write_to_file(path): + timeline = read_sequence() + otio.adapters.write_to_file(timeline, path) + + +def main(): + read_write_arg = sys.argv[1] + filepath = sys.argv[2] + + write = False + if read_write_arg == "write": + write = True + + if write: + # read the input OTIO off stdin + input_otio = otio.adapters.read_from_string( + sys.stdin.read(), + 'otio_json' + ) + build_sequence(input_otio, clean=True) + cmds.file(rename=filepath) + cmds.file(save=True, type="mayaAscii") + else: + cmds.file(filepath, o=True) + sys.stdout.write( + "\nOTIO_JSON_BEGIN\n" + + otio.adapters.write_to_string( + read_sequence(), + "otio_json" + ) + + "\nOTIO_JSON_END\n" + ) + + cmds.quit(force=True) + + +if __name__ == "__main__": + main() diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/extern_rv.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/extern_rv.py new file mode 100644 index 00000000000..f11295bb60a --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/extern_rv.py @@ -0,0 +1,327 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""RV External Adapter component. + +Because the rv adapter requires being run from within the RV py-interp to take +advantage of modules inside of RV, this script gets shelled out to from the +RV OTIO adapter. 
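+
+An illustrative invocation (paths assumed; normally the rv adapter shells
+out to this script for you):
+
+    $OTIO_RV_PYTHON_BIN extern_rv.py /tmp/output.rv < timeline.otio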
+ +Requires that you set the environment variables: + OTIO_RV_PYTHON_LIB - should point at the parent directory of rvSession + OTIO_RV_PYTHON_BIN - should point at py-interp from within rv +""" + +# python +import sys +import os + +# otio +import opentimelineio as otio + +# rv import +sys.path += [os.path.join(os.environ["OTIO_RV_PYTHON_LIB"], "rvSession")] +import rvSession # noqa + + +def main(): + """ entry point, should be called from the rv adapter in otio """ + + session_file = rvSession.Session() + + output_fname = sys.argv[1] + + # read the input OTIO off stdin + input_otio = otio.adapters.read_from_string(sys.stdin.read(), 'otio_json') + + result = write_otio(input_otio, session_file) + session_file.setViewNode(result) + session_file.write(output_fname) + + +# exception class @{ +class NoMappingForOtioTypeError(otio.exceptions.OTIOError): + pass +# @} + + +def write_otio(otio_obj, to_session, track_kind=None): + WRITE_TYPE_MAP = { + otio.schema.Timeline: _write_timeline, + otio.schema.Stack: _write_stack, + otio.schema.Track: _write_track, + otio.schema.Clip: _write_item, + otio.schema.Gap: _write_item, + otio.schema.Transition: _write_transition, + otio.schema.SerializableCollection: _write_collection, + } + + if type(otio_obj) in WRITE_TYPE_MAP: + return WRITE_TYPE_MAP[type(otio_obj)](otio_obj, to_session, track_kind) + + raise NoMappingForOtioTypeError( + str(type(otio_obj)) + " on object: {}".format(otio_obj) + ) + + +def _write_dissolve(pre_item, in_dissolve, post_item, to_session, track_kind=None): + rv_trx = to_session.newNode("CrossDissolve", str(in_dissolve.name)) + + rate = pre_item.trimmed_range().duration.rate + rv_trx.setProperty( + "CrossDissolve", + "", + "parameters", + "startFrame", + rvSession.gto.FLOAT, + 1.0 + ) + rv_trx.setProperty( + "CrossDissolve", + "", + "parameters", + "numFrames", + rvSession.gto.FLOAT, + int( + ( + in_dissolve.in_offset + + in_dissolve.out_offset + ).rescaled_to(rate).value + ) + ) + + rv_trx.setProperty( + "CrossDissolve", + "", + "output", + "fps", + rvSession.gto.FLOAT, + rate + ) + + pre_item_rv = write_otio(pre_item, to_session, track_kind) + rv_trx.addInput(pre_item_rv) + + post_item_rv = write_otio(post_item, to_session, track_kind) + + node_to_insert = post_item_rv + + if ( + hasattr(pre_item, "media_reference") + and pre_item.media_reference + and pre_item.media_reference.available_range + and hasattr(post_item, "media_reference") + and post_item.media_reference + and post_item.media_reference.available_range + and ( + post_item.media_reference.available_range.start_time.rate != + pre_item.media_reference.available_range.start_time.rate + ) + ): + # write a retime to make sure post_item is in the timebase of pre_item + rt_node = to_session.newNode("Retime", "transition_retime") + rt_node.setTargetFps( + pre_item.media_reference.available_range.start_time.rate + ) + + post_item_rv = write_otio(post_item, to_session, track_kind) + + rt_node.addInput(post_item_rv) + node_to_insert = rt_node + + rv_trx.addInput(node_to_insert) + + return rv_trx + + +def _write_transition( + pre_item, + in_trx, + post_item, + to_session, + track_kind=None +): + trx_map = { + otio.schema.TransitionTypes.SMPTE_Dissolve: _write_dissolve, + } + + if in_trx.transition_type not in trx_map: + return + + return trx_map[in_trx.transition_type]( + pre_item, + in_trx, + post_item, + to_session, + track_kind + ) + + +def _write_stack(in_stack, to_session, track_kind=None): + new_stack = to_session.newNode("Stack", str(in_stack.name) or "tracks") + + for seq in 
in_stack: + result = write_otio(seq, to_session, track_kind) + if result: + new_stack.addInput(result) + + return new_stack + + +def _write_track(in_seq, to_session, _=None): + new_seq = to_session.newNode("Sequence", str(in_seq.name) or "track") + + items_to_serialize = otio.algorithms.track_with_expanded_transitions( + in_seq + ) + + track_kind = in_seq.kind + + for thing in items_to_serialize: + if isinstance(thing, tuple): + result = _write_transition(*thing, to_session=to_session, + track_kind=track_kind) + elif thing.duration().value == 0: + continue + else: + result = write_otio(thing, to_session, track_kind) + + if result: + new_seq.addInput(result) + + return new_seq + + +def _write_timeline(tl, to_session, _=None): + result = write_otio(tl.tracks, to_session) + return result + + +def _write_collection(collection, to_session, track_kind=None): + results = [] + for item in collection: + result = write_otio(item, to_session, track_kind) + if result: + results.append(result) + + if results: + return results[0] + + +def _create_media_reference(item, src, track_kind=None): + if hasattr(item, "media_reference") and item.media_reference: + if isinstance(item.media_reference, otio.schema.ExternalReference): + media = [str(item.media_reference.target_url)] + + if track_kind == otio.schema.TrackKind.Audio: + # Create blank video media to accompany audio for valid source + blank = "{},start={},end={},fps={}.movieproc".format( + "blank", + item.available_range().start_time.value, + item.available_range().end_time_inclusive().value, + item.available_range().duration.rate + ) + # Inserting blank media here forces all content to only + # produce audio. We do it twice in case we look at this in + # stereo + media = [blank, blank] + media + + src.setMedia(media) + return True + + elif isinstance(item.media_reference, otio.schema.GeneratorReference): + if item.media_reference.generator_kind == "SMPTEBars": + kind = "smptebars" + src.setMedia( + [ + "{},start={},end={},fps={}.movieproc".format( + kind, + item.available_range().start_time.value, + item.available_range().end_time_inclusive().value, + item.available_range().duration.rate + ) + ] + ) + return True + + return False + + +def _write_item(it, to_session, track_kind=None): + src = to_session.newNode("Source", str(it.name) or "clip") + + src.setProperty( + "RVSourceGroup", + "source", + "attributes", + "otio_metadata", + rvSession.gto.STRING, str(it.metadata) + ) + + range_to_read = it.trimmed_range() + + if not range_to_read: + raise otio.exceptions.OTIOError( + "No valid range on clip: {0}.".format( + str(it) + ) + ) + + # because OTIO has no global concept of FPS, the rate of the duration is + # used as the rate for the range of the source. + # RationalTime.value_rescaled_to returns the time value of the object in + # time rate of the argument. 
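+    # For example (illustrative, not from the original source): a start time
+    # of RationalTime(48, 24) rescaled to a duration whose rate is 48 gives
+    # a cut-in value of 96, the same instant expressed at 48 fps.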
+ src.setCutIn( + range_to_read.start_time.value_rescaled_to( + range_to_read.duration + ) + ) + src.setCutOut( + range_to_read.end_time_inclusive().value_rescaled_to( + range_to_read.duration + ) + ) + src.setFPS(range_to_read.duration.rate) + + # if the media reference is missing + if not _create_media_reference(it, src, track_kind): + kind = "smptebars" + if isinstance(it, otio.schema.Gap): + kind = "blank" + src.setMedia( + [ + "{},start={},end={},fps={}.movieproc".format( + kind, + range_to_read.start_time.value, + range_to_read.end_time_inclusive().value, + range_to_read.duration.rate + ) + ] + ) + + return src + + +if __name__ == "__main__": + main() diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/fcpx_xml.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/fcpx_xml.py new file mode 100644 index 00000000000..e219b58a1a6 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/fcpx_xml.py @@ -0,0 +1,1182 @@ +# +# Copyright 2018 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""OpenTimelineIO Final Cut Pro X XML Adapter. """ +import os +import subprocess +from xml.etree import cElementTree +from xml.dom import minidom +from fractions import Fraction +from datetime import date + +try: + from urllib import unquote +except ImportError: + from urllib.parse import unquote + +import opentimelineio as otio + +META_NAMESPACE = "fcpx_xml" + +COMPOSABLE_ELEMENTS = ("video", "audio", "ref-clip", "asset-clip") + +FRAMERATE_FRAMEDURATION = {23.98: "1001/24000s", + 24: "25/600s", + 25: "1/25s", + 29.97: "1001/30000s", + 30: "100/3000s", + 50: "1/50s", + 59.94: "1001/60000s", + 60: "1/60s"} + + +def format_name(frame_rate, path): + """ + Helper to get the formatName used in FCP X XML format elements. This + uses ffprobe to get the frame size of the the clip at the provided path. + + Args: + frame_rate (int): The frame rate of the clip at the provided path + path (str): The path to the clip to probe + + Returns: + str: The format name. 
If empty, then ffprobe couldn't find the item + """ + + path = path.replace("file://", "") + path = unquote(path) + if not os.path.exists(path): + return "" + + try: + frame_size = subprocess.check_output( + [ + "ffprobe", + "-v", + "error", + "-select_streams", + "v:0", + "-show_entries", + "stream=height,width", + "-of", + "csv=s=x:p=0", + path + ] + ) + except (subprocess.CalledProcessError, OSError): + frame_size = "" + + if not frame_size: + return "" + + frame_size = frame_size.rstrip() + + if "1920" in frame_size: + frame_size = "1080" + + if frame_size.endswith("1280"): + frame_size = "720" + + return "FFVideoFormat{}p{}".format(frame_size, frame_rate) + + +def to_rational_time(rational_number, fps): + """ + This converts a rational number value to an otio RationalTime object + + Args: + rational_number (str): This is a rational number from an FCP X XML + fps (int): The frame rate to use for calculating the rational time + + Returns: + RationalTime: A RationalTime object + """ + + if rational_number == "0s" or rational_number is None: + frames = 0 + else: + parts = rational_number.split("/") + if len(parts) > 1: + frames = int( + float(parts[0]) / float(parts[1].replace("s", "")) * float(fps) + ) + else: + frames = int(float(parts[0].replace("s", "")) * float(fps)) + + return otio.opentime.RationalTime(frames, int(fps)) + + +def from_rational_time(rational_time): + """ + This converts a RationalTime object to a rational number as a string + + Args: + rational_time (RationalTime): a rational time object + + Returns: + str: A rational number as a string + """ + + if int(rational_time.value) == 0: + return "0s" + result = Fraction( + float(rational_time.value) / float(rational_time.rate) + ).limit_denominator() + if str(result.denominator) == "1": + return "{}s".format(result.numerator) + return "{}/{}s".format(result.numerator, result.denominator) + + +class FcpxOtio(object): + """ + This object is responsible for knowing how to convert an otio into an + FCP X XML + """ + + def __init__(self, otio_timeline): + self.otio_timeline = otio_timeline + self.fcpx_xml = cElementTree.Element("fcpxml", version="1.8") + self.resource_element = cElementTree.SubElement( + self.fcpx_xml, + "resources" + ) + if self.otio_timeline.schema_name() == "Timeline": + self.timelines = [self.otio_timeline] + else: + self.timelines = list( + self.otio_timeline.each_child( + descended_from_type=otio.schema.Timeline + ) + ) + + if len(self.timelines) > 1: + self.event_resource = cElementTree.SubElement( + self.fcpx_xml, + "event", + {"name": self._event_name()} + ) + else: + self.event_resource = self.fcpx_xml + + self.resource_count = 0 + + def to_xml(self): + """ + Convert an otio to an FCP X XML + + Returns: + str: FCPX XML content + """ + + for project in self.timelines: + top_sequence = self._stack_to_sequence(project.tracks) + + project_element = cElementTree.Element( + "project", + { + "name": project.name, + "uid": project.metadata.get("fcpx", {}).get("uid", "") + } + ) + project_element.append(top_sequence) + self.event_resource.append(project_element) + + if not self.timelines: + for clip in self._clips(): + if not clip.parent(): + self._add_asset(clip) + + for stack in self._stacks(): + ref_element = self._element_for_item( + stack, + None, + ref_only=True, + compound=True + ) + self.event_resource.append(ref_element) + child_parent_map = {c: p for p in self.fcpx_xml.iter() for c in p} + + for marker in [marker for marker in self.fcpx_xml.iter("marker")]: + parent = child_parent_map.get(marker) + 
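            # Removing and re-appending each marker moves it to the end of
+            # its parent's children; FCP X expects markers to follow the
+            # media elements they annotate (our reading of the intent; not
+            # documented upstream).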
+            marker_attribs = marker.attrib.copy()
+            parent.remove(marker)
+            cElementTree.SubElement(
+                parent,
+                "marker",
+                marker_attribs
+            )
+
+        xml = cElementTree.tostring(
+            self.fcpx_xml,
+            encoding="UTF-8",
+            method="xml"
+        )
+        dom = minidom.parseString(xml)
+        pretty = dom.toprettyxml(indent="    ")
+        return pretty.replace(
+            '<?xml version="1.0" ?>',
+            '<?xml version="1.0" encoding="UTF-8"?>\n<!DOCTYPE fcpxml>\n'
+        )
+
+    def _stack_to_sequence(self, stack, compound_clip=False):
+        format_element = self._find_or_create_format_from(stack)
+        sequence_element = cElementTree.Element(
+            "sequence",
+            {
+                "duration": self._calculate_rational_number(
+                    stack.duration().value,
+                    stack.duration().rate
+                ),
+                "format": str(format_element.get("id"))
+            }
+        )
+        spine = cElementTree.SubElement(sequence_element, "spine")
+        video_tracks = [
+            t for t in stack
+            if t.kind == otio.schema.TrackKind.Video
+        ]
+        audio_tracks = [
+            t for t in stack
+            if t.kind == otio.schema.TrackKind.Audio
+        ]
+
+        for idx, track in enumerate(video_tracks):
+            self._track_for_spine(track, idx, spine, compound_clip)
+
+        for idx, track in enumerate(audio_tracks):
+            lane_id = -(idx + 1)
+            self._track_for_spine(track, lane_id, spine, compound_clip)
+        return sequence_element
+
+    def _track_for_spine(self, track, lane_id, spine, compound):
+        for child in self._lanable_items(track.each_child()):
+            if self._item_in_compound_clip(child) and not compound:
+                continue
+            child_element = self._element_for_item(
+                child,
+                lane_id,
+                compound=compound
+            )
+            if not lane_id:
+                spine.append(child_element)
+                continue
+            if child.schema_name() == "Gap":
+                continue
+
+            parent_element = self._find_parent_element(
+                spine,
+                track.trimmed_range_of_child(child).start_time,
+                self._find_or_create_format_from(track).get("id")
+            )
+            offset = self._offset_based_on_parent(
+                child_element,
+                parent_element,
+                self._find_or_create_format_from(track).get("id")
+            )
+            child_element.set(
+                "offset",
+                from_rational_time(offset)
+            )
+
+            parent_element.append(child_element)
+        return []
+
+    def _find_parent_element(self, spine, trimmed_range, format_id):
+        for item in spine.iter():
+            if item.tag not in ("clip", "asset-clip", "gap", "ref-clip"):
+                continue
+            if item.get("lane") is not None:
+                continue
+            if item.tag == "gap" and item.find("./audio") is not None:
+                continue
+            offset = to_rational_time(
+                item.get("offset"),
+                self._frame_rate_from_element(item, format_id)
+            )
+            duration = to_rational_time(
+                item.get("duration"),
+                self._frame_rate_from_element(item, format_id)
+            )
+            total_time = offset + duration
+            if offset > trimmed_range:
+                continue
+            if total_time > trimmed_range:
+                return item
+        return None
+
+    def _offset_based_on_parent(self, child, parent, default_format_id):
+        parent_offset = to_rational_time(
+            parent.get("offset"),
+            self._frame_rate_from_element(parent, default_format_id)
+        )
+        child_offset = to_rational_time(
+            child.get("offset"),
+            self._frame_rate_from_element(child, default_format_id)
+        )
+
+        parent_start = to_rational_time(
+            parent.get("start"),
+            self._frame_rate_from_element(parent, default_format_id)
+        )
+        return (child_offset - parent_offset) + parent_start
+
+    def _frame_rate_from_element(self, element, default_format_id):
+        if element.tag == "gap":
+            format_id = default_format_id
+
+        if element.tag == "ref-clip":
+            media_element = self._media_by_id(element.get("ref"))
+            asset = media_element.find("./sequence")
+            format_id = asset.get("format")
+
+        if element.tag == "clip":
+            if element.find("./gap") is not None:
+                asset_id = element.find("./gap").find("./audio").get("ref")
+            else:
+                asset_id =
element.find("./video").get("ref") + asset = self._asset_by_id(asset_id) + format_id = asset.get("format") + + if element.tag == "asset-clip": + asset = self._asset_by_id(element.get("ref")) + format_id = asset.get("format") + + format_element = self.resource_element.find( + "./format[@id='{}']".format(format_id) + ) + total, rate = format_element.get("frameDuration").split("/") + rate = rate.replace("s", "") + return int(float(rate) / float(total)) + + def _element_for_item(self, item, lane, ref_only=False, compound=False): + element = None + duration = self._calculate_rational_number( + item.duration().value, + item.duration().rate + ) + if item.schema_name() == "Clip": + asset_id = self._add_asset(item, compound_only=compound) + element = self._element_for_clip(item, asset_id, duration, lane) + + if item.schema_name() == "Gap": + element = self._element_for_gap(item, duration) + + if item.schema_name() == "Stack": + element = self._element_for_stack(item, duration, ref_only) + + if element is None: + return None + if lane: + element.set("lane", str(lane)) + for marker in item.markers: + marker_attribs = { + "start": from_rational_time(marker.marked_range.start_time), + "duration": from_rational_time(marker.marked_range.duration), + "value": marker.name + } + marker_element = cElementTree.Element( + "marker", + marker_attribs + ) + if marker.color == otio.schema.MarkerColor.RED: + marker_element.set("completed", "0") + if marker.color == otio.schema.MarkerColor.GREEN: + marker_element.set("completed", "1") + element.append(marker_element) + return element + + def _lanable_items(self, items): + return [ + item for item in items + if item.schema_name() in ["Gap", "Stack", "Clip"] + ] + + def _element_for_clip(self, item, asset_id, duration, lane): + element = cElementTree.Element( + "clip", + { + "name": item.name, + "offset": from_rational_time( + item.trimmed_range_in_parent().start_time + ), + "duration": duration + } + ) + start = from_rational_time(item.source_range.start_time) + if start != "0s": + element.set("start", str(start)) + if item.parent().kind == otio.schema.TrackKind.Video: + cElementTree.SubElement( + element, + "video", + { + "offset": "0s", + "ref": asset_id, + "duration": self._find_asset_duration(item) + } + ) + else: + gap_element = cElementTree.SubElement( + element, + "gap", + { + "name": "Gap", + "offset": "0s", + "duration": self._find_asset_duration(item) + } + ) + audio = cElementTree.SubElement( + gap_element, + "audio", + { + "offset": "0s", + "ref": asset_id, + "duration": self._find_asset_duration(item) + } + ) + if lane: + audio.set("lane", str(lane)) + return element + + def _element_for_gap(self, item, duration): + element = cElementTree.Element( + "gap", + { + "name": "Gap", + "duration": duration, + "offset": from_rational_time( + item.trimmed_range_in_parent().start_time + ), + "start": "3600s" + } + ) + return element + + def _element_for_stack(self, item, duration, ref_only): + media_element = self._add_compound_clip(item) + asset_id = media_element.get("id") + element = cElementTree.Element( + "ref-clip", + { + "name": item.name, + "duration": duration, + "ref": str(asset_id) + } + ) + if not ref_only: + element.set( + "offset", + from_rational_time( + item.trimmed_range_in_parent().start_time + ) + ) + element.set( + "start", + from_rational_time(item.source_range.start_time) + ) + if item.parent() and item.parent().kind == otio.schema.TrackKind.Audio: + element.set("srcEnable", "audio") + return element + + def _find_asset_duration(self, item): 
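+        # Duration comes from the media reference's available range when one
+        # is present, otherwise from the trimmed item itself. Illustrative
+        # example (not in the original source): a 48-frame duration at
+        # 24 fps serializes as "2/1s".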
+ if (item.media_reference and + not item.media_reference.is_missing_reference): + return self._calculate_rational_number( + item.media_reference.available_range.duration.value, + item.media_reference.available_range.duration.rate + ) + return self._calculate_rational_number( + item.duration().value, + item.duration().rate + ) + + def _find_asset_start(self, item): + if (item.media_reference and + not item.media_reference.is_missing_reference): + return self._calculate_rational_number( + item.media_reference.available_range.start_time.value, + item.media_reference.available_range.start_time.rate + ) + return self._calculate_rational_number( + item.source_range.start_time.value, + item.source_range.start_time.rate + ) + + def _clip_format_name(self, clip): + if clip.schema_name() in ("Stack", "Track"): + return "" + if not clip.media_reference: + return "" + + if clip.media_reference.is_missing_reference: + return "" + + return format_name( + clip.duration().rate, + clip.media_reference.target_url + ) + + def _find_or_create_format_from(self, clip): + frame_duration = self._framerate_to_frame_duration( + clip.duration().rate + ) + format_element = self._format_by_frame_rate(clip.duration().rate) + if format_element is None: + format_element = cElementTree.SubElement( + self.resource_element, + "format", + { + "id": self._resource_id_generator(), + "frameDuration": frame_duration, + "name": self._clip_format_name(clip) + } + ) + if format_element.get("name", "") == "": + format_element.set("name", self._clip_format_name(clip)) + return format_element + + def _add_asset(self, clip, compound_only=False): + format_element = self._find_or_create_format_from(clip) + asset = self._create_asset_element(clip, format_element) + + if not compound_only and not self._asset_clip_by_name(clip.name): + self._create_asset_clip_element( + clip, + format_element, + asset.get("id") + ) + + if not clip.parent(): + asset.set("hasAudio", "1") + asset.set("hasVideo", "1") + return asset.get("id") + if clip.parent().kind == otio.schema.TrackKind.Audio: + asset.set("hasAudio", "1") + if clip.parent().kind == otio.schema.TrackKind.Video: + asset.set("hasVideo", "1") + return asset.get("id") + + def _create_asset_clip_element(self, clip, format_element, resource_id): + duration = self._find_asset_duration(clip) + a_clip = cElementTree.SubElement( + self.event_resource, + "asset-clip", + { + "name": clip.name, + "format": format_element.get("id"), + "ref": resource_id, + "duration": duration + } + ) + if clip.media_reference and not clip.media_reference.is_missing_reference: + fcpx_metadata = clip.media_reference.metadata.get("fcpx", {}) + note_element = self._create_note_element( + fcpx_metadata.get("note", None) + ) + keyword_elements = self._create_keyword_elements( + fcpx_metadata.get("keywords", []) + ) + metadata_element = self._create_metadata_elements( + fcpx_metadata.get("metadata", None) + ) + + if note_element is not None: + a_clip.append(note_element) + if keyword_elements: + for keyword_element in keyword_elements: + a_clip.append(keyword_element) + if metadata_element is not None: + a_clip.append(metadata_element) + + def _create_asset_element(self, clip, format_element): + target_url = self._target_url_from_clip(clip) + asset = self._asset_by_path(target_url) + if asset is not None: + return asset + + asset = cElementTree.SubElement( + self.resource_element, + "asset", + { + "name": clip.name, + "src": target_url, + "format": format_element.get("id"), + "id": self._resource_id_generator(), + "duration": 
self._find_asset_duration(clip), + "start": self._find_asset_start(clip), + "hasAudio": "0", + "hasVideo": "0" + } + ) + return asset + + def _add_compound_clip(self, item): + media_element = self._media_by_name(item.name) + if media_element is not None: + return media_element + resource_id = self._resource_id_generator() + media_element = cElementTree.SubElement( + self.resource_element, + "media", + { + "name": self._compound_clip_name(item, resource_id), + "id": resource_id + } + ) + if item.metadata.get("fcpx", {}).get("uid", False): + media_element.set("uid", item.metadata.get("fcpx", {}).get("uid")) + media_element.append(self._stack_to_sequence(item, compound_clip=True)) + return media_element + + def _stacks(self): + return self.otio_timeline.each_child( + descended_from_type=otio.schema.Stack + ) + + def _clips(self): + return self.otio_timeline.each_child( + descended_from_type=otio.schema.Clip + ) + + def _resource_id_generator(self): + self.resource_count += 1 + return "r{}".format(self.resource_count) + + def _event_name(self): + if self.otio_timeline.name: + return self.otio_timeline.name + return date.strftime(date.today(), "%m-%e-%y") + + def _asset_by_path(self, path): + return self.resource_element.find("./asset[@src='{}']".format(path)) + + def _asset_by_id(self, asset_id): + return self.resource_element.find("./asset[@id='{}']".format(asset_id)) + + def _media_by_name(self, name): + return self.resource_element.find("./media[@name='{}']".format(name)) + + def _media_by_id(self, media_id): + return self.resource_element.find("./media[@id='{}']".format(media_id)) + + def _format_by_frame_rate(self, frame_rate): + frame_duration = self._framerate_to_frame_duration(frame_rate) + return self.resource_element.find( + "./format[@frameDuration='{}']".format(frame_duration) + ) + + def _asset_clip_by_name(self, name): + return self.event_resource.find( + "./asset-clip[@name='{}']".format(name) + ) + + # -------------------- + # static methods + # -------------------- + + @staticmethod + def _framerate_to_frame_duration(framerate): + frame_duration = FRAMERATE_FRAMEDURATION.get(int(framerate), "") + if not frame_duration: + frame_duration = FRAMERATE_FRAMEDURATION.get(float(framerate), "") + return frame_duration + + @staticmethod + def _target_url_from_clip(clip): + if (clip.media_reference and + not clip.media_reference.is_missing_reference): + return clip.media_reference.target_url + return "file:///tmp/{}".format(clip.name) + + @staticmethod + def _calculate_rational_number(duration, rate): + if int(duration) == 0: + return "0s" + result = Fraction(float(duration) / float(rate)).limit_denominator() + return "{}/{}s".format(result.numerator, result.denominator) + + @staticmethod + def _compound_clip_name(compound_clip, resource_id): + if compound_clip.name: + return compound_clip.name + return "compound_clip_{}".format(resource_id) + + @staticmethod + def _item_in_compound_clip(item): + stack_count = 0 + parent = item.parent() + while parent is not None: + if parent.schema_name() == "Stack": + stack_count += 1 + parent = parent.parent() + return stack_count > 1 + + @staticmethod + def _create_metadata_elements(metadata): + if metadata is None: + return None + metadata_element = cElementTree.Element( + "metadata" + ) + for metadata_dict in metadata: + cElementTree.SubElement( + metadata_element, + "md", + { + "key": list(metadata_dict.keys())[0], + "value": list(metadata_dict.values())[0] + } + ) + return metadata_element + + @staticmethod + def 
_create_keyword_elements(keywords): + keyword_elements = [] + for keyword_dict in keywords: + keyword_elements.append( + cElementTree.Element( + "keyword", + keyword_dict + ) + ) + return keyword_elements + + @staticmethod + def _create_note_element(note): + if not note: + return None + note_element = cElementTree.Element( + "note" + ) + note_element.text = note + return note_element + + +class FcpxXml(object): + """ + This object is responsible for knowing how to convert an FCP X XML + otio into an otio timeline + """ + + def __init__(self, xml_string): + self.fcpx_xml = cElementTree.fromstring(xml_string) + self.child_parent_map = {c: p for p in self.fcpx_xml.iter() for c in p} + + def to_otio(self): + """ + Convert an FCP X XML to an otio + + Returns: + OpenTimeline: An OpenTimeline Timeline object + """ + + if self.fcpx_xml.find("./library") is not None: + return self._from_library() + if self.fcpx_xml.find("./event") is not None: + return self._from_event(self.fcpx_xml.find("./event")) + if self.fcpx_xml.find("./project") is not None: + return self._from_project(self.fcpx_xml.find("./project")) + if ((self.fcpx_xml.find("./asset-clip") is not None) or + (self.fcpx_xml.find("./ref-clip") is not None)): + return self._from_clips() + + def _from_library(self): + # We are just grabbing the first even in the project for now + return self._from_event(self.fcpx_xml.find("./library/event")) + + def _from_event(self, event_element): + container = otio.schema.SerializableCollection( + name=event_element.get("name") + ) + for project in event_element.findall("./project"): + container.append(self._from_project(project)) + return container + + def _from_project(self, project_element): + timeline = otio.schema.Timeline(name=project_element.get("name", "")) + timeline.tracks = self._squence_to_stack( + project_element.find("./sequence", {}) + ) + return timeline + + def _from_clips(self): + container = otio.schema.SerializableCollection() + if self.fcpx_xml.find("./asset-clip") is not None: + for asset_clip in self.fcpx_xml.findall("./asset-clip"): + container.append( + self._build_composable( + asset_clip, + asset_clip.get("format") + ) + ) + + if self.fcpx_xml.find("./ref-clip") is not None: + for ref_clip in self.fcpx_xml.findall("./ref-clip"): + container.append( + self._build_composable( + ref_clip, + "r1" + ) + ) + return container + + def _squence_to_stack(self, sequence_element, name="", source_range=None): + timeline_items = [] + lanes = [] + stack = otio.schema.Stack(name=name, source_range=source_range) + for element in sequence_element.iter(): + if element.tag not in COMPOSABLE_ELEMENTS: + continue + composable = self._build_composable( + element, + sequence_element.get("format") + ) + + offset, lane = self._offset_and_lane( + element, + sequence_element.get("format") + ) + + timeline_items.append( + { + "track": lane, + "offset": offset, + "composable": composable, + "audio_only": self._audio_only(element) + } + ) + + lanes.append(lane) + sorted_lanes = list(set(lanes)) + sorted_lanes.sort() + for lane in sorted_lanes: + sorted_items = self._sorted_items(lane, timeline_items) + track = otio.schema.Track( + name=lane, + kind=self._track_type(sorted_items) + ) + + for item in sorted_items: + frame_diff = ( + int(item["offset"].value) - track.duration().value + ) + if frame_diff > 0: + track.append( + self._create_gap( + 0, + frame_diff, + sequence_element.get("format") + ) + ) + track.append(item["composable"]) + stack.append(track) + return stack + + def _build_composable(self, element, 
default_format): + timing_clip = self._timing_clip(element) + source_range = self._time_range( + timing_clip, + self._format_id_for_clip(element, default_format) + ) + + if element.tag != "ref-clip": + otio_composable = otio.schema.Clip( + name=timing_clip.get("name"), + media_reference=self._reference_from_id( + element.get("ref"), + default_format + ), + source_range=source_range + ) + else: + media_element = self._compound_clip_by_id(element.get("ref")) + otio_composable = self._squence_to_stack( + media_element.find("./sequence"), + name=media_element.get("name"), + source_range=source_range + ) + + for marker in timing_clip.findall(".//marker"): + otio_composable.markers.append( + self._marker(marker, default_format) + ) + + return otio_composable + + def _marker(self, element, default_format): + if element.get("completed", None) and element.get("completed") == "1": + color = otio.schema.MarkerColor.GREEN + if element.get("completed", None) and element.get("completed") == "0": + color = otio.schema.MarkerColor.RED + if not element.get("completed", None): + color = otio.schema.MarkerColor.PURPLE + + otio_marker = otio.schema.Marker( + name=element.get("value", ""), + marked_range=self._time_range(element, default_format), + color=color + ) + return otio_marker + + def _audio_only(self, element): + if element.tag == "audio": + return True + if element.tag == "asset-clip": + asset = self._asset_by_id(element.get("ref", None)) + if asset and asset.get("hasVideo", "0") == "0": + return True + if element.tag == "ref-clip": + if element.get("srcEnable", "video") == "audio": + return True + return False + + def _create_gap(self, start_frame, number_of_frames, defualt_format): + fps = self._format_frame_rate(defualt_format) + source_range = otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime(start_frame, fps), + duration=otio.opentime.RationalTime(number_of_frames, fps) + ) + return otio.schema.Gap(source_range=source_range) + + def _timing_clip(self, clip): + while clip.tag not in ("clip", "asset-clip", "ref-clip"): + clip = self.child_parent_map.get(clip) + return clip + + def _offset_and_lane(self, clip, default_format): + clip_format_id = self._format_id_for_clip(clip, default_format) + clip = self._timing_clip(clip) + parent = self.child_parent_map.get(clip) + + parent_format_id = self._format_id_for_clip(parent, default_format) + + if parent.tag == "spine" and parent.get("lane", None): + lane = parent.get("lane") + parent = self.child_parent_map.get(parent) + spine = True + else: + lane = clip.get("lane", "0") + spine = False + + clip_offset_frames = self._number_of_frames( + clip.get("offset"), + clip_format_id + ) + + if spine: + parent_start_frames = 0 + else: + parent_start_frames = self._number_of_frames( + parent.get("start", None), + parent_format_id + ) + + parent_offset_frames = self._number_of_frames( + parent.get("offset", None), + parent_format_id + ) + + clip_offset_frames = ( + (int(clip_offset_frames) - int(parent_start_frames)) + + int(parent_offset_frames) + ) + + offset = otio.opentime.RationalTime( + clip_offset_frames, + self._format_frame_rate(clip_format_id) + ) + + return offset, lane + + def _format_id_for_clip(self, clip, default_format): + if not clip.get("ref", None) or clip.tag == "gap": + return default_format + + resource = self._asset_by_id(clip.get("ref")) + + if resource is None: + resource = self._compound_clip_by_id( + clip.get("ref") + ).find("sequence") + + return resource.get("format", default_format) + + def _reference_from_id(self, 
asset_id, default_format): + asset = self._asset_by_id(asset_id) + if not asset.get("src", ""): + return otio.schema.MissingReference() + + available_range = otio.opentime.TimeRange( + start_time=to_rational_time( + asset.get("start"), + self._format_frame_rate( + asset.get("format", default_format) + ) + ), + duration=to_rational_time( + asset.get("duration"), + self._format_frame_rate( + asset.get("format", default_format) + ) + ) + ) + asset_clip = self._assetclip_by_ref(asset_id) + metadata = {} + if asset_clip: + metadata = self._create_metadta(asset_clip) + return otio.schema.ExternalReference( + target_url=asset.get("src"), + available_range=available_range, + metadata={"fcpx": metadata} + ) + + def _create_metadta(self, item): + metadata = {} + for element in item.iter(): + if element.tag == "md": + metadata.setdefault("metadata", []).append( + {element.attrib.get("key"): element.attrib.get("value")} + ) + # metadata.update( + # {element.attrib.get("key"): element.attrib.get("value")} + # ) + if element.tag == "note": + metadata.update({"note": element.text}) + if element.tag == "keyword": + metadata.setdefault("keywords", []).append(element.attrib) + return metadata + + # -------------------- + # time helpers + # -------------------- + def _format_frame_duration(self, format_id): + media_format = self._format_by_id(format_id) + total, rate = media_format.get("frameDuration").split("/") + rate = rate.replace("s", "") + return total, rate + + def _format_frame_rate(self, format_id): + fd_total, fd_rate = self._format_frame_duration(format_id) + return int(float(fd_rate) / float(fd_total)) + + def _number_of_frames(self, time_value, format_id): + if time_value == "0s" or time_value is None: + return 0 + fd_total, fd_rate = self._format_frame_duration(format_id) + time_value = time_value.split("/") + + if len(time_value) > 1: + time_value_a, time_value_b = time_value + return int( + (float(time_value_a) / float(time_value_b.replace("s", ""))) * + (float(fd_rate) / float(fd_total)) + ) + + return int( + int(time_value[0].replace("s", "")) * + (float(fd_rate) / float(fd_total)) + ) + + def _time_range(self, element, format_id): + return otio.opentime.TimeRange( + start_time=to_rational_time( + element.get("start", "0s"), + self._format_frame_rate(format_id) + ), + duration=to_rational_time( + element.get("duration"), + self._format_frame_rate(format_id) + ) + ) + # -------------------- + # search helpers + # -------------------- + + def _asset_by_id(self, asset_id): + return self.fcpx_xml.find( + "./resources/asset[@id='{}']".format(asset_id) + ) + + def _assetclip_by_ref(self, asset_id): + event = self.fcpx_xml.find("./event") + if event is None: + return self.fcpx_xml.find("./asset-clip[@ref='{}']".format(asset_id)) + else: + return event.find("./asset-clip[@ref='{}']".format(asset_id)) + + def _format_by_id(self, format_id): + return self.fcpx_xml.find( + "./resources/format[@id='{}']".format(format_id) + ) + + def _compound_clip_by_id(self, compound_id): + return self.fcpx_xml.find( + "./resources/media[@id='{}']".format(compound_id) + ) + + # -------------------- + # static methods + # -------------------- + @staticmethod + def _track_type(lane_items): + audio_only_items = [l for l in lane_items if l["audio_only"]] + if len(audio_only_items) == len(lane_items): + return otio.schema.TrackKind.Audio + return otio.schema.TrackKind.Video + + @staticmethod + def _sorted_items(lane, otio_objects): + lane_items = [item for item in otio_objects if item["track"] == lane] + return 
sorted(lane_items, key=lambda k: k["offset"]) + + +# -------------------- +# adapter requirements +# -------------------- +def read_from_string(input_str): + """ + Necessary read method for otio adapter + + Args: + input_str (str): An FCP X XML string + + Returns: + OpenTimeline: An OpenTimeline object + """ + + return FcpxXml(input_str).to_otio() + + +def write_to_string(input_otio): + """ + Necessary write method for otio adapter + + Args: + input_otio (OpenTimeline): An OpenTimeline object + + Returns: + str: The string contents of an FCP X XML + """ + + return FcpxOtio(input_otio).to_xml() diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/ffmpeg_burnins.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/ffmpeg_burnins.py new file mode 100644 index 00000000000..28f0b97f55f --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/ffmpeg_burnins.py @@ -0,0 +1,424 @@ +# MIT License +# +# Copyright (c) 2017 Ed Caspersen +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# allcopies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +""" +This module provides an interface to allow users to easily +build out an FFMPEG command with all the correct filters +for applying text (with a background) to the rendered media. 
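+
+A hypothetical session (the file paths are placeholders, not part of this
+module) might look like:
+
+    burnins = Burnins('/path/to/source.mov')
+    burnins.add_frame_numbers(TOP_RIGHT)
+    burnins.add_timecode(TOP_CENTERED, TimeCodeOptions(fps=24))
+    burnins.render('/path/to/burned.mov', overwrite=True)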
+"""
+import os
+import sys
+import json
+from subprocess import Popen, PIPE
+from PIL import ImageFont
+
+
+def _is_windows():
+    """
+    queries if the current operating system is Windows
+
+    :rtype: bool
+    """
+    return sys.platform.startswith('win') or \
+        sys.platform.startswith('cygwin')
+
+
+def _system_font():
+    """
+    attempts to determine a default system font
+
+    :rtype: str
+    """
+    if _is_windows():
+        font_path = os.path.join(os.environ['WINDIR'], 'Fonts')
+        fonts = ('arial.ttf', 'calibri.ttf', 'times.ttf')
+    elif sys.platform.startswith('darwin'):
+        font_path = '/System/Library/Fonts'
+        fonts = ('Menlo.ttc',)
+    else:
+        # assuming linux
+        font_path = '/usr/share/fonts/msttcorefonts'
+        fonts = ('arial.ttf', 'times.ttf', 'couri.ttf')
+
+    system_font = None
+    backup = None
+    for font in fonts:
+        font = os.path.join(font_path, font)
+        if os.path.exists(font):
+            system_font = font
+            break
+    else:
+        if os.path.exists(font_path):
+            for each in os.listdir(font_path):
+                ext = os.path.splitext(each)[-1]
+                if ext[1:].startswith('tt'):
+                    system_font = os.path.join(font_path, each)
+    return system_font or backup
+
+
+# Default values
+FONT = _system_font()
+FONT_SIZE = 16
+FONT_COLOR = 'white'
+BG_COLOR = 'black'
+BG_PADDING = 5
+
+# FFMPEG command strings
+FFMPEG = ('ffmpeg -loglevel panic -i %(input)s '
+          '%(filters)s %(args)s%(output)s')
+FFPROBE = ('ffprobe -v quiet -print_format json -show_format '
+           '-show_streams %(source)s')
+BOX = 'box=1:boxborderw=%(border)d:boxcolor=%(color)s@%(opacity).1f'
+DRAWTEXT = ("drawtext=text='%(text)s':x=%(x)s:y=%(y)s:fontcolor="
+            "%(color)s@%(opacity).1f:fontsize=%(size)d:fontfile='%(font)s'")
+TIMECODE = ("drawtext=timecode='%(text)s':timecode_rate=%(fps).2f"
+            ":x=%(x)s:y=%(y)s:fontcolor="
+            "%(color)s@%(opacity).1f:fontsize=%(size)d:fontfile='%(font)s'")
+
+
+# Valid alignment parameters.
+TOP_CENTERED = 'top_centered'
+BOTTOM_CENTERED = 'bottom_centered'
+TOP_LEFT = 'top_left'
+BOTTOM_LEFT = 'bottom_left'
+TOP_RIGHT = 'top_right'
+BOTTOM_RIGHT = 'bottom_right'
+
+
+class Options(dict):
+    """
+    Base options class.
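+
+    Keys are validated on item assignment; illustrative example (not from
+    the original source):
+
+        opts = TextOptions(font_size=24)
+        opts['font_color'] = 'yellow'   # known key, accepted
+        opts['fontcolor'] = 'yellow'    # unknown key, raises KeyError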
+ """ + _params = { + 'opacity': 1, + 'x_offset': 0, + 'y_offset': 0, + 'font': FONT, + 'font_size': FONT_SIZE, + 'bg_color': BG_COLOR, + 'bg_padding': BG_PADDING, + 'font_color': FONT_COLOR + } + + def __init__(self, **kwargs): + super(Options, self).__init__() + params = self._params.copy() + params.update(kwargs) + super(Options, self).update(**params) + + def __setitem__(self, key, value): + if key not in self._params: + raise KeyError("Not a valid option key '%s'" % key) + super(Options, self).update({key: value}) + + +class FrameNumberOptions(Options): + """ + :key int frame_offset: offset the frame numbers + :key float opacity: opacity value (0-1) + :key str expression: expression that would be used instead of text + :key bool x_offset: X position offset + (does not apply to centered alignments) + :key bool y_offset: Y position offset + :key str font: path to the font file + :key int font_size: size to render the font in + :key str bg_color: background color of the box + :key int bg_padding: padding between the font and box + :key str font_color: color to render + """ + + def __init__(self, **kwargs): + self._params.update({ + 'frame_offset': 0, + 'expression': None + }) + super(FrameNumberOptions, self).__init__(**kwargs) + + +class TextOptions(Options): + """ + :key float opacity: opacity value (0-1) + :key str expression: expression that would be used instead of text + :key bool x_offset: X position offset + (does not apply to centered alignments) + :key bool y_offset: Y position offset + :key str font: path to the font file + :key int font_size: size to render the font in + :key str bg_color: background color of the box + :key int bg_padding: padding between the font and box + :key str font_color: color to render + """ + + +class TimeCodeOptions(Options): + """ + :key int frame_offset: offset the frame numbers + :key float fps: frame rate to calculate the timecode by + :key float opacity: opacity value (0-1) + :key bool x_offset: X position offset + (does not apply to centered alignments) + :key bool y_offset: Y position offset + :key str font: path to the font file + :key int font_size: size to render the font in + :key str bg_color: background color of the box + :key int bg_padding: padding between the font and box + :key str font_color: color to render + """ + + def __init__(self, **kwargs): + self._params.update({ + 'frame_offset': 0, + 'fps': 24 + }) + super(TimeCodeOptions, self).__init__(**kwargs) + + +class Burnins(object): + """ + Class that provides convenience API for building filter + flags for the FFMPEG command. 
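+
+    Each add_* call appends one drawtext expression to self.filters; the
+    filter_string property joins them with commas, which is the form
+    expected by ffmpeg's -vf argument.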
+    """
+
+    def __init__(self, source, streams=None):
+        """
+        :param str source: source media file
+        :param [] streams: ffprobe stream data if parsed as a pre-process
+        """
+        self.source = source
+        self.filters = {
+            'drawtext': []
+        }
+        self._streams = streams or _streams(self.source)
+
+    def __repr__(self):
+        return '<Burnins %s>' % os.path.basename(self.source)
+
+    @property
+    def start_frame(self):
+        """
+        :rtype: int
+        """
+        start_time = float(self._video_stream['start_time'])
+        return round(start_time * self.frame_rate)
+
+    @property
+    def end_frame(self):
+        """
+        :rtype: int
+        """
+        end_time = float(self._video_stream['duration'])
+        return round(end_time * self.frame_rate)
+
+    @property
+    def frame_rate(self):
+        """
+        :rtype: int
+        """
+        data = self._video_stream
+        tokens = data['r_frame_rate'].split('/')
+        return int(tokens[0]) / int(tokens[1])
+
+    @property
+    def _video_stream(self):
+        video_stream = None
+        for each in self._streams:
+            if each.get('codec_type') == 'video':
+                video_stream = each
+                break
+        else:
+            raise RuntimeError("Failed to locate video stream "
+                               "from '%s'" % self.source)
+        return video_stream
+
+    @property
+    def resolution(self):
+        """
+        :rtype: (int, int)
+        """
+        data = self._video_stream
+        return data['width'], data['height']
+
+    @property
+    def filter_string(self):
+        """
+        Generates the filter string that would be applied
+        to the `-vf` argument
+
+        :rtype: str
+        """
+        return ','.join(self.filters['drawtext'])
+
+    def add_timecode(self, align, options=None):
+        """
+        Convenience method to create the timecode expression.
+
+        :param enum align: alignment, must use provided enum flags
+        :param dict options: recommended to use TimeCodeOptions
+        """
+        options = options or TimeCodeOptions()
+        timecode = _frames_to_timecode(options['frame_offset'],
+                                       self.frame_rate)
+        options = options.copy()
+        if not options.get('fps'):
+            options['fps'] = self.frame_rate
+        self._add_burnin(timecode.replace(':', r'\:'),
+                         align,
+                         options,
+                         TIMECODE)
+
+    def add_frame_numbers(self, align, options=None):
+        """
+        Convenience method to create the frame number expression.
+
+        :param enum align: alignment, must use provided enum flags
+        :param dict options: recommended to use FrameNumberOptions
+        """
+        options = options or FrameNumberOptions()
+        options['expression'] = r'%%{eif\:n+%d\:d}' % options['frame_offset']
+        text = str(int(self.end_frame + options['frame_offset']))
+        self._add_burnin(text, align, options, DRAWTEXT)
+
+    def add_text(self, text, align, options=None):
+        """
+        Adding static text to a filter.
+
+        :param str text: text to apply to the drawtext
+        :param enum align: alignment, must use provided enum flags
+        :param dict options: recommended to use TextOptions
+        """
+        options = options or TextOptions()
+        self._add_burnin(text, align, options, DRAWTEXT)
+
+    def _add_burnin(self, text, align, options, draw):
+        """
+        Generic method for building the filter flags.
+ + :param str text: text to apply to the drawtext + :param enum align: alignment, must use provided enum flags + :param dict options: + """ + resolution = self.resolution + data = { + 'text': options.get('expression') or text, + 'color': options['font_color'], + 'size': options['font_size'] + } + data.update(options) + data.update(_drawtext(align, resolution, text, options)) + if 'font' in data and _is_windows(): + data['font'] = data['font'].replace(os.sep, r'\\' + os.sep) + data['font'] = data['font'].replace(':', r'\:') + self.filters['drawtext'].append(draw % data) + + if options.get('bg_color') is not None: + box = BOX % { + 'border': options['bg_padding'], + 'color': options['bg_color'], + 'opacity': options['opacity'] + } + self.filters['drawtext'][-1] += ':%s' % box + + def command(self, output=None, args=None, overwrite=False): + """ + Generate the entire FFMPEG command. + + :param str output: output file + :param str args: additional FFMPEG arguments + :param bool overwrite: overwrite the output if it exists + :returns: completed command + :rtype: str + """ + output = output or '' + if overwrite: + output = '-y %s' % output + return (FFMPEG % { + 'input': self.source, + 'output': output, + 'args': '%s ' % args if args else '', + 'filters': '-vf "%s"' % self.filter_string + }).strip() + + def render(self, output, args=None, overwrite=False): + """ + Render the media to a specified destination. + + :param str output: output file + :param str args: additional FFMPEG arguments + :param bool overwrite: overwrite the output if it exists + """ + if not overwrite and os.path.exists(output): + raise RuntimeError("Destination '%s' exists, please " + "use overwrite" % output) + command = self.command(output=output, + args=args, + overwrite=overwrite) + proc = Popen(command, shell=True) + proc.communicate() + if proc.returncode != 0: + raise RuntimeError("Failed to render '%s': %s'" + % (output, command)) + if not os.path.exists(output): + raise RuntimeError("Failed to generate '%s'" % output) + + +def _streams(source): + """ + :param str source: source media file + :rtype: [{}, ...] 
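+
+    Each entry is one stream object from ffprobe's JSON output, e.g.
+    (abridged, values illustrative): {'codec_type': 'video', 'width': 1920,
+    'height': 1080, 'r_frame_rate': '24/1', 'start_time': '0.000000',
+    'duration': '8.342'}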
+ """ + command = FFPROBE % {'source': source} + proc = Popen(command, shell=True, stdout=PIPE) + out = proc.communicate()[0] + if proc.returncode != 0: + raise RuntimeError("Failed to run: %s" % command) + return json.loads(out)['streams'] + + +def _drawtext(align, resolution, text, options): + """ + :rtype: {'x': int, 'y': int} + """ + x_pos = '0' + if align in (TOP_CENTERED, BOTTOM_CENTERED): + x_pos = 'w/2-tw/2' + elif align in (TOP_RIGHT, BOTTOM_RIGHT): + ifont = ImageFont.truetype(options['font'], + options['font_size']) + box_size = ifont.getsize(text) + x_pos = resolution[0] - (box_size[0] + options['x_offset']) + elif align in (TOP_LEFT, BOTTOM_LEFT): + x_pos = options['x_offset'] + + if align in (TOP_CENTERED, + TOP_RIGHT, + TOP_LEFT): + y_pos = '%d' % options['y_offset'] + else: + y_pos = 'h-text_h-%d' % (options['y_offset']) + return {'x': x_pos, 'y': y_pos} + + +def _frames_to_timecode(frames, framerate): + return '{0:02d}:{1:02d}:{2:02d}:{3:02d}'.format( + int(frames / (3600 * framerate)), + int(frames / (60 * framerate) % 60), + int(frames / framerate % 60), + int(frames % framerate)) diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/hls_playlist.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/hls_playlist.py new file mode 100644 index 00000000000..e0e3f8f8724 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/hls_playlist.py @@ -0,0 +1,1781 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""HLS Playlist OpenTimelineIO adapter + +This adapter supports authoring of HLS playlists within OpenTimelineIO by using +clips to represent media fragments. 
+
+Status:
+    - Export of Media Playlists well supported
+    - Export of Master Playlists supported
+    - Import of Media Playlists well supported
+    - Import of Master Playlists unsupported
+    - Explicit Variant Stream controls in Master Playlists unsupported
+
+In general, you can author otio as follows:
+    t = otio.schema.Timeline()
+    track = otio.schema.Track("v1")
+    track.metadata['HLS'] = {
+        "EXT-X-INDEPENDENT-SEGMENTS": None,
+        "EXT-X-PLAYLIST-TYPE": "VOD"
+    }
+    t.tracks.append(track)
+
+    # Make a prototype media ref with the fragment's initialization metadata
+    fragmented_media_ref = otio.schema.ExternalReference(
+        target_url='video1.mp4',
+        metadata={
+            "streaming": {
+                "init_byterange": {
+                    "byte_count": 729,
+                    "byte_offset": 0
+                },
+                "init_uri": "media-video-1.mp4"
+            }
+        }
+    )
+
+    # Make a copy of the media ref specifying the byte range for the fragment
+    media_ref1 = fragmented_media_ref.deepcopy()
+    media_ref1.available_range=otio.opentime.TimeRange(
+        otio.opentime.RationalTime(0, 1),
+        otio.opentime.RationalTime(2.002, 1)
+    )
+    media_ref1.metadata['streaming'].update(
+        {
+            "byte_count": 534220,
+            "byte_offset": 1361
+        }
+    )
+
+    # make the fragment and append it
+    fragment1 = otio.schema.Clip(media_reference=media_ref1)
+    track.append(fragment1)
+
+    # (repeat to define each fragment)
+
+The code above would yield an HLS playlist like:
+    #EXTM3U
+    #EXT-X-VERSION:7
+    #EXT-X-TARGETDURATION:2
+    #EXT-X-PLAYLIST-TYPE:VOD
+    #EXT-X-INDEPENDENT-SEGMENTS
+    #EXT-X-MEDIA-SEQUENCE:1
+    #EXT-X-MAP:BYTERANGE="729@0",URI="media-video-1.mp4"
+    #EXTINF:2.00200,
+    #EXT-X-BYTERANGE:534220@1361
+    video1.mp4
+    #EXT-X-ENDLIST
+
+If you add min_segment_duration and max_segment_duration to the timeline's
+metadata dictionary as RationalTime objects, you can control the rule set
+deciding how many fragments to accumulate into a single segment. When nothing
+is specified for these metadata keys, the adapter will create one segment per
+fragment.
+
+In general, any metadata added to the track metadata dict under the HLS
+namespace will be included at the top level of the exported playlist (see
+``EXT-X-INDEPENDENT-SEGMENTS`` and ``EXT-X-PLAYLIST-TYPE`` in the example
+above). Each segment will pass through any metadata in the HLS namespace from
+the media_reference.
+
+If you write a Timeline with more than one track specified, then the adapter
+will create an HLS master playlist.
+
+The following track metadata keys will be used to inform exported master
+playlist metadata per variant stream:
+    bandwidth
+    codec
+    language
+    mimeType
+    group_id (audio)
+    autoselect (audio)
+    default (audio)
+These values are translated to EXT-X-STREAM-INF and EXT-X-MEDIA
+attributes as defined in sections 4.3.4.2 and 4.3.4.1 of
+draft-pantos-http-live-streaming, respectively.
+"""
+
+import re
+import copy
+
+import opentimelineio as otio
+
+# TODO: determine output version based on features used
+OUTPUT_PLAYLIST_VERSION = "7"
+
+# TODO: make sure all strings get sanitized through encoding and decoding
+PLAYLIST_STRING_ENCODING = "utf-8"
+
+# Enable isinstance(my_instance, basestring) tests in Python 3
+# This can be phased out when Python 2 support is dropped. Replace tests with:
+# isinstance(my_instance, str)
+
+try:
+    basestring
+except NameError:
+    basestring = str
+
+"""
+Matches a single key/value pair from an HLS Attribute List.
+See section 4.2 of draft-pantos-http-live-streaming for more detail.
+"""
+ATTRIBUTE_RE = re.compile(
+    r'(?P<AttributeName>[A-Z0-9-]+)' + r'\=' +
+    r'(?P<AttributeValue>(?:\"[^\r\n"]*\")|[^,]+)' + r',?'
+)
+
+"""
+Matches AttributeValue of the above regex into appropriate data types.
+Note that these are meant to be joined using regex "or" in this order.
+"""
+_ATTRIBUTE_RE_VALUE_STR_LIST = [
+    r'(?P<resolution>(?P<width>[0-9]+)x(?P<height>[0-9]+))\Z',
+    r'(?P<hexcidecimal>0[xX](?P<hex_value>[0-9A-F]+))\Z',
+    r'(?P<floating_point>-?[0-9]+\.[0-9]+)\Z',
+    r'(?P<decimal>[0-9]+)\Z',
+    r'(?P<string>\"(?P<string_value>[^\r\n"]*)\")\Z',
+    r'(?P<enumerated>[^",\s]+)\Z'
+]
+ATTRIBUTE_VALUE_RE = re.compile("|".join(_ATTRIBUTE_RE_VALUE_STR_LIST))
+
+"""
+Matches a byterange as used in various contexts.
+See section 4.3.2.2 of draft-pantos-http-live-streaming for an example use of
+this byterange form.
+"""
+BYTERANGE_RE = re.compile(r'(?P<n>\d+)(?:@(?P<o>\d+))?')
+
+"""
+Matches HLS Playlist tags or comments, respectively.
+See section 4.1 of draft-pantos-http-live-streaming for more detail.
+"""
+TAG_RE = re.compile(
+    r'#(?P<tagname>EXT[^:\s]+)(?P<hasattr>:?)(?P<attribute>.*)'
+)
+COMMENT_RE = re.compile(r'#(?!EXT)(?P<comment>.*)')
+
+
+class AttributeListEnum(str):
+    """ A subclass allowing us to differentiate enums in HLS attribute lists
+    """
+
+
+def _value_from_raw_attribute_value(raw_attribute_value):
+    """
+    Takes in a raw AttributeValue and returns an appropriate Python type.
+    If there is a problem decoding the value, None is returned.
+    """
+    value_match = ATTRIBUTE_VALUE_RE.match(raw_attribute_value)
+    if not value_match:
+        return None
+
+    group_dict = value_match.groupdict()
+    # suss out the match
+    for k, v in group_dict.items():
+        # not a successful group match
+        if v is None:
+            continue
+
+        # decode the string
+        if k == 'resolution':
+            return v
+        elif k == 'enumerated':
+            return AttributeListEnum(v)
+        elif k == 'hexcidecimal':
+            return int(group_dict['hex_value'], base=16)
+        elif k == 'floating_point':
+            return float(v)
+        elif k == 'decimal':
+            return int(v)
+        elif k == 'string':
+            # grab only the data within the quotes, excluding the quotes
+            string_value = group_dict['string_value']
+            return string_value
+
+    return None
+
+
+class AttributeList(dict):
+    """
+    Dictionary-like object representing an HLS AttributeList.
+    See section 4.2 of draft-pantos-http-live-streaming for more detail.
+    """
+
+    def __init__(self, other=None):
+        """
+        constructs an :class:`AttributeList`.
+
+        ``Other`` can be either another dictionary-like object or a list of
+        key/value pairs
+        """
+        if not other:
+            return
+
+        try:
+            items = other.items()
+        except AttributeError:
+            items = other
+
+        for k, v in items:
+            self[k] = v
+
+    def __str__(self):
+        """
+        Construct attribute list string as it would exist in an HLS playlist.
+        """
+        attr_list_entries = []
+        # Use a sorted version of the dictionary to ensure consistency
+        for k, v in sorted(self.items(), key=lambda i: i[0]):
+            out_value = ''
+            if isinstance(v, AttributeListEnum):
+                out_value = v
+            elif isinstance(v, basestring):
+                out_value = '"{}"'.format(v)
+            else:
+                out_value = str(v)
+
+            attr_list_entries.append('{}={}'.format(k, out_value))
+
+        return ','.join(attr_list_entries)
+
+    @classmethod
+    def from_string(cls, attrlist_string):
+        """
+        Accepts an attribute list string and returns an :class:`AttributeList`.
+
+        The values will be transformed to Python types.
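+
+        For example (illustrative), the string
+        'BANDWIDTH=1280000,CODECS="avc1.64001f"' decodes to
+        {'BANDWIDTH': 1280000, 'CODECS': 'avc1.64001f'}: decimal values
+        become ints and quoted strings lose their surrounding quotes.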
+ """ + attr_list = cls() + match = ATTRIBUTE_RE.search(attrlist_string) + while match: + # unpack the values from the match + group_dict = match.groupdict() + name = group_dict['AttributeName'] + raw_value = group_dict['AttributeValue'] + + # parse the raw value + value = _value_from_raw_attribute_value(raw_value) + attr_list[name] = value + + # search for the next attribute in the string + match_end = match.span()[1] + match = ATTRIBUTE_RE.search(attrlist_string, match_end) + + return attr_list + + +# some special top-levle keys that HLS metadata will be decoded into +FORMAT_METADATA_KEY = 'HLS' +""" +Some concepts are translatable between HLS and other streaming formats (DASH). +These metadata keys are used on OTIO objects outside the HLS namespace because +they are higher level concepts. +""" +STREAMING_METADATA_KEY = 'streaming' +INIT_BYTERANGE_KEY = 'init_byterange' +INIT_URI_KEY = 'init_uri' +SEQUENCE_NUM_KEY = 'sequence_num' +BYTE_OFFSET_KEY = 'byte_offset' +BYTE_COUNT_KEY = 'byte_count' + + +class Byterange(object): + """Offers interpretation of HLS byte ranges in various forms.""" + + count = None + """(:class:`int`) Number of bytes included in the range.""" + + offset = None + """(:class:`int`) Byte offset at which the range starts.""" + + def __init__(self, count=None, offset=None): + """Constructs a :class:`Byterange` object. + + :param count: (:class:`int`) Number of bytes included in the range. + :param offset: (:class:`int`) Byte offset at which the range starts. + """ + self.count = (count if count is not None else 0) + self.offset = offset + + def __eq__(self, other): + if not isinstance(other, Byterange): + # fall back on identity, this should always be False + return (self is other) + return (self.count == other.count and self.offset == other.offset) + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return '{}(offset = {}, count = {})'.format( + type(self), + str(self.offset), + str(self.count) + ) + + def __str__(self): + """returns a string in HLS format""" + + out_str = str(self.count) + if self.offset is not None: + out_str += '@{}'.format(str(self.offset)) + + return out_str + + def to_dict(self): + """Returns a dict suitable for storing in otio metadata. + + :return: (:class:`dict`) serializable version of byterange. + """ + range_dict = {BYTE_COUNT_KEY: self.count} + if self.offset is not None: + range_dict[BYTE_OFFSET_KEY] = self.offset + + return range_dict + + @classmethod + def from_string(cls, byterange_string): + """Construct a :class:`Byterange` given a string in HLS format. + + :param byterange_string: (:class:`str`) a byterange string. + :return: (:class:`Byterange`) The instance for the provided string. + """ + m = BYTERANGE_RE.match(byterange_string) + + return cls.from_match_dict(m.groupdict()) + + @classmethod + def from_match_dict(cls, match_dict): + """ + Construct a :class:`Byterange` given a groupdict from ``BYTERANGE_RE`` + + :param match_dict: (:class:`dict`) the ``match_dict``. + :return: (:class:`Byterange`) The instance for the provided string. + """ + byterange = cls(count=int(match_dict['n'])) + + try: + byterange.offset = int(match_dict['o']) + except KeyError: + pass + + return byterange + + @classmethod + def from_dict(cls, info_dict): + """ Creates a :class:`Byterange` given a dictionary containing keys + like generated from the :meth:`to_dict method`. + + :param info_dict: (:class:`dict`) Dictionary byterange. + :return: (:class:`Byterange`) an equivalent instance. 
+ """ + byterange = cls( + count=info_dict.get(BYTE_COUNT_KEY), + offset=info_dict.get(BYTE_OFFSET_KEY) + ) + + return byterange + + +""" +For a given collection of media, HLS has two playlist types: + - Media Playlist + - Master Playlist + +The media playlist refers directly to the individual segments that make up an +audio or video track of a given program. The master playlist refers to a +collection of media playlists and provides ways to use them together +(rendition groups). + +See section 2 of draft-pantos-http-live-streaming for more detail. + +The constants below define which tags belong to which schema. +""" + +""" +Basic tags appear in both media and master playlists. +See section 4.3.1 of draft-pantos-http-live-streaming for more detail. +""" +BASIC_TAGS = set([ + "EXTM3U", + "EXT-X-VERSION" +]) + +""" +Media segment tags apply to either the following media or all subsequent +segments. They MUST NOT appear in master playlists. +See section 4.3.2 of draft-pantos-http-live-streaming for more detail. +""" +MEDIA_SEGMENT_TAGS = set([ + 'EXTINF', + 'EXT-X-BYTERANGE', + 'EXT-X-DISCONTINUITY', + 'EXT-X-KEY', + 'EXT-X-MAP', + 'EXT-X-PROGRAM-DATE-TIME', + 'EXT-X-DATERANGE' +]) + +""" The subset of above tags that apply to every segment following them """ +MEDIA_SEGMENT_SUBSEQUENT_TAGS = set([ + 'EXT-X-KEY', + 'EXT-X-MAP', +]) + +""" +Media Playlist tags must only occur once per playlist, and must not appear in +Master Playlists. +See section 4.3.3 of draft-pantos-http-live-streaming for more detail. +""" +MEDIA_PLAYLIST_TAGS = set([ + 'EXT-X-TARGETDURATION', + 'EXT-X-MEDIA-SEQUENCE', + 'EXT-X-DISCONTINUITY-SEQUENCE', + 'EXT-X-ENDLIST', + 'EXT-X-PLAYLIST-TYPE', + 'EXT-X-I-FRAMES-ONLY' +]) + +""" +Master playlist tags declare global parameters for the presentation. +They must not appear in media playlists. +See section 4.3.4 of draft-pantos-http-live-streaming for more detail. +""" +MASTER_PLAYLIST_TAGS = set([ + 'EXT-X-MEDIA', + 'EXT-X-STREAM-INF', + 'EXT-X-I-FRAME-STREAM-INF', + 'EXT-X-SESSION-DATA', + 'EXT-X-SESSION-KEY', +]) + +""" +Media or Master Playlist tags can appear in either media or master playlists. +See section 4.3.5 of draft-pantos-http-live-streaming for more detail. +These tags SHOULD appear in either the media or master playlist. If they occur +in both, their values MUST agree. +These values MUST NOT appear more than once in a playlist. +""" +MEDIA_OR_MASTER_TAGS = set([ + "EXT-X-INDEPENDENT-SEGMENTS", + "EXT-X-START" +]) + +""" +Some special tags used by the parser. +""" +PLAYLIST_START_TAG = "EXTM3U" +PLAYLIST_END_TAG = "EXT-X-ENDLIST" +PLAYLIST_VERSION_TAG = "EXT-X-VERSION" +PLAYLIST_SEGMENT_INF_TAG = "EXTINF" + +""" +attribute list entries to omit from EXT-I-FRAME-STREAM-INF tags +See section 4.3.4.3 of draft-pantos-http-live-streaming for more detail. 
+""" +I_FRAME_OMIT_ATTRS = set([ + 'FRAME-RATE', + 'AUDIO', + 'SUBTITLES', + 'CLOSED-CAPTIONS' +]) + +""" enum for kinds of playlist entries """ +EntryType = type('EntryType', (), { + 'tag': 'tag', + 'comment': 'comment', + 'URI': 'URI' +}) + +""" enum for types of playlists """ +PlaylistType = type('PlaylistType', (), { + 'media': 'media', + 'master': 'master' +}) + +""" mapping from HLS track type to otio ``TrackKind`` """ +HLS_TRACK_TYPE_TO_OTIO_KIND = { + AttributeListEnum('AUDIO'): otio.schema.TrackKind.Audio, + AttributeListEnum('VIDEO'): otio.schema.TrackKind.Video, + # TODO: determine how to handle SUBTITLES and CLOSED-CAPTIONS +} + +""" mapping from otio ``TrackKind`` to HLS track type """ +OTIO_TRACK_KIND_TO_HLS_TYPE = dict(( + (v, k) for k, v in HLS_TRACK_TYPE_TO_OTIO_KIND.items() +)) + + +class HLSPlaylistEntry(object): + """An entry in an HLS playlist. + + Entries can be a tag, a comment, or a URI. All HLS playlists are parsed + into lists of :class:`HLSPlaylistEntry` instances that can then be + interpreted against the HLS schema. + """ + + # TODO: rename this to entry_type to fix builtin masking + # type = None + """ (``EntryType``) the type of entry """ + + comment_string = None + """ + (:class:`str`) value of comment (if the ``entry_type`` is + ``EntryType.comment``). + """ + + tag_name = None + """ + (:class:`str`) Name of tag (if the ``entry_type`` is ``EntryType.tag``). + """ + + tag_value = None + """ + (:class:`str`) Value of tag (if the ``entry_type`` is ``EntryType.tag``). + """ + + uri = None + """ + (:class:`str`) Value of the URI (if the ``entry_type is ``EntryType.uri``). + """ + + def __init__(self, type): + """ + Constructs an :class:`HLSPlaylistEntry`. + + :param type: (``EntryType``) Type of entry. + """ + self.type = type + + def __repr__(self): + base_str = 'otio.adapter.HLSPlaylistEntry(type={}'.format( + self.type) + if self.type == EntryType.tag: + base_str += ', tag_name={}, tag_value={}'.format( + repr(self.tag_name), + repr(self.tag_value) + ) + elif self.type == EntryType.comment: + base_str += ', comment={}'.format(repr(self.comment_string)) + elif self.type == EntryType.URI: + base_str += ', URI={}'.format(repr(self.uri)) + + return base_str + ')' + + def __str__(self): + """ + Returns a string as it would appear in an HLS playlist. + + :return: (:class:`str`) HLS playlist entry string. + """ + if self.type == EntryType.comment and self.comment_string: + return "# {}".format(self.comment_string) + elif self.type == EntryType.comment: + # empty comments are blank lines + return "" + elif self.type == EntryType.URI: + return self.uri + elif self.type == EntryType.tag: + out_tag_name = self.tag_name + if self.tag_value is not None: + return '#{}:{}'.format(out_tag_name, self.tag_value) + else: + return '#{}'.format(out_tag_name) + + @classmethod + def tag_entry(cls, name, value=None): + """ + Creates an ``EntryType.tag`` :class:`HLSPlaylistEntry`. + + :param name: (:class:`str`) tag name. + :param value: (:class:`str`) tag value. + :return: (:class:`HLSPlaylistEntry`) Entry instance. + """ + entry = cls(EntryType.tag) + entry.tag_name = name + entry.tag_value = value + + return entry + + @classmethod + def comment_entry(cls, comment): + """Creates an ``EntryType.comment`` :class:`HLSPlaylistEntry`. + + :param comment: (:class:`str`) the comment. + :return: (:class:`HLSPlaylistEntry`) Entry instance. 
+ """ + entry = cls(EntryType.comment) + entry.comment_string = comment + + return entry + + @classmethod + def uri_entry(cls, uri): + """Creates an ``EntryType.uri`` :class:`HLSPlaylistEntry`. + + :param uri: (:class:`str`) A URI string. + :return: (:class:`HLSPlaylistEntry`) Entry instance. + """ + entry = cls(EntryType.URI) + entry.uri = uri + + return entry + + @classmethod + def from_string(cls, entry_string): + """Creates an `:class:`HLSPlaylistEntry` given a string as it appears + in an HLS playlist. + + :param entry_string: (:class:`str`) String from an HLS playlist. + :return: (:class:`HLSPlaylistEntry`) Entry instance. + """ + # Empty lines are skipped + if not entry_string.strip(): + return None + + # Attempt to parse as a tag + m = TAG_RE.match(entry_string) + if m: + group_dict = m.groupdict() + tag_value = ( + group_dict['tagvalue'] + if group_dict['hasvalue'] else None + ) + entry = cls.tag_entry(group_dict['tagname'], tag_value) + return entry + + # Attempt to parse as a comment + m = COMMENT_RE.match(entry_string) + if m: + entry = cls.comment_entry(m.groupdict()['comment']) + return entry + + # If it's not the others, treat as a URI + entry = cls.uri_entry(entry_string) + + return entry + + """A dispatch dictionary for grabbing the right Regex to parse tags.""" + TAG_VALUE_RE_MAP = { + "EXTINF": re.compile(r'(?P\d+(\.\d*)?),(?P.*$)'), + "EXT-X-BYTERANGE": BYTERANGE_RE, + "EXT-X-KEY": re.compile(r'(?P<attribute_list>.*$)'), + "EXT-X-MAP": re.compile(r'(?P<attribute_list>.*$)'), + "EXT-X-MEDIA-SEQUENCE": re.compile(r'(?P<number>\d+)'), + "EXT-X-PLAYLIST-TYPE": re.compile(r'(?P<type>EVENT|VOD)'), + PLAYLIST_VERSION_TAG: re.compile(r'(?P<n>\d+)') + } + + def parsed_tag_value(self, playlist_version=None): + """Parses and returns ``self.tag_value`` based on the HLS schema. + + The value will be a dictionary where the keys are the names used in the + draft Pantos HTTP Live Streaming doc. When "attribute-list" is + specified, an entry "attribute_list" will be present containing + an :class:`AttributeList` instance. + + :param playlist_version: (:class:`int`) version number of the playlist. + If none is provided, a best guess will be made. + :return: The parsed value. 
+ """ + if self.type != EntryType.tag: + return None + + try: + tag_re = self.TAG_VALUE_RE_MAP[self.tag_name] + except KeyError: + return None + + # parse the tag + m = tag_re.match(self.tag_value) + group_dict = m.groupdict() + + if not m: + return None + + # If the tag value has an attribute list, parse it and add it + try: + attribute_list = group_dict['attribute_list'] + attr_list = AttributeList.from_string(attribute_list) + group_dict['attributes'] = attr_list + except KeyError: + pass + + return group_dict + + +class HLSPlaylistParser(object): + """Bootstraps HLS parsing and hands the playlist string off to the + appropriate parser for the type + """ + + def __init__(self, edl_string): + self.timeline = otio.schema.Timeline() + self.playlist_type = None + + self._parse_playlist(edl_string) + + def _parse_playlist(self, edl_string): + """Parses the HLS Playlist string line-by-line.""" + # parse lines until we encounter one that identifies the playlist type + # then hand off + start_encountered = False + end_encountered = False + playlist_entries = [] + playlist_version = 1 + for line in edl_string.splitlines(): + # attempt to parse the entry + entry = HLSPlaylistEntry.from_string(line) + if entry is None: + continue + + entry_is_tag = (entry.type == EntryType.tag) + + # identify if the playlist start/end is encountered + if (entry_is_tag and not (start_encountered and end_encountered)): + if entry.tag_name == PLAYLIST_START_TAG: + start_encountered = True + elif entry.tag_name == PLAYLIST_END_TAG: + end_encountered = True + + # if the playlist starting tag hasn't been encountered, ignore + if not start_encountered: + continue + + # Store the parsed entry + playlist_entries.append(entry) + + # Determine if this tells us the playlist type + if not self.playlist_type and entry_is_tag: + if entry.tag_name in MASTER_PLAYLIST_TAGS: + self.playlist_type = PlaylistType.master + elif entry.tag_name in MEDIA_PLAYLIST_TAGS: + self.playlist_type = PlaylistType.media + + if end_encountered: + break + + # try to grab the version from the playlist + if entry_is_tag and entry.tag_name == PLAYLIST_VERSION_TAG: + playlist_version = int(entry.parsed_tag_value()['n']) + + # dispatch to the appropriate schema interpreter + if self.playlist_type is None: + self.timeline = None + raise otio.exceptions.ReadingNotSupportedError( + "could not determine playlist type" + ) + elif self.playlist_type == PlaylistType.master: + self.timeline = None + raise otio.exceptions.AdapterDoesntSupportFunction( + "HLS master playlists are not yet supported" + ) + elif self.playlist_type == PlaylistType.media: + parser = MediaPlaylistParser(playlist_entries, playlist_version) + if len(parser.track): + self.timeline.tracks.append(parser.track) + + +class MediaPlaylistParser(object): + """Parses an HLS Media playlist returning a SEQUENCE""" + + def __init__(self, playlist_entries, playlist_version=None): + self.track = otio.schema.Track( + metadata={FORMAT_METADATA_KEY: {}} + ) + + self._parse_entries(playlist_entries, playlist_version) + + def _handle_track_metadata(self, entry, playlist_version, clip): + """Stashes the tag value in the track metadata""" + value = entry.tag_value + self.track.metadata[FORMAT_METADATA_KEY][entry.tag_name] = value + + def _handle_discarded_metadata(self, entry, playlist_version, clip): + """Handler for tags that are discarded. This is done when a tag's + information is represented by the native OTIO concepts. 
+
+        For instance, the EXT-X-TARGETDURATION tag simply gives a rounded
+        value for the maximum segment duration in the playlist. This can
+        easily be found in OTIO by examining the clips.
+        """
+        # Do nothing
+
+    def _metadata_dict_for_MAP(self, entry, playlist_version):
+        entry_data = entry.parsed_tag_value()
+        attributes = entry_data['attributes']
+        map_dict = {}
+        for attr, value in attributes.items():
+            if attr == 'BYTERANGE':
+                byterange = Byterange.from_string(value)
+                map_dict[INIT_BYTERANGE_KEY] = byterange.to_dict()
+            elif attr == 'URI':
+                map_dict[INIT_URI_KEY] = value
+
+        return map_dict
+
+    def _handle_INF(self, entry, playlist_version, clip):
+        # This specifies segment duration and optional title
+        info_dict = entry.parsed_tag_value(playlist_version)
+        segment_duration = float(info_dict['duration'])
+        segment_title = info_dict['title']
+        available_range = otio.opentime.TimeRange(
+            otio.opentime.RationalTime(0, 1),
+            otio.opentime.RationalTime(segment_duration, 1)
+        )
+
+        # Push the info to the clip
+        clip.media_reference.available_range = available_range
+        clip.source_range = available_range
+        clip.name = segment_title
+
+    def _handle_BYTERANGE(self, entry, playlist_version, clip):
+        reference_metadata = clip.media_reference.metadata
+        ref_streaming_metadata = reference_metadata.setdefault(
+            STREAMING_METADATA_KEY,
+            {}
+        )
+
+        # Pull out the byte count and offset
+        byterange = Byterange.from_match_dict(
+            entry.parsed_tag_value(playlist_version)
+        )
+        ref_streaming_metadata.update(byterange.to_dict())
+
+    """
+    Specifies handlers for specific HLS tags.
+    """
+    TAG_HANDLERS = {
+        "EXTINF": _handle_INF,
+        PLAYLIST_VERSION_TAG: _handle_track_metadata,
+        "EXT-X-TARGETDURATION": _handle_discarded_metadata,
+        "EXT-X-MEDIA-SEQUENCE": _handle_discarded_metadata,
+        "EXT-X-PLAYLIST-TYPE": _handle_track_metadata,
+        "EXT-X-INDEPENDENT-SEGMENTS": _handle_track_metadata,
+        "EXT-X-BYTERANGE": _handle_BYTERANGE
+    }
+
+    def _parse_entries(self, playlist_entries, playlist_version):
+        """Interpret the entries through the lens of the schema"""
+        current_clip = otio.schema.Clip(
+            media_reference=otio.schema.ExternalReference(
+                metadata={
+                    FORMAT_METADATA_KEY: {},
+                    STREAMING_METADATA_KEY: {}
+                }
+            )
+        )
+        current_media_ref = current_clip.media_reference
+        segment_metadata = {}
+        current_map_data = {}
+        # per section 4.3.3.2 of Pantos HLS, 0 is default start track
+        current_track = 0
+        for entry in playlist_entries:
+            if entry.type == EntryType.URI:
+                # the URI ends the segment definition
+                current_media_ref.target_url = entry.uri
+                current_media_ref.metadata[FORMAT_METADATA_KEY].update(
+                    segment_metadata
+                )
+                current_media_ref.metadata[STREAMING_METADATA_KEY].update(
+                    current_map_data
+                )
+                current_clip.metadata.setdefault(
+                    STREAMING_METADATA_KEY,
+                    {}
+                )[SEQUENCE_NUM_KEY] = current_track
+                self.track.append(current_clip)
+                current_track += 1
+
+                # Set up the next segment definition
+                current_clip = otio.schema.Clip(
+                    media_reference=otio.schema.ExternalReference(
+                        metadata={
+                            FORMAT_METADATA_KEY: {},
+                            STREAMING_METADATA_KEY: {}
+                        }
+                    )
+                )
+                current_media_ref = current_clip.media_reference
+                continue
+            elif entry.type != EntryType.tag:
+                # the rest of the code deals only with tags
+                continue
+
+            # Explode the EXT-X-MAP info out
+            if entry.tag_name == "EXT-X-MAP":
+                map_data = self._metadata_dict_for_MAP(entry, playlist_version)
+                current_map_data.update(map_data)
+                continue
+
+            # Grab the track when it comes around
+            if entry.tag_name == "EXT-X-MEDIA-SEQUENCE":
+                entry_data = 
entry.parsed_tag_value()
+                current_track = int(entry_data['number'])
+
+            # If the segment tag is one that applies to all that follow
+            # store the value to be applied to each segment
+            if entry.tag_name in MEDIA_SEGMENT_SUBSEQUENT_TAGS:
+                segment_metadata[entry.tag_name] = entry.tag_value
+                continue
+
+            # use a handler if available
+            try:
+                handler = self.TAG_HANDLERS[entry.tag_name]
+                handler(self, entry, playlist_version, current_clip)
+                continue
+            except KeyError:
+                pass
+
+            # add the tag to the reference metadata at the correct level
+            if entry.tag_name in [PLAYLIST_START_TAG, PLAYLIST_END_TAG]:
+                continue
+            elif entry.tag_name in MEDIA_SEGMENT_TAGS:
+                # Media segments translate into media refs
+                hls_metadata = current_media_ref.metadata[FORMAT_METADATA_KEY]
+                hls_metadata[entry.tag_name] = entry.tag_value
+            elif entry.tag_name in MEDIA_PLAYLIST_TAGS:
+                # Media playlists translate into tracks
+                hls_metadata = self.track.metadata[FORMAT_METADATA_KEY]
+                hls_metadata[entry.tag_name] = entry.tag_value
+
+
+"""
+Compatibility version list:
+    EXT-X-BYTERANGE >= 4
+    EXT-X-I-FRAMES-ONLY >= 4
+    EXT-X-MAP in media playlist with EXT-X-I-FRAMES-ONLY >= 5
+    EXT-X-MAP in media playlist without I-FRAMES-ONLY >= 6
+    EXT-X-KEY constraints depend on the attributes specified:
+        - IV >= 2
+        - KEYFORMAT >= 5
+        - KEYFORMATVERSIONS >= 5
+    EXTINF with floating point values >= 3
+
+    master playlist:
+    EXT-X-MEDIA with INSTREAM-ID="SERVICE"
+"""
+
+
+def entries_for_segment(
+    uri,
+    segment_duration,
+    segment_name=None,
+    segment_byterange=None,
+    segment_tags=None
+):
+    """Creates a set of :class:`HLSPlaylistEntry` objects with the given
+    parameters.
+
+    :param uri: (:class:`str`) The uri for the segment media.
+    :param segment_duration: (:class:`opentimelineio.opentime.RationalTime`)
+        playback duration of the segment.
+    :param segment_name: (:class:`str`) name for the segment (used as the
+        EXTINF title).
+    :param segment_byterange: (:class:`Byterange`) The data range for the
+        segment in the media (if required)
+    :param segment_tags: (:class:`dict`) key/value pairs to become
+        additional tags for the segment
+
+    :return: (:class:`list`) a group of :class:`HLSPlaylistEntry` instances
+        for the segment
+    """
+    # Create the tags dict to build
+    if segment_tags:
+        tags = copy.deepcopy(segment_tags)
+    else:
+        tags = {}
+
+    # Start building the entries list
+    segment_entries = []
+
+    # add the EXTINF
+    name = segment_name if segment_name is not None else ''
+    tag_value = '{0:.5f},{1}'.format(
+        otio.opentime.to_seconds(segment_duration),
+        name
+    )
+    extinf_entry = HLSPlaylistEntry.tag_entry('EXTINF', tag_value)
+    segment_entries.append(extinf_entry)
+
+    # add the additional tags
+    tag_entries = [
+        HLSPlaylistEntry.tag_entry(k, v) for k, v in
+        tags.items()
+    ]
+    segment_entries.extend(tag_entries)
+
+    # Now add the byterange for the entry
+    if segment_byterange:
+        byterange_entry = HLSPlaylistEntry.tag_entry(
+            'EXT-X-BYTERANGE',
+            str(segment_byterange)
+        )
+        segment_entries.append(byterange_entry)
+
+    # Add the URI
+    # this method expects all fragments come from the same source file
+    uri_entry = HLSPlaylistEntry.uri_entry(uri)
+    segment_entries.append(uri_entry)
+
+    return segment_entries
+
+
+def stream_inf_attr_list_for_track(track):
+    """ Builds an :class:`AttributeList` instance for use in ``STREAM-INF``
+    tags for the provided track.
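+
+    For example, streaming metadata of (hypothetical values)
+    ``{'bandwidth': 1280000, 'codec': 'avc1.4d401f', 'width': 1920,
+    'height': 1080}`` would yield the attribute list
+    ``BANDWIDTH=1280000,CODECS="avc1.4d401f",RESOLUTION=1920x1080``.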
+
+    :param track: (:class:`otio.schema.Track`) A track representing a
+        variant stream
+    :return: (:class:`AttributeList`) The instance from the metadata
+    """
+    streaming_metadata = track.metadata.get(STREAMING_METADATA_KEY, {})
+
+    attributes = []
+    bandwidth = streaming_metadata.get('bandwidth')
+    if bandwidth is not None:
+        attributes.append(('BANDWIDTH', bandwidth))
+
+    codec = streaming_metadata.get('codec')
+    if codec is not None:
+        attributes.append(('CODECS', codec))
+
+    frame_rate = streaming_metadata.get('frame_rate')
+    if frame_rate is not None:
+        attributes.append(('FRAME-RATE', frame_rate))
+
+    if 'width' in streaming_metadata and 'height' in streaming_metadata:
+        resolution = "{}x{}".format(
+            streaming_metadata['width'],
+            streaming_metadata['height']
+        )
+        attributes.append(('RESOLUTION', AttributeListEnum(resolution)))
+
+    al = AttributeList(attributes)
+
+    return al
+
+
+def master_playlist_to_string(master_timeline):
+    """Writes a master playlist describing the tracks"""
+
+    # start with a version number of 1, as features are encountered, we will
+    # update the version accordingly
+    version_requirements = set([1])
+
+    # TODO: detect rather than forcing version 6
+    version_requirements.add(6)
+
+    header_tags = copy.copy(
+        master_timeline.metadata.get(FORMAT_METADATA_KEY, {})
+    )
+
+    # Filter out any values from the HLS metadata that aren't meant to become
+    # tags, such as the directive to force an HLS master playlist
+    hls_md_blacklist = ['master_playlist']
+    for key in hls_md_blacklist:
+        try:
+            del(header_tags[key])
+        except KeyError:
+            pass
+
+    playlist_entries = []
+
+    # First declare the non-visual media
+    hls_type_count = {}
+    video_tracks = []
+    audio_tracks = [
+        t for t in master_timeline.tracks if
+        t.kind == otio.schema.TrackKind.Audio
+    ]
+    for track in master_timeline.tracks:
+        if track.kind == otio.schema.TrackKind.Video:
+            # video is done later, skip
+            video_tracks.append(track)
+            continue
+
+        # Determine the HLS type
+        hls_type = OTIO_TRACK_KIND_TO_HLS_TYPE[track.kind]
+
+        streaming_metadata = track.metadata.get(STREAMING_METADATA_KEY, {})
+
+        # Find the group name
+        try:
+            group_id = streaming_metadata['group_id']
+        except KeyError:
+            sub_id = hls_type_count.setdefault(hls_type, 1)
+            group_id = '{}{}'.format(hls_type, sub_id)
+            hls_type_count[hls_type] += 1
+
+        media_playlist_default_uri = "{}.m3u8".format(track.name)
+        try:
+            track_uri = track.metadata[FORMAT_METADATA_KEY].get(
+                'uri',
+                media_playlist_default_uri
+            )
+        except KeyError:
+            track_uri = media_playlist_default_uri
+
+        # Build the attribute list
+        attributes = AttributeList(
+            [
+                ('TYPE', hls_type),
+                ('GROUP-ID', group_id),
+                ('URI', track_uri),
+                ('NAME', track.name),
+            ]
+        )
+
+        if streaming_metadata.get('autoselect'):
+            attributes['AUTOSELECT'] = AttributeListEnum('YES')
+
+        if streaming_metadata.get('default'):
+            attributes['DEFAULT'] = AttributeListEnum('YES')
+
+        # Finally, create the tag
+        entry = HLSPlaylistEntry.tag_entry(
+            'EXT-X-MEDIA',
+            str(attributes)
+        )
+
+        playlist_entries.append(entry)
+
+    # Add a blank line in the playlist to separate sections
+    if playlist_entries:
+        playlist_entries.append(HLSPlaylistEntry.comment_entry(''))
+
+    # First write any i-frame playlist entries
+    iframe_list_entries = []
+    for track in video_tracks:
+        try:
+            iframe_uri = track.metadata[FORMAT_METADATA_KEY]['iframe_uri']
+        except KeyError:
+            # don't include iframe playlist
+            continue
+
+        # Create the attribute list
+        attribute_list = stream_inf_attr_list_for_track(track)
+
+        # Remove 
entries that do not apply to I-Frame streams
+        for attr in I_FRAME_OMIT_ATTRS:
+            try:
+                del(attribute_list[attr])
+            except KeyError:
+                pass
+
+        # Add the URI
+        attribute_list['URI'] = iframe_uri
+
+        iframe_list_entries.append(
+            HLSPlaylistEntry.tag_entry(
+                'EXT-X-I-FRAME-STREAM-INF',
+                str(attribute_list)
+            )
+        )
+
+    if iframe_list_entries:
+        iframe_list_entries.append(HLSPlaylistEntry.comment_entry(''))
+
+    playlist_entries.extend(iframe_list_entries)
+
+    # Write an EXT-X-STREAM-INF for each rendition set
+    for track in video_tracks:
+        # create the base attribute list for the video track
+        al = stream_inf_attr_list_for_track(track)
+
+        # Create the uri
+        media_playlist_default_uri = "{}.m3u8".format(track.name)
+        try:
+            track_uri = track.metadata[FORMAT_METADATA_KEY].get(
+                'uri', media_playlist_default_uri
+            )
+        except KeyError:
+            track_uri = media_playlist_default_uri
+        uri_entry = HLSPlaylistEntry.uri_entry(track_uri)
+
+        # TODO: this will break when we have subtitle and CC tracks
+        added_entry = False
+        for audio_track in audio_tracks:
+            if track.name not in audio_track.metadata['linked_tracks']:
+                continue
+
+            # Write an entry for using these together
+            try:
+                audio_track_streaming_metadata = audio_track.metadata[
+                    STREAMING_METADATA_KEY
+                ]
+                aud_group = audio_track_streaming_metadata['group_id']
+                aud_codec = audio_track_streaming_metadata['codec']
+                aud_bandwidth = audio_track_streaming_metadata['bandwidth']
+            except KeyError:
+                raise TypeError(
+                    "HLS audio tracks must have 'codec', 'group_id', and"
+                    " 'bandwidth' specified in metadata"
+                )
+
+            combo_al = copy.copy(al)
+            combo_al['CODECS'] += ',{}'.format(aud_codec)
+            combo_al['AUDIO'] = aud_group
+            combo_al['BANDWIDTH'] += aud_bandwidth
+
+            entry = HLSPlaylistEntry.tag_entry(
+                'EXT-X-STREAM-INF',
+                str(combo_al)
+            )
+            playlist_entries.append(entry)
+            playlist_entries.append(uri_entry)
+
+            added_entry = True
+
+        if not added_entry:
+            # write out one simple entry
+            entry = HLSPlaylistEntry.tag_entry(
+                'EXT-X-STREAM-INF',
+                str(al)
+            )
+            playlist_entries.append(entry)
+            playlist_entries.append(uri_entry)
+
+        # add a break before the next grouping of entries
+        playlist_entries.append(HLSPlaylistEntry.comment_entry(''))
+
+    out_entries = [HLSPlaylistEntry.tag_entry(PLAYLIST_START_TAG, None)]
+
+    playlist_version = max(version_requirements)
+    playlist_version_entry = HLSPlaylistEntry.tag_entry(
+        PLAYLIST_VERSION_TAG,
+        str(playlist_version)
+    )
+
+    out_entries.append(playlist_version_entry)
+
+    out_entries += (
+        HLSPlaylistEntry.tag_entry(k, v) for k, v in header_tags.items()
+    )
+
+    # separate the header entries from the rest of the entries
+    out_entries.append(HLSPlaylistEntry.comment_entry(''))
+
+    out_entries += playlist_entries
+
+    playlist_string = '\n'.join(
+        (str(entry) for entry in out_entries)
+    )
+
+    return playlist_string
+
+
+class MediaPlaylistWriter():
+
+    def __init__(
+        self,
+        media_track,
+        min_seg_duration=None,
+        max_seg_duration=None
+    ):
+        # Default to one segment per fragment
+        if min_seg_duration is None:
+            min_seg_duration = otio.opentime.RationalTime(0, 1)
+        if max_seg_duration is None:
+            max_seg_duration = otio.opentime.RationalTime(0, 1)
+
+        self._min_seg_duration = min_seg_duration
+        self._max_seg_duration = max_seg_duration
+
+        self._playlist_entries = []
+        self._playlist_tags = {}
+
+        # Whenever an entry is added that has a minimum version requirement,
+        # we add that version to this set. 
The max value from this set is the
+        # playlist's version requirement
+        self._versions_used = set([1])
+
+        # TODO: detect rather than forcing version 7
+        self._versions_used.add(7)
+
+        # Start the build
+        self._build_playlist_with_track(media_track)
+
+    def _build_playlist_with_track(self, media_track):
+        """
+        Executes methods to result in a fully populated _playlist_entries list
+        """
+        self._copy_HLS_metadata(media_track)
+        self._setup_track_info(media_track)
+        self._add_segment_entries(media_track)
+        self._finalize_entries(media_track)
+
+    def _copy_HLS_metadata(self, media_track):
+        """
+        Copies any metadata in the "HLS" namespace from the track to the
+        playlist-global tags
+        """
+        # Grab any metadata provided on the otio
+        try:
+            track_metadata = media_track.metadata[FORMAT_METADATA_KEY]
+            self._playlist_tags.update(track_metadata)
+
+            # Remove the version tag from the track metadata, we'll compute
+            # based on what we write out
+            del(self._playlist_tags[PLAYLIST_VERSION_TAG])
+
+        except KeyError:
+            pass
+
+        # additionally remove metadata keys added for providing master
+        # playlist URIs
+        for key in ('uri', 'iframe_uri'):
+            try:
+                del(self._playlist_tags[key])
+            except KeyError:
+                pass
+
+    def _setup_track_info(self, media_track):
+        """sets up playlist global metadata"""
+
+        # Setup the track start
+        if 'EXT-X-I-FRAMES-ONLY' in media_track.metadata.get(
+            FORMAT_METADATA_KEY,
+            {}
+        ):
+            # I-Frame playlists start at zero no matter what
+            track_start = 0
+        else:
+            # Pull the track num from the first clip, if provided
+            first_segment_streaming_metadata = media_track[0].metadata.get(
+                STREAMING_METADATA_KEY,
+                {}
+            )
+            track_start = first_segment_streaming_metadata.get(
+                SEQUENCE_NUM_KEY
+            )
+
+        # If we found a track start or one isn't already set in the
+        # metadata, create the tag for it.
+        if (
+            track_start is not None or
+            'EXT-X-MEDIA-SEQUENCE' not in self._playlist_tags
+        ):
+            # Choose a reasonable track start default
+            if track_start is None:
+                track_start = 1
+            self._playlist_tags['EXT-X-MEDIA-SEQUENCE'] = str(track_start)
+
+    def _add_map_entry(self, fragment):
+        """adds an EXT-X-MAP entry from the given fragment
+
+        returns the added entry
+        """
+
+        media_ref = fragment.media_reference
+
+        # Extract useful tag data
+        media_ref_streaming_metadata = media_ref.metadata[
+            STREAMING_METADATA_KEY
+        ]
+        uri = media_ref_streaming_metadata[INIT_URI_KEY]
+        seg_map_byterange_dict = media_ref_streaming_metadata.get(
+            INIT_BYTERANGE_KEY
+        )
+
+        # Create the attrlist
+        map_attr_list = AttributeList([
+            ('URI', uri),
+        ])
+
+        # Add the byterange if provided
+        if seg_map_byterange_dict is not None:
+            seg_map_byterange = Byterange.from_dict(seg_map_byterange_dict)
+            map_attr_list['BYTERANGE'] = str(seg_map_byterange)
+
+        # Construct the entry with the attrlist as the value
+        map_tag_str = str(map_attr_list)
+        entry = HLSPlaylistEntry.tag_entry("EXT-X-MAP", map_tag_str)
+
+        self._playlist_entries.append(entry)
+
+        return entry
+
+    def _add_entries_for_segment_from_fragments(
+        self,
+        fragments,
+        omit_hls_keys=None,
+        is_iframe_playlist=False
+    ):
+        """
+        For the given list of otio clips representing fragments in the mp4,
+        add playlist entries for a single HLS segment.
+
+        :param fragments: (:class:`list`) :class:`opentimelineio.schema.Clip`
+            objects to write as a contiguous segment.
+        :param omit_hls_keys: (:class:`list`) metadata keys from the original
+            "HLS" metadata namespaces will not be passed through.
+        :param is_iframe_playlist: (:class:`bool`) If true, writes one segment
+            per fragment, otherwise writes all fragments as a single segment
+
+        :return: (:class:`list`) the :class:`HLSPlaylistEntry` instances added
+            to the playlist
+        """
+        if is_iframe_playlist:
+            entries = []
+            for fragment in fragments:
+                name = ''
+                fragment_range = Byterange.from_dict(
+                    fragment.media_reference.metadata[STREAMING_METADATA_KEY]
+                )
+
+                segment_tags = {}
+                frag_tags = fragment.media_reference.metadata.get(
+                    FORMAT_METADATA_KEY,
+                    {}
+                )
+                segment_tags.update(copy.deepcopy(frag_tags))
+
+                # scrub any metadata marked for omission
+                omit_hls_keys = omit_hls_keys or []
+                for key in omit_hls_keys:
+                    try:
+                        del(segment_tags[key])
+                    except KeyError:
+                        pass
+
+                segment_entries = entries_for_segment(
+                    fragment.media_reference.target_url,
+                    fragment.duration(),
+                    name,
+                    fragment_range,
+                    segment_tags
+                )
+                entries.extend(segment_entries)
+
+            self._playlist_entries.extend(entries)
+            return entries
+
+        segment_tags = {}
+        for fragment in fragments:
+            frag_tags = fragment.media_reference.metadata.get(
+                FORMAT_METADATA_KEY,
+                {}
+            )
+            segment_tags.update(copy.deepcopy(frag_tags))
+
+        # scrub any metadata marked for omission
+        omit_hls_keys = omit_hls_keys or []
+        for key in omit_hls_keys:
+            try:
+                del(segment_tags[key])
+            except KeyError:
+                pass
+
+        # Calculate the byterange for the segment (if byteranges are specified)
+        first_ref = fragments[0].media_reference
+        first_ref_streaming_md = first_ref.metadata[STREAMING_METADATA_KEY]
+        if 'byte_offset' in first_ref_streaming_md and len(fragments) == 1:
+            segment_range = Byterange.from_dict(first_ref_streaming_md)
+        elif 'byte_offset' in first_ref_streaming_md:
+            # Find the byterange encapsulating everything
+            last_ref = fragments[-1].media_reference
+            last_ref_streaming_md = last_ref.metadata[STREAMING_METADATA_KEY]
+            first_range = Byterange.from_dict(first_ref_streaming_md)
+            last_range = Byterange.from_dict(last_ref_streaming_md)
+
+            segment_offset = first_range.offset
+            segment_end = (last_range.offset + last_range.count)
+            segment_count = segment_end - segment_offset
+            segment_range = Byterange(segment_count, segment_offset)
+        else:
+            segment_range = None
+
+        uri = fragments[0].media_reference.target_url
+
+        # calculate the combined duration
+        segment_duration = fragments[0].duration()
+        for frag in fragments[1:]:
+            segment_duration += frag.duration()
+
+        # TODO: Determine how to pass a segment name in
+        segment_name = ''
+        segment_entries = entries_for_segment(
+            uri,
+            segment_duration,
+            segment_name,
+            segment_range,
+            segment_tags
+        )
+
+        self._playlist_entries.extend(segment_entries)
+        return segment_entries
+
+    def _fragments_have_same_map(self, fragment, following_fragment):
+        """
+        Given fragment and following_fragment, returns whether or not their
+        initialization data is the same (what becomes EXT-X-MAP)
+        """
+        media_ref = fragment.media_reference
+        media_ref_streaming_md = media_ref.metadata.get(
+            STREAMING_METADATA_KEY,
+            {}
+        )
+        following_ref = following_fragment.media_reference
+        following_ref_streaming_md = following_ref.metadata.get(
+            STREAMING_METADATA_KEY,
+            {}
+        )
+        # Check the init file
+        init_uri = media_ref_streaming_md.get(INIT_URI_KEY)
+        following_init_uri = following_ref_streaming_md.get(INIT_URI_KEY)
+        if init_uri != following_init_uri:
+            return False
+
+        # Check the init byterange
+        init_dict = media_ref_streaming_md.get(INIT_BYTERANGE_KEY)
+        following_init_dict = following_ref_streaming_md.get(
+            INIT_BYTERANGE_KEY
+        )
+
+        
dummy_range = Byterange(0, 0)
+        init_range = (
+            Byterange.from_dict(init_dict) if init_dict else dummy_range
+        )
+        following_range = (
+            Byterange.from_dict(following_init_dict)
+            if following_init_dict else dummy_range
+        )
+
+        if init_range != following_range:
+            return False
+
+        return True
+
+    def _fragments_are_contiguous(self, fragment, following_fragment):
+        """ Given fragment and following_fragment (otio clips) returns whether
+        or not they are contiguous.
+
+        To be contiguous the fragments must:
+        1. have the same file URL
+        2. have the same initialization data (what becomes EXT-X-MAP)
+        3. be adjacent in the file (following_fragment's first byte directly
+           follows fragment's last byte)
+
+        Returns True if following_fragment is contiguous from fragment
+        """
+        # Fragments are contiguous if:
+        # 1. They have the same file URL
+        # 2. They have the same map info
+        # 3. Their byte ranges are contiguous
+        media_ref = fragment.media_reference
+        media_ref_streaming_md = media_ref.metadata.get(
+            STREAMING_METADATA_KEY,
+            {}
+        )
+        following_ref = following_fragment.media_reference
+        following_ref_streaming_md = following_ref.metadata.get(
+            STREAMING_METADATA_KEY,
+            {}
+        )
+        if media_ref.target_url != following_ref.target_url:
+            return False
+
+        if (
+            media_ref_streaming_md.get(INIT_URI_KEY) !=
+            following_ref_streaming_md.get(INIT_URI_KEY)
+        ):
+            return False
+
+        if not self._fragments_have_same_map(fragment, following_fragment):
+            return False
+
+        # Check if fragments are contiguous in file
+        try:
+            frag_end = (
+                media_ref_streaming_md['byte_offset'] +
+                media_ref_streaming_md['byte_count']
+            )
+            if frag_end != following_ref_streaming_md['byte_offset']:
+                return False
+        except KeyError:
+            return False
+
+        # since we haven't returned yet, all checks must have passed!
+        return True
+
+    def _add_segment_entries(self, media_track):
+        """given a media track, generates the segment entries"""
+
+        # Determine whether or not this is an I-Frame playlist
+        track_hls_metadata = media_track.metadata.get('HLS', {})
+        is_iframe_playlist = 'EXT-X-I-FRAMES-ONLY' in track_hls_metadata
+
+        # Make a list copy of the fragments
+        fragments = [clip for clip in media_track]
+
+        segment_durations = []
+        previous_fragment = None
+        map_changed = True
+        while fragments:
+            # There should be at least one fragment per segment
+            frag_it = iter(fragments)
+            first_frag = next(frag_it)
+            gathered_fragments = [first_frag]
+            gathered_duration = first_frag.duration()
+
+            # Determine whether this segment will need a new EXT-X-MAP entry
+            map_changed = (
+                True if previous_fragment is None else
+                not self._fragments_have_same_map(
+                    previous_fragment,
+                    first_frag
+                )
+            )
+
+            # Iterate through the remaining fragments until a discontinuity
+            # is found, our time limit is met, or we add all the fragments to
+            # the segment
+            for fragment in frag_it:
+                # Determine whether or not the fragments are contiguous
+                previous_fragment = gathered_fragments[-1]
+                contiguous = self._fragments_are_contiguous(
+                    previous_fragment,
+                    fragment
+                )
+
+                # Determine if we've hit our segment time conditions
+                new_duration = gathered_duration + fragment.duration()
+                segment_full = (
+                    gathered_duration >= self._min_seg_duration or
+                    new_duration > self._max_seg_duration
+                )
+
+                # End condition met, cut the segment
+                if not contiguous or segment_full:
+                    break
+
+                # Include the fragment
+                gathered_duration = new_duration
+                gathered_fragments.append(fragment)
+
+            # Write out the segment and start the next
+            start_fragment = gathered_fragments[0]
+
+            # If the map for this segment was a change, write it
+            if map_changed:
+                self._add_map_entry(start_fragment)
+
+            # add the entries for the segment. 
Omit any EXT-X-MAP metadata
+            # that may have come in from reading a file (we're updating)
+            self._add_entries_for_segment_from_fragments(
+                gathered_fragments,
+                omit_hls_keys=('EXT-X-MAP',),
+                is_iframe_playlist=is_iframe_playlist
+            )
+
+            duration_seconds = otio.opentime.to_seconds(gathered_duration)
+            segment_durations.append(duration_seconds)
+
+            # in the next iteration, start where we left off
+            fragments = fragments[len(gathered_fragments):]
+
+        # Set the max segment duration
+        max_duration = round(max(segment_durations))
+        self._playlist_tags['EXT-X-TARGETDURATION'] = str(int(max_duration))
+
+    def _finalize_entries(self, media_track):
+        """Does final wrap-up of playlist entries"""
+
+        self._playlist_tags['EXT-X-PLAYLIST-TYPE'] = 'VOD'
+
+        # add the end
+        end_entry = HLSPlaylistEntry.tag_entry(PLAYLIST_END_TAG)
+        self._playlist_entries.append(end_entry)
+
+        # find the maximum HLS feature version we've used
+        playlist_version = max(self._versions_used)
+        playlist_version_entry = HLSPlaylistEntry.tag_entry(
+            PLAYLIST_VERSION_TAG,
+            str(playlist_version)
+        )
+
+        # now that we know what was used, let's prepend the header
+        playlist_header_entries = [
+            HLSPlaylistEntry.tag_entry(PLAYLIST_START_TAG),
+            playlist_version_entry
+        ]
+
+        # add in the rest of the header entries in a deterministic order
+        playlist_header_entries += (
+            HLSPlaylistEntry.tag_entry(k, v)
+            for k, v in sorted(self._playlist_tags.items(), key=lambda i: i[0])
+        )
+
+        # Prepend the entries with the header entries
+        self._playlist_entries = (
+            playlist_header_entries + self._playlist_entries
+        )
+
+    def playlist_string(self):
+        """Returns the string representation of the playlist entries"""
+
+        return '\n'.join(
+            (str(entry) for entry in self._playlist_entries)
+        )
+
+# Public interface
+
+
+def read_from_string(input_str):
+    """Adapter entry point for reading."""
+
+    parser = HLSPlaylistParser(input_str)
+    return parser.timeline
+
+
+def write_to_string(input_otio):
+    """Adapter entry point for writing."""
+
+    if len(input_otio.tracks) == 0:
+        return None
+
+    # Determine whether we should write a media or master playlist
+    try:
+        write_master = input_otio.metadata['HLS']['master_playlist']
+    except KeyError:
+        # If no explicit directive, infer
+        write_master = (len(input_otio.tracks) > 1)
+
+    if write_master:
+        return master_playlist_to_string(input_otio)
+    else:
+        media_track = input_otio.tracks[0]
+        track_streaming_md = input_otio.metadata.get(
+            STREAMING_METADATA_KEY,
+            {}
+        )
+        min_seg_duration = track_streaming_md.get('min_segment_duration')
+        max_seg_duration = track_streaming_md.get('max_segment_duration')
+
+        writer = MediaPlaylistWriter(
+            media_track,
+            min_seg_duration,
+            max_seg_duration
+        )
+        return writer.playlist_string()
diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/maya_sequencer.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/maya_sequencer.py
new file mode 100644
index 00000000000..03e6cf87637
--- /dev/null
+++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/maya_sequencer.py
@@ -0,0 +1,132 @@
+#
+# Copyright 2017 Pixar Animation Studios
+#
+# Licensed under the Apache License, Version 2.0 (the "Apache License")
+# with the following modification; you may not use this file except in
+# compliance with the Apache License and the following modification to it:
+# Section 6. Trademarks. is deleted and replaced with:
+#
+# 6. Trademarks. 
This License does not grant permission to use the trade
+# names, trademarks, service marks, or product names of the Licensor
+# and its affiliates, except as required to comply with Section 4(c) of
+# the License and to reproduce the content of the NOTICE file.
+#
+# You may obtain a copy of the Apache License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the Apache License with the above modification is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the Apache License for the specific
+# language governing permissions and limitations under the Apache License.
+#
+
+"""Maya Sequencer Adapter Harness"""
+
+import os
+import subprocess
+
+from .. import adapters
+
+
+def write_to_file(input_otio, filepath):
+    if "OTIO_MAYA_PYTHON_BIN" not in os.environ:
+        raise RuntimeError(
+            "'OTIO_MAYA_PYTHON_BIN' not set, please set this to the path to "
+            "mayapy within the Maya installation."
+        )
+    maya_python_path = os.environ["OTIO_MAYA_PYTHON_BIN"]
+    if not os.path.exists(maya_python_path):
+        raise RuntimeError(
+            'Cannot access file at OTIO_MAYA_PYTHON_BIN: "{}"'.format(
+                maya_python_path
+            )
+        )
+    if os.path.isdir(maya_python_path):
+        raise RuntimeError(
+            "OTIO_MAYA_PYTHON_BIN contains a path to a directory, not to an "
+            "executable file: {}".format(maya_python_path)
+        )
+
+    input_data = adapters.write_to_string(input_otio, "otio_json")
+
+    os.environ['PYTHONPATH'] = (
+        os.pathsep.join(
+            [
+                os.environ.setdefault('PYTHONPATH', ''),
+                os.path.dirname(__file__)
+            ]
+        )
+    )
+
+    proc = subprocess.Popen(
+        [
+            os.environ["OTIO_MAYA_PYTHON_BIN"],
+            '-m',
+            'extern_maya_sequencer',
+            'write',
+            filepath
+        ],
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        stdin=subprocess.PIPE,
+        env=os.environ
+    )
+    proc.stdin.write(input_data)
+    out, err = proc.communicate()
+
+    if proc.returncode:
+        raise RuntimeError(
+            "ERROR: extern_maya_sequencer (called through the maya sequencer "
+            "file adapter) failed. stderr output: " + err
+        )
+
+
+def read_from_file(filepath):
+    if "OTIO_MAYA_PYTHON_BIN" not in os.environ:
+        raise RuntimeError(
+            "'OTIO_MAYA_PYTHON_BIN' not set, please set this to the path to "
+            "mayapy within the Maya installation."
+        )
+
+    os.environ['PYTHONPATH'] = (
+        os.pathsep.join(
+            [
+                os.environ.setdefault('PYTHONPATH', ''),
+                os.path.dirname(__file__)
+            ]
+        )
+    )
+
+    proc = subprocess.Popen(
+        [
+            os.environ["OTIO_MAYA_PYTHON_BIN"],
+            '-m',
+            'extern_maya_sequencer',
+            'read',
+            filepath
+        ],
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        stdin=subprocess.PIPE,
+        env=os.environ
+    )
+    out, err = proc.communicate()
+
+    # maya probably puts a bunch of crap on the stdout
+    sentinel_str = "OTIO_JSON_BEGIN\n"
+    end_sentinel_str = "\nOTIO_JSON_END\n"
+    start = out.find(sentinel_str)
+    end = out.find(end_sentinel_str)
+    result = adapters.read_from_string(
+        out[start + len(sentinel_str):end],
+        "otio_json"
+    )
+
+    if proc.returncode:
+        raise RuntimeError(
+            "ERROR: extern_maya_sequencer (called through the maya sequencer "
+            "file adapter) failed. 
stderr output: " + err + ) + return result diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/rv.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/rv.py new file mode 100644 index 00000000000..33d00ce8c79 --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/rv.py @@ -0,0 +1,84 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +"""RvSession Adapter harness""" + +import subprocess +import os +import copy + +from .. import adapters + + +def write_to_file(input_otio, filepath): + if "OTIO_RV_PYTHON_BIN" not in os.environ: + raise RuntimeError( + "'OTIO_RV_PYTHON_BIN' not set, please set this to path to " + "py-interp within the RV installation." + ) + + if "OTIO_RV_PYTHON_LIB" not in os.environ: + raise RuntimeError( + "'OTIO_RV_PYTHON_LIB' not set, please set this to path to python " + "directory within the RV installation." + ) + + input_data = adapters.write_to_string(input_otio, "otio_json") + + base_environment = copy.deepcopy(os.environ) + + base_environment['PYTHONPATH'] = ( + os.pathsep.join( + [ + base_environment.setdefault('PYTHONPATH', ''), + os.path.dirname(__file__) + ] + ) + ) + + proc = subprocess.Popen( + [ + base_environment["OTIO_RV_PYTHON_BIN"], + '-m', + 'extern_rv', + filepath + ], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE, + env=base_environment + ) + proc.stdin.write(input_data) + out, err = proc.communicate() + + if out.strip(): + print("stdout: {}".format(out)) + if err.strip(): + print("stderr: {}".format(err)) + + if proc.returncode: + raise RuntimeError( + "ERROR: extern_rv (called through the rv session file adapter) " + "failed. stderr output: " + err + ) diff --git a/pype/vendor/python/python_2/opentimelineio_contrib/adapters/xges.py b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/xges.py new file mode 100644 index 00000000000..525a8a4649b --- /dev/null +++ b/pype/vendor/python/python_2/opentimelineio_contrib/adapters/xges.py @@ -0,0 +1,819 @@ +# +# Copyright (C) 2019 Igalia S.L +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. 
This License does not grant permission to use the trade
+# names, trademarks, service marks, or product names of the Licensor
+# and its affiliates, except as required to comply with Section 4(c) of
+# the License and to reproduce the content of the NOTICE file.
+#
+# You may obtain a copy of the Apache License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the Apache License with the above modification is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the Apache License for the specific
+# language governing permissions and limitations under the Apache License.
+#
+
+"""OpenTimelineIO GStreamer Editing Services XML Adapter. """
+import re
+import unittest
+
+from decimal import Decimal
+from fractions import Fraction
+from xml.etree import cElementTree
+from xml.dom import minidom
+import opentimelineio as otio
+
+META_NAMESPACE = "XGES"
+
+
+FRAMERATE_FRAMEDURATION = {23.98: "24000/1001",
+                           24: "600/25",
+                           25: "25/1",
+                           29.97: "30000/1001",
+                           30: "30/1",
+                           50: "50/1",
+                           59.94: "60000/1001",
+                           60: "60/1"}
+
+
+TRANSITION_MAP = {
+    "crossfade": otio.schema.TransitionTypes.SMPTE_Dissolve
+}
+# Two way map
+TRANSITION_MAP.update(dict([(v, k) for k, v in TRANSITION_MAP.items()]))
+
+
+class GstParseError(otio.exceptions.OTIOError):
+    pass
+
+
+class GstStructure(object):
+    """
+    GstStructure parser with a "dictionary" like API.
+    """
+    UNESCAPE = re.compile(r'(?<!\\)\\(.)')
+    INT_TYPES = "".join(
+        ("int", "uint", "int8", "uint8", "int16",
+         "uint16", "int32", "uint32", "int64", "uint64")
+    )
+
+    def __init__(self, text):
+        self.text = text
+        self.modified = False
+        self.name, self.types, self.values = GstStructure._parse(text + ";")
+
+    def __repr__(self):
+        if not self.modified:
+            return self.text
+
+        res = self.name
+        for key, value in self.values.items():
+            value_type = self.types[key]
+            res += ', %s=(%s)"%s"' % (key, value_type, self.escape(value))
+        res += ';'
+
+        return res
+
+    def __getitem__(self, key):
+        return self.values[key]
+
+    def set(self, key, value_type, value):
+        if self.types.get(key) == value_type and self.values.get(key) == value:
+            return
+
+        self.modified = True
+        self.types[key] = value_type
+        self.values[key] = value
+
+    def get(self, key, default=None):
+        return self.values.get(key, default)
+
+    @staticmethod
+    def _find_eos(s):
+        # find next '"' without preceding '\'
+        line = 0
+        while 1:  # faster than regexp for '[^\\]\"'
+            p = s.index('"')
+            line += p + 1
+            if s[p - 1] != '\\':
+                return line
+            s = s[(p + 1):]
+        return -1
+
+    @staticmethod
+    def escape(s):
+        # XXX: The unicode type doesn't exist in Python 3 (all strings are unicode)
+        # so we have to use type(u"") which works in both Python 2 and 3.
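+        # For example (illustrative): escape('My Clip') returns 'My\ Clip';
+        # spaces are backslash-escaped so the value survives GstStructure
+        # parsing as a single token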
+ if type(s) not in (str, type(u"")): + return s + return s.replace(" ", "\\ ") + + @staticmethod + def _parse(s): + in_string = s + types = {} + values = {} + scan = True + # parse id + p = s.find(',') + if p == -1: + try: + p = s.index(';') + except ValueError: + p = len(s) + scan = False + name = s[:p] + # parse fields + while scan: + comma_space_it = p + # skip 'name, ' / 'value, ' + while s[comma_space_it] in [' ', ',']: + comma_space_it += 1 + s = s[comma_space_it:] + p = s.index('=') + k = s[:p] + if not s[p + 1] == '(': + raise ValueError("In %s position: %d" % (in_string, p)) + s = s[(p + 2):] # skip 'key=(' + p = s.index(')') + t = s[:p] + s = s[(p + 1):] # skip 'type)' + + if s[0] == '"': + s = s[1:] # skip '"' + p = GstStructure._find_eos(s) + if p == -1: + raise ValueError + v = s[:(p - 1)] + if s[p] == ';': + scan = False + # unescape \., but not \\. (using a backref) + # need a reverse for re.escape() + v = v.replace('\\\\', '\\') + v = GstStructure.UNESCAPE.sub(r'\1', v) + else: + p = s.find(',') + if p == -1: + p = s.index(';') + scan = False + v = s[:p] + + if t == 'structure': + v = GstStructure(v) + elif t == 'string' and len(v) and v[0] == '"': + v = v[1:-1] + elif t == 'boolean': + v = (v == '1') + elif t in GstStructure.INT_TYPES: + v = int(v) + types[k] = t + values[k] = v + + return (name, types, values) + + +class GESTrackType: + UNKNOWN = 1 << 0 + AUDIO = 1 << 1 + VIDEO = 1 << 2 + TEXT = 1 << 3 + CUSTOM = 1 << 4 + + @staticmethod + def to_otio_type(_type): + if _type == GESTrackType.AUDIO: + return otio.schema.TrackKind.Audio + elif _type == GESTrackType.VIDEO: + return otio.schema.TrackKind.Video + + raise GstParseError("Can't translate track type %s" % _type) + + +GST_CLOCK_TIME_NONE = 18446744073709551615 +GST_SECOND = 1000000000 + + +def to_gstclocktime(rational_time): + """ + This converts a RationalTime object to a GstClockTime + + Args: + rational_time (RationalTime): This is a RationalTime object + + Returns: + int: A time in nanosecond + """ + + return int(rational_time.value_rescaled_to(1) * GST_SECOND) + + +def get_from_structure(xmlelement, fieldname, default=None, attribute="properties"): + structure = GstStructure(xmlelement.get(attribute, attribute)) + return structure.get(fieldname, default) + + +class XGES: + """ + This object is responsible for knowing how to convert an xGES + project into an otio timeline + """ + + def __init__(self, xml_string): + self.xges_xml = cElementTree.fromstring(xml_string) + self.rate = 25 + + def _set_rate_from_timeline(self, timeline): + metas = GstStructure(timeline.attrib.get("metadatas", "metadatas")) + framerate = metas.get("framerate") + if framerate: + rate = Fraction(framerate) + else: + video_track = timeline.find("./track[@track-type='4']") + rate = None + if video_track is not None: + properties = GstStructure( + video_track.get("properties", "properties;")) + restriction_caps = GstStructure(properties.get( + "restriction-caps", "restriction-caps")) + rate = restriction_caps.get("framerate") + + if rate is None: + return + + self.rate = float(Fraction(rate)) + if self.rate == int(self.rate): + self.rate = int(self.rate) + else: + self.rate = float(round(Decimal(self.rate), 2)) + + def to_rational_time(self, ns_timestamp): + """ + This converts a GstClockTime value to an otio RationalTime object + + Args: + ns_timestamp (int): This is a GstClockTime value (nanosecond absolute value) + + Returns: + RationalTime: A RationalTime object + """ + return otio.opentime.RationalTime(round(int(ns_timestamp) / + 
(GST_SECOND / self.rate)), self.rate) + + def to_otio(self): + """ + Convert an xges to an otio + + Returns: + OpenTimeline: An OpenTimeline Timeline object + """ + + project = self.xges_xml.find("./project") + metas = GstStructure(project.attrib.get("metadatas", "metadatas")) + otio_project = otio.schema.SerializableCollection( + name=metas.get('name'), + metadata={ + META_NAMESPACE: {"metadatas": project.attrib.get( + "metadatas", "metadatas")} + } + ) + timeline = project.find("./timeline") + self._set_rate_from_timeline(timeline) + + otio_timeline = otio.schema.Timeline( + name=metas.get('name', "unnamed"), + metadata={ + META_NAMESPACE: { + "metadatas": timeline.attrib.get("metadatas", "metadatas"), + "properties": timeline.attrib.get("properties", "properties") + } + } + ) + + all_names = set() + self._add_layers(timeline, otio_timeline, all_names) + otio_project.append(otio_timeline) + + return otio_project + + def _add_layers(self, timeline, otio_timeline, all_names): + for layer in timeline.findall("./layer"): + tracks = self._build_tracks_from_layer_clips(layer, all_names) + otio_timeline.tracks.extend(tracks) + + def _get_clips_for_type(self, clips, track_type): + if not clips: + return False + + clips_for_type = [] + for clip in clips: + if int(clip.attrib['track-types']) & track_type: + clips_for_type.append(clip) + + return clips_for_type + + def _build_tracks_from_layer_clips(self, layer, all_names): + all_clips = layer.findall('./clip') + + tracks = [] + for track_type in [GESTrackType.VIDEO, GESTrackType.AUDIO]: + clips = self._get_clips_for_type(all_clips, track_type) + if not clips: + continue + + track = otio.schema.Track() + track.kind = GESTrackType.to_otio_type(track_type) + self._add_clips_in_track(clips, track, all_names) + + tracks.append(track) + + return tracks + + def _add_clips_in_track(self, clips, track, all_names): + for clip in clips: + otio_clip = self._create_otio_clip(clip, all_names) + if otio_clip is None: + continue + + clip_offset = self.to_rational_time(int(clip.attrib['start'])) + if clip_offset > track.duration(): + track.append( + self._create_otio_gap( + 0, + (clip_offset - track.duration()) + ) + ) + + track.append(otio_clip) + + return track + + def _get_clip_name(self, clip, all_names): + i = 0 + tmpname = name = clip.get("name", GstStructure( + clip.get("properties", "properties;")).get("name")) + while True: + if tmpname not in all_names: + all_names.add(tmpname) + return tmpname + + i += 1 + tmpname = name + '_%d' % i + + def _create_otio_transition(self, clip, all_names): + start = self.to_rational_time(clip.attrib["start"]) + end = start + self.to_rational_time(clip.attrib["duration"]) + cut_point = otio.opentime.RationalTime((end.value - start.value) / + 2, start.rate) + + return otio.schema.Transition( + name=self._get_clip_name(clip, all_names), + transition_type=TRANSITION_MAP.get( + clip.attrib["asset-id"], otio.schema.TransitionTypes.Custom + ), + in_offset=cut_point, + out_offset=cut_point, + ) + + def _create_otio_uri_clip(self, clip, all_names): + source_range = otio.opentime.TimeRange( + start_time=self.to_rational_time(clip.attrib["inpoint"]), + duration=self.to_rational_time(clip.attrib["duration"]), + ) + + otio_clip = otio.schema.Clip( + name=self._get_clip_name(clip, all_names), + source_range=source_range, + media_reference=self._reference_from_id( + clip.get("asset-id"), clip.get("type-name")), + ) + + return otio_clip + + def _create_otio_clip(self, clip, all_names): + otio_clip = None + + if clip.get("type-name") == 
"GESTransitionClip": + otio_clip = self._create_otio_transition(clip, all_names) + elif clip.get("type-name") == "GESUriClip": + otio_clip = self._create_otio_uri_clip(clip, all_names) + + if otio_clip is None: + print("Could not represent: %s" % clip.attrib) + return None + + otio_clip.metadata[META_NAMESPACE] = { + "properties": clip.get("properties", "properties;"), + "metadatas": clip.get("metadatas", "metadatas;"), + } + + return otio_clip + + def _create_otio_gap(self, start, duration): + source_range = otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime(start), + duration=duration + ) + return otio.schema.Gap(source_range=source_range) + + def _reference_from_id(self, asset_id, asset_type="GESUriClip"): + asset = self._asset_by_id(asset_id, asset_type) + if asset is None: + return None + if not asset.get("id", ""): + return otio.schema.MissingReference() + + duration = GST_CLOCK_TIME_NONE + if asset_type == "GESUriClip": + duration = get_from_structure(asset, "duration", duration) + + available_range = otio.opentime.TimeRange( + start_time=self.to_rational_time(0), + duration=self.to_rational_time(duration) + ) + ref = otio.schema.ExternalReference( + target_url=asset.get("id"), + available_range=available_range + ) + + ref.metadata[META_NAMESPACE] = { + "properties": asset.get("properties"), + "metadatas": asset.get("metadatas"), + } + + return ref + + # -------------------- + # search helpers + # -------------------- + def _asset_by_id(self, asset_id, asset_type): + return self.xges_xml.find( + "./project/ressources/asset[@id='{}'][@extractable-type-name='{}']".format( + asset_id, asset_type) + ) + + def _timeline_element_by_name(self, timeline, name): + for clip in timeline.findall("./layer/clip"): + if get_from_structure(clip, 'name') == name: + return clip + + return None + + +class XGESOtio: + + def __init__(self, input_otio): + self.container = input_otio + self.rate = 25 + + def _insert_new_sub_element(self, into_parent, tag, attrib=None, text=''): + elem = cElementTree.SubElement(into_parent, tag, **attrib or {}) + elem.text = text + return elem + + def _get_element_properties(self, element): + return element.metadata.get(META_NAMESPACE, {}).get("properties", "properties;") + + def _get_element_metadatas(self, element): + return element.metadata.get(META_NAMESPACE, + {"GES": {}}).get("metadatas", "metadatas;") + + def _serialize_ressource(self, ressources, ressource, asset_type): + if isinstance(ressource, otio.schema.MissingReference): + return + + if ressources.find("./asset[@id='%s'][@extractable-type-name='%s']" % ( + ressource.target_url, asset_type)) is not None: + return + + properties = GstStructure(self._get_element_properties(ressource)) + if properties.get('duration') is None: + properties.set('duration', 'guin64', + to_gstclocktime(ressource.available_range.duration)) + + self._insert_new_sub_element( + ressources, 'asset', + attrib={ + "id": ressource.target_url, + "extractable-type-name": 'GESUriClip', + "properties": str(properties), + "metadatas": self._get_element_metadatas(ressource), + } + ) + + def _get_transition_times(self, offset, otio_transition): + rational_offset = otio.opentime.RationalTime( + round(int(offset) / (GST_SECOND / self.rate)), + self.rate + ) + start = rational_offset - otio_transition.in_offset + end = rational_offset + otio_transition.out_offset + + return 0, to_gstclocktime(start), to_gstclocktime(end - start) + + def _serialize_clip( + self, + otio_track, + layer, + layer_priority, + ressources, + otio_clip, + 
+class XGESOtio:
+
+    def __init__(self, input_otio):
+        self.container = input_otio
+        self.rate = 25
+
+    def _insert_new_sub_element(self, into_parent, tag, attrib=None, text=''):
+        elem = cElementTree.SubElement(into_parent, tag, **attrib or {})
+        elem.text = text
+        return elem
+
+    def _get_element_properties(self, element):
+        return element.metadata.get(META_NAMESPACE, {}).get("properties", "properties;")
+
+    def _get_element_metadatas(self, element):
+        return element.metadata.get(META_NAMESPACE,
+                                    {"GES": {}}).get("metadatas", "metadatas;")
+
+    def _serialize_ressource(self, ressources, ressource, asset_type):
+        if isinstance(ressource, otio.schema.MissingReference):
+            return
+
+        if ressources.find("./asset[@id='%s'][@extractable-type-name='%s']" % (
+                ressource.target_url, asset_type)) is not None:
+            return
+
+        properties = GstStructure(self._get_element_properties(ressource))
+        if properties.get('duration') is None:
+            properties.set('duration', 'guin64',
+                           to_gstclocktime(ressource.available_range.duration))
+
+        self._insert_new_sub_element(
+            ressources, 'asset',
+            attrib={
+                "id": ressource.target_url,
+                "extractable-type-name": 'GESUriClip',
+                "properties": str(properties),
+                "metadatas": self._get_element_metadatas(ressource),
+            }
+        )
+
+    def _get_transition_times(self, offset, otio_transition):
+        rational_offset = otio.opentime.RationalTime(
+            round(int(offset) / (GST_SECOND / self.rate)),
+            self.rate
+        )
+        start = rational_offset - otio_transition.in_offset
+        end = rational_offset + otio_transition.out_offset
+
+        return 0, to_gstclocktime(start), to_gstclocktime(end - start)
+
+    def _serialize_clip(
+            self,
+            otio_track,
+            layer,
+            layer_priority,
+            ressources,
+            otio_clip,
+            clip_id,
+            offset
+    ):
+
+        # FIXME - Figure out a proper way to determine clip type!
+        asset_id = "GESTitleClip"
+        asset_type = "GESTitleClip"
+
+        if isinstance(otio_clip, otio.schema.Transition):
+            asset_type = "GESTransitionClip"
+            asset_id = TRANSITION_MAP.get(otio_clip.transition_type, "crossfade")
+            inpoint, offset, duration = self._get_transition_times(offset, otio_clip)
+        else:
+            inpoint = to_gstclocktime(otio_clip.source_range.start_time)
+            duration = to_gstclocktime(otio_clip.source_range.duration)
+
+            if not isinstance(otio_clip.media_reference, otio.schema.MissingReference):
+                asset_id = otio_clip.media_reference.target_url
+                asset_type = "GESUriClip"
+
+            self._serialize_ressource(ressources, otio_clip.media_reference,
+                                      asset_type)
+
+        if otio_track.kind == otio.schema.TrackKind.Audio:
+            track_types = GESTrackType.AUDIO
+        elif otio_track.kind == otio.schema.TrackKind.Video:
+            track_types = GESTrackType.VIDEO
+        else:
+            raise ValueError("Unhandled track type: %s" % otio_track.kind)
+
+        properties = otio_clip.metadata.get(
+            META_NAMESPACE,
+            {
+                "properties": 'properties, name=(string)"%s"' % (
+                    GstStructure.escape(otio_clip.name)
+                )
+            }).get("properties")
+        return self._insert_new_sub_element(
+            layer, 'clip',
+            attrib={
+                "id": str(clip_id),
+                "properties": properties,
+                "asset-id": str(asset_id),
+                "type-name": str(asset_type),
+                "track-types": str(track_types),
+                "layer-priority": str(layer_priority),
+                "start": str(offset),
+                "rate": '0',
+                "inpoint": str(inpoint),
+                "duration": str(duration),
+                "metadatas": self._get_element_metadatas(otio_clip),
+            }
+        )
+
+    def _serialize_tracks(self, timeline, otio_timeline):
+        audio_vals = (
+            'properties',
+            'restriction-caps=(string)audio/x-raw(ANY)',
+            'framerate=(GstFraction)1',
+            otio_timeline.duration().rate
+        )
+
+        properties = '%s, %s,%s/%s' % audio_vals
+        self._insert_new_sub_element(
+            timeline, 'track',
+            attrib={
+                "caps": "audio/x-raw(ANY)",
+                "track-type": '2',
+                'track-id': '0',
+                'properties': properties
+            }
+        )
+
+        video_vals = (
+            'properties',
+            'restriction-caps=(string)video/x-raw(ANY)',
+            'framerate=(GstFraction)1',
+            otio_timeline.duration().rate
+        )
+
+        properties = '%s, %s,%s/%s' % video_vals
+        for otio_track in otio_timeline.tracks:
+            if otio_track.kind == otio.schema.TrackKind.Video:
+                self._insert_new_sub_element(
+                    timeline, 'track',
+                    attrib={
+                        "caps": "video/x-raw(ANY)",
+                        "track-type": '4',
+                        'track-id': '1',
+                        'properties': properties,
+                    }
+                )
+
+                return
+
+    def _serialize_layer(self, timeline, layers, layer_priority):
+        if layer_priority not in layers:
+            layers[layer_priority] = self._insert_new_sub_element(
+                timeline, 'layer',
+                attrib={
+                    "priority": str(layer_priority),
+                }
+            )
+
+    def _serialize_timeline_element(self, timeline, layers, layer_priority,
+                                    offset, otio_track, otio_element,
+                                    ressources, all_clips):
+        self._serialize_layer(timeline, layers, layer_priority)
+        layer = layers[layer_priority]
+        if isinstance(otio_element, (otio.schema.Clip, otio.schema.Transition)):
+            element = self._serialize_clip(otio_track, layer, layer_priority,
+                                           ressources, otio_element,
+                                           str(len(all_clips)), offset)
+            all_clips.add(element)
+            if isinstance(otio_element, otio.schema.Transition):
+                # Make next clip overlap
+                return int(element.get("start")) - offset
+        elif not isinstance(otio_element, otio.schema.Gap):
+            print("FIXME: Add support for %s" % type(otio_element))
+            return 0
+
+        return to_gstclocktime(otio_element.source_range.duration)
+
+    def _make_element_names_unique(self, all_names, otio_element):
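+        # Recursively give elements unique names by appending "_<n>" on a
+        # collision; Gaps are skipped because they are not serialized as
+        # named clips.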
+        if isinstance(otio_element, otio.schema.Gap):
+            return
+
+        if not isinstance(otio_element, otio.schema.Track):
+            i = 0
+            name = otio_element.name
+            while True:
+                if name not in all_names:
+                    otio_element.name = name
+                    break
+
+                i += 1
+                name = otio_element.name + '_%d' % i
+            all_names.add(otio_element.name)
+
+        if isinstance(otio_element, (otio.schema.Stack, otio.schema.Track)):
+            for sub_element in otio_element:
+                self._make_element_names_unique(all_names, sub_element)
+
+    def _make_timeline_elements_names_unique(self, otio_timeline):
+        element_names = set()
+        for track in otio_timeline.tracks:
+            for element in track:
+                self._make_element_names_unique(element_names, element)
+
+    def _serialize_timeline(self, project, ressources, otio_timeline):
+        metadatas = GstStructure(self._get_element_metadatas(otio_timeline))
+        metadatas.set(
+            "framerate", "fraction", self._framerate_to_frame_duration(
+                otio_timeline.duration().rate
+            )
+        )
+        timeline = self._insert_new_sub_element(
+            project, 'timeline',
+            attrib={
+                "properties": self._get_element_properties(otio_timeline),
+                "metadatas": str(metadatas),
+            }
+        )
+        self._serialize_tracks(timeline, otio_timeline)
+
+        self._make_timeline_elements_names_unique(otio_timeline)
+
+        all_clips = set()
+        layers = {}
+        for layer_priority, otio_track in enumerate(otio_timeline.tracks):
+            self._serialize_layer(timeline, layers, layer_priority)
+            offset = 0
+            for otio_element in otio_track:
+                offset += self._serialize_timeline_element(
+                    timeline, layers, layer_priority, offset,
+                    otio_track, otio_element, ressources, all_clips,
+                )
+
+        for layer in layers.values():
+            layer[:] = sorted(layer, key=lambda child: int(child.get("start")))
+
+    # --------------------
+    # static methods
+    # --------------------
+    @staticmethod
+    def _framerate_to_frame_duration(framerate):
+        frame_duration = FRAMERATE_FRAMEDURATION.get(int(framerate), "")
+        if not frame_duration:
+            frame_duration = FRAMERATE_FRAMEDURATION.get(float(framerate), "")
+        return frame_duration
+
+    def to_xges(self):
+        xges = cElementTree.Element('ges', version="0.4")
+
+        metadatas = GstStructure(self._get_element_metadatas(self.container))
+        if self.container.name is not None:
+            metadatas.set("name", "string", self.container.name)
+        if not isinstance(self.container, otio.schema.Timeline):
+            project = self._insert_new_sub_element(
+                xges, 'project',
+                attrib={
+                    "properties": self._get_element_properties(self.container),
+                    "metadatas": str(metadatas),
+                }
+            )
+
+            if len(self.container) > 1:
+                print(
+                    "WARNING: Only one timeline supported, using *only* the first one.")
+
+            otio_timeline = self.container[0]
+
+        else:
+            project = self._insert_new_sub_element(
+                xges, 'project',
+                attrib={
+                    "metadatas": str(metadatas),
+                }
+            )
+            otio_timeline = self.container
+
+        ressources = self._insert_new_sub_element(project, 'ressources')
+        self.rate = otio_timeline.duration().rate
+        self._serialize_timeline(project, ressources, otio_timeline)
+
+        # with indentations.
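+        # (cElementTree cannot pretty-print by itself, so serialize the
+        # tree and re-parse it with minidom to produce indented XML.)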
+        string = cElementTree.tostring(xges, encoding="UTF-8")
+        dom = minidom.parseString(string)
+        return dom.toprettyxml(indent='    ')
+
+
+# --------------------
+# adapter requirements
+# --------------------
+def read_from_string(input_str):
+    """
+    Required read method for the otio adapter
+
+    Args:
+        input_str (str): A GStreamer Editing Services formatted project
+
+    Returns:
+        SerializableCollection: A SerializableCollection wrapping the
+        converted otio Timeline
+    """
+
+    return XGES(input_str).to_otio()
+
+
+def write_to_string(input_otio):
+    """
+    Required write method for the otio adapter
+
+    Args:
+        input_otio (OpenTimeline): An OpenTimeline object
+
+    Returns:
+        str: The string contents of an xGES project
+    """
+
+    return XGESOtio(input_otio).to_xges()
+
+
+# --------------------
+# Some unit checks for internal types
+# --------------------
+
+class XGESTests(unittest.TestCase):
+
+    def test_gst_structure_parsing(self):
+        struct = GstStructure('properties, name=(string)"%s";' % (
+            GstStructure.escape("sc01 sh010_anim.mov"))
+        )
+        self.assertEqual(struct["name"], "sc01 sh010_anim.mov")
+
+    def test_gst_structure_editing(self):
+        struct = GstStructure('properties, name=(string)"%s";' % (
+            GstStructure.escape("sc01 sh010_anim.mov"))
+        )
+        self.assertEqual(struct["name"], "sc01 sh010_anim.mov")
+
+        struct.set("name", "string", "test")
+        self.assertEqual(struct["name"], "test")
+        self.assertEqual(str(struct), 'properties, name=(string)"test";')
+
+    def test_empty_string(self):
+        struct = GstStructure('properties, name=(string)"";')
+        self.assertEqual(struct["name"], "")
+
+
+if __name__ == '__main__':
+    unittest.main()
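+
+# A minimal usage sketch, assuming this adapter is registered under the
+# name "xges" in the contrib plugin manifest (file names below are only
+# illustrative):
+#
+#     import opentimelineio as otio
+#
+#     project = otio.adapters.read_from_file("example.xges", "xges")
+#     otio.adapters.write_to_file(project, "roundtrip.xges", "xges")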