Skip to content
This repository has been archived by the owner on Sep 20, 2024. It is now read-only.

DWAA/DWAB support on windows #795

Merged
merged 5 commits into from
Dec 17, 2020
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 9 additions & 1 deletion pype/lib/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,11 @@
filter_pyblish_plugins,
source_hash,
get_unique_layer_name,
get_background_layers
get_background_layers,
oiio_supported,
decompress,
get_decompress_dir,
should_decompress
)

from .path_tools import (
Expand Down Expand Up @@ -64,6 +68,10 @@
"filter_pyblish_plugins",
"get_unique_layer_name",
"get_background_layers",
"oiio_supported",
"decompress",
"get_decompress_dir",
"should_decompress",

"version_up",
"get_version_from_path",
Expand Down
101 changes: 101 additions & 0 deletions pype/lib/plugin_tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@
import logging
import re
import json
import pype.api
import tempfile

from ..api import config

Expand Down Expand Up @@ -134,3 +136,102 @@ def get_background_layers(file_url):
layer.get("filename")).
replace("\\", "/"))
return layers


def oiio_supported():
    """
    Checks if oiiotool is configured for this platform.

    A predicate: callers use ``if not oiio_supported(): return`` to skip
    decompression gracefully, so a missing/misconfigured tool must yield
    False rather than raise.

    Returns:
        (bool): True when 'PYPE_OIIO_PATH' points to an existing oiiotool
            binary, False otherwise.
    """
    oiio_path = os.getenv("PYPE_OIIO_PATH", "")
    if not oiio_path or not os.path.exists(oiio_path):
        # Report instead of raising IOError - raising here crashed every
        # caller that only wanted to skip the OIIO code path.
        logging.getLogger(__name__).debug(
            "oiiotool not found in `{}`".format(oiio_path))
        return False

    return True


def decompress(target_dir, file_url,
               input_frame_start=None, input_frame_end=None, log=None):
    """
    Decompresses DWAA-compressed '.exr' at 'file_url' into 'target_dir'.

    Uncompressed copies are written to 'target_dir'; the caller owns the
    cleanup of that directory.

    'file_url' may point to a single file or to a sequence; for a sequence
    it contains a %0Xd frame-number placeholder AND both input_frame_*
    arguments are provided. A single oiiotool run with '--frames' then
    handles the whole range, which is faster than looping per frame.

    Args:
        target_dir (str): extended from stagingDir
        file_url (str): full url to source file (with or without %0Xd)
        input_frame_start (int) (optional): first frame
        input_frame_end (int) (optional): last frame
        log (Logger) (optional): pype logger
    """
    is_sequence = (
        input_frame_start is not None
        and input_frame_end is not None
        and int(input_frame_end) > int(input_frame_start)
    )

    cmd_parts = [
        os.getenv("PYPE_OIIO_PATH"),
        "--compression none",
        file_url,
    ]

    if is_sequence:
        cmd_parts.append(
            "--frames {}-{}".format(input_frame_start, input_frame_end))

    cmd_parts.append("-o")
    cmd_parts.append(os.path.join(target_dir, os.path.basename(file_url)))

    subprocess_exr = " ".join(cmd_parts)

    if log is None:
        log = logging.getLogger(__name__)

    log.debug("Decompressing {}".format(subprocess_exr))
    pype.api.subprocess(
        subprocess_exr, shell=True, logger=log
    )


def get_decompress_dir():
    """
    Creates a temporary folder to decompress into.

    The folder is local to the machine running the job (on a farm, local
    to that farm node), which keeps writes fast. The caller must remove
    it afterwards.

    Returns:
        (str): normalized path of the freshly created directory.
    """
    temp_dir = tempfile.mkdtemp(prefix="pyblish_tmp_")
    return os.path.normpath(temp_dir)


def should_decompress(file_url):
    """
    Tests whether 'file_url' is DWAA/DWAB compressed.

    Relies on 'oiio_supported' to confirm the OIIO tool is available for
    this platform before probing the file.

    Args:
        file_url (str): path to rendered file (for a sequence, the first
            file; if that one is compressed the whole sequence is assumed
            to be as well)
    Returns:
        (bool): 'file_url' is DWAA/DWAB compressed and should be
            decompressed
    """
    if not oiio_supported():
        return False

    probe_cmd = [os.getenv("PYPE_OIIO_PATH"), "--info", "-v", file_url]
    output = pype.api.subprocess(probe_cmd)
    return (
        "compression: \"dwaa\"" in output
        or "compression: \"dwab\"" in output
    )
48 changes: 38 additions & 10 deletions pype/plugins/global/publish/extract_burnin.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,9 @@

import pype.api
import pyblish
from pype.lib import oiio_supported, should_decompress, \
get_decompress_dir, decompress
import shutil


class ExtractBurnin(pype.api.Extractor):
Expand All @@ -28,7 +31,8 @@ class ExtractBurnin(pype.api.Extractor):
"premiere",
"standalonepublisher",
"harmony",
"fusion"
"fusion",
"aftereffects"
]
optional = True

Expand All @@ -54,15 +58,16 @@ class ExtractBurnin(pype.api.Extractor):
def process(self, instance):
# ffmpeg doesn't support multipart exrs
if instance.data.get("multipartExr") is True:
instance_label = (
getattr(instance, "label", None)
or instance.data.get("label")
or instance.data.get("name")
)
self.log.info((
"Instance \"{}\" contain \"multipartExr\". Skipped."
).format(instance_label))
return
if not oiio_supported():
instance_label = (
getattr(instance, "label", None)
or instance.data.get("label")
or instance.data.get("name")
)
self.log.info((
"Instance \"{}\" contain \"multipartExr\". Skipped."
).format(instance_label))
return

# QUESTION what is this for and should we raise an exception?
if "representations" not in instance.data:
Expand Down Expand Up @@ -212,6 +217,26 @@ def main_process(self, instance):
# Prepare paths and files for process.
self.input_output_paths(new_repre, temp_data, filename_suffix)

decompressed_dir = ''
full_input_path = temp_data["full_input_path"]
do_decompress = should_decompress(full_input_path)
if do_decompress:
decompressed_dir = get_decompress_dir()

decompress(
decompressed_dir,
full_input_path,
temp_data["frame_start"],
temp_data["frame_end"],
self.log
)

# input path changed, 'decompressed' added
input_file = os.path.basename(full_input_path)
temp_data["full_input_path"] = os.path.join(
decompressed_dir,
input_file)

# Data for burnin script
script_data = {
"input": temp_data["full_input_path"],
Expand Down Expand Up @@ -271,6 +296,9 @@ def main_process(self, instance):
os.remove(filepath)
self.log.debug("Removed: \"{}\"".format(filepath))

if do_decompress and os.path.exists(decompressed_dir):
shutil.rmtree(decompressed_dir)

def prepare_basic_data(self, instance):
"""Pick data from instance for processing and for burnin strings.

Expand Down
29 changes: 23 additions & 6 deletions pype/plugins/global/publish/extract_jpeg.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,9 @@
import pyblish.api
import pype.api
import pype.lib
from pype.lib import oiio_supported, should_decompress, \
get_decompress_dir, decompress
import shutil


class ExtractJpegEXR(pyblish.api.InstancePlugin):
Expand All @@ -22,9 +25,11 @@ def process(self, instance):
if 'crypto' in instance.data['subset']:
return

# ffmpeg doesn't support multipart exrs
do_decompress = False
# ffmpeg doesn't support multipart exrs, use oiiotool if available
if instance.data.get("multipartExr") is True:
return
if not oiio_supported():
return

# Skip review when requested.
if not instance.data.get("review", True):
Expand All @@ -36,10 +41,6 @@ def process(self, instance):
# filter out mov and img sequences
representations_new = representations[:]

if instance.data.get("multipartExr"):
# ffmpeg doesn't support multipart exrs
return

for repre in representations:
tags = repre.get("tags", [])
self.log.debug(repre)
Expand All @@ -60,6 +61,19 @@ def process(self, instance):
full_input_path = os.path.join(stagingdir, input_file)
self.log.info("input {}".format(full_input_path))

decompressed_dir = ''
do_decompress = should_decompress(full_input_path)
if do_decompress:
decompressed_dir = get_decompress_dir()

decompress(
decompressed_dir,
full_input_path)
# input path changed, 'decompressed' added
full_input_path = os.path.join(
decompressed_dir,
input_file)

filename = os.path.splitext(input_file)[0]
if not filename.endswith('.'):
filename += "."
Expand Down Expand Up @@ -111,4 +125,7 @@ def process(self, instance):
self.log.debug("Adding: {}".format(representation))
representations_new.append(representation)

if do_decompress and os.path.exists(decompressed_dir):
shutil.rmtree(decompressed_dir)

instance.data["representations"] = representations_new
48 changes: 39 additions & 9 deletions pype/plugins/global/publish/extract_review.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,8 @@
import clique
import pype.api
import pype.lib
from pype.lib import oiio_supported, should_decompress, \
get_decompress_dir, decompress


class ExtractReview(pyblish.api.InstancePlugin):
Expand All @@ -14,7 +16,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
Compulsory attribute of representation is tags list with "review",
otherwise the representation is ignored.

All new represetnations are created and encoded by ffmpeg following
All new representations are created and encoded by ffmpeg following
presets found in `pype-config/presets/plugins/global/
publish.json:ExtractReview:outputs`.
"""
Expand Down Expand Up @@ -58,7 +60,9 @@ def process(self, instance):
return

# ffmpeg doesn't support multipart exrs
if instance.data.get("multipartExr") is True:
if instance.data.get("multipartExr") is True \
and not oiio_supported():

instance_label = (
getattr(instance, "label", None)
or instance.data.get("label")
Expand Down Expand Up @@ -318,9 +322,9 @@ def _ffmpeg_arguments(self, output_def, instance, new_repre, temp_data):
Args:
output_def (dict): Currently processed output definition.
instance (Instance): Currently processed instance.
new_repre (dict): Reprensetation representing output of this
new_repre (dict): Representation representing output of this
process.
temp_data (dict): Base data for successfull process.
temp_data (dict): Base data for successful process.
"""

# Get FFmpeg arguments from profile presets
Expand All @@ -331,9 +335,35 @@ def _ffmpeg_arguments(self, output_def, instance, new_repre, temp_data):
ffmpeg_video_filters = out_def_ffmpeg_args.get("video_filters") or []
ffmpeg_audio_filters = out_def_ffmpeg_args.get("audio_filters") or []

if isinstance(new_repre['files'], list):
input_files_urls = [os.path.join(new_repre["stagingDir"], f) for f
in new_repre['files']]
do_decompress = should_decompress(input_files_urls[0])
else:
test_path = os.path.join(
new_repre["stagingDir"], new_repre['files'])
do_decompress = should_decompress(test_path)

if do_decompress:
# change stagingDir, decompress first
# calculate all paths with modified directory, used on too many
# places
# will be purged by cleanup.py automatically
orig_staging_dir = new_repre["stagingDir"]
new_repre["stagingDir"] = get_decompress_dir()

# Prepare input and output filepaths
self.input_output_paths(new_repre, output_def, temp_data)

if do_decompress:
input_file = temp_data["full_input_path"].\
replace(new_repre["stagingDir"], orig_staging_dir)

decompress(new_repre["stagingDir"], input_file,
temp_data["frame_start"],
temp_data["frame_end"],
self.log)

# Set output frames len to 1 when ouput is single image
if (
temp_data["output_ext_is_image"]
Expand Down Expand Up @@ -930,7 +960,7 @@ def compile_list_of_regexes(self, in_list):
return regexes

def validate_value_by_regexes(self, value, in_list):
"""Validates in any regexe from list match entered value.
"""Validates in any regex from list match entered value.

Args:
in_list (list): List with regexes.
Expand All @@ -955,9 +985,9 @@ def validate_value_by_regexes(self, value, in_list):
def profile_exclusion(self, matching_profiles):
"""Find out most matching profile byt host, task and family match.

Profiles are selectivelly filtered. Each profile should have
Profiles are selectively filtered. Each profile should have
"__value__" key with list of booleans. Each boolean represents
existence of filter for specific key (host, taks, family).
existence of filter for specific key (host, tasks, family).
Profiles are looped in sequence. In each sequence are split into
true_list and false_list. For next sequence loop are used profiles in
true_list if there are any profiles else false_list is used.
Expand Down Expand Up @@ -1036,7 +1066,7 @@ def find_matching_profile(self, host_name, task_name, family):

highest_profile_points = -1
# Each profile get 1 point for each matching filter. Profile with most
# points is returnd. For cases when more than one profile will match
# points is returned. For cases when more than one profile will match
# are also stored ordered lists of matching values.
for profile in self.profiles:
profile_points = 0
Expand Down Expand Up @@ -1648,7 +1678,7 @@ def legacy_process(self, instance):

def add_video_filter_args(self, args, inserting_arg):
"""
Fixing video filter argumets to be one long string
Fixing video filter arguments to be one long string

Args:
args (list): list of string arguments
Expand Down